From 60c8eb795d936fb55f7dd32ccebe12125ad85f97 Mon Sep 17 00:00:00 2001 From: Luke McCrone Date: Wed, 17 Jul 2024 18:25:18 -0300 Subject: [PATCH] test: improve command coverage for tools, retention bypass --- tests/commands/abort_multipart_upload.sh | 2 +- tests/commands/complete_multipart_upload.sh | 2 +- tests/commands/copy_object.sh | 2 +- tests/commands/create_bucket.sh | 2 +- tests/commands/delete_bucket_policy.sh | 1 + tests/commands/delete_object.sh | 14 +- tests/commands/get_object_legal_hold.sh | 2 +- tests/commands/get_object_retention.sh | 5 +- tests/commands/list_objects_v2.sh | 18 ++ tests/commands/list_parts.sh | 14 ++ tests/commands/put_bucket_tagging.sh | 24 +++ .../commands/put_object_lock_configuration.sh | 27 +++ tests/commands/put_object_tagging.sh | 24 +++ tests/commands/upload_part.sh | 19 +++ tests/report.sh | 5 + tests/test_aws.sh | 40 ++++- tests/test_aws_root_inner.sh | 92 +++++++--- tests/test_common.sh | 62 +++---- tests/test_mc.sh | 9 +- tests/test_s3.sh | 8 + tests/test_s3cmd.sh | 6 +- tests/util.sh | 158 +++++++----------- 22 files changed, 367 insertions(+), 169 deletions(-) create mode 100644 tests/commands/list_objects_v2.sh create mode 100644 tests/commands/list_parts.sh create mode 100644 tests/commands/put_bucket_tagging.sh create mode 100644 tests/commands/put_object_lock_configuration.sh create mode 100644 tests/commands/put_object_tagging.sh create mode 100644 tests/commands/upload_part.sh diff --git a/tests/commands/abort_multipart_upload.sh b/tests/commands/abort_multipart_upload.sh index 2f9ff63..f40b753 100644 --- a/tests/commands/abort_multipart_upload.sh +++ b/tests/commands/abort_multipart_upload.sh @@ -14,11 +14,11 @@ abort_multipart_upload() { } abort_multipart_upload_with_user() { - record_command "abort-multipart-upload" "client:s3api" if [ $# -ne 5 ]; then log 2 "'abort multipart upload' command requires bucket, key, upload ID, username, password" return 1 fi + record_command "abort-multipart-upload" "client:s3api" if ! abort_multipart_upload_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" 2>&1); then log 2 "Error aborting upload: $abort_multipart_upload_error" export abort_multipart_upload_error diff --git a/tests/commands/complete_multipart_upload.sh b/tests/commands/complete_multipart_upload.sh index 21a6e51..47f4e96 100644 --- a/tests/commands/complete_multipart_upload.sh +++ b/tests/commands/complete_multipart_upload.sh @@ -1,12 +1,12 @@ #!/usr/bin/env bash complete_multipart_upload() { - record_command "complete-multipart-upload" "client:s3api" if [[ $# -ne 4 ]]; then log 2 "'complete multipart upload' command requires bucket, key, upload ID, parts list" return 1 fi log 5 "complete multipart upload id: $3, parts: $4" + record_command "complete-multipart-upload" "client:s3api" error=$(aws --no-verify-ssl s3api complete-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" --multipart-upload '{"Parts": '"$4"'}' 2>&1) || local completed=$? 
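# For reference, a minimal sketch of the parts list passed as "$4" above; the ETag
# values are illustrative placeholders, not output captured from this suite:
parts='[{"ETag": "etag-part-1", "PartNumber": 1}, {"ETag": "etag-part-2", "PartNumber": 2}]'
complete_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$upload_id" "$parts"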
if [[ $completed -ne 0 ]]; then log 2 "error completing multipart upload: $error" diff --git a/tests/commands/copy_object.sh b/tests/commands/copy_object.sh index f52293a..233490a 100644 --- a/tests/commands/copy_object.sh +++ b/tests/commands/copy_object.sh @@ -1,13 +1,13 @@ #!/usr/bin/env bash copy_object() { - record-command "copy-object" "client:$1" if [ $# -ne 4 ]; then echo "copy object command requires command type, source, bucket, key" return 1 fi local exit_code=0 local error + record_command "copy-object" "client:$1" if [[ $1 == 's3' ]]; then error=$(aws --no-verify-ssl s3 cp "$2" s3://"$3/$4" 2>&1) || exit_code=$? elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then diff --git a/tests/commands/create_bucket.sh b/tests/commands/create_bucket.sh index b4b3c23..cbf19f9 100644 --- a/tests/commands/create_bucket.sh +++ b/tests/commands/create_bucket.sh @@ -6,12 +6,12 @@ source ./tests/report.sh # param: bucket name # return 0 for success, 1 for failure create_bucket() { - record_command "create-bucket" "client:$1" if [ $# -ne 2 ]; then log 2 "create bucket missing command type, bucket name" return 1 fi + record_command "create-bucket" "client:$1" local exit_code=0 local error log 6 "create bucket" diff --git a/tests/commands/delete_bucket_policy.sh b/tests/commands/delete_bucket_policy.sh index 93ea687..fa83940 100644 --- a/tests/commands/delete_bucket_policy.sh +++ b/tests/commands/delete_bucket_policy.sh @@ -6,6 +6,7 @@ delete_bucket_policy() { log 2 "delete bucket policy command requires command type, bucket" return 1 fi + local delete_result=0 if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then error=$(aws --no-verify-ssl s3api delete-bucket-policy --bucket "$2" 2>&1) || delete_result=$? elif [[ $1 == 's3cmd' ]]; then diff --git a/tests/commands/delete_object.sh b/tests/commands/delete_object.sh index a4d9465..c96fbf9 100644 --- a/tests/commands/delete_object.sh +++ b/tests/commands/delete_object.sh @@ -28,6 +28,18 @@ delete_object() { return 0 } +delete_object_bypass_retention() { + if [[ $# -ne 4 ]]; then + log 2 "'delete-object with bypass retention' requires bucket, key, user, password" + return 1 + fi + if ! delete_object_error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --bypass-governance-retention 2>&1); then + log 2 "error deleting object with bypass retention: $delete_object_error" + return 1 + fi + return 0 +} + delete_object_with_user() { record_command "delete-object" "client:$1" if [ $# -ne 5 ]; then @@ -38,7 +50,7 @@ delete_object_with_user() { if [[ $1 == 's3' ]]; then delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$? elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then - delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$? + delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" --bypass-governance-retention 2>&1) || exit_code=$? elif [[ $1 == 's3cmd' ]]; then delete_object_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm --access_key="$4" --secret_key="$5" "s3://$2/$3" 2>&1) || exit_code=$? 
else diff --git a/tests/commands/get_object_legal_hold.sh b/tests/commands/get_object_legal_hold.sh index 07b143a..bf02c93 100644 --- a/tests/commands/get_object_legal_hold.sh +++ b/tests/commands/get_object_legal_hold.sh @@ -1,11 +1,11 @@ #!/usr/bin/env bash get_object_legal_hold() { - record_command "get-object-legal-hold" "client:s3api" if [[ $# -ne 2 ]]; then log 2 "'get object legal hold' command requires bucket, key" return 1 fi + record_command "get-object-legal-hold" "client:s3api" legal_hold=$(aws --no-verify-ssl s3api get-object-legal-hold --bucket "$1" --key "$2" 2>&1) || local get_result=$? if [[ $get_result -ne 0 ]]; then log 2 "error getting object legal hold: $legal_hold" diff --git a/tests/commands/get_object_retention.sh b/tests/commands/get_object_retention.sh index ec41ced..c32bea8 100644 --- a/tests/commands/get_object_retention.sh +++ b/tests/commands/get_object_retention.sh @@ -6,9 +6,10 @@ get_object_retention() { log 2 "'get object retention' command requires bucket, key" return 1 fi - retention=$(aws --no-verify-ssl s3api get-object-retention --bucket "$1" --key "$2" 2>&1) || local get_result=$? - if [[ $get_result -ne 0 ]]; then + if ! retention=$(aws --no-verify-ssl s3api get-object-retention --bucket "$1" --key "$2" 2>&1); then log 2 "error getting object retention: $retention" + get_object_retention_error=$retention + export get_object_retention_error return 1 fi export retention diff --git a/tests/commands/list_objects_v2.sh b/tests/commands/list_objects_v2.sh new file mode 100644 index 0000000..a22b7f5 --- /dev/null +++ b/tests/commands/list_objects_v2.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# list objects in bucket, v2 +# param: bucket +# export objects on success, return 1 for failure +list_objects_v2() { + if [ $# -ne 1 ]; then + echo "list objects command missing bucket and/or path" + return 1 + fi + record_command "list-objects-v2 client:s3api" + objects=$(aws --no-verify-ssl s3api list-objects-v2 --bucket "$1") || local result=$? + if [[ $result -ne 0 ]]; then + echo "error listing objects: $objects" + return 1 + fi + export objects +} \ No newline at end of file diff --git a/tests/commands/list_parts.sh b/tests/commands/list_parts.sh new file mode 100644 index 0000000..7bc4748 --- /dev/null +++ b/tests/commands/list_parts.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +list_parts() { + if [[ $# -ne 3 ]]; then + log 2 "'list-parts' command requires bucket, key, upload ID" + return 1 + fi + record_command "list-parts" "client:s3api" + if ! listed_parts=$(aws --no-verify-ssl s3api list-parts --bucket "$1" --key "$2" --upload-id "$3" 2>&1); then + log 2 "Error listing multipart upload parts: $listed_parts" + return 1 + fi + export listed_parts +} \ No newline at end of file diff --git a/tests/commands/put_bucket_tagging.sh b/tests/commands/put_bucket_tagging.sh new file mode 100644 index 0000000..88a2ff6 --- /dev/null +++ b/tests/commands/put_bucket_tagging.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +put_bucket_tagging() { + if [ $# -ne 4 ]; then + echo "bucket tag command missing command type, bucket name, key, value" + return 1 + fi + local error + local result + record_command "put-bucket-tagging" "client:$1" + if [[ $1 == 'aws' ]]; then + error=$(aws --no-verify-ssl s3api put-bucket-tagging --bucket "$2" --tagging "TagSet=[{Key=$3,Value=$4}]") || result=$? + elif [[ $1 == 'mc' ]]; then + error=$(mc --insecure tag set "$MC_ALIAS"/"$2" "$3=$4" 2>&1) || result=$? 
+ else + log 2 "invalid command type $1" + return 1 + fi + if [[ $result -ne 0 ]]; then + echo "Error adding bucket tag: $error" + return 1 + fi + return 0 +} \ No newline at end of file diff --git a/tests/commands/put_object_lock_configuration.sh b/tests/commands/put_object_lock_configuration.sh new file mode 100644 index 0000000..87ac782 --- /dev/null +++ b/tests/commands/put_object_lock_configuration.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +put_object_lock_configuration() { + if [[ $# -ne 4 ]]; then + log 2 "'put-object-lock-configuration' command requires bucket name, enabled, mode, period" + return 1 + fi + local config="{\"ObjectLockEnabled\": \"$2\", \"Rule\": {\"DefaultRetention\": {\"Mode\": \"$3\", \"Days\": $4}}}" + if ! error=$(aws --no-verify-ssl s3api put-object-lock-configuration --bucket "$1" --object-lock-configuration "$config" 2>&1); then + log 2 "error putting object lock configuration: $error" + return 1 + fi + return 0 +} + +put_object_lock_configuration_disabled() { + if [[ $# -ne 1 ]]; then + log 2 "'put-object-lock-configuration' disable command requires bucket name" + return 1 + fi + local config="{\"ObjectLockEnabled\": \"Enabled\"}" + if ! error=$(aws --no-verify-ssl s3api put-object-lock-configuration --bucket "$1" --object-lock-configuration "$config" 2>&1); then + log 2 "error putting object lock configuration: $error" + return 1 + fi + return 0 +} diff --git a/tests/commands/put_object_tagging.sh b/tests/commands/put_object_tagging.sh new file mode 100644 index 0000000..d54c1ea --- /dev/null +++ b/tests/commands/put_object_tagging.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +put_object_tagging() { + if [ $# -ne 5 ]; then + log 2 "'put-object-tagging' command missing command type, object name, file, key, and/or value" + return 1 + fi + local error + local result + record_command "put-object-tagging" "client:$1" + if [[ $1 == 'aws' ]]; then + error=$(aws --no-verify-ssl s3api put-object-tagging --bucket "$2" --key "$3" --tagging "TagSet=[{Key=$4,Value=$5}]" 2>&1) || result=$? + elif [[ $1 == 'mc' ]]; then + error=$(mc --insecure tag set "$MC_ALIAS"/"$2"/"$3" "$4=$5" 2>&1) || result=$? + else + log 2 "invalid command type $1" + return 1 + fi + if [[ $result -ne 0 ]]; then + log 2 "Error adding object tag: $error" + return 1 + fi + return 0 +} \ No newline at end of file diff --git a/tests/commands/upload_part.sh b/tests/commands/upload_part.sh new file mode 100644 index 0000000..8b0aacd --- /dev/null +++ b/tests/commands/upload_part.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +upload_part() { + if [ $# -ne 5 ]; then + log 2 "upload multipart part function must have bucket, key, upload ID, file name, part number" + return 1 + fi + local etag_json + record_command "upload-part" "client:s3api" + if ! etag_json=$(aws --no-verify-ssl s3api upload-part --bucket "$1" --key "$2" --upload-id "$3" --part-number "$5" --body "$4-$(($5-1))" 2>&1); then + log 2 "Error uploading part $5: $etag_json" + return 1 + fi + if ! 
etag=$(echo "$etag_json" | grep -v "InsecureRequestWarning" | jq '.ETag' 2>&1); then + log 2 "error obtaining etag: $etag" + return 1 + fi + export etag +} \ No newline at end of file diff --git a/tests/report.sh b/tests/report.sh index 8b9fa88..a640401 100644 --- a/tests/report.sh +++ b/tests/report.sh @@ -30,6 +30,7 @@ record_command() { check_and_create_database log 5 "command to record: $1" client="" + #role="root" for arg in "${@:2}"; do log 5 "Argument: $arg" if [[ $arg != *":"* ]]; then @@ -40,6 +41,10 @@ record_command() { case $header in "client") client=$(echo "$arg" | awk -F: '{print $2}') + ;; + #"role") + # role=$(echo "$arg" | awk -F: '{print $2}') + # ;; esac done if ! error=$(sqlite3 "$COVERAGE_DB" "INSERT INTO entries (command, client, count) VALUES(\"$1\", \"$client\", 1) ON CONFLICT(command, client) DO UPDATE SET count = count + 1" 2>&1); then diff --git a/tests/test_aws.sh b/tests/test_aws.sh index 57dcc9b..e28ecbb 100755 --- a/tests/test_aws.sh +++ b/tests/test_aws.sh @@ -27,6 +27,7 @@ source ./tests/commands/put_bucket_policy.sh source ./tests/commands/put_bucket_versioning.sh source ./tests/commands/put_object.sh source ./tests/commands/put_object_legal_hold.sh +source ./tests/commands/put_object_lock_configuration.sh source ./tests/commands/put_object_retention.sh source ./tests/commands/select_object_content.sh @@ -140,13 +141,13 @@ export RUN_USERS=true test_common_list_objects "aws" } -#@test "test_get_put_object_legal_hold" { -# test_get_put_object_legal_hold_aws_root -#} +@test "test_get_put_object_legal_hold" { + test_get_put_object_legal_hold_aws_root +} -#@test "test_get_put_object_retention" { -# test_get_put_object_retention_aws_root -#} +@test "test_get_put_object_retention" { + test_get_put_object_retention_aws_root +} @test "test_put_bucket_acl" { test_common_put_bucket_acl "s3api" @@ -881,6 +882,10 @@ EOF fi } +@test "test_retention_bypass" { + test_retention_bypass_aws_root +} + @test "test_head_bucket_doesnt_exist" { setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$? 
[[ $setup_result -eq 0 ]] || fail "error setting up bucket" @@ -982,3 +987,26 @@ EOF delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME" delete_test_files "$policy_file" "$test_file" } + +@test "test_put_object_lock_configuration" { + bucket_name=$BUCKET_ONE_NAME + if [[ $RECREATE_BUCKETS == "true" ]]; then + delete_bucket "s3api" "$bucket_name" || fail "error deleting bucket" + create_bucket_object_lock_enabled "$bucket_name" || fail "error setting up bucket" + fi + local enabled="Enabled" + local governance="GOVERNANCE" + local days="1" + put_object_lock_configuration "$bucket_name" "$enabled" "$governance" "$days" || fail "error putting object lock configuration" + get_object_lock_configuration "$bucket_name" || fail "error getting object lock configuration" + log 5 "LOCK CONFIG: $lock_config" + object_lock_configuration=$(echo "$lock_config" | jq -r ".ObjectLockConfiguration" 2>&1) || fail "error getting ObjectLockConfiguration: $object_lock_configuration" + object_lock_enabled=$(echo "$object_lock_configuration" | jq -r ".ObjectLockEnabled" 2>&1) || fail "error getting ObjectLockEnabled: $object_lock_enabled" + [[ $object_lock_enabled == "$enabled" ]] || fail "incorrect ObjectLockEnabled value: $object_lock_enabled" + default_retention=$(echo "$object_lock_configuration" | jq -r ".Rule.DefaultRetention" 2>&1) || fail "error getting DefaultRetention: $default_retention" + mode=$(echo "$default_retention" | jq -r ".Mode" 2>&1) || fail "error getting Mode: $mode" + [[ $mode == "$governance" ]] || fail "incorrect Mode value: $mode" + returned_days=$(echo "$default_retention" | jq -r ".Days" 2>&1) || fail "error getting Days: $returned_days" + [[ $returned_days == "1" ]] || fail "incorrect Days value: $returned_days" + delete_bucket_or_contents "aws" "$bucket_name" +} diff --git a/tests/test_aws_root_inner.sh b/tests/test_aws_root_inner.sh index e8a827f..c30f7e6 100755 --- a/tests/test_aws_root_inner.sh +++ b/tests/test_aws_root_inner.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bats source ./tests/commands/delete_objects.sh +source ./tests/commands/list_objects_v2.sh +source ./tests/commands/list_parts.sh test_abort_multipart_upload_aws_root() { local bucket_file="bucket-file" @@ -139,15 +141,18 @@ test_delete_objects_aws_root() { } test_get_bucket_acl_aws_root() { - setup_bucket "aws" "$BUCKET_ONE_NAME" || local created=$? - [[ $created -eq 0 ]] || fail "Error creating bucket" + # TODO remove when able to assign bucket ownership back to root + if [[ $RECREATE_BUCKETS == "false" ]]; then + skip + fi + setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "error creating bucket" - get_bucket_acl "s3api" "$BUCKET_ONE_NAME" || local result=$? 
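# For reference, test_put_object_lock_configuration above parses a lock configuration of
# roughly this shape (values taken from that test; formatting illustrative):
#   {"ObjectLockConfiguration": {"ObjectLockEnabled": "Enabled",
#     "Rule": {"DefaultRetention": {"Mode": "GOVERNANCE", "Days": 1}}}}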
- [[ $result -eq 0 ]] || fail "Error retrieving acl" + get_bucket_acl "s3api" "$BUCKET_ONE_NAME" || fail "error retreving ACL" # shellcheck disable=SC2154 - id=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq '.Owner.ID') - [[ $id == '"'"$AWS_ACCESS_KEY_ID"'"' ]] || fail "Acl mismatch" + log 5 "ACL: $acl" + id=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq -r '.Owner.ID') + [[ $id == "$AWS_ACCESS_KEY_ID" ]] || fail "Acl mismatch" delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME" } @@ -274,15 +279,15 @@ test_get_put_object_legal_hold_aws_root() { } test_get_put_object_retention_aws_root() { - # bucket must be created with lock for legal hold - if [[ $RECREATE_BUCKETS == false ]]; then - return - fi - bucket_file="bucket_file" username="ABCDEFG" secret_key="HIJKLMN" + # TODO remove after able to change bucket owner back to root user + if [[ $RECREATE_BUCKETS == "false" ]]; then + skip + fi + legal_hold_retention_setup "$username" "$secret_key" "$bucket_file" get_object_lock_configuration "$BUCKET_ONE_NAME" || fail "error getting lock configuration" @@ -291,10 +296,11 @@ test_get_put_object_retention_aws_root() { [[ $enabled == "Enabled" ]] || fail "ObjectLockEnabled should be 'Enabled', is '$enabled'" if [[ "$OSTYPE" == "darwin"* ]]; then - retention_date=$(date -v+2d +"%Y-%m-%dT%H:%M:%S") + retention_date=$(TZ="UTC" date -v+5S +"%Y-%m-%dT%H:%M:%S") else - retention_date=$(date -d "+2 days" +"%Y-%m-%dT%H:%M:%S") + retention_date=$(TZ="UTC" date -d "+5 seconds" +"%Y-%m-%dT%H:%M:%S") fi + log 5 "retention date: $retention_date" put_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "GOVERNANCE" "$retention_date" || fail "failed to add object retention" get_object_retention "$BUCKET_ONE_NAME" "$bucket_file" || fail "failed to get object retention" log 5 "$retention" @@ -308,14 +314,60 @@ test_get_put_object_retention_aws_root() { put_object_with_user "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || local put_result=$? [[ $put_result -ne 0 ]] || fail "able to overwrite object with hold" # shellcheck disable=SC2154 - [[ $error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $error" + [[ $put_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $error" delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || local delete_result=$? 
   [[ $delete_result -ne 0 ]] || fail "able to delete object with hold"
-  [[ $error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $error"
+  [[ $delete_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $error"
+
+  sleep 5
   delete_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error deleting object"
-  delete_bucket_recursive "s3api" "$BUCKET_ONE_NAME"
+  delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
+  delete_test_files "$bucket_file"
+}
+
+test_retention_bypass_aws_root() {
+  bucket_file="bucket_file"
+  username="ABCDEFG"
+  secret_key="HIJKLMN"
+  policy_file="policy_file"
+
+  legal_hold_retention_setup "$username" "$secret_key" "$bucket_file"
+
+  get_object_lock_configuration "$BUCKET_ONE_NAME" || fail "error getting lock configuration"
+  log 5 "$lock_config"
+  enabled=$(echo "$lock_config" | jq -r ".ObjectLockConfiguration.ObjectLockEnabled")
+  [[ $enabled == "Enabled" ]] || fail "ObjectLockEnabled should be 'Enabled', is '$enabled'"
+
+  if [[ "$OSTYPE" == "darwin"* ]]; then
+    retention_date=$(TZ="UTC" date -v+30S +"%Y-%m-%dT%H:%M:%S")
+  else
+    retention_date=$(TZ="UTC" date -d "+30 seconds" +"%Y-%m-%dT%H:%M:%S")
+  fi
+  log 5 "retention date: $retention_date"
+  put_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "GOVERNANCE" "$retention_date" || fail "failed to add object retention"
+  if delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key"; then
+    log 2 "able to delete object despite retention"
+    return 1
+  fi
+  cat <<EOF > "$test_file_folder/$policy_file"
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": "$username",
+      "Action": ["s3:BypassGovernanceRetention","s3:DeleteObject"],
+      "Resource": "arn:aws:s3:::$BUCKET_ONE_NAME/*"
+    }
+  ]
+}
+EOF
+  put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting bucket policy"
+  delete_object_bypass_retention "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || fail "error deleting object and bypassing retention"
+  delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
+  delete_test_files "$bucket_file" "$policy_file"
+}
 
 legal_hold_retention_setup() {
@@ -326,7 +378,9 @@ legal_hold_retention_setup() {
   create_test_files "$3" || fail "error creating test files"
 
   #create_bucket "s3api" "$BUCKET_ONE_NAME" || fail "error creating bucket"
-  create_bucket_object_lock_enabled "$BUCKET_ONE_NAME" || fail "error creating bucket"
+  if [[ $RECREATE_BUCKETS == "true" ]]; then
+    create_bucket_object_lock_enabled "$BUCKET_ONE_NAME" || fail "error creating bucket"
+  fi
   change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$1" || fail "error changing bucket ownership"
   get_bucket_policy "s3api" "$BUCKET_ONE_NAME" || fail "error getting bucket policy"
   # shellcheck disable=SC2154
@@ -383,7 +437,7 @@ test_s3api_list_objects_v2_aws_root() {
 
   put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local copy_object_two=$?
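# The darwin/GNU date branching used by both retention tests above could be factored
# into a shared helper; a sketch (get_retention_date is a hypothetical name, not part
# of this patch):
get_retention_date() {
  if [[ "$OSTYPE" == "darwin"* ]]; then
    TZ="UTC" date -v+"$1"S +"%Y-%m-%dT%H:%M:%S"
  else
    TZ="UTC" date -d "+$1 seconds" +"%Y-%m-%dT%H:%M:%S"
  fi
}
retention_date=$(get_retention_date 30)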
[[ $copy_object_two -eq 0 ]] || fail "Failed to add object $object_two" - list_objects_s3api_v2 "$BUCKET_ONE_NAME" + list_objects_v2 "$BUCKET_ONE_NAME" || fail "error listing objects (v2)" key_one=$(echo "$objects" | jq -r '.Contents[0].Key') [[ $key_one == "$object_one" ]] || fail "Object one mismatch ($key_one, $object_one)" size_one=$(echo "$objects" | jq -r '.Contents[0].Size') @@ -404,7 +458,7 @@ test_multipart_upload_list_parts_aws_root() { dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1 || fail "error creating test file" setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "failed to create bucket '$BUCKET_ONE_NAME'" - list_parts "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || fail "listing multipart upload parts failed" + start_multipart_upload_and_list_parts "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || fail "listing multipart upload parts failed" declare -a parts_map # shellcheck disable=SC2154 diff --git a/tests/test_common.sh b/tests/test_common.sh index d4cbbb2..ab9f84b 100644 --- a/tests/test_common.sh +++ b/tests/test_common.sh @@ -14,6 +14,8 @@ source ./tests/commands/get_object.sh source ./tests/commands/get_object_tagging.sh source ./tests/commands/list_buckets.sh source ./tests/commands/put_bucket_acl.sh +source ./tests/commands/put_bucket_tagging.sh +source ./tests/commands/put_object_tagging.sh source ./tests/commands/put_object.sh test_common_multipart_upload() { @@ -82,6 +84,7 @@ test_common_copy_object() { delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME" delete_bucket_or_contents "$1" "$BUCKET_TWO_NAME" + delete_test_files "$object_name" "$object_name-copy" } test_common_put_object_with_data() { @@ -267,7 +270,7 @@ test_common_set_get_delete_bucket_tags() { check_bucket_tags_empty "$1" "$BUCKET_ONE_NAME" || fail "error checking if bucket tags are empty" - put_bucket_tag "$1" "$BUCKET_ONE_NAME" $key $value + put_bucket_tagging "$1" "$BUCKET_ONE_NAME" $key $value || fail "error putting bucket tags" get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || fail "Error getting bucket tags second time" local tag_set_key @@ -301,15 +304,11 @@ test_common_set_get_object_tags() { local key="test_key" local value="test_value" - create_test_files "$bucket_file" || local created=$? - [[ $created -eq 0 ]] || fail "Error creating test files" - setup_bucket "$1" "$BUCKET_ONE_NAME" || local result=$? - [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'" - put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local copy_result=$? - [[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket '$BUCKET_ONE_NAME'" + create_test_files "$bucket_file" || fail "error creating test files" + setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "Failed to create bucket '$BUCKET_ONE_NAME'" + put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "Failed to add object to bucket '$BUCKET_ONE_NAME'" - get_object_tagging "$1" "$BUCKET_ONE_NAME" $bucket_file || local get_result=$? 
- [[ $get_result -eq 0 ]] || fail "Error getting object tags" + get_object_tagging "$1" "$BUCKET_ONE_NAME" $bucket_file || fail "Error getting object tags" if [[ $1 == 'aws' ]]; then tag_set=$(echo "$tags" | jq '.TagSet') [[ $tag_set == "[]" ]] || [[ $tag_set == "" ]] || fail "Error: tags not empty" @@ -317,9 +316,8 @@ test_common_set_get_object_tags() { fail "no tags found (tags: $tags)" fi - put_object_tag "$1" "$BUCKET_ONE_NAME" $bucket_file $key $value - get_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" || local get_result_two=$? - [[ $get_result_two -eq 0 ]] || fail "Error getting object tags" + put_object_tagging "$1" "$BUCKET_ONE_NAME" $bucket_file $key $value || fail "error putting object tagging" + get_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error getting object tags" if [[ $1 == 'aws' ]]; then tag_set_key=$(echo "$tags" | jq -r '.TagSet[0].Key') tag_set_value=$(echo "$tags" | jq -r '.TagSet[0].Value') @@ -397,26 +395,19 @@ test_common_delete_object_tagging() { tag_key="key" tag_value="value" - create_test_files "$bucket_file" || local created=$? - [[ $created -eq 0 ]] || fail "Error creating test files" + create_test_files "$bucket_file" || fail "Error creating test files" - setup_bucket "$1" "$BUCKET_ONE_NAME" || local setup_result=$? - [[ $setup_result -eq 0 ]] || fail "error setting up bucket" + setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "error setting up bucket" - put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local copy_result=$? - [[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket" + put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "Failed to add object to bucket" - put_object_tag "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || put_result=$? - [[ $put_result -eq 0 ]] || fail "failed to add tags to object" + put_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || fail "failed to add tags to object" - get_and_verify_object_tags "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || get_result=$? - [[ $get_result -eq 0 ]] || fail "failed to get tags" + get_and_verify_object_tags "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || fail "failed to get tags" - delete_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" || delete_result=$? - [[ $delete_result -eq 0 ]] || fail "error deleting object tagging" + delete_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error deleting object tagging" - check_object_tags_empty "$1" "$BUCKET_ONE_NAME" "$bucket_file" || get_result=$? - [[ $get_result -eq 0 ]] || fail "failed to get tags" + check_object_tags_empty "$1" "$BUCKET_ONE_NAME" "$bucket_file" || fail "failed to get tags" delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME" delete_test_files "$bucket_file" @@ -424,9 +415,9 @@ test_common_delete_object_tagging() { test_common_get_bucket_location() { [[ $# -eq 1 ]] || fail "test common get bucket location missing command type" - setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$? + setup_bucket "$1" "$BUCKET_ONE_NAME" || local setup_result=$? 
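# For reference, the tagging checks above assume a get-object-tagging/get-bucket-tagging
# response of roughly this shape, with "TagSet": [] when no tags are set (key and value
# taken from the tests; formatting illustrative):
#   {"TagSet": [{"Key": "test_key", "Value": "test_value"}]}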
[[ $setup_result -eq 0 ]] || fail "error setting up bucket" - get_bucket_location "aws" "$BUCKET_ONE_NAME" + get_bucket_location "$1" "$BUCKET_ONE_NAME" # shellcheck disable=SC2154 [[ $bucket_location == "null" ]] || [[ $bucket_location == "us-east-1" ]] || fail "wrong location: '$bucket_location'" } @@ -436,16 +427,17 @@ test_common_put_bucket_acl() { setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "error creating bucket" put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred" || fail "error putting bucket ownership controls" - setup_user "ABCDEFG" "HIJKLMN" "user" || fail "error creating user" + username="ABCDEFG" + setup_user "$username" "HIJKLMN" "user" || fail "error creating user" get_bucket_acl "$1" "$BUCKET_ONE_NAME" || fail "error retrieving acl" log 5 "Initial ACLs: $acl" - id=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq '.Owner.ID' 2>&1) || fail "error getting ID: $id" - if [[ $id != '"'"$AWS_ACCESS_KEY_ID"'"' ]]; then + id=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq -r '.Owner.ID' 2>&1) || fail "error getting ID: $id" + if [[ $id != "$username" ]]; then # for direct, ID is canonical user ID rather than AWS_ACCESS_KEY_ID canonical_id=$(aws --no-verify-ssl s3api list-buckets --query 'Owner.ID' 2>&1) || fail "error getting caononical ID: $canonical_id" - [[ $id == "$canonical_id" ]] || fail "acl ID doesn't match AWS key or canonical ID" + [[ $id == "$AWS_ACCESS_KEY_ID" ]] || fail "acl ID doesn't match AWS key or canonical ID" fi acl_file="test-acl" @@ -456,7 +448,7 @@ cat < "$test_file_folder"/"$acl_file" "Grants": [ { "Grantee": { - "ID": "ABCDEFG", + "ID": "$username", "Type": "CanonicalUser" }, "Permission": "READ" @@ -472,7 +464,7 @@ EOF if [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then put_bucket_acl "$1" "$BUCKET_ONE_NAME" "$test_file_folder"/"$acl_file" || fail "error putting first acl" else - put_bucket_acl "$1" "$BUCKET_ONE_NAME" "ABCDEFG" || fail "error putting first acl" + put_bucket_acl "$1" "$BUCKET_ONE_NAME" "$username" || fail "error putting first acl" fi get_bucket_acl "$1" "$BUCKET_ONE_NAME" || fail "error retrieving second ACL" @@ -487,7 +479,7 @@ cat < "$test_file_folder"/"$acl_file" "Grants": [ { "Grantee": { - "ID": "ABCDEFG", + "ID": "$username", "Type": "CanonicalUser" }, "Permission": "FULL_CONTROL" diff --git a/tests/test_mc.sh b/tests/test_mc.sh index 267f7a4..b5863fe 100755 --- a/tests/test_mc.sh +++ b/tests/test_mc.sh @@ -24,7 +24,14 @@ export RUN_MC=true test_common_create_delete_bucket "mc" } -# delete-bucket - test_create_delete_bucket +# delete-bucket +@test "test_delete_bucket" { + if [[ $RECREATE_BUCKETS == "false" ]]; then + skip "will not test bucket deletion in static bucket test config" + fi + setup_bucket "mc" "$BUCKET_ONE_NAME" || fail "error setting up bucket" + delete_bucket "mc" "$BUCKET_ONE_NAME" || fail "error deleting bucket" +} # delete-bucket-policy @test "test_get_put_delete_bucket_policy" { diff --git a/tests/test_s3.sh b/tests/test_s3.sh index 83b645a..c5d51c1 100755 --- a/tests/test_s3.sh +++ b/tests/test_s3.sh @@ -39,3 +39,11 @@ source ./tests/test_common.sh @test "test_list_objects_file_count" { test_common_list_objects_file_count "s3" } + +@test "test_delete_bucket" { + if [[ $RECREATE_BUCKETS == "false" ]]; then + skip "will not test bucket deletion in static bucket test config" + fi + setup_bucket "s3" "$BUCKET_ONE_NAME" || fail "error setting up bucket" + delete_bucket "s3" "$BUCKET_ONE_NAME" || fail "error deleting bucket" +} diff --git a/tests/test_s3cmd.sh b/tests/test_s3cmd.sh index 
9a804ce..6a8f03a 100755 --- a/tests/test_s3cmd.sh +++ b/tests/test_s3cmd.sh @@ -18,9 +18,9 @@ export RUN_USERS=true } # copy-object -#@test "test_copy_object" { -# test_common_copy_object "s3cmd" -#} +@test "test_copy_object" { + test_common_copy_object "s3cmd" +} # create-bucket @test "test_create_delete_bucket" { diff --git a/tests/util.sh b/tests/util.sh index 4fa5cd3..eca81cc 100644 --- a/tests/util.sh +++ b/tests/util.sh @@ -17,9 +17,12 @@ source ./tests/commands/get_object_tagging.sh source ./tests/commands/head_bucket.sh source ./tests/commands/head_object.sh source ./tests/commands/list_objects.sh +source ./tests/commands/list_parts.sh source ./tests/commands/put_bucket_acl.sh source ./tests/commands/put_bucket_ownership_controls.sh +source ./tests/commands/put_object_lock_configuration.sh source ./tests/commands/upload_part_copy.sh +source ./tests/commands/upload_part.sh # recursively delete an AWS bucket # param: bucket name @@ -56,11 +59,32 @@ delete_bucket_recursive() { return 0 } -delete_bucket_recursive_s3api() { +add_governance_bypass_policy() { if [[ $# -ne 1 ]]; then - log 2 "delete bucket recursive command for s3api requires bucket name" + log 2 "'add governance bypass policy' command requires command ID" return 1 fi + test_file_folder=$PWD + if [[ -z "$GITHUB_ACTIONS" ]]; then + create_test_file_folder + fi + cat < "$test_file_folder/policy-bypass-governance.txt" +{ + "Version": "dummy", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:BypassGovernanceRetention", + "Resource": "arn:aws:s3:::$1/*" + } + ] +} +EOF + put_bucket_policy "s3api" "$1" "$test_file_folder/policy-bypass-governance.txt" || fail "error putting bucket policy" +} + +clear_bucket_s3api() { if ! list_objects 's3api' "$1"; then log 2 "error listing objects" return 1 @@ -75,7 +99,25 @@ delete_bucket_recursive_s3api() { log 2 "error removing object legal hold" return 1 fi - if ! delete_object 's3api' "$1" "$object"; then + sleep 1 + if [[ $LOG_LEVEL_INT -ge 5 ]]; then + if ! get_object_legal_hold "$1" "$object"; then + log 2 "error getting object legal hold status" + return 1 + fi + log 5 "LEGAL HOLD: $legal_hold" + if ! get_object_retention "$1" "$object"; then + log 2 "error getting object retention" + if [[ $get_object_retention_error != *"NoSuchObjectLockConfiguration"* ]]; then + return 1 + fi + fi + log 5 "RETENTION: $retention" + get_bucket_policy "s3api" "$1" || fail "error getting bucket policy" + log 5 "BUCKET POLICY: $bucket_policy" + fi + add_governance_bypass_policy "$1" || fail "error adding governance bypass policy" + if ! delete_object_bypass_retention "$1" "$object" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"; then log 2 "error deleting object after legal hold removal" return 1 fi @@ -84,6 +126,19 @@ delete_bucket_recursive_s3api() { return 1 fi done + delete_bucket_policy "s3api" "$1" || fail "error deleting bucket policy" + put_bucket_canned_acl "$1" "private" || fail "error deleting bucket ACLs" + put_object_lock_configuration_disabled "$1" || fail "error removing object lock config" + #change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$1" "$AWS_ACCESS_KEY_ID" || fail "error changing bucket owner" +} + +delete_bucket_recursive_s3api() { + if [[ $# -ne 1 ]]; then + log 2 "delete bucket recursive command for s3api requires bucket name" + return 1 + fi + + clear_bucket_s3api "$1" || fail "error clearing bucket" delete_bucket 's3api' "$1" || local delete_bucket_result=$? 
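# A usage sketch of the governance-bypass path added above, as clear_bucket_s3api applies
# it (the object key variable is illustrative):
add_governance_bypass_policy "$BUCKET_ONE_NAME" || fail "error adding governance bypass policy"
delete_object_bypass_retention "$BUCKET_ONE_NAME" "$object_key" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" || fail "error deleting object"
delete_bucket_policy "s3api" "$BUCKET_ONE_NAME" || fail "error deleting bucket policy"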
if [[ $delete_bucket_result -ne 0 ]]; then @@ -105,7 +160,7 @@ delete_bucket_contents() { local exit_code=0 local error if [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then - error=$(aws --no-verify-ssl s3 rm s3://"$2" --recursive 2>&1) || exit_code="$?" + clear_bucket_s3api "$2" || exit_code="$?" elif [[ $1 == "s3cmd" ]]; then error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate del s3://"$2" --recursive --force 2>&1) || exit_code="$?" elif [[ $1 == "mc" ]]; then @@ -165,11 +220,6 @@ delete_bucket_or_contents() { log 2 "error getting object ownership rule" return 1 fi - # shellcheck disable=SC2154 - #if [[ "$object_ownership_rule" != "BucketOwnerEnforced" ]]; then - # get_bucket_acl "$1" "$2" || fail "error getting bucket acl" - # log 5 "ACL: $acl" - #fi log 5 "object ownership rule: $object_ownership_rule" if [[ "$object_ownership_rule" != "BucketOwnerEnforced" ]] && ! put_bucket_canned_acl "$2" "private"; then log 2 "error resetting bucket ACLs" @@ -464,33 +514,6 @@ get_object_acl() { export acl } -# add tags to bucket -# params: bucket, key, value -# return: 0 for success, 1 for error -put_bucket_tag() { - if [ $# -ne 4 ]; then - echo "bucket tag command missing command type, bucket name, key, value" - return 1 - fi - local error - local result - if [[ $1 == 'aws' ]]; then - error=$(aws --no-verify-ssl s3api put-bucket-tagging --bucket "$2" --tagging "TagSet=[{Key=$3,Value=$4}]") || result=$? - elif [[ $1 == 'mc' ]]; then - error=$(mc --insecure tag set "$MC_ALIAS"/"$2" "$3=$4" 2>&1) || result=$? - else - log 2 "invalid command type $1" - return 1 - fi - if [[ $result -ne 0 ]]; then - echo "Error adding bucket tag: $error" - return 1 - fi - return 0 -} - - - check_tags_empty() { if [[ $# -ne 1 ]]; then echo "check tags empty requires command type" @@ -541,31 +564,6 @@ check_bucket_tags_empty() { return $check_result } -# add tags to object -# params: object, key, value -# return: 0 for success, 1 for error -put_object_tag() { - if [ $# -ne 5 ]; then - echo "object tag command missing command type, object name, file, key, and/or value" - return 1 - fi - local error - local result - if [[ $1 == 'aws' ]]; then - error=$(aws --no-verify-ssl s3api put-object-tagging --bucket "$2" --key "$3" --tagging "TagSet=[{Key=$4,Value=$5}]" 2>&1) || result=$? - elif [[ $1 == 'mc' ]]; then - error=$(mc --insecure tag set "$MC_ALIAS"/"$2"/"$3" "$4=$5" 2>&1) || result=$? - else - echo "invalid command type $1" - return 1 - fi - if [[ $result -ne 0 ]]; then - echo "Error adding object tag: $error" - return 1 - fi - return 0 -} - get_and_verify_object_tags() { if [[ $# -ne 5 ]]; then echo "get and verify object tags missing command type, bucket, key, tag key, tag value" @@ -615,40 +613,6 @@ list_objects_s3api_v1() { export objects } -# list objects in bucket, v2 -# param: bucket -# export objects on success, return 1 for failure -list_objects_s3api_v2() { - if [ $# -ne 1 ]; then - echo "list objects command missing bucket and/or path" - return 1 - fi - objects=$(aws --no-verify-ssl s3api list-objects-v2 --bucket "$1") || local result=$? 
- if [[ $result -ne 0 ]]; then - echo "error listing objects: $objects" - return 1 - fi - export objects -} - -# upload a single part of a multipart upload -# params: bucket, key, upload ID, original (unsplit) file name, part number -# return: 0 for success, 1 for failure -upload_part() { - if [ $# -ne 5 ]; then - echo "upload multipart part function must have bucket, key, upload ID, file name, part number" - return 1 - fi - local etag_json - etag_json=$(aws --no-verify-ssl s3api upload-part --bucket "$1" --key "$2" --upload-id "$3" --part-number "$5" --body "$4-$(($5-1))") || local uploaded=$? - if [[ $uploaded -ne 0 ]]; then - echo "Error uploading part $5: $etag_json" - return 1 - fi - etag=$(echo "$etag_json" | jq '.ETag') - export etag -} - # perform all parts of a multipart upload before completion command # params: bucket, key, file to split and upload, number of file parts to upload # return: 0 for success, 1 for failure @@ -866,7 +830,7 @@ copy_file() { # list parts of an unfinished multipart upload # params: bucket, key, local file location, and parts to split into before upload # export parts on success, return 1 for error -list_parts() { +start_multipart_upload_and_list_parts() { if [ $# -ne 4 ]; then log 2 "list multipart upload parts command requires bucket, key, file, and part count" return 1 @@ -877,7 +841,7 @@ list_parts() { return 1 fi - if ! listed_parts=$(aws --no-verify-ssl s3api list-parts --bucket "$1" --key "$2" --upload-id "$upload_id" 2>&1); then + if ! list_parts "$1" "$2" "$upload_id"; then log 2 "Error listing multipart upload parts: $listed_parts" return 1 fi
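# The relocated upload_part helper reads pre-split part files named "<file>-0",
# "<file>-1", ... and takes a 1-based part number; a sketch of uploading four parts and
# then listing them with the new list_parts command (bucket, key, and upload ID values
# are illustrative):
for part_number in 1 2 3 4; do
  upload_part "$BUCKET_ONE_NAME" "$bucket_file" "$upload_id" "$test_file_folder/$bucket_file" "$part_number" || fail "error uploading part $part_number"
done
list_parts "$BUCKET_ONE_NAME" "$bucket_file" "$upload_id" || fail "error listing parts"
log 5 "parts: $listed_parts"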