test: multipart upload REST testing (complete, list, upload)

This commit is contained in:
Luke McCrone
2024-10-15 18:33:46 -03:00
parent 7fdfecf7f9
commit bf4fc71bba
19 changed files with 763 additions and 383 deletions

View File

@@ -31,3 +31,22 @@ upload_part() {
fi
export etag
}
# Upload a single multipart-upload part via the REST API and capture its ETag.
# params: bucket name, key, upload ID, part number, part data file
# sets: etag (parsed from the response headers saved in $TEST_FILE_FOLDER/etag.txt)
# return 0 on success, 1 on error
upload_part_and_get_etag_rest() {
  if [ $# -ne 5 ]; then
    # fixed: message previously named the wrong function ('upload_part_rest')
    # and listed 'part number, upload ID' in the reverse of the actual
    # argument mapping below ($3 -> UPLOAD_ID, $4 -> PART_NUMBER)
    log 2 "'upload_part_and_get_etag_rest' requires bucket name, key, upload ID, part number, part"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" PART_NUMBER="$4" UPLOAD_ID="$3" DATA_FILE="$5" OUTPUT_FILE="$TEST_FILE_FOLDER/etag.txt" ./tests/rest_scripts/upload_part.sh); then
    log 2 "error sending upload-part REST command: $result"
    return 1
  fi
  # upload_part.sh echoes the HTTP status code; anything but 200 is a failure
  if [[ "$result" != "200" ]]; then
    log 2 "upload-part command returned error $result: $(cat "$TEST_FILE_FOLDER/etag.txt")"
    return 1
  fi
  log 5 "$(cat "$TEST_FILE_FOLDER/etag.txt")"
  # pull the ETag value out of the saved response headers, stripping CRs
  etag=$(grep -i "etag" "$TEST_FILE_FOLDER/etag.txt" | awk '{print $2}' | tr -d '\r')
  log 5 "etag: $etag"
  return 0
}

View File

@@ -39,7 +39,7 @@ UNSIGNED-PAYLOAD"
create_canonical_hash_sts_and_signature
curl_command+=(curl -ks -w "\"%{http_code}\"" -X DELETE "https://$host/$bucket_name/$key?uploadId=$upload_id"
curl_command+=(curl -ks -w "\"%{http_code}\"" -X DELETE "$AWS_ENDPOINT_URL/$bucket_name/$key?uploadId=$upload_id"
-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
-H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
-H "\"x-amz-date: $current_date_time\""

View File

@@ -0,0 +1,54 @@
#!/usr/bin/env bash
# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# Complete a multipart upload (POST /bucket/key?uploadId=...) with a
# SigV4-signed REST request. The signed XML payload lists the uploaded parts;
# the response body is written to $OUTPUT_FILE and the HTTP status code is
# echoed to stdout.
# Required env vars: BUCKET_NAME, OBJECT_KEY, UPLOAD_ID, PARTS (the inner
# <Part>...</Part> XML elements), OUTPUT_FILE, AWS_ENDPOINT_URL.
# rest.sh supplies $host, credentials, and create_canonical_hash_sts_and_signature.
source ./tests/rest_scripts/rest.sh
# Fields
# shellcheck disable=SC2153
bucket_name="$BUCKET_NAME"
# shellcheck disable=SC2153
key="$OBJECT_KEY"
# shellcheck disable=SC2153,SC2034
upload_id="$UPLOAD_ID"
# shellcheck disable=SC2153
parts="$PARTS"
# wrap the caller-supplied <Part> entries in the CompleteMultipartUpload envelope
payload="<CompleteMultipartUpload xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">$parts</CompleteMultipartUpload>"
# the XML payload itself is signed: its hash goes into both the canonical
# request and the x-amz-content-sha256 header
payload_hash="$(echo -n "$payload" | sha256sum | awk '{print $1}')"
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
# NOTE(review): uses $UPLOAD_ID here but $upload_id in the URL below -- same
# value at runtime, but the lowercase variable would be more consistent
canonical_request="POST
/$bucket_name/$key
uploadId=$UPLOAD_ID
host:$host
x-amz-content-sha256:$payload_hash
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-date
$payload_hash"
create_canonical_hash_sts_and_signature
# Content-Type is sent but is not included in SignedHeaders above
# NOTE(review): confirm the target gateway accepts an unsigned Content-Type
curl_command+=(curl -ks -w "\"%{http_code}\"" -X POST "$AWS_ENDPOINT_URL/$bucket_name/$key?uploadId=$upload_id"
-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
-H "\"x-amz-content-sha256: $payload_hash\""
-H "\"x-amz-date: $current_date_time\""
-H "\"Content-Type: application/xml\""
-d "\"${payload//\"/\\\"}\""
-o "$OUTPUT_FILE")
# shellcheck disable=SC2154
eval "${curl_command[*]}" 2>&1

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# List in-progress multipart uploads for a bucket (GET /bucket/?uploads=) with
# a SigV4-signed REST request; writes the response body to $OUTPUT_FILE and
# echoes the HTTP status code to stdout.
# Required env vars: BUCKET_NAME, OUTPUT_FILE (UPLOAD_ID is read but unused
# here, hence the SC2034 suppression); rest.sh supplies $host, credentials,
# and create_canonical_hash_sts_and_signature.
source ./tests/rest_scripts/rest.sh
# Fields
# shellcheck disable=SC2153
bucket_name="$BUCKET_NAME"
# shellcheck disable=SC2153,SC2034
upload_id="$UPLOAD_ID"
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
# shellcheck disable=SC2034
canonical_request="GET
/$bucket_name/
uploads=
host:$host
x-amz-content-sha256:UNSIGNED-PAYLOAD
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-date
UNSIGNED-PAYLOAD"
create_canonical_hash_sts_and_signature
# fixed: the URL previously interpolated $key, which is never defined in this
# script (this is a bucket-level operation); the path now matches the
# canonical request's "/$bucket_name/" exactly
curl_command+=(curl -ks -w "\"%{http_code}\"" "https://$host/$bucket_name/?uploads="
-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
-H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
-H "\"x-amz-date: $current_date_time\""
-o "$OUTPUT_FILE")
# shellcheck disable=SC2154
eval "${curl_command[*]}" 2>&1

View File

@@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# Upload one part of a multipart upload (PUT ?partNumber=...&uploadId=...)
# with a SigV4-signed REST request. Response headers (curl -i, so including
# the part's ETag) are written to $OUTPUT_FILE; the HTTP status code is
# echoed to stdout.
# Required env vars: BUCKET_NAME, OBJECT_KEY, PART_NUMBER, UPLOAD_ID,
# DATA_FILE (file holding the part payload), OUTPUT_FILE, AWS_ENDPOINT_URL.
# rest.sh supplies credentials and create_canonical_hash_sts_and_signature.
source ./tests/rest_scripts/rest.sh
# shellcheck disable=SC2153
bucket_name="$BUCKET_NAME"
# shellcheck disable=SC2153
key="$OBJECT_KEY"
# shellcheck disable=SC2153
part_number="$PART_NUMBER"
# shellcheck disable=SC2153
upload_id="$UPLOAD_ID"
# shellcheck disable=SC2153
data=$DATA_FILE
# the part payload is signed: its hash goes into the canonical request and header
payload_hash="$(sha256sum "$data" | awk '{print $1}')"
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
# strip the scheme prefix to get host[:port] for the canonical 'host' header
aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
# NOTE(review): 'header' (the URL scheme) is assigned but never used in this
# script -- confirm whether it can be removed
# shellcheck disable=SC2034
header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
# shellcheck disable=SC2154
canonical_request="PUT
/$bucket_name/$key
partNumber=$part_number&uploadId=$upload_id
host:$aws_endpoint_url_address
x-amz-content-sha256:$payload_hash
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-date
$payload_hash"
create_canonical_hash_sts_and_signature
# NOTE(review): unconditional 5s delay looks like leftover debugging or a
# timing workaround -- confirm whether it is still needed
sleep 5
curl_command+=(curl -isk -w "\"%{http_code}\"" "\"$AWS_ENDPOINT_URL/$bucket_name/$key?partNumber=$part_number&uploadId=$upload_id\""
-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
-H "\"x-amz-content-sha256: $payload_hash\""
-H "\"x-amz-date: $current_date_time\""
-o "\"$OUTPUT_FILE\""
-T "\"$data\"")
# shellcheck disable=SC2154
eval "${curl_command[*]}" 2>&1

View File

@@ -62,10 +62,10 @@ teardown() {
echo "**********************************************************************************"
fi
# shellcheck disable=SC2154
if ! delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME"; then
if ! bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"; then
log 3 "error deleting bucket $BUCKET_ONE_NAME or contents"
fi
if ! delete_bucket_or_contents_if_exists "s3api" "$BUCKET_TWO_NAME"; then
if ! bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_TWO_NAME"; then
log 3 "error deleting bucket $BUCKET_TWO_NAME or contents"
fi
if [ "$REMOVE_TEST_FILE_FOLDER" == "true" ]; then

View File

@@ -87,7 +87,7 @@ test_create_multipart_upload_properties_aws_root() {
run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1
assert_success
run delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME"
run bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"
assert_success
# in static bucket config, bucket will still exist
if ! bucket_exists "s3api" "$BUCKET_ONE_NAME"; then
@@ -360,7 +360,7 @@ test_retention_bypass_aws_root() {
legal_hold_retention_setup() {
assert [ $# -eq 3 ]
run delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME"
run bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"
assert_success
run setup_user "$1" "$2" "user"

View File

@@ -65,7 +65,7 @@ test_common_multipart_upload() {
run download_and_compare_file "$1" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER/$bucket_file-copy"
assert_success
delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
bucket_cleanup "$1" "$BUCKET_ONE_NAME"
delete_test_files $bucket_file
}
@@ -85,7 +85,7 @@ test_common_create_delete_bucket() {
run bucket_exists "$1" "$BUCKET_ONE_NAME"
assert_success
run delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
run bucket_cleanup "$1" "$BUCKET_ONE_NAME"
assert_success
}

View File

@@ -32,6 +32,7 @@ source ./tests/util.sh
source ./tests/util_legal_hold.sh
source ./tests/util_list_buckets.sh
source ./tests/util_list_objects.sh
source ./tests/util_list_parts.sh
source ./tests/util_lock_config.sh
source ./tests/util_rest.sh
source ./tests/util_tags.sh
@@ -117,7 +118,7 @@ source ./tests/util_versioning.sh
test_key="TestKey"
test_value="TestValue"
run delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME"
run bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"
assert_success
# in static bucket config, bucket will still exist
if ! bucket_exists "s3api" "$BUCKET_ONE_NAME"; then
@@ -169,7 +170,7 @@ source ./tests/util_versioning.sh
run check_no_object_lock_config_rest "$BUCKET_ONE_NAME"
assert_success
run delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME"
run bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"
assert_success
# in static bucket config, bucket will still exist
@@ -291,3 +292,26 @@ source ./tests/util_versioning.sh
run create_abort_multipart_upload_rest "$BUCKET_ONE_NAME" "$test_file"
assert_success
}
# end-to-end REST multipart upload: split a large file into parts, upload
# them, check the parts listing, then verify the resulting object's contents
@test "REST - multipart upload create, list parts" {
  test_file="test_file"
  run create_large_file "$test_file"
  assert_success
  # split into four parts (presumably named $test_file-0..3 -- see split_file)
  run split_file "$TEST_FILE_FOLDER/$test_file" 4
  assert_success
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success
  # upload the parts via REST and verify the listed parts (see upload_check_parts)
  run upload_check_parts "$BUCKET_ONE_NAME" "$test_file" \
    "$TEST_FILE_FOLDER/$test_file-0" "$TEST_FILE_FOLDER/$test_file-1" "$TEST_FILE_FOLDER/$test_file-2" "$TEST_FILE_FOLDER/$test_file-3"
  assert_success
  # download the object and confirm it matches the original file byte-for-byte
  run get_object "s3api" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy"
  assert_success
  run compare_files "$TEST_FILE_FOLDER/$test_file" "$TEST_FILE_FOLDER/$test_file-copy"
  assert_success
}

View File

@@ -283,7 +283,7 @@ export RUN_USERS=true
fi
[[ object_size -eq $((range_max*4+4)) ]] || fail "object size mismatch ($object_size, $((range_max*4+4)))"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files $bucket_file
}
@@ -314,7 +314,7 @@ export RUN_USERS=true
key=$(echo "${objects[@]}" | jq -r ".Contents[0].Key" 2>&1) || fail "error getting key from object list: $key"
[[ $key == "$folder_name/$object_name" ]] || fail "key doesn't match (expected $key, actual $folder_name/$object_name)"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files $folder_name
}
@@ -350,7 +350,7 @@ export RUN_USERS=true
log 5 "INFO: $bucket_info"
region=$(echo "$bucket_info" | grep -v "InsecureRequestWarning" | jq -r ".BucketRegion" 2>&1) || fail "error getting bucket region: $region"
[[ $region != "" ]] || fail "empty bucket region"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
}
@test "test_retention_bypass" {
@@ -364,7 +364,7 @@ export RUN_USERS=true
head_bucket "aws" "$BUCKET_ONE_NAME"a || local info_result=$?
[[ $info_result -eq 1 ]] || fail "bucket info for non-existent bucket returned"
[[ $bucket_info == *"404"* ]] || fail "404 not returned for non-existent bucket info"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
}
@test "test_add_object_metadata" {
@@ -388,7 +388,7 @@ export RUN_USERS=true
[[ $key == "$test_key" ]] || fail "keys doesn't match (expected $key, actual \"$test_key\")"
[[ $value == "$test_value" ]] || fail "values doesn't match (expected $value, actual \"$test_value\")"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$object_one"
}
@@ -410,7 +410,7 @@ export RUN_USERS=true
run get_and_check_object_lock_config "$bucket_name" "$enabled" "$governance" "$days"
assert_success "error getting and checking object lock config"
delete_bucket_or_contents "aws" "$bucket_name"
bucket_cleanup "aws" "$bucket_name"
}
@test "test_ls_directory_object" {

View File

@@ -187,14 +187,15 @@ test_s3api_policy_invalid_action() {
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success
check_for_empty_policy "s3api" "$BUCKET_ONE_NAME" || fail "policy not empty"
run check_for_empty_policy "s3api" "$BUCKET_ONE_NAME"
assert_success
if put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file"; then
fail "put succeeded despite malformed policy"
fi
# shellcheck disable=SC2154
[[ "$put_bucket_policy_error" == *"MalformedPolicy"*"invalid action"* ]] || fail "invalid policy error: $put_bucket_policy_error"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file"
}
@@ -236,7 +237,7 @@ test_s3api_policy_get_object_with_user() {
run download_and_compare_file_with_user "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password"
assert_success
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
}
test_s3api_policy_get_object_specific_file() {
@@ -273,7 +274,7 @@ test_s3api_policy_get_object_specific_file() {
fi
# shellcheck disable=SC2154
[[ "$get_object_error" == *"Access Denied"* ]] || fail "invalid get object error: $get_object_error"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
}
test_s3api_policy_get_object_file_wildcard() {
@@ -314,7 +315,7 @@ test_s3api_policy_get_object_file_wildcard() {
fi
[[ "$get_object_error" == *"Access Denied"* ]] || fail "invalid get object error: $get_object_error"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
}
test_s3api_policy_get_object_folder_wildcard() {
@@ -335,17 +336,23 @@ test_s3api_policy_get_object_folder_wildcard() {
action="s3:GetObject"
resource="arn:aws:s3:::$BUCKET_ONE_NAME/$test_folder/*"
setup_user "$username" "$password" "user" || fail "error creating user"
run setup_user "$username" "$password" "user"
assert_success
setup_bucket "s3api" "$BUCKET_ONE_NAME"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success
put_object "s3api" "$TEST_FILE_FOLDER/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" || fail "error copying object to bucket"
run setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource"
assert_success
download_and_compare_file_with_user "s3api" "$TEST_FILE_FOLDER/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password" || fail "error downloading and comparing file"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files "$test_folder/$test_file" "$policy_file"
run put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file"
assert_success
run put_object "s3api" "$TEST_FILE_FOLDER/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file"
assert_success
run download_and_compare_file_with_user "s3api" "$TEST_FILE_FOLDER/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password"
assert_success
}
test_s3api_policy_allow_deny() {
@@ -374,7 +381,7 @@ test_s3api_policy_allow_deny() {
fi
[[ "$get_object_error" == *"Access Denied"* ]] || fail "invalid get object error: $get_object_error"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$test_file" "$test_file-copy" "$policy_file"
}
@@ -406,7 +413,7 @@ test_s3api_policy_deny() {
fail "able to get object despite deny statement"
fi
[[ "$get_object_error" == *"Access Denied"* ]] || fail "invalid get object error: $get_object_error"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$test_file_one" "$test_file_two" "$test_file_one-copy" "$test_file_two-copy" "$policy_file"
}
@@ -445,7 +452,7 @@ test_s3api_policy_put_wildcard() {
fi
[[ "$get_object_error" == *"Access Denied"* ]] || fail "invalid get object error: $get_object_error"
download_and_compare_file "s3api" "$TEST_FILE_FOLDER/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" "$TEST_FILE_FOLDER/$test_file-copy" || fail "files don't match"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$test_folder/$test_file" "$test_file-copy" "$policy_file"
}
@@ -481,7 +488,7 @@ test_s3api_policy_delete() {
# shellcheck disable=SC2154
[[ "$delete_object_error" == *"Access Denied"* ]] || fail "invalid delete object error: $delete_object_error"
delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file_two" "$username" "$password" || fail "error deleting object despite permissions"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$test_file_one" "$test_file_two" "$policy_file"
}
@@ -515,7 +522,7 @@ test_s3api_policy_get_bucket_policy() {
log 5 "ORIG: $(cat "$TEST_FILE_FOLDER/$policy_file")"
log 5 "COPY: $(cat "$TEST_FILE_FOLDER/$policy_file-copy")"
compare_files "$TEST_FILE_FOLDER/$policy_file" "$TEST_FILE_FOLDER/$policy_file-copy" || fail "policies not equal"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file" "$policy_file-copy"
}
@@ -560,7 +567,7 @@ test_s3api_policy_list_multipart_uploads() {
log 5 "$uploads"
upload_key=$(echo "$uploads" | grep -v "InsecureRequestWarning" | jq -r ".Uploads[0].Key" 2>&1) || fail "error parsing upload key from uploads message: $upload_key"
[[ $upload_key == "$test_file" ]] || fail "upload key doesn't match file marked as being uploaded"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file" "$test_file"
}
@@ -597,7 +604,7 @@ test_s3api_policy_put_bucket_policy() {
log 5 "ORIG: $(cat "$TEST_FILE_FOLDER/$policy_file_two")"
log 5 "COPY: $(cat "$TEST_FILE_FOLDER/$policy_file-copy")"
compare_files "$TEST_FILE_FOLDER/$policy_file_two" "$TEST_FILE_FOLDER/$policy_file-copy" || fail "policies not equal"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file" "$policy_file_two" "$policy_file-copy"
}
@@ -625,7 +632,7 @@ test_s3api_policy_delete_bucket_policy() {
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
delete_bucket_policy_with_user "$BUCKET_ONE_NAME" "$username" "$password" || fail "unable to delete bucket policy"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file"
}
@@ -700,7 +707,7 @@ test_s3api_policy_abort_multipart_upload() {
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
abort_multipart_upload_with_user "$BUCKET_ONE_NAME" "$test_file" "$upload_id" "$username" "$password" || fail "error aborting multipart upload despite permissions"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file" "$test_file"
}
@@ -736,7 +743,7 @@ test_s3api_policy_two_principals() {
assert_success "error getting object with user $USERNAME_TWO"
delete_test_files "$test_file" "$policy_file" "$TEST_FILE_FOLDER/copy_one" "$TEST_FILE_FOLDER/copy_two"
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
bucket_cleanup "s3api" "$BUCKET_ONE_NAME"
}
test_s3api_policy_put_bucket_tagging() {
@@ -762,7 +769,7 @@ test_s3api_policy_put_bucket_tagging() {
get_and_check_bucket_tags "$BUCKET_ONE_NAME" "$tag_key" "$tag_value"
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
bucket_cleanup "s3api" "$BUCKET_ONE_NAME"
}
test_s3api_policy_put_acl() {
@@ -805,7 +812,7 @@ test_s3api_policy_put_acl() {
id=$(echo "$second_grantee" | jq -r ".ID" 2>&1) || fail "error getting ID: $id"
[[ $id == "all-users" ]] || fail "unexpected ID: $id"
fi
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
bucket_cleanup "aws" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file"
}
@@ -838,7 +845,7 @@ test_s3api_policy_get_bucket_tagging() {
run get_and_check_bucket_tags_with_user "$USERNAME_ONE" "$PASSWORD_ONE" "$BUCKET_ONE_NAME" "$tag_key" "$tag_value"
assert_success "get and check bucket tags failed"
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
bucket_cleanup "s3api" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file"
}
@@ -869,6 +876,6 @@ test_s3api_policy_list_upload_parts() {
run create_upload_and_test_parts_listing "$test_file" "$policy_file"
assert_success "error creating upload and testing parts listing"
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
bucket_cleanup "s3api" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file" "$test_file"
}

View File

@@ -52,7 +52,7 @@ export RUN_USERS=true
[[ "$bucket_create_error" == *"just the bucket name"* ]] || fail "unexpected error: $bucket_create_error"
delete_bucket_or_contents "s3cmd" "$BUCKET_ONE_NAME"
bucket_cleanup "s3cmd" "$BUCKET_ONE_NAME"
}
# delete-bucket - test_create_delete_bucket
@@ -119,7 +119,7 @@ export RUN_USERS=true
head_bucket "s3cmd" "$BUCKET_ONE_NAME"
[[ $bucket_info == *"s3://$BUCKET_ONE_NAME"* ]] || fail "failure to retrieve correct bucket info: $bucket_info"
delete_bucket_or_contents "s3cmd" "$BUCKET_ONE_NAME"
bucket_cleanup "s3cmd" "$BUCKET_ONE_NAME"
}
@test "test_get_bucket_info_doesnt_exist_s3cmd" {
@@ -129,7 +129,7 @@ export RUN_USERS=true
head_bucket "s3cmd" "$BUCKET_ONE_NAME"a || local info_result=$?
[[ $info_result -eq 1 ]] || fail "bucket info for non-existent bucket returned"
[[ $bucket_info == *"404"* ]] || fail "404 not returned for non-existent bucket info"
delete_bucket_or_contents "s3cmd" "$BUCKET_ONE_NAME"
bucket_cleanup "s3cmd" "$BUCKET_ONE_NAME"
}
@test "test_ls_directory_object" {

View File

@@ -136,7 +136,7 @@ export RUN_USERS=true
fi
# shellcheck disable=SC2154
[[ "$get_object_error" == *"NoSuchKey"* ]] || fail "unexpected error message: $get_object_error"
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
bucket_cleanup "s3api" "$BUCKET_ONE_NAME"
delete_test_files "$test_file" "$test_file-copy"
}

View File

@@ -87,7 +87,7 @@ test_user_user() {
password="$PASSWORD_ONE"
setup_user "$username" "$password" "user" || fail "error setting up user"
delete_bucket_or_contents_if_exists "aws" "versity-gwtest-user-bucket"
bucket_cleanup_if_bucket_exists "aws" "versity-gwtest-user-bucket"
run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success
@@ -131,7 +131,7 @@ test_userplus_operation() {
username="$USERNAME_ONE"
password="$PASSWORD_ONE"
delete_bucket_or_contents_if_exists "aws" "versity-gwtest-userplus-bucket"
bucket_cleanup_if_bucket_exists "aws" "versity-gwtest-userplus-bucket"
setup_user "$username" "$password" "userplus" || fail "error creating user '$username'"
run setup_bucket "aws" "$BUCKET_ONE_NAME"

View File

@@ -14,6 +14,7 @@
# specific language governing permissions and limitations
# under the License.
source ./tests/util_bucket.sh
source ./tests/util_create_bucket.sh
source ./tests/util_mc.sh
source ./tests/util_multipart.sh
@@ -46,46 +47,6 @@ source ./tests/commands/upload_part_copy.sh
source ./tests/commands/upload_part.sh
source ./tests/util_users.sh
# recursively delete an AWS bucket
# param: client, bucket name
# fail if error
# Recursively delete a bucket and its contents using the given client.
# params: client (s3|aws|s3api|s3cmd|mc), bucket name
# return 0 on success (including already-absent bucket), 1 on error
delete_bucket_recursive() {
  log 6 "delete_bucket_recursive"
  if [ $# -ne 2 ]; then
    log 2 "'delete_bucket_recursive' requires client, bucket name"
    return 1
  fi
  local rc=0
  local error
  # dispatch on client type; the s3api path does its own cleanup + delete
  case "$1" in
    s3)
      error=$(aws --no-verify-ssl s3 rb s3://"$2" --force 2>&1) || rc="$?"
      ;;
    aws | s3api)
      if ! delete_bucket_recursive_s3api "$2"; then
        log 2 "error deleting bucket recursively (s3api)"
        return 1
      fi
      return 0
      ;;
    s3cmd)
      error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rb s3://"$2" --recursive 2>&1) || rc="$?"
      ;;
    mc)
      error=$(delete_bucket_recursive_mc "$2" 2>&1) || rc="$?"
      ;;
    *)
      log 2 "invalid client '$1'"
      return 1
      ;;
  esac
  # an already-missing bucket counts as success; anything else is an error
  if [ "$rc" -ne 0 ]; then
    if [[ "$error" == *"The specified bucket does not exist"* ]]; then
      return 0
    fi
    log 2 "error deleting bucket recursively: $error"
    return 1
  fi
  return 0
}
# params: bucket name
# return 0 for success, 1 for error
add_governance_bypass_policy() {
@@ -200,56 +161,6 @@ check_object_lock_config() {
return 0
}
# restore bucket to pre-test state (or prep for deletion)
# param: bucket name
# return 0 on success, 1 on error
clear_bucket_s3api() {
  log 6 "clear_bucket_s3api"
  if [ $# -ne 1 ]; then
    log 2 "'clear_bucket_s3api' requires bucket name"
    return 1
  fi
  # at high verbosity, dump the current bucket policy for debugging
  if [[ $LOG_LEVEL_INT -ge 5 ]]; then
    if ! log_bucket_policy "$1"; then
      log 2 "error logging bucket policy"
      return 1
    fi
  fi
  # NOTE(review): check_object_lock_config presumably sets the global
  # 'lock_config_exists' consulted below -- confirm in its definition
  if ! check_object_lock_config "$1"; then
    log 2 "error checking object lock config"
    return 1
  fi
  # when not running directly against AWS, add a governance-bypass policy so
  # retention-locked objects can still be removed
  if [[ "$DIRECT" != "true" ]] && ! add_governance_bypass_policy "$1"; then
    log 2 "error adding governance bypass policy"
    return 1
  fi
  if ! list_and_delete_objects "$1"; then
    log 2 "error listing and deleting objects"
    return 1
  fi
  #run check_ownership_rule_and_reset_acl "$1"
  #assert_success "error checking ownership rule and resetting acl"
  # if the bucket had an object lock configuration, disable it again
  if [[ $lock_config_exists == true ]] && ! put_object_lock_configuration_disabled "$1"; then
    log 2 "error disabling object lock config"
    return 1
  fi
  #if ! put_bucket_versioning "s3api" "$1" "Suspended"; then
  #  log 2 "error suspending bucket versioning"
  #  return 1
  #fi
  #if ! change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$1" "$AWS_ACCESS_KEY_ID"; then
  #  log 2 "error changing bucket owner back to root"
  #  return 1
  #fi
}
# params: bucket, object name
# return 0 for success, 1 for error
clear_object_in_bucket() {
@@ -321,77 +232,6 @@ log_worm_protection() {
log 5 "RETENTION: $retention"
}
# params: bucket name
# return 0 if able to delete recursively, 1 if not
# Clear a bucket's contents/state, then delete the bucket, via s3api.
# params: bucket name
# return 0 if able to delete recursively, 1 if not
delete_bucket_recursive_s3api() {
  log 6 "delete_bucket_recursive_s3api"
  if [ $# -ne 1 ]; then
    log 2 "'delete_bucket_recursive_s3api' requires bucket name"
    return 1
  fi
  # empty/reset the bucket first, then remove it
  clear_bucket_s3api "$1" || {
    log 2 "error clearing bucket (s3api)"
    return 1
  }
  delete_bucket 's3api' "$1" || {
    log 2 "error deleting bucket"
    return 1
  }
  return 0
}
# params: client, bucket name
# return 0 on success, 1 on error
# Delete everything inside a bucket using the given client.
# params: client (aws|s3api|s3cmd|mc|s3), bucket name
# return 0 on success, 1 on error
delete_bucket_contents() {
  log 6 "delete_bucket_contents"
  if [ $# -ne 2 ]; then
    log 2 "'delete_bucket_contents' requires client, bucket name"
    return 1
  fi
  if [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then
    if ! clear_bucket_s3api "$2"; then
      log 2 "error clearing bucket (s3api)"
      return 1
    fi
  elif [[ $1 == "s3cmd" ]] || [[ $1 == "mc" ]] || [[ $1 == "s3" ]]; then
    # BUG FIX: these branches previously passed the client name ("$1") as the
    # bucket argument, so the intended bucket was never cleared, and the call's
    # exit status was ignored; pass the bucket ("$2") and propagate failure
    if ! delete_bucket_recursive "$1" "$2"; then
      log 2 "error deleting bucket recursively"
      return 1
    fi
  else
    log 2 "unrecognized client: '$1'"
    return 1
  fi
  return 0
}
# check if bucket exists
# param: bucket name
# return 0 for true, 1 for false, 2 for error
# Check whether a bucket exists via the given client.
# params: client, bucket name
# return 0 for true, 1 for false, 2 for error
bucket_exists() {
  if [ $# -ne 2 ]; then
    log 2 "bucket_exists command requires client, bucket name"
    return 2
  fi
  local status=0
  head_bucket "$1" "$2" || status=$?
  # head_bucket: 0 = exists, 1 = missing; anything else is unexpected
  case "$status" in
    0) return 0 ;;
    1) return 1 ;;
    *)
      log 2 "unexpected error checking if bucket exists"
      return 2
      ;;
  esac
}
# param: bucket name
# return 1 for failure, 0 for success
get_object_ownership_rule_and_update_acl() {
@@ -410,126 +250,6 @@ get_object_ownership_rule_and_update_acl() {
fi
}
# params: client, bucket name
# return 0 for success, 1 for error
# tear down a test bucket: in static-bucket mode (RECREATE_BUCKETS=false) the
# bucket must survive, so only its contents and per-test state are reset;
# otherwise the bucket is deleted outright
delete_bucket_or_contents() {
  log 6 "delete_bucket_or_contents"
  if [ $# -ne 2 ]; then
    log 2 "'delete_bucket_or_contents' requires client, bucket name"
    return 1
  fi
  if [[ $RECREATE_BUCKETS == "false" ]]; then
    # empty the bucket, then strip test-applied policy/ACL/upload state
    if ! delete_bucket_contents "$1" "$2"; then
      log 2 "error deleting bucket contents"
      return 1
    fi
    if ! delete_bucket_policy "$1" "$2"; then
      log 2 "error deleting bucket policy"
      return 1
    fi
    if ! get_object_ownership_rule_and_update_acl "$2"; then
      log 2 "error getting object ownership rule and updating ACL"
      return 1
    fi
    if ! abort_all_multipart_uploads "$2"; then
      log 2 "error aborting all multipart uploads"
      return 1
    fi
    # only reset ownership when user-based tests may have changed it
    if [ "$RUN_USERS" == "true" ] && ! reset_bucket_owner "$2"; then
      log 2 "error resetting bucket owner"
      return 1
    fi
    log 5 "bucket contents, policy, ACL deletion success"
    return 0
  fi
  # recreate mode: remove the bucket and everything in it
  if ! delete_bucket_recursive "$1" "$2"; then
    log 2 "error with recursive bucket delete"
    return 1
  fi
  log 5 "bucket deletion success"
  return 0
}
# params: client, bucket name
# return 0 for success, 1 for error
# Tear down a bucket (or its contents) only when the bucket is present.
# params: client, bucket name
# return 0 on success (including bucket absent), 1 on error
delete_bucket_or_contents_if_exists() {
  log 6 "delete_bucket_or_contents_if_exists"
  if [ $# -ne 2 ]; then
    log 2 "'delete_bucket_or_contents_if_exists' requires client, bucket name"
    return 1
  fi
  # nothing to do when the bucket isn't there (or existence can't be verified)
  if ! bucket_exists "$1" "$2"; then
    return 0
  fi
  if ! delete_bucket_or_contents "$1" "$2"; then
    log 2 "error deleting bucket and/or contents"
    return 1
  fi
  log 5 "bucket and/or bucket data deletion success"
  return 0
}
# params: client, bucket name(s)
# return 0 for success, 1 for failure
# Set up each of the named buckets with the given client.
# params: client, bucket name(s)
# return 0 for success, 1 for failure
setup_buckets() {
  if [ $# -lt 2 ]; then
    log 2 "'setup_buckets' command requires client, bucket names"
    return 1
  fi
  local client="$1"
  shift
  local bucket
  for bucket in "$@"; do
    setup_bucket "$client" "$bucket" && continue
    log 2 "error setting up bucket $bucket"
    return 1
  done
  return 0
}
# params: client, bucket name
# return 0 on successful setup, 1 on error
# prepare a clean bucket for a test: clear any pre-existing state, then
# (re)create the bucket when RECREATE_BUCKETS=true; in static mode the
# pre-created bucket is reused
setup_bucket() {
  log 6 "setup_bucket"
  if [ $# -ne 2 ]; then
    log 2 "'setup_bucket' requires client, bucket name"
    return 1
  fi
  # static-bucket mode requires the bucket to already exist
  if ! bucket_exists "$1" "$2" && [[ $RECREATE_BUCKETS == "false" ]]; then
    log 2 "When RECREATE_BUCKETS isn't set to \"true\", buckets should be pre-created by user"
    return 1
  fi
  # clear out (or delete) any leftover bucket state from earlier tests
  if ! delete_bucket_or_contents_if_exists "$1" "$2"; then
    log 2 "error deleting bucket or contents if they exist"
    return 1
  fi
  log 5 "util.setup_bucket: command type: $1, bucket name: $2"
  if [[ $RECREATE_BUCKETS == "true" ]]; then
    if ! create_bucket "$1" "$2"; then
      log 2 "error creating bucket"
      return 1
    fi
  else
    log 5 "skipping bucket re-creation"
  fi
  # s3cmd runs get BucketOwnerPreferred ownership controls
  # NOTE(review): presumably needed for s3cmd ACL behavior -- confirm
  if [[ $1 == "s3cmd" ]]; then
    log 5 "putting bucket ownership controls"
    if bucket_exists "s3cmd" "$2" && ! put_bucket_ownership_controls "$2" "BucketOwnerPreferred"; then
      log 2 "error putting bucket ownership controls"
      return 1
    fi
  fi
  return 0
}
# check if object exists on S3 via gateway
# param: command, object path
# return 0 for true, 1 for false, 2 for error
@@ -659,27 +379,6 @@ remove_insecure_request_warning() {
export parsed_output
}
# check if bucket info can be retrieved via HeadBucket
# param: path of bucket or folder
# return 0 for yes, 1 for no, 2 for error
bucket_is_accessible() {
  if [ $# -ne 1 ]; then
    echo "bucket accessibility check missing bucket name"
    return 2
  fi
  local head_output
  if head_output=$(aws --no-verify-ssl s3api head-bucket --bucket "$1" 2>&1); then
    return 0
  fi
  # a 500 response is treated as "not accessible" rather than a test error
  case "$head_output" in
    *500*) return 1 ;;
  esac
  echo "Error checking bucket accessibility: $head_output"
  return 2
}
# check if object info (etag) is accessible
# param: path of object
# return 0 for yes, 1 for no, 2 for error

View File

@@ -34,22 +34,21 @@ abort_all_multipart_uploads() {
log 5 "Modified upload list: ${modified_upload_list[*]}"
has_uploads=$(echo "${modified_upload_list[*]}" | jq 'has("Uploads")')
if [[ $has_uploads != false ]]; then
lines=$(echo "${modified_upload_list[*]}" | jq -r '.Uploads[] | "--key \(.Key) --upload-id \(.UploadId)"') || lines_result=$?
if [[ $lines_result -ne 0 ]]; then
echo "error getting lines for multipart upload delete: $lines"
if [[ $has_uploads == false ]]; then
return 0
fi
if ! lines=$(echo "${modified_upload_list[*]}" | jq -r '.Uploads[] | "--key \(.Key) --upload-id \(.UploadId)"' 2>&1); then
log 2 "error getting lines for multipart upload delete: $lines"
return 1
fi
log 5 "$lines"
while read -r line; do
# shellcheck disable=SC2086
if ! error=$(aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" $line 2>&1); then
echo "error aborting multipart upload: $error"
return 1
fi
log 5 "$lines"
while read -r line; do
# shellcheck disable=SC2086
error=$(aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" $line 2>&1) || abort_result=$?
if [[ $abort_result -ne 0 ]]; then
echo "error aborting multipart upload: $error"
return 1
fi
done <<< "$lines"
fi
done <<< "$lines"
return 0
}

304
tests/util_bucket.sh Normal file
View File

@@ -0,0 +1,304 @@
#!/usr/bin/env bash
# recursively delete an AWS bucket, dispatching on the client type
# param: client, bucket name
# fail if error (a bucket that is already gone counts as success)
delete_bucket_recursive() {
  log 6 "delete_bucket_recursive"
  if [ $# -ne 2 ]; then
    log 2 "'delete_bucket_recursive' requires client, bucket name"
    return 1
  fi
  local exit_code=0
  local error
  case "$1" in
    s3)
      error=$(aws --no-verify-ssl s3 rb s3://"$2" --force 2>&1) || exit_code="$?"
      ;;
    aws|s3api)
      if ! delete_bucket_recursive_s3api "$2"; then
        log 2 "error deleting bucket recursively (s3api)"
        return 1
      fi
      return 0
      ;;
    s3cmd)
      error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rb s3://"$2" --recursive 2>&1) || exit_code="$?"
      ;;
    mc)
      error=$(delete_bucket_recursive_mc "$2" 2>&1) || exit_code="$?"
      ;;
    *)
      log 2 "invalid client '$1'"
      return 1
      ;;
  esac
  if [ $exit_code -eq 0 ]; then
    return 0
  fi
  # deleting a nonexistent bucket is not an error for cleanup purposes
  if [[ "$error" == *"The specified bucket does not exist"* ]]; then
    return 0
  fi
  log 2 "error deleting bucket recursively: $error"
  return 1
}
# restore bucket to pre-test state (or prep for deletion): log the bucket
# policy (at high log levels), check/disable object lock, add a
# governance-bypass policy for non-direct runs, and empty the bucket
# param: bucket name
# return 0 on success, 1 on error
clear_bucket_s3api() {
  log 6 "clear_bucket_s3api"
  if [ $# -ne 1 ]; then
    log 2 "'clear_bucket_s3api' requires bucket name"
    return 1
  fi
  # policy dump is diagnostic only, so it's gated on log verbosity
  if [[ $LOG_LEVEL_INT -ge 5 ]]; then
    if ! log_bucket_policy "$1"; then
      log 2 "error logging bucket policy"
      return 1
    fi
  fi
  # sets the global 'lock_config_exists' consumed below
  if ! check_object_lock_config "$1"; then
    log 2 "error checking object lock config"
    return 1
  fi
  # governance-mode retention would otherwise block object deletion when not
  # running directly against AWS
  if [[ "$DIRECT" != "true" ]] && ! add_governance_bypass_policy "$1"; then
    log 2 "error adding governance bypass policy"
    return 1
  fi
  if ! list_and_delete_objects "$1"; then
    log 2 "error listing and deleting objects"
    return 1
  fi
  #run check_ownership_rule_and_reset_acl "$1"
  #assert_success "error checking ownership rule and resetting acl"
  # shellcheck disable=SC2154
  if [[ $lock_config_exists == true ]] && ! put_object_lock_configuration_disabled "$1"; then
    log 2 "error disabling object lock config"
    return 1
  fi
  #if ! put_bucket_versioning "s3api" "$1" "Suspended"; then
  #  log 2 "error suspending bucket versioning"
  #  return 1
  #fi
  #if ! change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$1" "$AWS_ACCESS_KEY_ID"; then
  #  log 2 "error changing bucket owner back to root"
  #  return 1
  #fi
  # bug fix: explicit success status; previously the function's status was
  # whatever the last 'if' construct happened to return
  return 0
}
# Empty a bucket via s3api helpers, then delete the bucket itself.
# params: bucket name
# return 0 if able to delete recursively, 1 if not
delete_bucket_recursive_s3api() {
  log 6 "delete_bucket_recursive_s3api"
  if [ $# -ne 1 ]; then
    log 2 "'delete_bucket_recursive_s3api' requires bucket name"
    return 1
  fi
  clear_bucket_s3api "$1" || { log 2 "error clearing bucket (s3api)"; return 1; }
  delete_bucket 's3api' "$1" || { log 2 "error deleting bucket"; return 1; }
  return 0
}
# Delete everything inside a bucket (leaving the bucket itself when possible),
# dispatching on client type.
# params: client, bucket name
# return 0 on success, 1 on error
delete_bucket_contents() {
  log 6 "delete_bucket_contents"
  if [ $# -ne 2 ]; then
    log 2 "'delete_bucket_contents' requires client, bucket name"
    return 1
  fi
  if [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then
    if ! clear_bucket_s3api "$2"; then
      log 2 "error clearing bucket (s3api)"
      return 1
    fi
  elif [[ $1 == "s3cmd" ]] || [[ $1 == "mc" ]] || [[ $1 == "s3" ]]; then
    # bug fix: the bucket name is $2 ($1 is the client) — the original passed
    # the client name as the bucket and ignored the call's return status
    if ! delete_bucket_recursive "$1" "$2"; then
      log 2 "error deleting bucket recursively"
      return 1
    fi
  else
    log 2 "unrecognized client: '$1'"
    return 1
  fi
  return 0
}
# check if bucket exists (via head_bucket)
# params: client, bucket name
# return 0 for true, 1 for false, 2 for error
bucket_exists() {
  if [ $# -ne 2 ]; then
    log 2 "bucket_exists command requires client, bucket name"
    return 2
  fi
  local head_result=0
  head_bucket "$1" "$2" || head_result=$?
  case "$head_result" in
    0) return 0 ;;
    1) return 1 ;;
    *)
      # any status other than 0/1 from head_bucket is an unexpected failure
      log 2 "unexpected error checking if bucket exists"
      return 2
      ;;
  esac
}
# params: client, bucket name
# return 0 for success, 1 for error
bucket_cleanup() {
  log 6 "bucket_cleanup"
  if [ $# -ne 2 ]; then
    log 2 "'bucket_cleanup' requires client, bucket name"
    return 1
  fi
  # when buckets are pre-created (RECREATE_BUCKETS=false), restore the bucket
  # to a clean state instead of deleting it
  if [[ $RECREATE_BUCKETS == "false" ]]; then
    if ! delete_bucket_contents "$1" "$2"; then
      log 2 "error deleting bucket contents"
      return 1
    fi
    if ! delete_bucket_policy "$1" "$2"; then
      log 2 "error deleting bucket policy"
      return 1
    fi
    if ! get_object_ownership_rule_and_update_acl "$2"; then
      log 2 "error getting object ownership rule and updating ACL"
      return 1
    fi
    if ! abort_all_multipart_uploads "$2"; then
      log 2 "error aborting all multipart uploads"
      return 1
    fi
    # ownership reset only matters when multi-user tests are enabled
    if [ "$RUN_USERS" == "true" ] && ! reset_bucket_owner "$2"; then
      log 2 "error resetting bucket owner"
      return 1
    fi
    log 5 "bucket contents, policy, ACL deletion success"
    return 0
  fi
  # otherwise the bucket is disposable: remove it entirely
  if ! delete_bucket_recursive "$1" "$2"; then
    log 2 "error with recursive bucket delete"
    return 1
  fi
  log 5 "bucket deletion success"
  return 0
}
# Run bucket_cleanup, but only when the bucket is actually present.
# params: client, bucket name
# return 0 for success (including "bucket absent"), 1 for error
bucket_cleanup_if_bucket_exists() {
  log 6 "bucket_cleanup_if_bucket_exists"
  if [ $# -ne 2 ]; then
    log 2 "'bucket_cleanup_if_bucket_exists' requires client, bucket name"
    return 1
  fi
  # a missing bucket needs no cleanup
  if ! bucket_exists "$1" "$2"; then
    return 0
  fi
  if ! bucket_cleanup "$1" "$2"; then
    log 2 "error deleting bucket and/or contents"
    return 1
  fi
  log 5 "bucket and/or bucket data deletion success"
  return 0
}
# params: client, bucket name(s)
# return 0 for success, 1 for failure
setup_buckets() {
  if [ $# -lt 2 ]; then
    log 2 "'setup_buckets' command requires client, bucket names"
    return 1
  fi
  # "${@:2}" is every argument after the client: one bucket name each
  for name in "${@:2}"; do
    if ! setup_bucket "$1" "$name"; then
      log 2 "error setting up bucket $name"
      return 1
    fi
  done
  return 0
}
# params: client, bucket name
# return 0 on successful setup, 1 on error
setup_bucket() {
  log 6 "setup_bucket"
  if [ $# -ne 2 ]; then
    log 2 "'setup_bucket' requires client, bucket name"
    return 1
  fi
  # with recreation disabled, a missing bucket is a configuration error
  if ! bucket_exists "$1" "$2" && [[ $RECREATE_BUCKETS == "false" ]]; then
    log 2 "When RECREATE_BUCKETS isn't set to \"true\", buckets should be pre-created by user"
    return 1
  fi
  if ! bucket_cleanup_if_bucket_exists "$1" "$2"; then
    log 2 "error deleting bucket or contents if they exist"
    return 1
  fi
  log 5 "util.setup_bucket: command type: $1, bucket name: $2"
  if [[ $RECREATE_BUCKETS == "true" ]]; then
    if ! create_bucket "$1" "$2"; then
      log 2 "error creating bucket"
      return 1
    fi
  else
    log 5 "skipping bucket re-creation"
  fi
  # presumably s3cmd tests need BucketOwnerPreferred ownership for later
  # ACL operations — confirm against the s3cmd test suite
  if [[ $1 == "s3cmd" ]]; then
    log 5 "putting bucket ownership controls"
    if bucket_exists "s3cmd" "$2" && ! put_bucket_ownership_controls "$2" "BucketOwnerPreferred"; then
      log 2 "error putting bucket ownership controls"
      return 1
    fi
  fi
  return 0
}
# check if bucket info can be retrieved
# param: path of bucket or folder
# return 0 for yes, 1 for no, 2 for error
bucket_is_accessible() {
  if [ $# -ne 1 ]; then
    echo "bucket accessibility check missing bucket name"
    return 2
  fi
  local exit_code=0
  local error
  # HeadBucket succeeds only when the caller can access the bucket
  error=$(aws --no-verify-ssl s3api head-bucket --bucket "$1" 2>&1) || exit_code="$?"
  if [ $exit_code -eq 0 ]; then
    return 0
  fi
  # a 500 response is treated as "not accessible" rather than a test error
  if [[ "$error" == *"500"* ]]; then
    return 1
  fi
  echo "Error checking bucket accessibility: $error"
  return 2
}

116
tests/util_list_parts.sh Normal file
View File

@@ -0,0 +1,116 @@
#!/usr/bin/env bash
# list the parts of a multipart upload via REST and verify the upload ID,
# the part count, and (optionally) each expected ETag in order
# params: bucket, file name, upload ID, expected part count, expected etags...
# return 0 for success, 1 for failure
check_part_list_rest() {
  if [ $# -lt 4 ]; then
    log 2 "'check_part_list_rest' requires bucket, file name, upload ID, expected count, etags"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$3" OUTPUT_FILE="$TEST_FILE_FOLDER/parts.txt" ./tests/rest_scripts/list_parts.sh); then
    log 2 "error listing multipart upload parts: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    log 2 "list-parts returned response code: $result, reply: $(cat "$TEST_FILE_FOLDER/parts.txt")"
    return 1
  fi
  log 5 "parts list: $(cat "$TEST_FILE_FOLDER/parts.txt")"
  if ! parts_upload_id=$(xmllint --xpath '//*[local-name()="UploadId"]/text()' "$TEST_FILE_FOLDER/parts.txt" 2>&1); then
    log 2 "error retrieving UploadId: $parts_upload_id"
    return 1
  fi
  if [ "$parts_upload_id" != "$3" ]; then
    log 2 "expected '$3', UploadId value is '$parts_upload_id'"
    return 1
  fi
  if ! part_count=$(xmllint --xpath 'count(//*[local-name()="Part"])' "$TEST_FILE_FOLDER/parts.txt" 2>&1); then
    log 2 "error retrieving part count: $part_count"
    return 1
  fi
  if [ "$part_count" != "$4" ]; then
    log 2 "expected $4, 'Part' count is '$part_count'"
    return 1
  fi
  # no etags to compare when zero parts are expected
  if [ "$4" == 0 ]; then
    return 0
  fi
  # bug fix: run xmllint and the newline-flattening tr separately — in the
  # original pipeline '2>&1' applied to tr, and the 'if !' tested tr's exit
  # status, so an xmllint failure was silently ignored
  if ! etags=$(xmllint --xpath '//*[local-name()="ETag"]/text()' "$TEST_FILE_FOLDER/parts.txt" 2>&1); then
    log 2 "error retrieving etags: $etags"
    return 1
  fi
  etags=$(printf '%s' "$etags" | tr '\n' ' ')
  read -ra etags_array <<< "$etags"
  # remaining positional params are the expected etags, compared in order
  shift 4
  idx=0
  while [ $# -gt 0 ]; do
    if [ "$1" != "${etags_array[$idx]}" ]; then
      log 2 "etag mismatch (expected '$1', actual ${etags_array[$idx]})"
      return 1
    fi
    ((idx++))
    shift
  done
  return 0
}
# full multipart-upload round trip: create an upload, verify an empty part
# list, upload four parts (verifying the list after each), then complete the
# upload via REST
# params: bucket, key, four part files
# return 0 for success, 1 for failure
upload_check_parts() {
  if [ $# -ne 6 ]; then
    log 2 "'upload_check_parts' requires bucket, key, part list"
    return 1
  fi
  if ! create_upload_and_get_id_rest "$1" "$2"; then
    log 2 "error creating upload"
    return 1
  fi
  # a brand-new upload should report zero parts
  # shellcheck disable=SC2154
  if ! check_part_list_rest "$1" "$2" "$upload_id" 0; then
    log 2 "error checking part list before part upload"
    return 1
  fi
  # accumulated by upload_check_part into the CompleteMultipartUpload XML body
  parts_payload=""
  if ! upload_check_part "$1" "$2" "$upload_id" 1 "$3"; then
    log 2 "error uploading and checking first part"
    return 1
  fi
  # 'etag' is set by upload_part_and_get_etag_rest via upload_check_part
  # shellcheck disable=SC2154
  etag_one=$etag
  if ! upload_check_part "$1" "$2" "$upload_id" 2 "$4" "$etag_one"; then
    log 2 "error uploading and checking second part"
    return 1
  fi
  etag_two=$etag
  if ! upload_check_part "$1" "$2" "$upload_id" 3 "$5" "$etag_one" "$etag_two"; then
    log 2 "error uploading and checking third part"
    return 1
  fi
  etag_three=$etag
  if ! upload_check_part "$1" "$2" "$upload_id" 4 "$6" "$etag_one" "$etag_two" "$etag_three"; then
    log 2 "error uploading and checking fourth part"
    return 1
  fi
  log 5 "PARTS PAYLOAD: $parts_payload"
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$upload_id" PARTS="$parts_payload" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/complete_multipart_upload.sh); then
    log 2 "error completing multipart upload: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    log 2 "complete multipart upload returned code $result: $(cat "$TEST_FILE_FOLDER/result.txt")"
    return 1
  fi
  return 0
}
# upload a single part, append its <Part> element to the global
# 'parts_payload', and verify the part list now shows all expected ETags
# params: bucket, key, upload ID, part number, part file, prior etags...
# return 0 for success, 1 for failure
upload_check_part() {
  if [ $# -lt 5 ]; then
    log 2 "'upload_check_part' requires bucket, key, upload ID, part number, part, etags"
    return 1
  fi
  if ! upload_part_and_get_etag_rest "$1" "$2" "$3" "$4" "$5"; then
    log 2 "error uploading part $4"
    return 1
  fi
  # 'etag' is set by upload_part_and_get_etag_rest
  parts_payload+="<Part><ETag>$etag</ETag><PartNumber>$4</PartNumber></Part>"
  # shellcheck disable=SC2068
  if ! check_part_list_rest "$1" "$2" "$3" "$4" "${@:6}" "$etag"; then
    log 2 "error checking part list after upload $4"
    return 1
  fi
  # explicit success status, consistent with the other helpers in this file
  return 0
}

View File

@@ -218,36 +218,32 @@ create_and_list_multipart_uploads() {
multipart_upload_from_bucket() {
if [ $# -ne 4 ]; then
echo "multipart upload from bucket command missing bucket, copy source, key, and/or part count"
log 2 "multipart upload from bucket command missing bucket, copy source, key, and/or part count"
return 1
fi
split_file "$3" "$4" || split_result=$?
if [[ $split_result -ne 0 ]]; then
echo "error splitting file"
if ! split_file "$3" "$4"; then
log 2 "error splitting file"
return 1
fi
for ((i=0;i<$4;i++)) {
echo "key: $3"
put_object "s3api" "$3-$i" "$1" "$2-$i" || copy_result=$?
if [[ $copy_result -ne 0 ]]; then
echo "error copying object"
if ! put_object "s3api" "$3-$i" "$1" "$2-$i"; then
log 2 "error copying object"
return 1
fi
}
create_multipart_upload "$1" "$2-copy" || upload_result=$?
if [[ $upload_result -ne 0 ]]; then
echo "error running first multpart upload"
if ! create_multipart_upload "$1" "$2-copy"; then
log 2 "error running first multpart upload"
return 1
fi
parts="["
for ((i = 1; i <= $4; i++)); do
upload_part_copy "$1" "$2-copy" "$upload_id" "$2" "$i" || local upload_result=$?
if [[ $upload_result -ne 0 ]]; then
echo "error uploading part $i"
if ! upload_part_copy "$1" "$2-copy" "$upload_id" "$2" "$i"; then
log 2 "error uploading part $i"
return 1
fi
parts+="{\"ETag\": $etag, \"PartNumber\": $i}"
@@ -257,9 +253,8 @@ multipart_upload_from_bucket() {
done
parts+="]"
error=$(aws --no-verify-ssl s3api complete-multipart-upload --bucket "$1" --key "$2-copy" --upload-id "$upload_id" --multipart-upload '{"Parts": '"$parts"'}') || local completed=$?
if [[ $completed -ne 0 ]]; then
echo "Error completing upload: $error"
if ! error=$(aws --no-verify-ssl s3api complete-multipart-upload --bucket "$1" --key "$2-copy" --upload-id "$upload_id" --multipart-upload '{"Parts": '"$parts"'}' 2>&1); then
log 2 "Error completing upload: $error"
return 1
fi
return 0
@@ -580,18 +575,34 @@ create_abort_multipart_upload_rest() {
log 2 "'create_abort_upload_rest' requires bucket, key"
return 1
fi
if ! list_and_check_upload "$1" "$2"; then
log 2 "error listing multipart uploads before creation"
return 1
fi
log 5 "uploads before upload: $(cat "$TEST_FILE_FOLDER/uploads.txt")"
if ! create_upload_and_get_id_rest "$1" "$2"; then
log 2 "error creating upload"
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$upload_id" OUTPUT_FILE="$TEST_FILE_FOLDER/output.txt" ./tests/rest_scripts/abort_multipart_upload.sh); then
if ! list_and_check_upload "$1" "$2" "$upload_id"; then
log 2 "error listing multipart uploads after upload creation"
return 1
fi
log 5 "uploads after upload creation: $(cat "$TEST_FILE_FOLDER/uploads.txt")"
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$upload_id" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/abort_multipart_upload.sh); then
log 2 "error aborting multipart upload: $result"
return 1
fi
if [ "$result" != "204" ]; then
log 2 "expected '204' response, actual was '$result' (error: $(cat "$TEST_FILE_FOLDER"/output.txt)"
log 2 "expected '204' response, actual was '$result' (error: $(cat "$TEST_FILE_FOLDER"/result.txt)"
return 1
fi
log 5 "final uploads: $(cat "$TEST_FILE_FOLDER/uploads.txt")"
if ! list_and_check_upload "$1" "$2"; then
log 2 "error listing multipart uploads after abort"
return 1
fi
return 0
}
multipart_upload_range_too_large() {
@@ -610,3 +621,45 @@ multipart_upload_range_too_large() {
fi
return 0
}
# list multipart uploads via REST and verify the expected state: with two
# params, expect zero uploads; with a third (upload ID), expect exactly one
# upload whose Key and UploadId match
# params: bucket, key, upload ID (optional)
# return 0 for success, 1 for failure
list_and_check_upload() {
  if [ $# -lt 2 ]; then
    log 2 "'list_and_check_upload' requires bucket, key, upload ID (optional)"
    return 1
  fi
  if ! uploads=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/uploads.txt" ./tests/rest_scripts/list_multipart_uploads.sh); then
    # bug fix: report the captured output ($uploads) — the original
    # interpolated $result, which is never set in this function
    log 2 "error listing multipart uploads before upload: $uploads"
    return 1
  fi
  if ! upload_count=$(xmllint --xpath 'count(//*[local-name()="Upload"])' "$TEST_FILE_FOLDER/uploads.txt" 2>&1); then
    log 2 "error retrieving upload count: $upload_count"
    return 1
  fi
  if [[ ( $# == 2 ) && ( $upload_count != 0 ) ]]; then
    log 2 "upload count mismatch (expected 0, actual $upload_count)"
    return 1
  elif [[ ( $# == 3 ) && ( $upload_count != 1 ) ]]; then
    log 2 "upload count mismatch (expected 1, actual $upload_count)"
    return 1
  fi
  # without an upload ID there is nothing further to verify
  if [ $# -eq 2 ]; then
    return 0
  fi
  if ! key=$(xmllint --xpath '//*[local-name()="Key"]/text()' "$TEST_FILE_FOLDER/uploads.txt" 2>&1); then
    log 2 "error retrieving key: $key"
    return 1
  fi
  if [ "$key" != "$2" ]; then
    log 2 "key mismatch (expected '$2', actual '$key')"
    return 1
  fi
  if ! upload_id=$(xmllint --xpath '//*[local-name()="UploadId"]/text()' "$TEST_FILE_FOLDER/uploads.txt" 2>&1); then
    log 2 "error retrieving upload ID: $upload_id"
    return 1
  fi
  if [ "$upload_id" != "$3" ]; then
    log 2 "upload ID mismatch (expected '$3', actual '$upload_id')"
    return 1
  fi
  return 0
}