Compare commits


11 Commits
v0.12 ... v0.13

Author SHA1 Message Date
Ben McClelland
6c56307746 Merge pull request #391 from versity/ben/docker_actions
feat: add docker images to release
2024-02-04 10:21:07 -08:00
Ben McClelland
9765eadd84 feat: add docker images to release 2024-02-04 10:17:15 -08:00
Ben McClelland
4619171f86 Merge pull request #389 from versity/test_cmdline_head_data
Test cmdline head data
2024-02-02 11:35:35 -08:00
Luke McCrone
89b4b615ab test: cmdline tests (acls, get bucket/object info) 2024-02-02 11:32:13 -08:00
Jon Austin
0c056f935b ListObjectsV2 start-after prop (#388)
* fix: Fixes #138, Added StartAfter property in ListObjectsV2 action, added couple of integration tests for ListObjectsV2
2024-02-01 11:04:52 -08:00
Ben McClelland
bf1e2c83d5 Merge pull request #385 from versity/bucket-tagging-actions
Bucket tagging actions
2024-01-31 10:15:22 -08:00
Ben McClelland
68794518af fix: remove special proxy handling for bucket acls in posix backend 2024-01-31 10:10:12 -08:00
jonaustin09
3cce3a5201 feat: Added unit and integration test cases for posix bucket tagging related actions 2024-01-31 10:09:48 -08:00
jonaustin09
d70ea61830 feat: Added the following actions support in posix backend: PutBucketTagging, GetBucketTagging, DeleteBucketTagging 2024-01-31 10:09:48 -08:00
Ben McClelland
9d0cf77b25 Merge pull request #387 from versity/bucket-acl-on-creation
Bucket ACL on bucket creation
2024-01-31 09:55:12 -08:00
jonaustin09
0d3a238ceb feat: Implemented logic to add bucket ACL on bucket creation 2024-01-31 09:49:56 -08:00
16 changed files with 1510 additions and 452 deletions

.github/workflows/docker.yaml (new file, vendored, 45 lines)

@@ -0,0 +1,45 @@
name: Publish Docker image
on:
release:
types: [published]
jobs:
push_to_registries:
name: Push Docker image to multiple registries
runs-on: ubuntu-latest
permissions:
packages: write
contents: read
steps:
- name: Check out the repo
uses: actions/checkout@v4
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: |
versity/versitygw
ghcr.io/${{ github.repository }}
- name: Build and push Docker images
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}


@@ -1,4 +1,4 @@
FROM golang:1.20-alpine
FROM golang:latest
WORKDIR /app
@@ -8,6 +8,7 @@ RUN go mod download
COPY ./ ./
WORKDIR /app/cmd/versitygw
ENV CGO_ENABLED=0
RUN go build -o versitygw
FROM alpine:latest


@@ -65,6 +65,11 @@ type Backend interface {
RestoreObject(context.Context, *s3.RestoreObjectInput) error
SelectObjectContent(ctx context.Context, input *s3.SelectObjectContentInput) func(w *bufio.Writer)
// bucket tagging operations
GetBucketTagging(_ context.Context, bucket string) (map[string]string, error)
PutBucketTagging(_ context.Context, bucket string, tags map[string]string) error
DeleteBucketTagging(_ context.Context, bucket string) error
// object tags operations
GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error)
PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error
@@ -179,6 +184,16 @@ func (BackendUnsupported) SelectObjectContent(ctx context.Context, input *s3.Sel
}
}
func (BackendUnsupported) GetBucketTagging(_ context.Context, bucket string) (map[string]string, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketTagging(_ context.Context, bucket string, tags map[string]string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucketTagging(_ context.Context, bucket string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
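The new Backend methods exchange tags as a plain map[string]string, and (as the posix implementation below shows) a nil map is treated as "remove all tags", which is how DeleteBucketTagging is implemented. A minimal in-memory sketch of just these three methods, to illustrate the contract — the store type, bucket seeding, and error value here are hypothetical and not part of the gateway:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// memTagStore is an illustrative stand-in for a backend's bucket-tagging state.
type memTagStore struct {
	mu   sync.RWMutex
	tags map[string]map[string]string // bucket -> tag key -> tag value
}

// errNoSuchBucket stands in for s3err.GetAPIError(s3err.ErrNoSuchBucket).
var errNoSuchBucket = errors.New("NoSuchBucket")

func (m *memTagStore) PutBucketTagging(_ context.Context, bucket string, tags map[string]string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if _, ok := m.tags[bucket]; !ok {
		return errNoSuchBucket
	}
	if tags == nil { // nil clears the tag set, mirroring the posix backend
		m.tags[bucket] = map[string]string{}
		return nil
	}
	m.tags[bucket] = tags
	return nil
}

func (m *memTagStore) GetBucketTagging(_ context.Context, bucket string) (map[string]string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	t, ok := m.tags[bucket]
	if !ok {
		return nil, errNoSuchBucket
	}
	return t, nil
}

func (m *memTagStore) DeleteBucketTagging(ctx context.Context, bucket string) error {
	return m.PutBucketTagging(ctx, bucket, nil)
}

func main() {
	s := &memTagStore{tags: map[string]map[string]string{"demo-bucket": {}}}
	_ = s.PutBucketTagging(context.Background(), "demo-bucket", map[string]string{"team": "storage"})
	t, _ := s.GetBucketTagging(context.Background(), "demo-bucket")
	fmt.Println(t) // map[team:storage]
}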


@@ -1640,7 +1640,15 @@ func (p *Posix) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (
}
marker := ""
if input.ContinuationToken != nil {
marker = *input.ContinuationToken
if input.StartAfter != nil {
if *input.StartAfter > *input.ContinuationToken {
marker = *input.StartAfter
} else {
marker = *input.ContinuationToken
}
} else {
marker = *input.ContinuationToken
}
}
delim := ""
if input.Delimiter != nil {
@@ -1720,6 +1728,57 @@ func (p *Posix) GetBucketAcl(_ context.Context, input *s3.GetBucketAclInput) ([]
return b, nil
}
func (p *Posix) PutBucketTagging(_ context.Context, bucket string, tags map[string]string) error {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return fmt.Errorf("stat bucket: %w", err)
}
if tags == nil {
err = xattr.Remove(bucket, "user."+tagHdr)
if err != nil {
return fmt.Errorf("remove tags: %w", err)
}
return nil
}
b, err := json.Marshal(tags)
if err != nil {
return fmt.Errorf("marshal tags: %w", err)
}
err = xattr.Set(bucket, "user."+tagHdr, b)
if err != nil {
return fmt.Errorf("set tags: %w", err)
}
return nil
}
func (p *Posix) GetBucketTagging(_ context.Context, bucket string) (map[string]string, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return nil, fmt.Errorf("stat bucket: %w", err)
}
tags, err := p.getXattrTags(bucket, "")
if err != nil {
return nil, err
}
return tags, nil
}
func (p *Posix) DeleteBucketTagging(ctx context.Context, bucket string) error {
return p.PutBucketTagging(ctx, bucket, nil)
}
func (p *Posix) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {

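The list change above means that when only start-after is supplied the posix backend uses it directly as the marker, and when both start-after and a continuation token are present it resumes from whichever key sorts later. From a client's point of view this is the standard ListObjectsV2 start-after parameter; a minimal sketch with the AWS SDK for Go v2 (the endpoint, credentials, and bucket name are hypothetical placeholders for a running gateway):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx) // assumes gateway credentials in the environment
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("http://localhost:7070") // hypothetical gateway address
		o.UsePathStyle = true
	})

	// With objects bar, baz, foo in the bucket, start-after "bar" returns baz and foo.
	out, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
		Bucket:     aws.String("my-bucket"),
		StartAfter: aws.String("bar"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range out.Contents {
		fmt.Println(*obj.Key)
	}
}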

@@ -268,11 +268,11 @@ func getAction(tf testFunc) func(*cli.Context) error {
func extractIntTests() (commands []*cli.Command) {
tests := integration.GetIntTests()
for key, val := range tests {
testKey := key
k := key
testFunc := val
commands = append(commands, &cli.Command{
Name: testKey,
Usage: fmt.Sprintf("Runs %v integration test", testKey),
Name: k,
Usage: fmt.Sprintf("Runs %v integration test", key),
Action: func(ctx *cli.Context) error {
opts := []integration.Option{
integration.WithAccess(awsID),

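The per-iteration copies here (k := key, testFunc := val) are the usual way to keep each generated command independent of the shared range variables when those values are used later, for example inside the command's Action closure: before Go 1.22, range variables are reused across iterations, so capturing them directly is the classic pitfall. A minimal, self-contained illustration (names are illustrative only):

package main

import "fmt"

func main() {
	vals := []string{"a", "b", "c"}

	// Capturing the range variable directly: before Go 1.22 every closure
	// sees the final value; from Go 1.22 on, loop variables are per-iteration.
	var direct []func() string
	for _, v := range vals {
		direct = append(direct, func() string { return v })
	}

	// Copying into a per-iteration local behaves the same on all Go versions.
	var copied []func() string
	for _, v := range vals {
		v := v
		copied = append(copied, func() string { return v })
	}

	for i := range vals {
		fmt.Println(direct[i](), copied[i]())
	}
}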

@@ -26,6 +26,8 @@ func TestCreateBucket(s *S3Conf) {
CreateBucket_invalid_bucket_name(s)
CreateBucket_existing_bucket(s)
CreateBucket_as_user(s)
CreateBucket_default_acl(s)
CreateBucket_non_default_acl(s)
CreateDeleteBucket_success(s)
}
@@ -46,6 +48,23 @@ func TestDeleteBucket(s *S3Conf) {
DeleteBucket_success_status_code(s)
}
func TestPutBucketTagging(s *S3Conf) {
PutBucketTagging_non_existing_bucket(s)
PutBucketTagging_long_tags(s)
PutBucketTagging_success(s)
}
func TestGetBucketTagging(s *S3Conf) {
GetBucketTagging_non_existing_bucket(s)
GetBucketTagging_success(s)
}
func TestDeleteBucketTagging(s *S3Conf) {
DeleteBucketTagging_non_existing_object(s)
DeleteBucketTagging_success_status(s)
DeleteBucketTagging_success(s)
}
func TestPutObject(s *S3Conf) {
PutObject_non_existing_bucket(s)
PutObject_special_chars(s)
@@ -78,6 +97,13 @@ func TestListObjects(s *S3Conf) {
ListObjects_marker_not_from_obj_list(s)
}
func TestListObjectsV2(s *S3Conf) {
ListObjectsV2_start_after(s)
ListObjectsV2_both_start_after_and_continuation_token(s)
ListObjectsV2_start_after_not_in_list(s)
ListObjectsV2_start_after_empty_result(s)
}
func TestDeleteObject(s *S3Conf) {
DeleteObject_non_existing_object(s)
DeleteObject_success(s)
@@ -197,10 +223,14 @@ func TestFullFlow(s *S3Conf) {
TestHeadBucket(s)
TestListBuckets(s)
TestDeleteBucket(s)
TestPutBucketTagging(s)
TestGetBucketTagging(s)
TestDeleteBucketTagging(s)
TestPutObject(s)
TestHeadObject(s)
TestGetObject(s)
TestListObjects(s)
TestListObjectsV2(s)
TestDeleteObject(s)
TestDeleteObjects(s)
TestCopyObject(s)
@@ -228,128 +258,141 @@ type IntTests map[string]func(s *S3Conf) error
func GetIntTests() IntTests {
return IntTests{
"Authentication_empty_auth_header": Authentication_empty_auth_header,
"Authentication_invalid_auth_header": Authentication_invalid_auth_header,
"Authentication_unsupported_signature_version": Authentication_unsupported_signature_version,
"Authentication_malformed_credentials": Authentication_malformed_credentials,
"Authentication_malformed_credentials_invalid_parts": Authentication_malformed_credentials_invalid_parts,
"Authentication_credentials_terminated_string": Authentication_credentials_terminated_string,
"Authentication_credentials_incorrect_service": Authentication_credentials_incorrect_service,
"Authentication_credentials_incorrect_region": Authentication_credentials_incorrect_region,
"Authentication_credentials_invalid_date": Authentication_credentials_invalid_date,
"Authentication_credentials_future_date": Authentication_credentials_future_date,
"Authentication_credentials_past_date": Authentication_credentials_past_date,
"Authentication_credentials_non_existing_access_key": Authentication_credentials_non_existing_access_key,
"Authentication_invalid_signed_headers": Authentication_invalid_signed_headers,
"Authentication_missing_date_header": Authentication_missing_date_header,
"Authentication_invalid_date_header": Authentication_invalid_date_header,
"Authentication_date_mismatch": Authentication_date_mismatch,
"Authentication_incorrect_payload_hash": Authentication_incorrect_payload_hash,
"Authentication_incorrect_md5": Authentication_incorrect_md5,
"Authentication_signature_error_incorrect_secret_key": Authentication_signature_error_incorrect_secret_key,
"CreateBucket_invalid_bucket_name": CreateBucket_invalid_bucket_name,
"CreateBucket_existing_bucket": CreateBucket_existing_bucket,
"CreateBucket_as_user": CreateBucket_as_user,
"CreateDeleteBucket_success": CreateDeleteBucket_success,
"HeadBucket_non_existing_bucket": HeadBucket_non_existing_bucket,
"HeadBucket_success": HeadBucket_success,
"ListBuckets_as_user": ListBuckets_as_user,
"ListBuckets_as_admin": ListBuckets_as_admin,
"ListBuckets_success": ListBuckets_success,
"DeleteBucket_non_existing_bucket": DeleteBucket_non_existing_bucket,
"DeleteBucket_non_empty_bucket": DeleteBucket_non_empty_bucket,
"DeleteBucket_success_status_code": DeleteBucket_success_status_code,
"PutObject_non_existing_bucket": PutObject_non_existing_bucket,
"PutObject_special_chars": PutObject_special_chars,
"PutObject_invalid_long_tags": PutObject_invalid_long_tags,
"PutObject_success": PutObject_success,
"PutObject_invalid_credentials": PutObject_invalid_credentials,
"HeadObject_non_existing_object": HeadObject_non_existing_object,
"HeadObject_success": HeadObject_success,
"GetObject_non_existing_key": GetObject_non_existing_key,
"GetObject_invalid_ranges": GetObject_invalid_ranges,
"GetObject_with_meta": GetObject_with_meta,
"GetObject_success": GetObject_success,
"GetObject_by_range_success": GetObject_by_range_success,
"ListObjects_non_existing_bucket": ListObjects_non_existing_bucket,
"ListObjects_with_prefix": ListObjects_with_prefix,
"ListObject_truncated": ListObject_truncated,
"ListObjects_invalid_max_keys": ListObjects_invalid_max_keys,
"ListObjects_max_keys_0": ListObjects_max_keys_0,
"ListObjects_delimiter": ListObjects_delimiter,
"ListObjects_max_keys_none": ListObjects_max_keys_none,
"ListObjects_marker_not_from_obj_list": ListObjects_marker_not_from_obj_list,
"DeleteObject_non_existing_object": DeleteObject_non_existing_object,
"DeleteObject_success": DeleteObject_success,
"DeleteObject_success_status_code": DeleteObject_success_status_code,
"DeleteObjects_empty_input": DeleteObjects_empty_input,
"DeleteObjects_non_existing_objects": DeleteObjects_non_existing_objects,
"DeleteObjects_success": DeleteObjects_success,
"CopyObject_non_existing_dst_bucket": CopyObject_non_existing_dst_bucket,
"CopyObject_not_owned_source_bucket": CopyObject_not_owned_source_bucket,
"CopyObject_copy_to_itself": CopyObject_copy_to_itself,
"CopyObject_to_itself_with_new_metadata": CopyObject_to_itself_with_new_metadata,
"CopyObject_success": CopyObject_success,
"PutObjectTagging_non_existing_object": PutObjectTagging_non_existing_object,
"PutObjectTagging_long_tags": PutObjectTagging_long_tags,
"PutObjectTagging_success": PutObjectTagging_success,
"GetObjectTagging_non_existing_object": GetObjectTagging_non_existing_object,
"GetObjectTagging_success": GetObjectTagging_success,
"DeleteObjectTagging_non_existing_object": DeleteObjectTagging_non_existing_object,
"DeleteObjectTagging_success_status": DeleteObjectTagging_success_status,
"DeleteObjectTagging_success": DeleteObjectTagging_success,
"CreateMultipartUpload_non_existing_bucket": CreateMultipartUpload_non_existing_bucket,
"CreateMultipartUpload_success": CreateMultipartUpload_success,
"UploadPart_non_existing_bucket": UploadPart_non_existing_bucket,
"UploadPart_invalid_part_number": UploadPart_invalid_part_number,
"UploadPart_non_existing_key": UploadPart_non_existing_key,
"UploadPart_non_existing_mp_upload": UploadPart_non_existing_mp_upload,
"UploadPart_success": UploadPart_success,
"UploadPartCopy_non_existing_bucket": UploadPartCopy_non_existing_bucket,
"UploadPartCopy_incorrect_uploadId": UploadPartCopy_incorrect_uploadId,
"UploadPartCopy_incorrect_object_key": UploadPartCopy_incorrect_object_key,
"UploadPartCopy_invalid_part_number": UploadPartCopy_invalid_part_number,
"UploadPartCopy_invalid_copy_source": UploadPartCopy_invalid_copy_source,
"UploadPartCopy_non_existing_source_bucket": UploadPartCopy_non_existing_source_bucket,
"UploadPartCopy_non_existing_source_object_key": UploadPartCopy_non_existing_source_object_key,
"UploadPartCopy_success": UploadPartCopy_success,
"UploadPartCopy_by_range_invalid_range": UploadPartCopy_by_range_invalid_range,
"UploadPartCopy_greater_range_than_obj_size": UploadPartCopy_greater_range_than_obj_size,
"UploadPartCopy_by_range_success": UploadPartCopy_by_range_success,
"ListParts_incorrect_uploadId": ListParts_incorrect_uploadId,
"ListParts_incorrect_object_key": ListParts_incorrect_object_key,
"ListParts_success": ListParts_success,
"ListMultipartUploads_non_existing_bucket": ListMultipartUploads_non_existing_bucket,
"ListMultipartUploads_empty_result": ListMultipartUploads_empty_result,
"ListMultipartUploads_invalid_max_uploads": ListMultipartUploads_invalid_max_uploads,
"ListMultipartUploads_max_uploads": ListMultipartUploads_max_uploads,
"ListMultipartUploads_incorrect_next_key_marker": ListMultipartUploads_incorrect_next_key_marker,
"ListMultipartUploads_ignore_upload_id_marker": ListMultipartUploads_ignore_upload_id_marker,
"ListMultipartUploads_success": ListMultipartUploads_success,
"AbortMultipartUpload_non_existing_bucket": AbortMultipartUpload_non_existing_bucket,
"AbortMultipartUpload_incorrect_uploadId": AbortMultipartUpload_incorrect_uploadId,
"AbortMultipartUpload_incorrect_object_key": AbortMultipartUpload_incorrect_object_key,
"AbortMultipartUpload_success": AbortMultipartUpload_success,
"AbortMultipartUpload_success_status_code": AbortMultipartUpload_success_status_code,
"CompletedMultipartUpload_non_existing_bucket": CompletedMultipartUpload_non_existing_bucket,
"CompleteMultipartUpload_invalid_part_number": CompleteMultipartUpload_invalid_part_number,
"CompleteMultipartUpload_invalid_ETag": CompleteMultipartUpload_invalid_ETag,
"CompleteMultipartUpload_success": CompleteMultipartUpload_success,
"PutBucketAcl_non_existing_bucket": PutBucketAcl_non_existing_bucket,
"PutBucketAcl_invalid_acl_canned_and_acp": PutBucketAcl_invalid_acl_canned_and_acp,
"PutBucketAcl_invalid_acl_canned_and_grants": PutBucketAcl_invalid_acl_canned_and_grants,
"PutBucketAcl_invalid_acl_acp_and_grants": PutBucketAcl_invalid_acl_acp_and_grants,
"PutBucketAcl_invalid_owner": PutBucketAcl_invalid_owner,
"PutBucketAcl_success_access_denied": PutBucketAcl_success_access_denied,
"PutBucketAcl_success_grants": PutBucketAcl_success_grants,
"PutBucketAcl_success_canned_acl": PutBucketAcl_success_canned_acl,
"PutBucketAcl_success_acp": PutBucketAcl_success_acp,
"GetBucketAcl_non_existing_bucket": GetBucketAcl_non_existing_bucket,
"GetBucketAcl_access_denied": GetBucketAcl_access_denied,
"GetBucketAcl_success": GetBucketAcl_success,
"PutObject_overwrite_dir_obj": PutObject_overwrite_dir_obj,
"PutObject_overwrite_file_obj": PutObject_overwrite_file_obj,
"PutObject_dir_obj_with_data": PutObject_dir_obj_with_data,
"CreateMultipartUpload_dir_obj": CreateMultipartUpload_dir_obj,
"Authentication_empty_auth_header": Authentication_empty_auth_header,
"Authentication_invalid_auth_header": Authentication_invalid_auth_header,
"Authentication_unsupported_signature_version": Authentication_unsupported_signature_version,
"Authentication_malformed_credentials": Authentication_malformed_credentials,
"Authentication_malformed_credentials_invalid_parts": Authentication_malformed_credentials_invalid_parts,
"Authentication_credentials_terminated_string": Authentication_credentials_terminated_string,
"Authentication_credentials_incorrect_service": Authentication_credentials_incorrect_service,
"Authentication_credentials_incorrect_region": Authentication_credentials_incorrect_region,
"Authentication_credentials_invalid_date": Authentication_credentials_invalid_date,
"Authentication_credentials_future_date": Authentication_credentials_future_date,
"Authentication_credentials_past_date": Authentication_credentials_past_date,
"Authentication_credentials_non_existing_access_key": Authentication_credentials_non_existing_access_key,
"Authentication_invalid_signed_headers": Authentication_invalid_signed_headers,
"Authentication_missing_date_header": Authentication_missing_date_header,
"Authentication_invalid_date_header": Authentication_invalid_date_header,
"Authentication_date_mismatch": Authentication_date_mismatch,
"Authentication_incorrect_payload_hash": Authentication_incorrect_payload_hash,
"Authentication_incorrect_md5": Authentication_incorrect_md5,
"Authentication_signature_error_incorrect_secret_key": Authentication_signature_error_incorrect_secret_key,
"CreateBucket_invalid_bucket_name": CreateBucket_invalid_bucket_name,
"CreateBucket_existing_bucket": CreateBucket_existing_bucket,
"CreateBucket_as_user": CreateBucket_as_user,
"CreateDeleteBucket_success": CreateDeleteBucket_success,
"CreateBucket_default_acl": CreateBucket_default_acl,
"CreateBucket_non_default_acl": CreateBucket_non_default_acl,
"HeadBucket_non_existing_bucket": HeadBucket_non_existing_bucket,
"HeadBucket_success": HeadBucket_success,
"ListBuckets_as_user": ListBuckets_as_user,
"ListBuckets_as_admin": ListBuckets_as_admin,
"ListBuckets_success": ListBuckets_success,
"DeleteBucket_non_existing_bucket": DeleteBucket_non_existing_bucket,
"DeleteBucket_non_empty_bucket": DeleteBucket_non_empty_bucket,
"DeleteBucket_success_status_code": DeleteBucket_success_status_code,
"PutBucketTagging_non_existing_bucket": PutBucketTagging_non_existing_bucket,
"PutBucketTagging_long_tags": PutBucketTagging_long_tags,
"PutBucketTagging_success": PutBucketTagging_success,
"GetBucketTagging_non_existing_bucket": GetBucketTagging_non_existing_bucket,
"GetBucketTagging_success": GetBucketTagging_success,
"DeleteBucketTagging_non_existing_object": DeleteBucketTagging_non_existing_object,
"DeleteBucketTagging_success_status": DeleteBucketTagging_success_status,
"DeleteBucketTagging_success": DeleteBucketTagging_success,
"PutObject_non_existing_bucket": PutObject_non_existing_bucket,
"PutObject_special_chars": PutObject_special_chars,
"PutObject_invalid_long_tags": PutObject_invalid_long_tags,
"PutObject_success": PutObject_success,
"HeadObject_non_existing_object": HeadObject_non_existing_object,
"HeadObject_success": HeadObject_success,
"GetObject_non_existing_key": GetObject_non_existing_key,
"GetObject_invalid_ranges": GetObject_invalid_ranges,
"GetObject_with_meta": GetObject_with_meta,
"GetObject_success": GetObject_success,
"GetObject_by_range_success": GetObject_by_range_success,
"ListObjects_non_existing_bucket": ListObjects_non_existing_bucket,
"ListObjects_with_prefix": ListObjects_with_prefix,
"ListObject_truncated": ListObject_truncated,
"ListObjects_invalid_max_keys": ListObjects_invalid_max_keys,
"ListObjects_max_keys_0": ListObjects_max_keys_0,
"ListObjects_delimiter": ListObjects_delimiter,
"ListObjects_max_keys_none": ListObjects_max_keys_none,
"ListObjects_marker_not_from_obj_list": ListObjects_marker_not_from_obj_list,
"ListObjectsV2_start_after": ListObjectsV2_start_after,
"ListObjectsV2_both_start_after_and_continuation_token": ListObjectsV2_both_start_after_and_continuation_token,
"ListObjectsV2_start_after_not_in_list": ListObjectsV2_start_after_not_in_list,
"ListObjectsV2_start_after_empty_result": ListObjectsV2_start_after_empty_result,
"DeleteObject_non_existing_object": DeleteObject_non_existing_object,
"DeleteObject_success": DeleteObject_success,
"DeleteObject_success_status_code": DeleteObject_success_status_code,
"DeleteObjects_empty_input": DeleteObjects_empty_input,
"DeleteObjects_non_existing_objects": DeleteObjects_non_existing_objects,
"DeleteObjects_success": DeleteObjects_success,
"CopyObject_non_existing_dst_bucket": CopyObject_non_existing_dst_bucket,
"CopyObject_not_owned_source_bucket": CopyObject_not_owned_source_bucket,
"CopyObject_copy_to_itself": CopyObject_copy_to_itself,
"CopyObject_to_itself_with_new_metadata": CopyObject_to_itself_with_new_metadata,
"CopyObject_success": CopyObject_success,
"PutObjectTagging_non_existing_object": PutObjectTagging_non_existing_object,
"PutObjectTagging_long_tags": PutObjectTagging_long_tags,
"PutObjectTagging_success": PutObjectTagging_success,
"GetObjectTagging_non_existing_object": GetObjectTagging_non_existing_object,
"GetObjectTagging_success": GetObjectTagging_success,
"DeleteObjectTagging_non_existing_object": DeleteObjectTagging_non_existing_object,
"DeleteObjectTagging_success_status": DeleteObjectTagging_success_status,
"DeleteObjectTagging_success": DeleteObjectTagging_success,
"CreateMultipartUpload_non_existing_bucket": CreateMultipartUpload_non_existing_bucket,
"CreateMultipartUpload_success": CreateMultipartUpload_success,
"UploadPart_non_existing_bucket": UploadPart_non_existing_bucket,
"UploadPart_invalid_part_number": UploadPart_invalid_part_number,
"UploadPart_non_existing_key": UploadPart_non_existing_key,
"UploadPart_non_existing_mp_upload": UploadPart_non_existing_mp_upload,
"UploadPart_success": UploadPart_success,
"UploadPartCopy_non_existing_bucket": UploadPartCopy_non_existing_bucket,
"UploadPartCopy_incorrect_uploadId": UploadPartCopy_incorrect_uploadId,
"UploadPartCopy_incorrect_object_key": UploadPartCopy_incorrect_object_key,
"UploadPartCopy_invalid_part_number": UploadPartCopy_invalid_part_number,
"UploadPartCopy_invalid_copy_source": UploadPartCopy_invalid_copy_source,
"UploadPartCopy_non_existing_source_bucket": UploadPartCopy_non_existing_source_bucket,
"UploadPartCopy_non_existing_source_object_key": UploadPartCopy_non_existing_source_object_key,
"UploadPartCopy_success": UploadPartCopy_success,
"UploadPartCopy_by_range_invalid_range": UploadPartCopy_by_range_invalid_range,
"UploadPartCopy_greater_range_than_obj_size": UploadPartCopy_greater_range_than_obj_size,
"UploadPartCopy_by_range_success": UploadPartCopy_by_range_success,
"ListParts_incorrect_uploadId": ListParts_incorrect_uploadId,
"ListParts_incorrect_object_key": ListParts_incorrect_object_key,
"ListParts_success": ListParts_success,
"ListMultipartUploads_non_existing_bucket": ListMultipartUploads_non_existing_bucket,
"ListMultipartUploads_empty_result": ListMultipartUploads_empty_result,
"ListMultipartUploads_invalid_max_uploads": ListMultipartUploads_invalid_max_uploads,
"ListMultipartUploads_max_uploads": ListMultipartUploads_max_uploads,
"ListMultipartUploads_incorrect_next_key_marker": ListMultipartUploads_incorrect_next_key_marker,
"ListMultipartUploads_ignore_upload_id_marker": ListMultipartUploads_ignore_upload_id_marker,
"ListMultipartUploads_success": ListMultipartUploads_success,
"AbortMultipartUpload_non_existing_bucket": AbortMultipartUpload_non_existing_bucket,
"AbortMultipartUpload_incorrect_uploadId": AbortMultipartUpload_incorrect_uploadId,
"AbortMultipartUpload_incorrect_object_key": AbortMultipartUpload_incorrect_object_key,
"AbortMultipartUpload_success": AbortMultipartUpload_success,
"AbortMultipartUpload_success_status_code": AbortMultipartUpload_success_status_code,
"CompletedMultipartUpload_non_existing_bucket": CompletedMultipartUpload_non_existing_bucket,
"CompleteMultipartUpload_invalid_part_number": CompleteMultipartUpload_invalid_part_number,
"CompleteMultipartUpload_invalid_ETag": CompleteMultipartUpload_invalid_ETag,
"CompleteMultipartUpload_success": CompleteMultipartUpload_success,
"PutBucketAcl_non_existing_bucket": PutBucketAcl_non_existing_bucket,
"PutBucketAcl_invalid_acl_canned_and_acp": PutBucketAcl_invalid_acl_canned_and_acp,
"PutBucketAcl_invalid_acl_canned_and_grants": PutBucketAcl_invalid_acl_canned_and_grants,
"PutBucketAcl_invalid_acl_acp_and_grants": PutBucketAcl_invalid_acl_acp_and_grants,
"PutBucketAcl_invalid_owner": PutBucketAcl_invalid_owner,
"PutBucketAcl_success_access_denied": PutBucketAcl_success_access_denied,
"PutBucketAcl_success_grants": PutBucketAcl_success_grants,
"PutBucketAcl_success_canned_acl": PutBucketAcl_success_canned_acl,
"PutBucketAcl_success_acp": PutBucketAcl_success_acp,
"GetBucketAcl_non_existing_bucket": GetBucketAcl_non_existing_bucket,
"GetBucketAcl_access_denied": GetBucketAcl_access_denied,
"GetBucketAcl_success": GetBucketAcl_success,
"PutObject_overwrite_dir_obj": PutObject_overwrite_dir_obj,
"PutObject_overwrite_file_obj": PutObject_overwrite_file_obj,
"PutObject_dir_obj_with_data": PutObject_dir_obj_with_data,
"CreateMultipartUpload_dir_obj": CreateMultipartUpload_dir_obj,
}
}


@@ -706,6 +706,116 @@ func CreateBucket_existing_bucket(s *S3Conf) error {
return nil
}
func CreateBucket_default_acl(s *S3Conf) error {
testName := "CreateBucket_default_acl"
runF(testName)
bucket := getBucketName()
client := s3.NewFromConfig(s.Config())
err := setup(s, bucket)
if err != nil {
failF("%v: %v", testName, err)
return fmt.Errorf("%v: %w", testName, err)
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := client.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: &bucket})
cancel()
if err != nil {
failF("%v: %v", testName, err)
return fmt.Errorf("%v: %w", testName, err)
}
if *out.Owner.ID != s.awsID {
failF("%v: expected bucket owner to be %v, instead got %v", testName, s.awsID, *out.Owner.ID)
return fmt.Errorf("%v: expected bucket owner to be %v, instead got %v", testName, s.awsID, *out.Owner.ID)
}
if len(out.Grants) != 0 {
failF("%v: expected grants to be empty instead got %v", testName, len(out.Grants))
return fmt.Errorf("%v: expected grants to be empty instead got %v", testName, len(out.Grants))
}
err = teardown(s, bucket)
if err != nil {
failF("%v: %v", err)
return fmt.Errorf("%v: %w", testName, err)
}
passF(testName)
return nil
}
func CreateBucket_non_default_acl(s *S3Conf) error {
testName := "CreateBucket_non_default_acl"
runF(testName)
err := createUsers(s, []user{
{"grt1", "grt1secret", "user"},
{"grt2", "grt2secret", "user"},
{"grt3", "grt3secret", "user"},
})
if err != nil {
failF("%v: %v", err)
return fmt.Errorf("%v: %w", testName, err)
}
grants := []types.Grant{
{
Grantee: &types.Grantee{
ID: getPtr("grt1"),
},
Permission: types.PermissionFullControl,
},
{
Grantee: &types.Grantee{
ID: getPtr("grt2"),
},
Permission: types.PermissionReadAcp,
},
{
Grantee: &types.Grantee{
ID: getPtr("grt3"),
},
Permission: types.PermissionWrite,
},
}
bucket := getBucketName()
client := s3.NewFromConfig(s.Config())
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err = client.CreateBucket(ctx, &s3.CreateBucketInput{Bucket: &bucket, GrantFullControl: getPtr("grt1"), GrantReadACP: getPtr("grt2"), GrantWrite: getPtr("grt3")})
cancel()
if err != nil {
failF("%v: %v", err)
return fmt.Errorf("%v: %w", testName, err)
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
out, err := client.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: &bucket})
cancel()
if err != nil {
failF("%v: %v", testName, err)
return fmt.Errorf("%v: %w", testName, err)
}
if !compareGrants(out.Grants, grants) {
failF("%v: expected bucket acl grants to be %v, instead got %v", testName, grants, out.Grants)
return fmt.Errorf("%v: expected bucket acl grants to be %v, instead got %v", testName, grants, out.Grants)
}
err = teardown(s, bucket)
if err != nil {
failF("%v: %v", err)
return fmt.Errorf("%v: %w", testName, err)
}
passF(testName)
return nil
}
func HeadBucket_non_existing_bucket(s *S3Conf) error {
testName := "HeadBucket_non_existing_bucket"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -1018,6 +1128,215 @@ func DeleteBucket_success_status_code(s *S3Conf) error {
return nil
}
func PutBucketTagging_non_existing_bucket(s *S3Conf) error {
testName := "PutBucketTagging_non_existing_bucket"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: getPtr(getBucketName()),
Tagging: &types.Tagging{TagSet: []types.Tag{}},
})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrNoSuchBucket)); err != nil {
return err
}
return nil
})
}
func PutBucketTagging_long_tags(s *S3Conf) error {
testName := "PutBucketTagging_long_tags"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
tagging := types.Tagging{TagSet: []types.Tag{{Key: getPtr(genRandString(200)), Value: getPtr("val")}}}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: &bucket,
Tagging: &tagging})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidTag)); err != nil {
return err
}
tagging = types.Tagging{TagSet: []types.Tag{{Key: getPtr("key"), Value: getPtr(genRandString(300))}}}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: &bucket,
Tagging: &tagging})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidTag)); err != nil {
return err
}
return nil
})
}
func PutBucketTagging_success(s *S3Conf) error {
testName := "PutBucketTagging_success"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
tagging := types.Tagging{TagSet: []types.Tag{{Key: getPtr("key1"), Value: getPtr("val2")}, {Key: getPtr("key2"), Value: getPtr("val2")}}}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: &bucket,
Tagging: &tagging})
cancel()
if err != nil {
return err
}
return nil
})
}
func GetBucketTagging_non_existing_bucket(s *S3Conf) error {
testName := "GetBucketTagging_non_existing_object"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
Bucket: getPtr(getBucketName()),
})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrNoSuchBucket)); err != nil {
return err
}
return nil
})
}
func GetBucketTagging_success(s *S3Conf) error {
testName := "GetBucketTagging_success"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
tagging := types.Tagging{TagSet: []types.Tag{{Key: getPtr("key1"), Value: getPtr("val2")}, {Key: getPtr("key2"), Value: getPtr("val2")}}}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: &bucket,
Tagging: &tagging})
cancel()
if err != nil {
return err
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
Bucket: &bucket,
})
cancel()
if err != nil {
return nil
}
if !areTagsSame(out.TagSet, tagging.TagSet) {
return fmt.Errorf("expected %v instead got %v", tagging.TagSet, out.TagSet)
}
return nil
})
}
func DeleteBucketTagging_non_existing_object(s *S3Conf) error {
testName := "DeleteBucketTagging_non_existing_object"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.DeleteBucketTagging(ctx, &s3.DeleteBucketTaggingInput{
Bucket: getPtr(getBucketName()),
})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrNoSuchBucket)); err != nil {
return err
}
return nil
})
}
func DeleteBucketTagging_success_status(s *S3Conf) error {
testName := "DeleteBucketTagging_success_status"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
tagging := types.Tagging{
TagSet: []types.Tag{
{
Key: getPtr("Hello"),
Value: getPtr("World"),
},
},
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: &bucket,
Tagging: &tagging,
})
cancel()
if err != nil {
return err
}
req, err := createSignedReq(http.MethodDelete, s.endpoint, fmt.Sprintf("%v?tagging", bucket), s.awsID, s.awsSecret, "s3", s.awsRegion, nil, time.Now())
if err != nil {
return err
}
client := http.Client{
Timeout: shortTimeout,
}
resp, err := client.Do(req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
return fmt.Errorf("expected response status to be %v, instead got %v", http.StatusNoContent, resp.StatusCode)
}
return nil
})
}
func DeleteBucketTagging_success(s *S3Conf) error {
testName := "DeleteBucketTagging_success"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
tagging := types.Tagging{TagSet: []types.Tag{{Key: getPtr("key1"), Value: getPtr("val2")}, {Key: getPtr("key2"), Value: getPtr("val2")}}}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: &bucket,
Tagging: &tagging})
cancel()
if err != nil {
return err
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.DeleteBucketTagging(ctx, &s3.DeleteBucketTaggingInput{
Bucket: &bucket,
})
cancel()
if err != nil {
return nil
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
Bucket: &bucket,
})
cancel()
if err != nil {
return nil
}
if len(out.TagSet) > 0 {
return fmt.Errorf("expected empty tag set, instead got %v", out.TagSet)
}
return nil
})
}
func PutObject_non_existing_bucket(s *S3Conf) error {
testName := "PutObject_non_existing_bucket"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -1598,6 +1917,138 @@ func ListObjects_marker_not_from_obj_list(s *S3Conf) error {
})
}
func ListObjectsV2_start_after(s *S3Conf) error {
testName := "ListObjectsV2_start_after"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
err := putObjects(s3client, []string{"foo", "bar", "baz"}, bucket)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
Bucket: &bucket,
StartAfter: getPtr("bar"),
})
cancel()
if err != nil {
return err
}
if !compareObjects([]string{"baz", "foo"}, out.Contents) {
return fmt.Errorf("expected output to be %v, instead got %v", []string{"baz", "foo"}, out.Contents)
}
return nil
})
}
func ListObjectsV2_both_start_after_and_continuation_token(s *S3Conf) error {
testName := "ListObjectsV2_both_start_after_and_continuation_token"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
err := putObjects(s3client, []string{"foo", "bar", "baz", "quxx"}, bucket)
if err != nil {
return err
}
var maxKeys int32 = 1
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
Bucket: &bucket,
MaxKeys: &maxKeys,
})
cancel()
if err != nil {
return err
}
if out.IsTruncated == nil || !*out.IsTruncated {
return fmt.Errorf("expected output to be truncated")
}
if *out.MaxKeys != maxKeys {
return fmt.Errorf("expected max-keys to be %v, instead got %v", maxKeys, out.MaxKeys)
}
if *out.NextContinuationToken != "bar" {
return fmt.Errorf("expected next-marker to be baz, instead got %v", *out.NextContinuationToken)
}
if !compareObjects([]string{"bar"}, out.Contents) {
return fmt.Errorf("unexpected output for list objects with max-keys")
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
resp, err := s3client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
Bucket: &bucket,
ContinuationToken: out.NextContinuationToken,
StartAfter: getPtr("baz"),
})
cancel()
if err != nil {
return err
}
if !compareObjects([]string{"foo", "quxx"}, resp.Contents) {
return fmt.Errorf("unexpected output for list objects with max-keys")
}
return nil
})
}
func ListObjectsV2_start_after_not_in_list(s *S3Conf) error {
testName := "ListObjectsV2_start_after_not_in_list"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
err := putObjects(s3client, []string{"foo", "bar", "baz", "quxx"}, bucket)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
Bucket: &bucket,
StartAfter: getPtr("blah"),
})
cancel()
if err != nil {
return err
}
if !compareObjects([]string{"foo", "quxx"}, out.Contents) {
return fmt.Errorf("expected output to be %v, instead got %v", []string{"foo", "quxx"}, out.Contents)
}
return nil
})
}
func ListObjectsV2_start_after_empty_result(s *S3Conf) error {
testName := "ListObjectsV2_start_after_empty_result"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
err := putObjects(s3client, []string{"foo", "bar", "baz", "quxx"}, bucket)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
Bucket: &bucket,
StartAfter: getPtr("zzz"),
})
cancel()
if err != nil {
return err
}
if len(out.Contents) != 0 {
return fmt.Errorf("expected empty output instead got %v", out.Contents)
}
return nil
})
}
func DeleteObject_non_existing_object(s *S3Conf) error {
testName := "DeleteObject_non_existing_object"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {


@@ -44,6 +44,9 @@ var _ backend.Backend = &BackendMock{}
// DeleteBucketFunc: func(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error {
// panic("mock out the DeleteBucket method")
// },
// DeleteBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) error {
// panic("mock out the DeleteBucketTagging method")
// },
// DeleteObjectFunc: func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error {
// panic("mock out the DeleteObject method")
// },
@@ -56,6 +59,9 @@ var _ backend.Backend = &BackendMock{}
// GetBucketAclFunc: func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error) {
// panic("mock out the GetBucketAcl method")
// },
// GetBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) (map[string]string, error) {
// panic("mock out the GetBucketTagging method")
// },
// GetObjectFunc: func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
// panic("mock out the GetObject method")
// },
@@ -95,6 +101,9 @@ var _ backend.Backend = &BackendMock{}
// PutBucketAclFunc: func(contextMoqParam context.Context, bucket string, data []byte) error {
// panic("mock out the PutBucketAcl method")
// },
// PutBucketTaggingFunc: func(contextMoqParam context.Context, bucket string, tags map[string]string) error {
// panic("mock out the PutBucketTagging method")
// },
// PutObjectFunc: func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (string, error) {
// panic("mock out the PutObject method")
// },
@@ -150,6 +159,9 @@ type BackendMock struct {
// DeleteBucketFunc mocks the DeleteBucket method.
DeleteBucketFunc func(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error
// DeleteBucketTaggingFunc mocks the DeleteBucketTagging method.
DeleteBucketTaggingFunc func(contextMoqParam context.Context, bucket string) error
// DeleteObjectFunc mocks the DeleteObject method.
DeleteObjectFunc func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error
@@ -162,6 +174,9 @@ type BackendMock struct {
// GetBucketAclFunc mocks the GetBucketAcl method.
GetBucketAclFunc func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error)
// GetBucketTaggingFunc mocks the GetBucketTagging method.
GetBucketTaggingFunc func(contextMoqParam context.Context, bucket string) (map[string]string, error)
// GetObjectFunc mocks the GetObject method.
GetObjectFunc func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error)
@@ -201,6 +216,9 @@ type BackendMock struct {
// PutBucketAclFunc mocks the PutBucketAcl method.
PutBucketAclFunc func(contextMoqParam context.Context, bucket string, data []byte) error
// PutBucketTaggingFunc mocks the PutBucketTagging method.
PutBucketTaggingFunc func(contextMoqParam context.Context, bucket string, tags map[string]string) error
// PutObjectFunc mocks the PutObject method.
PutObjectFunc func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (string, error)
@@ -283,6 +301,13 @@ type BackendMock struct {
// DeleteBucketInput is the deleteBucketInput argument value.
DeleteBucketInput *s3.DeleteBucketInput
}
// DeleteBucketTagging holds details about calls to the DeleteBucketTagging method.
DeleteBucketTagging []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
}
// DeleteObject holds details about calls to the DeleteObject method.
DeleteObject []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -313,6 +338,13 @@ type BackendMock struct {
// GetBucketAclInput is the getBucketAclInput argument value.
GetBucketAclInput *s3.GetBucketAclInput
}
// GetBucketTagging holds details about calls to the GetBucketTagging method.
GetBucketTagging []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
}
// GetObject holds details about calls to the GetObject method.
GetObject []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -410,6 +442,15 @@ type BackendMock struct {
// Data is the data argument value.
Data []byte
}
// PutBucketTagging holds details about calls to the PutBucketTagging method.
PutBucketTagging []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
// Tags is the tags argument value.
Tags map[string]string
}
// PutObject holds details about calls to the PutObject method.
PutObject []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -477,10 +518,12 @@ type BackendMock struct {
lockCreateBucket sync.RWMutex
lockCreateMultipartUpload sync.RWMutex
lockDeleteBucket sync.RWMutex
lockDeleteBucketTagging sync.RWMutex
lockDeleteObject sync.RWMutex
lockDeleteObjectTagging sync.RWMutex
lockDeleteObjects sync.RWMutex
lockGetBucketAcl sync.RWMutex
lockGetBucketTagging sync.RWMutex
lockGetObject sync.RWMutex
lockGetObjectAcl sync.RWMutex
lockGetObjectAttributes sync.RWMutex
@@ -494,6 +537,7 @@ type BackendMock struct {
lockListObjectsV2 sync.RWMutex
lockListParts sync.RWMutex
lockPutBucketAcl sync.RWMutex
lockPutBucketTagging sync.RWMutex
lockPutObject sync.RWMutex
lockPutObjectAcl sync.RWMutex
lockPutObjectTagging sync.RWMutex
@@ -765,6 +809,42 @@ func (mock *BackendMock) DeleteBucketCalls() []struct {
return calls
}
// DeleteBucketTagging calls DeleteBucketTaggingFunc.
func (mock *BackendMock) DeleteBucketTagging(contextMoqParam context.Context, bucket string) error {
if mock.DeleteBucketTaggingFunc == nil {
panic("BackendMock.DeleteBucketTaggingFunc: method is nil but Backend.DeleteBucketTagging was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
}
mock.lockDeleteBucketTagging.Lock()
mock.calls.DeleteBucketTagging = append(mock.calls.DeleteBucketTagging, callInfo)
mock.lockDeleteBucketTagging.Unlock()
return mock.DeleteBucketTaggingFunc(contextMoqParam, bucket)
}
// DeleteBucketTaggingCalls gets all the calls that were made to DeleteBucketTagging.
// Check the length with:
//
// len(mockedBackend.DeleteBucketTaggingCalls())
func (mock *BackendMock) DeleteBucketTaggingCalls() []struct {
ContextMoqParam context.Context
Bucket string
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
}
mock.lockDeleteBucketTagging.RLock()
calls = mock.calls.DeleteBucketTagging
mock.lockDeleteBucketTagging.RUnlock()
return calls
}
// DeleteObject calls DeleteObjectFunc.
func (mock *BackendMock) DeleteObject(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error {
if mock.DeleteObjectFunc == nil {
@@ -913,6 +993,42 @@ func (mock *BackendMock) GetBucketAclCalls() []struct {
return calls
}
// GetBucketTagging calls GetBucketTaggingFunc.
func (mock *BackendMock) GetBucketTagging(contextMoqParam context.Context, bucket string) (map[string]string, error) {
if mock.GetBucketTaggingFunc == nil {
panic("BackendMock.GetBucketTaggingFunc: method is nil but Backend.GetBucketTagging was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
}
mock.lockGetBucketTagging.Lock()
mock.calls.GetBucketTagging = append(mock.calls.GetBucketTagging, callInfo)
mock.lockGetBucketTagging.Unlock()
return mock.GetBucketTaggingFunc(contextMoqParam, bucket)
}
// GetBucketTaggingCalls gets all the calls that were made to GetBucketTagging.
// Check the length with:
//
// len(mockedBackend.GetBucketTaggingCalls())
func (mock *BackendMock) GetBucketTaggingCalls() []struct {
ContextMoqParam context.Context
Bucket string
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
}
mock.lockGetBucketTagging.RLock()
calls = mock.calls.GetBucketTagging
mock.lockGetBucketTagging.RUnlock()
return calls
}
// GetObject calls GetObjectFunc.
func (mock *BackendMock) GetObject(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
if mock.GetObjectFunc == nil {
@@ -1393,6 +1509,46 @@ func (mock *BackendMock) PutBucketAclCalls() []struct {
return calls
}
// PutBucketTagging calls PutBucketTaggingFunc.
func (mock *BackendMock) PutBucketTagging(contextMoqParam context.Context, bucket string, tags map[string]string) error {
if mock.PutBucketTaggingFunc == nil {
panic("BackendMock.PutBucketTaggingFunc: method is nil but Backend.PutBucketTagging was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
Tags map[string]string
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
Tags: tags,
}
mock.lockPutBucketTagging.Lock()
mock.calls.PutBucketTagging = append(mock.calls.PutBucketTagging, callInfo)
mock.lockPutBucketTagging.Unlock()
return mock.PutBucketTaggingFunc(contextMoqParam, bucket, tags)
}
// PutBucketTaggingCalls gets all the calls that were made to PutBucketTagging.
// Check the length with:
//
// len(mockedBackend.PutBucketTaggingCalls())
func (mock *BackendMock) PutBucketTaggingCalls() []struct {
ContextMoqParam context.Context
Bucket string
Tags map[string]string
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
Tags map[string]string
}
mock.lockPutBucketTagging.RLock()
calls = mock.calls.PutBucketTagging
mock.lockPutBucketTagging.RUnlock()
return calls
}
// PutObject calls PutObjectFunc.
func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (string, error) {
if mock.PutObjectFunc == nil {


@@ -16,7 +16,6 @@ package controllers
import (
"bytes"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
@@ -237,6 +236,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
bucket := ctx.Params("bucket")
prefix := ctx.Query("prefix")
cToken := ctx.Query("continuation-token")
sAfter := ctx.Query("start-after")
marker := ctx.Query("marker")
delimiter := ctx.Query("delimiter")
maxkeysStr := ctx.Query("max-keys")
@@ -247,6 +247,24 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
isRoot := ctx.Locals("isRoot").(bool)
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
if ctx.Request().URI().QueryArgs().Has("tagging") {
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "GetBucketTagging", BucketOwner: parsedAcl.Owner})
}
tags, err := c.be.GetBucketTagging(ctx.Context(), bucket)
if err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "GetBucketTagging", BucketOwner: parsedAcl.Owner})
}
resp := s3response.Tagging{TagSet: s3response.TagSet{Tags: []s3response.Tag{}}}
for key, val := range tags {
resp.TagSet.Tags = append(resp.TagSet.Tags, s3response.Tag{Key: key, Value: val})
}
return SendXMLResponse(ctx, resp, nil, &MetaOpts{Logger: c.logger, Action: "GetBucketTagging", BucketOwner: parsedAcl.Owner})
}
if ctx.Request().URI().QueryArgs().Has("acl") {
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ_ACP", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "GetBucketAcl", BucketOwner: parsedAcl.Owner})
@@ -302,6 +320,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
ContinuationToken: &cToken,
Delimiter: &delimiter,
MaxKeys: &maxkeys,
StartAfter: &sAfter,
})
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "ListObjectsV2", BucketOwner: parsedAcl.Owner})
}
@@ -341,6 +360,32 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
ctx.Locals("account").(auth.Account),
ctx.Locals("isRoot").(bool)
if ctx.Request().URI().QueryArgs().Has("tagging") {
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
var bucketTagging s3response.Tagging
err := xml.Unmarshal(ctx.Body(), &bucketTagging)
if err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), &MetaOpts{Logger: c.logger, Action: "PutBucketTagging", BucketOwner: parsedAcl.Owner})
}
tags := make(map[string]string, len(bucketTagging.TagSet.Tags))
for _, tag := range bucketTagging.TagSet.Tags {
if len(tag.Key) > 128 || len(tag.Value) > 256 {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidTag), &MetaOpts{Logger: c.logger, Action: "PutBucketTagging", BucketOwner: parsedAcl.Owner})
}
tags[tag.Key] = tag.Value
}
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "PutBucketTagging", BucketOwner: parsedAcl.Owner})
}
err = c.be.PutBucketTagging(ctx.Context(), bucket, tags)
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "PutBucketTagging", BucketOwner: parsedAcl.Owner})
}
grants := grantFullControl + grantRead + grantReadACP + granWrite + grantWriteACP
if ctx.Request().URI().QueryArgs().Has("acl") {
@@ -408,16 +453,29 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidBucketName), &MetaOpts{Logger: c.logger, Action: "CreateBucket"})
}
if acl != "" && grants != "" {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), &MetaOpts{Logger: c.logger, Action: "PutBucketAcl", BucketOwner: acct.Access})
}
defACL := auth.ACL{ACL: "private", Owner: acct.Access, Grantees: []auth.Grantee{}}
jsonACL, err := json.Marshal(defACL)
updAcl, err := auth.UpdateACL(&s3.PutBucketAclInput{
GrantFullControl: &grantFullControl,
GrantRead: &grantRead,
GrantReadACP: &grantReadACP,
GrantWrite: &granWrite,
GrantWriteACP: &grantWriteACP,
AccessControlPolicy: &types.AccessControlPolicy{Owner: &types.Owner{ID: &acct.Access}},
ACL: types.BucketCannedACL(acl),
}, defACL, c.iam)
if err != nil {
return SendResponse(ctx, fmt.Errorf("marshal acl: %w", err), &MetaOpts{Logger: c.logger, Action: "CreateBucket", BucketOwner: acct.Access})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "CreateBucket", BucketOwner: acct.Access})
}
err = c.be.CreateBucket(ctx.Context(), &s3.CreateBucketInput{
Bucket: &bucket,
ObjectOwnership: types.ObjectOwnership(acct.Access),
}, jsonACL)
}, updAcl)
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "CreateBucket", BucketOwner: acct.Access})
}
@@ -702,6 +760,15 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
bucket, acct, isRoot, parsedAcl := ctx.Params("bucket"), ctx.Locals("account").(auth.Account), ctx.Locals("isRoot").(bool), ctx.Locals("parsedAcl").(auth.ACL)
if ctx.Request().URI().QueryArgs().Has("tagging") {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBucketTagging", BucketOwner: parsedAcl.Owner})
}
err := c.be.DeleteBucketTagging(ctx.Context(), bucket)
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBucketTagging", BucketOwner: parsedAcl.Owner, Status: http.StatusNoContent})
}
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBucket", BucketOwner: parsedAcl.Owner})
}
@@ -709,7 +776,7 @@ func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
err := c.be.DeleteBucket(ctx.Context(), &s3.DeleteBucketInput{
Bucket: &bucket,
})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBucket", BucketOwner: parsedAcl.Owner, Status: 204})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBucket", BucketOwner: parsedAcl.Owner, Status: http.StatusNoContent})
}
func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {

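With these handlers in place, bucket tagging is reachable through the standard S3 API: GET/PUT/DELETE on a bucket with the ?tagging query parameter, with tag keys longer than 128 characters or values longer than 256 characters rejected as ErrInvalidTag, and DeleteBucketTagging answered with 204 No Content. A round-trip sketch using the AWS SDK for Go v2 (endpoint and bucket name are placeholders; credentials for the gateway are assumed to be configured):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("http://localhost:7070") // hypothetical gateway address
		o.UsePathStyle = true
	})
	bucket := aws.String("my-bucket")

	// PUT ?tagging
	_, err = client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
		Bucket: bucket,
		Tagging: &types.Tagging{TagSet: []types.Tag{
			{Key: aws.String("organization"), Value: aws.String("marketing")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}

	// GET ?tagging
	out, err := client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{Bucket: bucket})
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range out.TagSet {
		fmt.Println(*tag.Key, "=", *tag.Value)
	}

	// DELETE ?tagging (the gateway responds 204 No Content)
	if _, err := client.DeleteBucketTagging(ctx, &s3.DeleteBucketTaggingInput{Bucket: bucket}); err != nil {
		log.Fatal(err)
	}
}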

@@ -343,6 +343,9 @@ func TestS3ApiController_ListActions(t *testing.T) {
ListObjectsFunc: func(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
return &s3.ListObjectsOutput{}, nil
},
GetBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) (map[string]string, error) {
return map[string]string{}, nil
},
},
}
@@ -365,6 +368,9 @@ func TestS3ApiController_ListActions(t *testing.T) {
ListObjectsFunc: func(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
},
GetBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) (map[string]string, error) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
},
},
}
appError := fiber.New()
@@ -384,6 +390,24 @@ func TestS3ApiController_ListActions(t *testing.T) {
wantErr bool
statusCode int
}{
{
name: "Get-bucket-tagging-non-existing-bucket",
app: appError,
args: args{
req: httptest.NewRequest(http.MethodGet, "/my-bucket?tagging", nil),
},
wantErr: false,
statusCode: 404,
},
{
name: "Get-bucket-tagging-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodGet, "/my-bucket?tagging", nil),
},
wantErr: false,
statusCode: 200,
},
{
name: "Get-bucket-acl-success",
app: app,
@@ -492,6 +516,17 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
</AccessControlPolicy>
`
tagBody := `
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
<Tag>
<Key>organization</Key>
<Value>marketing</Value>
</Tag>
</TagSet>
</Tagging>
`
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
@@ -503,6 +538,9 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
CreateBucketFunc: func(context.Context, *s3.CreateBucketInput, []byte) error {
return nil
},
PutBucketTaggingFunc: func(contextMoqParam context.Context, bucket string, tags map[string]string) error {
return nil
},
},
}
// Mock ctx.Locals
@@ -543,6 +581,24 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
wantErr bool
statusCode int
}{
{
name: "Put-bucket-tagging-invalid-body",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/my-bucket?tagging", nil),
},
wantErr: false,
statusCode: 400,
},
{
name: "Put-bucket-tagging-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/my-bucket?tagging", strings.NewReader(tagBody)),
},
wantErr: false,
statusCode: 200,
},
{
name: "Put-bucket-acl-invalid-acl",
app: app,
@@ -869,12 +925,12 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
DeleteBucketFunc: func(context.Context, *s3.DeleteBucketInput) error {
return nil
},
DeleteBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) error {
return nil
},
},
}
@@ -904,6 +960,15 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
wantErr: false,
statusCode: 204,
},
{
name: "Delete-bucket-tagging-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodDelete, "/my-bucket?tagging", nil),
},
wantErr: false,
statusCode: 204,
},
}
for _, tt := range tests {
resp, err := tt.app.Test(tt.args.req)


@@ -38,7 +38,7 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger) fiber.Handler {
if ctx.Method() == http.MethodPatch {
return ctx.Next()
}
if len(pathParts) == 2 && pathParts[1] != "" && ctx.Method() == http.MethodPut && !ctx.Request().URI().QueryArgs().Has("acl") {
if len(pathParts) == 2 && pathParts[1] != "" && ctx.Method() == http.MethodPut && !ctx.Request().URI().QueryArgs().Has("acl") && !ctx.Request().URI().QueryArgs().Has("tagging") {
if err := auth.IsAdmin(acct, isRoot); err != nil {
return controllers.SendXMLResponse(ctx, nil, err, &controllers.MetaOpts{Logger: logger, Action: "CreateBucket"})
}
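With this middleware change, a bucket-level PUT that carries the tagging query parameter is no longer treated as a CreateBucket request (which goes through the admin check above); it falls through to the tagging branch added to PutActions, presumably gated by that handler's own ACL check, mirroring DeleteBucketTagging. A sketch, assuming a hypothetical non-admin AWS CLI profile named tester that holds WRITE access on the bucket:
# previously rejected for non-admin accounts; now handled as a tagging request
aws s3api put-bucket-tagging --bucket my-bucket --profile tester \
  --tagging 'TagSet=[{Key=organization,Value=marketing}]'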

tests/posix_tests.sh Normal file → Executable file

@@ -1,105 +1,11 @@
#!/usr/bin/env bats
source ./tests/tests.sh
# check if object exists both on S3 and locally
# param: object path
# 0 for yes, 1 for no, 2 for error
object_exists_remote_and_local() {
if [ $# -ne 1 ]; then
echo "object existence check requires single name parameter"
return 2
fi
object_exists "$1" || local exist_result=$?
if [[ $exist_result -eq 2 ]]; then
echo "Error checking if object exists"
return 2
fi
if [[ $exist_result -eq 1 ]]; then
echo "Error: object doesn't exist remotely"
return 1
fi
if [[ ! -e "$LOCAL_FOLDER"/"$1" ]]; then
echo "Error: object doesn't exist locally"
return 1
fi
return 0
}
# check if object doesn't exist both on S3 and locally
# param: object path
# return 0 for doesn't exist, 1 for still exists, 2 for error
object_not_exists_remote_and_local() {
if [ $# -ne 1 ]; then
echo "object non-existence check requires single name parameter"
return 2
fi
object_exists "$1" || local exist_result=$?
if [[ $exist_result -eq 2 ]]; then
echo "Error checking if object doesn't exist"
return 2
fi
if [[ $exist_result -eq 0 ]]; then
echo "Error: object exists remotely"
return 1
fi
if [[ -e "$LOCAL_FOLDER"/"$1" ]]; then
echo "Error: object exists locally"
return 1
fi
return 0
}
# check if a bucket doesn't exist both on S3 and on gateway
# param: bucket name
# return: 0 for doesn't exist, 1 for does, 2 for error
bucket_not_exists_remote_and_local() {
if [ $# -ne 1 ]; then
echo "bucket existence check requires single name parameter"
return 2
fi
bucket_exists "$1" || local exist_result=$?
if [[ $exist_result -eq 2 ]]; then
echo "Error checking if bucket exists"
return 2
fi
if [[ $exist_result -eq 0 ]]; then
echo "Error: bucket exists remotely"
return 1
fi
if [[ -e "$LOCAL_FOLDER"/"$1" ]]; then
echo "Error: bucket exists locally"
return 1
fi
return 0
}
# check if a bucket exists both on S3 and on gateway
# param: bucket name
# return: 0 for yes, 1 for no, 2 for error
bucket_exists_remote_and_local() {
if [ $# -ne 1 ]; then
echo "bucket existence check requires single name parameter"
return 2
fi
bucket_exists "$1" || local exist_result=$?
if [[ $exist_result -eq 2 ]]; then
echo "Error checking if bucket exists"
return 2
fi
if [[ $exist_result -eq 1 ]]; then
echo "Error: bucket doesn't exist remotely"
return 1
fi
if [[ ! -e "$LOCAL_FOLDER"/"$1" ]]; then
echo "Error: bucket doesn't exist locally"
return 1
fi
return 0
}
source ./tests/setup.sh
source ./tests/util.sh
source ./tests/util_posix.sh
# test that changes to local folders and files are reflected on S3
@test test_local_creation_deletion {
@test "test_local_creation_deletion" {
local bucket_name="versity-gwtest-put-object-test"
local object_name="test-object"
@@ -137,3 +43,52 @@ bucket_exists_remote_and_local() {
[[ $bucket_deleted -eq 0 ]] || fail "Failed bucket deletion check"
}
# test head-object command
@test "test_head_object" {
local bucket_name="versity-gwtest-head-object"
local object_name="object-one"
touch "$object_name"
if [ -e "$LOCAL_FOLDER"/$bucket_name/$object_name ]; then
chmod 755 "$LOCAL_FOLDER"/$bucket_name/$object_name
fi
check_and_create_bucket $bucket_name || local created=$?
[[ $created -eq 0 ]] || fail "Error creating bucket"
put_object "$object_name" "$bucket_name"/"$object_name" || local result="$?"
[[ $result -eq 0 ]] || fail "Error adding object one"
chmod 000 "$LOCAL_FOLDER"/$bucket_name/$object_name
sleep 1
object_is_accessible $bucket_name $object_name || local accessible=$?
[[ $accessible -eq 1 ]] || fail "Object should be inaccessible"
chmod 755 "$LOCAL_FOLDER"/$bucket_name/$object_name
sleep 1
object_is_accessible $bucket_name $object_name || local accessible_two=$?
[[ $accessible_two -eq 0 ]] || fail "Object should be accessible"
delete_object $bucket_name/$object_name
delete_bucket $bucket_name
}
# check info, accessibility of bucket
@test "test_get_bucket_info" {
local bucket_name="versity-gwtest-get-bucket-info"
if [ -e "$LOCAL_FOLDER"/$bucket_name ]; then
chmod 755 "$LOCAL_FOLDER"/$bucket_name
sleep 1
else
create_bucket $bucket_name || local created=$?
[[ $created -eq 0 ]] || fail "Error creating bucket"
fi
chmod 000 "$LOCAL_FOLDER"/$bucket_name
sleep 1
bucket_is_accessible $bucket_name || local accessible=$?
[[ $accessible -eq 1 ]] || fail "Bucket should be inaccessible"
chmod 755 "$LOCAL_FOLDER"/$bucket_name
sleep 1
bucket_is_accessible $bucket_name || local accessible_two=$?
[[ $accessible_two -eq 0 ]] || fail "Bucket should be accessible"
delete_bucket $bucket_name
}


@@ -1,221 +1,7 @@
#!/usr/bin/env bats
source ./tests/tests.sh
# create an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
create_bucket() {
if [ $# -ne 1 ]; then
echo "create bucket missing bucket name"
return 1
fi
local exit_code=0
local error
error=$(aws s3 mb s3://"$1" 2>&1) || exit_code=$?
if [ $exit_code -ne 0 ]; then
echo "error creating bucket: $error"
return 1
fi
return 0
}
# delete an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
delete_bucket() {
if [ $# -ne 1 ]; then
echo "delete bucket missing bucket name"
return 1
fi
local exit_code=0
local error
error=$(aws s3 rb s3://"$1" 2>&1) || exit_code="$?"
if [ $exit_code -ne 0 ]; then
if [[ "$error" == *"The specified bucket does not exist"* ]]; then
return 0
else
echo "error deleting bucket: $error"
return 1
fi
fi
return 0
}
# check if bucket exists
# param: bucket name
# return 0 for true, 1 for false, 2 for error
bucket_exists() {
if [ $# -ne 1 ]; then
echo "bucket exists check missing bucket name"
return 2
fi
local exit_code=0
local error
error=$(aws s3 ls s3://"$1" 2>&1) || exit_code="$?"
echo "Exit code: $exit_code, error: $error"
if [ $exit_code -ne 0 ]; then
if [[ "$error" == *"The specified bucket does not exist"* ]] || [[ "$error" == *"Access Denied"* ]]; then
return 1
else
echo "error checking if bucket exists: $error"
return 2
fi
fi
return 0
}
# create bucket if it doesn't exist
# param: bucket name
# return 0 for success, 1 for failure
check_and_create_bucket() {
if [ $# -ne 1 ]; then
echo "bucket creation function requires bucket name"
return 1
fi
local exists_result
bucket_exists "$1" || exists_result=$?
if [[ $exists_result -eq 2 ]]; then
echo "Bucket existence check error"
return 1
fi
local create_result
if [[ $exists_result -eq 1 ]]; then
create_bucket "$1" || create_result=$?
if [[ $create_result -ne 0 ]]; then
echo "Error creating bucket"
return 1
fi
fi
return 0
}
# check if object exists on S3 via gateway
# param: object path
# return 0 for true, 1 for false, 2 for error
object_exists() {
if [ $# -ne 1 ]; then
echo "object exists check missing object name"
return 2
fi
local exit_code=0
local error
error=$(aws s3 ls s3://"$1" 2>&1) || exit_code="$?"
if [ $exit_code -ne 0 ]; then
if [[ "$error" == "" ]]; then
return 1
else
echo "error checking if object exists: $error"
return 2
fi
fi
return 0
}
# add object to versitygw
# params: source file, destination copy location
# return 0 for success, 1 for failure
put_object() {
if [ $# -ne 2 ]; then
echo "put object command requires source, destination"
return 1
fi
local exit_code=0
local error
error=$(aws s3 cp "$1" s3://"$2" 2>&1) || exit_code=$?
if [ $exit_code -ne 0 ]; then
echo "error copying object to bucket: $error"
return 1
fi
return 0
}
# add object to versitygw if it doesn't exist
# params: source file, destination copy location
# return 0 for success or already exists, 1 for failure
check_and_put_object() {
if [ $# -ne 2 ]; then
echo "check and put object function requires source, destination"
return 1
fi
object_exists "$2" || local exists_result=$?
if [ $exists_result -eq 2 ]; then
echo "error checking if object exists"
return 1
fi
if [ $exists_result -eq 1 ]; then
put_object "$1" "$2" || local put_result=$?
if [ $put_result -ne 0 ]; then
echo "error adding object"
return 1
fi
fi
return 0
}
# delete object from versitygw
# param: object location
# return 0 for success, 1 for failure
delete_object() {
if [ $# -ne 1 ]; then
echo "delete object command requires object parameter"
return 1
fi
local exit_code=0
local error
error=$(aws s3 rm s3://"$1" 2>&1) || exit_code=$?
if [ $exit_code -ne 0 ]; then
echo "error deleting object: $error"
return 1
fi
return 0
}
# list buckets on versitygw
# no params
# export bucket_array (bucket names) on success, return 1 for failure
list_buckets() {
local exit_code=0
local output
output=$(aws s3 ls 2>&1) || exit_code=$?
if [ $exit_code -ne 0 ]; then
echo "error listing buckets: $output"
return 1
fi
bucket_array=()
while IFS= read -r line; do
bucket_name=$(echo "$line" | awk '{print $NF}')
bucket_array+=("$bucket_name")
done <<< "$output"
export bucket_array
}
# list objects on versitygw, in bucket or folder
# param: path of bucket or folder
# export object_array (object names) on success, return 1 for failure
list_objects() {
if [ $# -ne 1 ]; then
echo "list objects command requires bucket or folder"
return 1
fi
local exit_code=0
local output
output=$(aws s3 ls s3://"$1" 2>&1) || exit_code=$?
if [ $exit_code -ne 0 ]; then
echo "error listing objects: $output"
return 1
fi
object_array=()
while IFS= read -r line; do
object_name=$(echo "$line" | awk '{print $NF}')
object_array+=("$object_name")
done <<< "$output"
export object_array
}
source ./tests/setup.sh
source ./tests/util.sh
# test creation and deletion of bucket on versitygw
@test "create_delete_bucket_test" {
@@ -320,7 +106,7 @@ list_objects() {
}
# test listing a bucket's objects on versitygw
@test test_list_objects {
@test "test_list_objects" {
bucket_name="versity-gwtest-list-object"
object_one="test-file-one"
@@ -351,3 +137,48 @@ list_objects() {
delete_bucket $bucket_name
rm $object_one $object_two
}
# test ability to retrieve bucket ACLs
@test "test_get_bucket_acl" {
local bucket_name="versity-gwtest-get-bucket-acl"
check_and_create_bucket $bucket_name || local created=$?
[[ $created -eq 0 ]] || fail "Error creating bucket"
get_bucket_acl $bucket_name || local result=$?
[[ $result -eq 0 ]] || fail "Error retrieving acl"
id=$(echo "$acl" | jq '.Owner.ID')
[[ $id == '"'"$AWS_ACCESS_KEY_ID"'"' ]] || fail "Acl mismatch"
delete_bucket $bucket_name
}
# test ability to delete multiple objects from bucket
@test "test_delete_objects" {
local bucket_name="versity-gwtest-delete-objects"
local object_one="test-file-one"
local object_two="test-file-two"
touch "$object_one" "$object_two"
check_and_create_bucket $bucket_name || local result_one=$?
[[ $result_one -eq 0 ]] || fail "Error creating bucket"
put_object "$object_one" "$bucket_name"/"$object_one" || local result_two=$?
[[ $result_two -eq 0 ]] || fail "Error adding object one"
put_object "$object_two" "$bucket_name"/"$object_two" || local result_three=$?
[[ $result_three -eq 0 ]] || fail "Error adding object two"
error=$(aws s3api delete-objects --bucket $bucket_name --delete '{
"Objects": [
{"Key": "test-file-one"},
{"Key": "test-file-two"}
]
}') || local result=$?
[[ $result -eq 0 ]] || fail "Error deleting objects: $error"
object_exists "$bucket_name"/"$object_one" || local exists_one=$?
[[ $exists_one -eq 1 ]] || fail "Object one not deleted"
object_exists "$bucket_name"/"$object_two" || local exists_two=$?
[[ $exists_two -eq 1 ]] || fail "Object two not deleted"
delete_bucket $bucket_name
rm "$object_one" "$object_two"
}

tests/util.sh Normal file

@@ -0,0 +1,273 @@
# create an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
create_bucket() {
if [ $# -ne 1 ]; then
echo "create bucket missing bucket name"
return 1
fi
local exit_code=0
local error
error=$(aws s3 mb s3://"$1" 2>&1) || exit_code=$?
if [ $exit_code -ne 0 ]; then
echo "error creating bucket: $error"
return 1
fi
return 0
}
# delete an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
delete_bucket() {
if [ $# -ne 1 ]; then
echo "delete bucket missing bucket name"
return 1
fi
local exit_code=0
local error
error=$(aws s3 rb s3://"$1" 2>&1) || exit_code="$?"
if [ $exit_code -ne 0 ]; then
if [[ "$error" == *"The specified bucket does not exist"* ]]; then
return 0
else
echo "error deleting bucket: $error"
return 1
fi
fi
return 0
}
# check if bucket exists
# param: bucket name
# return 0 for true, 1 for false, 2 for error
bucket_exists() {
if [ $# -ne 1 ]; then
echo "bucket exists check missing bucket name"
return 2
fi
local exit_code=0
local error
error=$(aws s3 ls s3://"$1" 2>&1) || exit_code="$?"
echo "Exit code: $exit_code, error: $error"
if [ $exit_code -ne 0 ]; then
if [[ "$error" == *"The specified bucket does not exist"* ]] || [[ "$error" == *"Access Denied"* ]]; then
return 1
else
echo "error checking if bucket exists: $error"
return 2
fi
fi
return 0
}
# create bucket if it doesn't exist
# param: bucket name
# return 0 for success, 1 for failure
check_and_create_bucket() {
if [ $# -ne 1 ]; then
echo "bucket creation function requires bucket name"
return 1
fi
local exists_result
bucket_exists "$1" || exists_result=$?
if [[ $exists_result -eq 2 ]]; then
echo "Bucket existence check error"
return 1
fi
local create_result
if [[ $exists_result -eq 1 ]]; then
create_bucket "$1" || create_result=$?
if [[ $create_result -ne 0 ]]; then
echo "Error creating bucket"
return 1
fi
fi
return 0
}
# check if object exists on S3 via gateway
# param: object path
# return 0 for true, 1 for false, 2 for error
object_exists() {
if [ $# -ne 1 ]; then
echo "object exists check missing object name"
return 2
fi
local exit_code=0
local error
error=$(aws s3 ls s3://"$1" 2>&1) || exit_code="$?"
if [ $exit_code -ne 0 ]; then
if [[ "$error" == "" ]]; then
return 1
else
echo "error checking if object exists: $error"
return 2
fi
fi
return 0
}
# add object to versitygw
# params: source file, destination copy location
# return 0 for success, 1 for failure
put_object() {
if [ $# -ne 2 ]; then
echo "put object command requires source, destination"
return 1
fi
local exit_code=0
local error
error=$(aws s3 cp "$1" s3://"$2" 2>&1) || exit_code=$?
if [ $exit_code -ne 0 ]; then
echo "error copying object to bucket: $error"
return 1
fi
return 0
}
# add object to versitygw if it doesn't exist
# params: source file, destination copy location
# return 0 for success or already exists, 1 for failure
check_and_put_object() {
if [ $# -ne 2 ]; then
echo "check and put object function requires source, destination"
return 1
fi
object_exists "$2" || local exists_result=$?
if [ $exists_result -eq 2 ]; then
echo "error checking if object exists"
return 1
fi
if [ $exists_result -eq 1 ]; then
put_object "$1" "$2" || local put_result=$?
if [ $put_result -ne 0 ]; then
echo "error adding object"
return 1
fi
fi
return 0
}
# delete object from versitygw
# param: object location
# return 0 for success, 1 for failure
delete_object() {
if [ $# -ne 1 ]; then
echo "delete object command requires object parameter"
return 1
fi
local exit_code=0
local error
error=$(aws s3 rm s3://"$1" 2>&1) || exit_code=$?
if [ $exit_code -ne 0 ]; then
echo "error deleting object: $error"
return 1
fi
return 0
}
# list buckets on versitygw
# no params
# export bucket_array (bucket names) on success, return 1 for failure
list_buckets() {
local exit_code=0
local output
output=$(aws s3 ls 2>&1) || exit_code=$?
if [ $exit_code -ne 0 ]; then
echo "error listing buckets: $output"
return 1
fi
bucket_array=()
while IFS= read -r line; do
bucket_name=$(echo "$line" | awk '{print $NF}')
bucket_array+=("$bucket_name")
done <<< "$output"
export bucket_array
}
# list objects on versitygw, in bucket or folder
# param: path of bucket or folder
# export object_array (object names) on success, return 1 for failure
list_objects() {
if [ $# -ne 1 ]; then
echo "list objects command requires bucket or folder"
return 1
fi
local exit_code=0
local output
output=$(aws s3 ls s3://"$1" 2>&1) || exit_code=$?
if [ $exit_code -ne 0 ]; then
echo "error listing objects: $output"
return 1
fi
object_array=()
while IFS= read -r line; do
object_name=$(echo "$line" | awk '{print $NF}')
object_array+=("$object_name")
done <<< "$output"
export object_array
}
# check if bucket info can be retrieved
# param: bucket name
# return 0 for yes, 1 for no, 2 for error
bucket_is_accessible() {
if [ $# -ne 1 ]; then
echo "bucket accessibility check missing bucket name"
return 2
fi
local exit_code=0
local error
error=$(aws s3api head-bucket --bucket "$1" 2>&1) || exit_code="$?"
if [ $exit_code -eq 0 ]; then
return 0
fi
if [[ "$error" == *"500"* ]]; then
return 1
fi
echo "Error checking bucket accessibility: $error"
return 2
}
# check if object info (etag) is accessible
# params: bucket name, object key
# return 0 for yes, 1 for no, 2 for error
object_is_accessible() {
if [ $# -ne 2 ]; then
echo "object accessibility check missing bucket and/or key"
return 2
fi
local exit_code=0
object_data=$(aws s3api head-object --bucket "$1" --key "$2" 2>&1) || exit_code="$?"
if [ $exit_code -ne 0 ]; then
echo "Error obtaining object data: $object_data"
return 2
fi
etag=$(echo "$object_data" | jq '.ETag')
if [[ "$etag" == '""' ]]; then
return 1
fi
return 0
}
# get bucket acl
# param: bucket path
# export acl for success, return 1 for error
get_bucket_acl() {
if [ $# -ne 1 ]; then
echo "bucket ACL command missing bucket name"
return 1
fi
local exit_code=0
acl=$(aws s3api get-bucket-acl --bucket "$1" 2>&1) || exit_code="$?"
if [ $exit_code -ne 0 ]; then
echo "Error: $acl"
return 1
fi
export acl
}
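A short sketch of how these new helpers compose inside a bats test (hypothetical bucket name; fail comes from the helpers already sourced by the test files):
@test "util_helpers_demo" {
  local bucket_name="versity-gwtest-util-demo"
  check_and_create_bucket "$bucket_name" || fail "Error creating bucket"
  bucket_is_accessible "$bucket_name" || fail "Bucket should be accessible"
  get_bucket_acl "$bucket_name" || fail "Error retrieving acl"
  # get_bucket_acl exports $acl, so the owner can be pulled out with jq
  owner_id=$(echo "$acl" | jq '.Owner.ID')
  [[ $owner_id == '"'"$AWS_ACCESS_KEY_ID"'"' ]] || fail "Acl mismatch"
  delete_bucket "$bucket_name"
}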

tests/util_posix.sh Normal file

@@ -0,0 +1,97 @@
# check if object exists both on S3 and locally
# param: object path
# 0 for yes, 1 for no, 2 for error
object_exists_remote_and_local() {
if [ $# -ne 1 ]; then
echo "object existence check requires single name parameter"
return 2
fi
object_exists "$1" || local exist_result=$?
if [[ $exist_result -eq 2 ]]; then
echo "Error checking if object exists"
return 2
fi
if [[ $exist_result -eq 1 ]]; then
echo "Error: object doesn't exist remotely"
return 1
fi
if [[ ! -e "$LOCAL_FOLDER"/"$1" ]]; then
echo "Error: object doesn't exist locally"
return 1
fi
return 0
}
# check if object doesn't exist both on S3 and locally
# param: object path
# return 0 for doesn't exist, 1 for still exists, 2 for error
object_not_exists_remote_and_local() {
if [ $# -ne 1 ]; then
echo "object non-existence check requires single name parameter"
return 2
fi
object_exists "$1" || local exist_result=$?
if [[ $exist_result -eq 2 ]]; then
echo "Error checking if object doesn't exist"
return 2
fi
if [[ $exist_result -eq 0 ]]; then
echo "Error: object exists remotely"
return 1
fi
if [[ -e "$LOCAL_FOLDER"/"$1" ]]; then
echo "Error: object exists locally"
return 1
fi
return 0
}
# check if a bucket doesn't exist both on S3 and on gateway
# param: bucket name
# return: 0 for doesn't exist, 1 for does, 2 for error
bucket_not_exists_remote_and_local() {
if [ $# -ne 1 ]; then
echo "bucket existence check requires single name parameter"
return 2
fi
bucket_exists "$1" || local exist_result=$?
if [[ $exist_result -eq 2 ]]; then
echo "Error checking if bucket exists"
return 2
fi
if [[ $exist_result -eq 0 ]]; then
echo "Error: bucket exists remotely"
return 1
fi
if [[ -e "$LOCAL_FOLDER"/"$1" ]]; then
echo "Error: bucket exists locally"
return 1
fi
return 0
}
# check if a bucket exists both on S3 and on gateway
# param: bucket name
# return: 0 for yes, 1 for no, 2 for error
bucket_exists_remote_and_local() {
if [ $# -ne 1 ]; then
echo "bucket existence check requires single name parameter"
return 2
fi
bucket_exists "$1" || local exist_result=$?
if [[ $exist_result -eq 2 ]]; then
echo "Error checking if bucket exists"
return 2
fi
if [[ $exist_result -eq 1 ]]; then
echo "Error: bucket doesn't exist remotely"
return 1
fi
if [[ ! -e "$LOCAL_FOLDER"/"$1" ]]; then
echo "Error: bucket doesn't exist locally"
return 1
fi
return 0
}
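For reference, a sketch of how the paired remote-and-local checks above might be driven from a posix test (hypothetical names; assumes LOCAL_FOLDER points at the posix backend's root directory, as in the existing tests, and that a short sleep is enough for changes to settle):
@test "posix_helpers_demo" {
  local bucket_name="versity-gwtest-posix-demo"
  local object_name="demo-object"
  touch "$object_name"
  check_and_create_bucket "$bucket_name" || fail "Error creating bucket"
  bucket_exists_remote_and_local "$bucket_name" || fail "Bucket missing remotely or locally"
  put_object "$object_name" "$bucket_name"/"$object_name" || fail "Error adding object"
  sleep 1
  object_exists_remote_and_local "$bucket_name"/"$object_name" || fail "Object missing remotely or locally"
  delete_object "$bucket_name"/"$object_name"
  sleep 1
  object_not_exists_remote_and_local "$bucket_name"/"$object_name" || fail "Object still present"
  delete_bucket "$bucket_name"
  rm "$object_name"
}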