feat: Implements object meta properties for CopyObject in azure and posix backends.

Fixes #998
Closes #1125
Closes #1126
Closes #1127

Implements object meta properties (Content-Disposition, Content-Language, Content-Encoding, Cache-Control, Expires) and tagging based on the directives (metadata, tagging) in CopyObject in the posix and azure backends. The properties/tagging are copied from the source object if the "COPY" directive is provided, and replaced otherwise.

Changes the object copy approach in azure: instead of using the `CopyFromURL` method from the azure sdk, it first downloads the source object and then creates a new destination object, making it possible to compare and store the meta properties.
This commit is contained in:
niksis02
2025-03-17 19:20:11 +04:00
committed by Ben McClelland
parent 38768a92b8
commit cfb2d6d87d
13 changed files with 819 additions and 61 deletions

View File

@@ -457,7 +457,7 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.G
ExpiresString: blobDownloadResponse.Metadata[string(keyExpires)],
ETag: (*string)(blobDownloadResponse.ETag),
LastModified: blobDownloadResponse.LastModified,
Metadata: parseAzMetadata(blobDownloadResponse.Metadata),
Metadata: parseAndFilterAzMetadata(blobDownloadResponse.Metadata),
TagCount: &tagcount,
ContentRange: blobDownloadResponse.ContentRange,
Body: blobDownloadResponse.Body,
@@ -519,7 +519,7 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
ExpiresString: resp.Metadata[string(keyExpires)],
ETag: (*string)(resp.ETag),
LastModified: resp.LastModified,
Metadata: parseAzMetadata(resp.Metadata),
Metadata: parseAndFilterAzMetadata(resp.Metadata),
StorageClass: types.StorageClassStandard,
}
@@ -767,41 +767,158 @@ func (az *Azure) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput
}, nil
}
func (az *Azure) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
bclient, err := az.getBlobClient(*input.Bucket, *input.Key)
func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
dstClient, err := az.getBlobClient(*input.Bucket, *input.Key)
if err != nil {
return nil, err
}
if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource {
props, err := bclient.GetProperties(ctx, nil)
if input.MetadataDirective != types.MetadataDirectiveReplace {
return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
}
// Set object meta http headers
res, err := dstClient.SetHTTPHeaders(ctx, blob.HTTPHeaders{
BlobCacheControl: input.CacheControl,
BlobContentDisposition: input.ContentDisposition,
BlobContentEncoding: input.ContentEncoding,
BlobContentLanguage: input.ContentLanguage,
BlobContentType: input.ContentType,
}, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
mdmap := props.Metadata
if isMetaSame(mdmap, input.Metadata) {
return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
meta := input.Metadata
if meta == nil {
meta = make(map[string]string)
}
// Embed "Expires" in object metadata
if getString(input.Expires) != "" {
meta[string(keyExpires)] = *input.Expires
}
// Set object metadata
_, err = dstClient.SetMetadata(ctx, parseMetadata(meta), nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
// Set object legal hold
if input.ObjectLockLegalHoldStatus != "" {
err = az.PutObjectLegalHold(ctx, *input.Bucket, *input.Key, "", input.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn)
if err != nil {
return nil, azureErrToS3Err(err)
}
}
// Set object retention
if input.ObjectLockMode != "" && input.ObjectLockRetainUntilDate != nil {
retention := s3response.PutObjectRetentionInput{
Mode: types.ObjectLockRetentionMode(input.ObjectLockMode),
RetainUntilDate: s3response.AmzDate{
Time: *input.ObjectLockRetainUntilDate,
},
}
retParsed, err := json.Marshal(retention)
if err != nil {
return nil, fmt.Errorf("parse object retention: %w", err)
}
err = az.PutObjectRetention(ctx, *input.Bucket, *input.Key, "", true, retParsed)
if err != nil {
return nil, azureErrToS3Err(err)
}
}
// Set object Tagging, if tagging directive is "REPLACE"
if input.TaggingDirective == types.TaggingDirectiveReplace {
tags, err := parseTags(input.Tagging)
if err != nil {
return nil, err
}
_, err = dstClient.SetTags(ctx, tags, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
}
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{
LastModified: res.LastModified,
ETag: (*string)(res.ETag),
},
}, nil
}
tags, err := parseTags(input.Tagging)
srcBucket, srcObj, _, err := backend.ParseCopySource(*input.CopySource)
if err != nil {
return nil, err
}
resp, err := bclient.CopyFromURL(ctx, az.serviceURL+"/"+*input.CopySource, &blob.CopyFromURLOptions{
BlobTags: tags,
Metadata: parseMetadata(input.Metadata),
})
// Get the source object
downloadResp, err := az.client.DownloadStream(ctx, srcBucket, srcObj, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
pInput := s3response.PutObjectInput{
Body: downloadResp.Body,
Bucket: input.Bucket,
Key: input.Key,
ContentLength: downloadResp.ContentLength,
ContentType: input.ContentType,
ContentEncoding: input.ContentEncoding,
ContentDisposition: input.ContentDisposition,
ContentLanguage: input.ContentLanguage,
CacheControl: input.CacheControl,
Expires: input.Expires,
Metadata: input.Metadata,
ObjectLockRetainUntilDate: input.ObjectLockRetainUntilDate,
ObjectLockMode: input.ObjectLockMode,
ObjectLockLegalHoldStatus: input.ObjectLockLegalHoldStatus,
}
if input.MetadataDirective == types.MetadataDirectiveCopy {
// Expires is in downloadResp.Metadata
pInput.Expires = nil
pInput.CacheControl = downloadResp.CacheControl
pInput.ContentDisposition = downloadResp.ContentDisposition
pInput.ContentEncoding = downloadResp.ContentEncoding
pInput.ContentLanguage = downloadResp.ContentLanguage
pInput.ContentType = downloadResp.ContentType
pInput.Metadata = parseAzMetadata(downloadResp.Metadata)
}
if input.TaggingDirective == types.TaggingDirectiveReplace {
pInput.Tagging = input.Tagging
}
// Create the destination object
resp, err := az.PutObject(ctx, pInput)
if err != nil {
return nil, err
}
// Copy the object tagging, if tagging directive is "COPY"
if input.TaggingDirective == types.TaggingDirectiveCopy {
srcClient, err := az.getBlobClient(srcBucket, srcObj)
if err != nil {
return nil, err
}
res, err := srcClient.GetTags(ctx, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
_, err = dstClient.SetTags(ctx, parseAzTags(res.BlobTagSet), nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
}
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{
ETag: (*string)(resp.ETag),
LastModified: resp.LastModified,
ETag: &resp.ETag,
},
}, nil
}
@@ -1629,7 +1746,7 @@ func parseMetadata(m map[string]string) map[string]*string {
return meta
}
func parseAzMetadata(m map[string]*string) map[string]string {
func parseAndFilterAzMetadata(m map[string]*string) map[string]string {
if m == nil {
return nil
}
@@ -1648,6 +1765,19 @@ func parseAzMetadata(m map[string]*string) map[string]string {
return meta
}
// parseAzMetadata converts the azure SDK metadata representation
// (map[string]*string) into the aws-style map[string]string form.
// A nil input yields a nil map. Entries whose value pointer is nil are
// skipped: the original dereferenced them unconditionally, which would
// panic on a nil value.
func parseAzMetadata(m map[string]*string) map[string]string {
	if m == nil {
		return nil
	}
	meta := make(map[string]string, len(m))
	for k, v := range m {
		if v == nil {
			continue
		}
		meta[k] = *v
	}
	return meta
}
func parseTags(tagstr *string) (map[string]string, error) {
tagsStr := getString(tagstr)
tags := make(map[string]string)
@@ -1830,24 +1960,6 @@ func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
return &acl, nil
}
// isMetaSame reports whether the azure-side metadata map matches the
// aws-style metadata map. Internal ACL bookkeeping entries are not user
// metadata, so they are excluded from the value comparison (although they
// still count toward the length check, matching the original behavior).
func isMetaSame(azMeta map[string]*string, awsMeta map[string]string) bool {
	if len(azMeta) != len(awsMeta) {
		return false
	}
	for k, v := range azMeta {
		// skip the internal ACL entries
		if k == string(keyAclCapital) || k == string(keyAclLower) {
			continue
		}
		other, found := awsMeta[k]
		if !found {
			return false
		}
		if other != *v {
			return false
		}
	}
	return true
}
func createMetaTmpPath(obj, uploadId string) string {
objNameSum := sha256.Sum256([]byte(obj))
return filepath.Join(string(metaTmpMultipartPrefix), uploadId, fmt.Sprintf("%x", objNameSum))

View File

@@ -62,7 +62,7 @@ type Backend interface {
GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error)
GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error)
CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
CopyObject(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error)
ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error)
ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error)
DeleteObject(context.Context, *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
@@ -188,7 +188,7 @@ func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
return s3response.GetObjectAttributesResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
func (BackendUnsupported) CopyObject(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {

View File

@@ -211,6 +211,30 @@ func ParseCopySource(copySourceHeader string) (string, string, string, error) {
return srcBucket, srcObject, versionId, nil
}
// ParseObjectTags parses the url encoded input string into a
// map[string]string key-value tag set.
//
// NOTE(review): the input is not url-decoded here; confirm that any
// required decoding is performed by the caller.
func ParseObjectTags(t string) (map[string]string, error) {
	tagset := make(map[string]string)
	if t == "" {
		return tagset, nil
	}
	for _, pair := range strings.Split(t, "&") {
		kv := strings.Split(pair, "=")
		// each entry must be exactly one "key=value" pair
		if len(kv) != 2 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
		}
		// enforce the S3 tag limits: 128 chars for keys, 256 for values
		if len(kv[0]) > 128 || len(kv[1]) > 256 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
		}
		tagset[kv[0]] = kv[1]
	}
	return tagset, nil
}
func GetMultipartMD5(parts []types.CompletedPart) string {
var partsEtagBytes []byte
for _, part := range parts {

View File

@@ -3819,7 +3819,7 @@ func (p *Posix) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttr
}, nil
}
func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
if input.Bucket == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
@@ -3925,6 +3925,7 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
return &s3.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
}
// Delete the object metadata
for k := range mdmap {
err := p.meta.DeleteAttribute(dstBucket, dstObject,
fmt.Sprintf("%v.%v", metaHdr, k))
@@ -3932,6 +3933,7 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
return nil, fmt.Errorf("delete user metadata: %w", err)
}
}
// Store the new metadata
for k, v := range input.Metadata {
err := p.meta.StoreAttribute(nil, dstBucket, dstObject,
fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
@@ -4006,6 +4008,32 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
version = backend.GetPtrFromString(string(vId))
// Store the provided object meta properties
err = p.storeObjectMetadata(nil, dstBucket, dstObject,
objectMetadata{
ContentType: input.ContentType,
ContentEncoding: input.ContentEncoding,
ContentLanguage: input.ContentLanguage,
ContentDisposition: input.ContentDisposition,
CacheControl: input.CacheControl,
Expires: input.Expires,
})
if err != nil {
return nil, err
}
if input.TaggingDirective == types.TaggingDirectiveReplace {
tags, err := backend.ParseObjectTags(getString(input.Tagging))
if err != nil {
return nil, err
}
err = p.PutObjectTagging(ctx, dstBucket, dstObject, tags)
if err != nil {
return nil, err
}
}
} else {
contentLength := fi.Size()
@@ -4020,18 +4048,61 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
checksums.Algorithm = input.ChecksumAlgorithm
}
res, err := p.PutObject(ctx,
s3response.PutObjectInput{
Bucket: &dstBucket,
Key: &dstObject,
Body: f,
ContentLength: &contentLength,
Metadata: input.Metadata,
ChecksumAlgorithm: checksums.Algorithm,
})
putObjectInput := s3response.PutObjectInput{
Bucket: &dstBucket,
Key: &dstObject,
Body: f,
ContentLength: &contentLength,
ChecksumAlgorithm: checksums.Algorithm,
ContentType: input.ContentType,
ContentEncoding: input.ContentEncoding,
ContentDisposition: input.ContentDisposition,
ContentLanguage: input.ContentLanguage,
CacheControl: input.CacheControl,
Expires: input.Expires,
Metadata: input.Metadata,
ObjectLockRetainUntilDate: input.ObjectLockRetainUntilDate,
ObjectLockMode: input.ObjectLockMode,
ObjectLockLegalHoldStatus: input.ObjectLockLegalHoldStatus,
}
// load and pass the source object meta properties, if metadata directive is "COPY"
if input.MetadataDirective != types.MetadataDirectiveReplace {
metaProps := p.loadObjectMetaData(srcBucket, srcObject, &fi, nil)
putObjectInput.ContentEncoding = metaProps.ContentEncoding
putObjectInput.ContentDisposition = metaProps.ContentDisposition
putObjectInput.ContentLanguage = metaProps.ContentLanguage
putObjectInput.ContentType = metaProps.ContentType
putObjectInput.CacheControl = metaProps.CacheControl
putObjectInput.Expires = metaProps.Expires
putObjectInput.Metadata = mdmap
}
// pass the input tagging to PutObject, if tagging directive is "REPLACE"
if input.TaggingDirective == types.TaggingDirectiveReplace {
putObjectInput.Tagging = input.Tagging
}
res, err := p.PutObject(ctx, putObjectInput)
if err != nil {
return nil, err
}
// copy the source object tagging after the destination object
// creation, if tagging directive is "COPY"
if input.TaggingDirective == types.TaggingDirectiveCopy {
tagging, err := p.meta.RetrieveAttribute(nil, srcBucket, srcObject, tagHdr)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return nil, fmt.Errorf("get source object tagging: %w", err)
}
if err == nil {
err := p.meta.StoreAttribute(nil, dstBucket, dstObject, tagHdr, tagging)
if err != nil {
return nil, fmt.Errorf("set destination object tagging: %w", err)
}
}
}
etag = res.ETag
version = &res.VersionID
crc32 = res.ChecksumCRC32

View File

@@ -988,7 +988,7 @@ func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAt
}, handleError(err)
}
func (s *S3Proxy) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
if input.CacheControl != nil && *input.CacheControl == "" {
input.CacheControl = nil
}
@@ -1031,7 +1031,7 @@ func (s *S3Proxy) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s
if input.ExpectedSourceBucketOwner != nil && *input.ExpectedSourceBucketOwner == "" {
input.ExpectedSourceBucketOwner = nil
}
if input.Expires != nil && *input.Expires == defTime {
if input.Expires != nil && *input.Expires == "" {
input.Expires = nil
}
if input.GrantFullControl != nil && *input.GrantFullControl == "" {
@@ -1071,7 +1071,58 @@ func (s *S3Proxy) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s
input.WebsiteRedirectLocation = nil
}
out, err := s.client.CopyObject(ctx, input)
var expires *time.Time
if input.Expires != nil {
exp, err := time.Parse(time.RFC1123, *input.Expires)
if err == nil {
expires = &exp
}
}
out, err := s.client.CopyObject(ctx,
&s3.CopyObjectInput{
Metadata: input.Metadata,
Bucket: input.Bucket,
CopySource: input.CopySource,
Key: input.Key,
CacheControl: input.CacheControl,
ContentDisposition: input.ContentDisposition,
ContentEncoding: input.ContentEncoding,
ContentLanguage: input.ContentLanguage,
ContentType: input.ContentType,
CopySourceIfMatch: input.CopySourceIfMatch,
CopySourceIfNoneMatch: input.CopySourceIfNoneMatch,
CopySourceSSECustomerAlgorithm: input.CopySourceSSECustomerAlgorithm,
CopySourceSSECustomerKey: input.CopySourceSSECustomerKey,
CopySourceSSECustomerKeyMD5: input.CopySourceSSECustomerKeyMD5,
ExpectedBucketOwner: input.ExpectedBucketOwner,
ExpectedSourceBucketOwner: input.ExpectedSourceBucketOwner,
Expires: expires,
GrantFullControl: input.GrantFullControl,
GrantRead: input.GrantRead,
GrantReadACP: input.GrantReadACP,
GrantWriteACP: input.GrantWriteACP,
SSECustomerAlgorithm: input.SSECustomerAlgorithm,
SSECustomerKey: input.SSECustomerKey,
SSECustomerKeyMD5: input.SSECustomerKeyMD5,
SSEKMSEncryptionContext: input.SSEKMSEncryptionContext,
SSEKMSKeyId: input.SSEKMSKeyId,
Tagging: input.Tagging,
WebsiteRedirectLocation: input.WebsiteRedirectLocation,
CopySourceIfModifiedSince: input.CopySourceIfModifiedSince,
CopySourceIfUnmodifiedSince: input.CopySourceIfUnmodifiedSince,
ObjectLockRetainUntilDate: input.ObjectLockRetainUntilDate,
BucketKeyEnabled: input.BucketKeyEnabled,
ACL: input.ACL,
ChecksumAlgorithm: input.ChecksumAlgorithm,
MetadataDirective: input.MetadataDirective,
ObjectLockLegalHoldStatus: input.ObjectLockLegalHoldStatus,
ObjectLockMode: input.ObjectLockMode,
RequestPayer: input.RequestPayer,
ServerSideEncryption: input.ServerSideEncryption,
StorageClass: input.StorageClass,
TaggingDirective: input.TaggingDirective,
})
return out, handleError(err)
}

View File

@@ -32,7 +32,7 @@ var _ backend.Backend = &BackendMock{}
// CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
// panic("mock out the CompleteMultipartUpload method")
// },
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
// panic("mock out the CopyObject method")
// },
// CreateBucketFunc: func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error {
@@ -193,7 +193,7 @@ type BackendMock struct {
CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
// CopyObjectFunc mocks the CopyObject method.
CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error)
// CreateBucketFunc mocks the CreateBucket method.
CreateBucketFunc func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error
@@ -366,7 +366,7 @@ type BackendMock struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// CopyObjectInput is the copyObjectInput argument value.
CopyObjectInput *s3.CopyObjectInput
CopyObjectInput s3response.CopyObjectInput
}
// CreateBucket holds details about calls to the CreateBucket method.
CreateBucket []struct {
@@ -898,13 +898,13 @@ func (mock *BackendMock) CompleteMultipartUploadCalls() []struct {
}
// CopyObject calls CopyObjectFunc.
func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
if mock.CopyObjectFunc == nil {
panic("BackendMock.CopyObjectFunc: method is nil but Backend.CopyObject was just called")
}
callInfo := struct {
ContextMoqParam context.Context
CopyObjectInput *s3.CopyObjectInput
CopyObjectInput s3response.CopyObjectInput
}{
ContextMoqParam: contextMoqParam,
CopyObjectInput: copyObjectInput,
@@ -921,11 +921,11 @@ func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectI
// len(mockedBackend.CopyObjectCalls())
func (mock *BackendMock) CopyObjectCalls() []struct {
ContextMoqParam context.Context
CopyObjectInput *s3.CopyObjectInput
CopyObjectInput s3response.CopyObjectInput
} {
var calls []struct {
ContextMoqParam context.Context
CopyObjectInput *s3.CopyObjectInput
CopyObjectInput s3response.CopyObjectInput
}
mock.lockCopyObject.RLock()
calls = mock.calls.CopyObject

View File

@@ -1731,6 +1731,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
contentDisposition := ctx.Get("Content-Disposition")
contentLanguage := ctx.Get("Content-Language")
cacheControl := ctx.Get("Cache-Control")
expires := ctx.Get("Expires")
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
tagging := ctx.Get("x-amz-tagging")
@@ -2387,6 +2388,23 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
metaDirective = types.MetadataDirectiveReplace
}
tDirective := types.TaggingDirective(ctx.Get("X-Amz-Tagging-Directive"))
if tDirective != "" && tDirective != types.TaggingDirectiveCopy && tDirective != types.TaggingDirectiveReplace {
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrInvalidTaggingDirective),
&MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
Action: metrics.ActionCopyObject,
BucketOwner: parsedAcl.Owner,
})
}
taggingDirective := types.TaggingDirectiveCopy
if tDirective == types.TaggingDirectiveReplace {
taggingDirective = types.TaggingDirectiveReplace
}
checksumAlgorithm := types.ChecksumAlgorithm(ctx.Get("x-amz-checksum-algorithm"))
err = utils.IsChecksumAlgorithmValid(checksumAlgorithm)
if err != nil {
@@ -2402,10 +2420,29 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}
objLock, err := utils.ParsObjectLockHdrs(ctx)
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
Action: metrics.ActionPutObject,
BucketOwner: parsedAcl.Owner,
})
}
res, err := c.be.CopyObject(ctx.Context(),
&s3.CopyObjectInput{
s3response.CopyObjectInput{
Bucket: &bucket,
Key: &keyStart,
ContentType: &contentType,
ContentDisposition: &contentDisposition,
ContentEncoding: &contentEncoding,
ContentLanguage: &contentLanguage,
CacheControl: &cacheControl,
Expires: &expires,
Tagging: &tagging,
TaggingDirective: taggingDirective,
CopySource: &copySource,
CopySourceIfMatch: &copySrcIfMatch,
CopySourceIfNoneMatch: &copySrcIfNoneMatch,
@@ -2416,6 +2453,9 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
MetadataDirective: metaDirective,
StorageClass: types.StorageClass(storageClass),
ChecksumAlgorithm: checksumAlgorithm,
ObjectLockRetainUntilDate: &objLock.RetainUntilDate,
ObjectLockLegalHoldStatus: objLock.LegalHoldStatus,
ObjectLockMode: objLock.ObjectLockMode,
})
if err == nil {
hdrs := []utils.CustomHeader{}
@@ -2526,8 +2566,6 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}
expires := ctx.Get("Expires")
var body io.Reader
bodyi := ctx.Locals("body-reader")
if bodyi != nil {

View File

@@ -975,7 +975,7 @@ func TestS3ApiController_PutActions(t *testing.T) {
PutObjectAclFunc: func(context.Context, *s3.PutObjectAclInput) error {
return nil
},
CopyObjectFunc: func(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
CopyObjectFunc: func(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{},
}, nil

View File

@@ -144,6 +144,7 @@ const (
ErrUnexpectedContent
ErrMissingSecurityHeader
ErrInvalidMetadataDirective
ErrInvalidTaggingDirective
ErrKeyTooLong
ErrInvalidVersionId
ErrNoSuchVersion
@@ -597,6 +598,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Unknown metadata directive.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTaggingDirective: {
Code: "InvalidArgument",
Description: "Unknown tagging directive.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidVersionId: {
Code: "InvalidArgument",
Description: "Invalid version id specified",

View File

@@ -524,6 +524,53 @@ type CreateMultipartUploadInput struct {
StorageClass types.StorageClass
}
type CopyObjectInput struct {
Metadata map[string]string
Bucket *string
CopySource *string
Key *string
CacheControl *string
ContentDisposition *string
ContentEncoding *string
ContentLanguage *string
ContentType *string
CopySourceIfMatch *string
CopySourceIfNoneMatch *string
CopySourceSSECustomerAlgorithm *string
CopySourceSSECustomerKey *string
CopySourceSSECustomerKeyMD5 *string
ExpectedBucketOwner *string
ExpectedSourceBucketOwner *string
Expires *string
GrantFullControl *string
GrantRead *string
GrantReadACP *string
GrantWriteACP *string
SSECustomerAlgorithm *string
SSECustomerKey *string
SSECustomerKeyMD5 *string
SSEKMSEncryptionContext *string
SSEKMSKeyId *string
Tagging *string
WebsiteRedirectLocation *string
CopySourceIfModifiedSince *time.Time
CopySourceIfUnmodifiedSince *time.Time
ObjectLockRetainUntilDate *time.Time
BucketKeyEnabled *bool
ACL types.ObjectCannedACL
ChecksumAlgorithm types.ChecksumAlgorithm
MetadataDirective types.MetadataDirective
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
ObjectLockMode types.ObjectLockMode
RequestPayer types.RequestPayer
ServerSideEncryption types.ServerSideEncryption
StorageClass types.StorageClass
TaggingDirective types.TaggingDirective
}
type AmzDate struct {
time.Time
}

View File

@@ -266,9 +266,15 @@ func TestCopyObject(s *S3Conf) {
CopyObject_not_owned_source_bucket(s)
CopyObject_copy_to_itself(s)
CopyObject_copy_to_itself_invalid_directive(s)
CopyObject_should_copy_tagging(s)
CopyObject_invalid_tagging_directive(s)
CopyObject_to_itself_with_new_metadata(s)
CopyObject_CopySource_starting_with_slash(s)
CopyObject_non_existing_dir_object(s)
CopyObject_should_copy_meta_props(s)
CopyObject_should_replace_meta_props(s)
CopyObject_with_legal_hold(s)
CopyObject_with_retention_lock(s)
//TODO: remove the condition after implementing checksums in azure
if !s.azureTests {
CopyObject_invalid_checksum_algorithm(s)
@@ -909,9 +915,15 @@ func GetIntTests() IntTests {
"CopyObject_not_owned_source_bucket": CopyObject_not_owned_source_bucket,
"CopyObject_copy_to_itself": CopyObject_copy_to_itself,
"CopyObject_copy_to_itself_invalid_directive": CopyObject_copy_to_itself_invalid_directive,
"CopyObject_should_copy_tagging": CopyObject_should_copy_tagging,
"CopyObject_invalid_tagging_directive": CopyObject_invalid_tagging_directive,
"CopyObject_to_itself_with_new_metadata": CopyObject_to_itself_with_new_metadata,
"CopyObject_CopySource_starting_with_slash": CopyObject_CopySource_starting_with_slash,
"CopyObject_non_existing_dir_object": CopyObject_non_existing_dir_object,
"CopyObject_should_copy_meta_props": CopyObject_should_copy_meta_props,
"CopyObject_should_replace_meta_props": CopyObject_should_replace_meta_props,
"CopyObject_with_legal_hold": CopyObject_with_legal_hold,
"CopyObject_with_retention_lock": CopyObject_with_retention_lock,
"CopyObject_invalid_checksum_algorithm": CopyObject_invalid_checksum_algorithm,
"CopyObject_create_checksum_on_copy": CopyObject_create_checksum_on_copy,
"CopyObject_should_copy_the_existing_checksum": CopyObject_should_copy_the_existing_checksum,

View File

@@ -5880,6 +5880,131 @@ func CopyObject_copy_to_itself_invalid_directive(s *S3Conf) error {
})
}
// CopyObject_invalid_tagging_directive verifies that CopyObject rejects an
// unknown X-Amz-Tagging-Directive value with ErrInvalidTaggingDirective.
func CopyObject_invalid_tagging_directive(s *S3Conf) error {
	testName := "CopyObject_invalid_tagging_directive"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		key := "my-obj"
		if _, err := putObjects(s3client, []string{key}, bucket); err != nil {
			return err
		}

		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		defer cancel()
		_, err := s3client.CopyObject(ctx, &s3.CopyObjectInput{
			Bucket:           &bucket,
			Key:              &key,
			CopySource:       getPtr(fmt.Sprintf("%v/%v", bucket, key)),
			TaggingDirective: types.TaggingDirective("invalid"),
		})

		// the call must fail with the dedicated invalid-directive error
		return checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidTaggingDirective))
	})
}
// CopyObject_should_copy_tagging verifies that, with the default (COPY)
// tagging directive, CopyObject carries the source object's tag set over
// to the destination object.
func CopyObject_should_copy_tagging(s *S3Conf) error {
	testName := "CopyObject_should_copy_tagging"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		srcObj, dstObj := "source-object", "dest-object"
		tagging := "foo=bar&baz=quxx"

		// create the source object with a tag set attached
		if _, err := putObjectWithData(100, &s3.PutObjectInput{
			Bucket:  &bucket,
			Key:     &srcObj,
			Tagging: &tagging,
		}, s3client); err != nil {
			return err
		}

		// copy without specifying a tagging directive: COPY is the default
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err := s3client.CopyObject(ctx, &s3.CopyObjectInput{
			Bucket:     &bucket,
			Key:        &dstObj,
			CopySource: getPtr(fmt.Sprintf("%v/%v", bucket, srcObj)),
		})
		cancel()
		if err != nil {
			return err
		}

		// the destination object must expose the source object's tags
		ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
		res, err := s3client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
			Bucket: &bucket,
			Key:    &dstObj,
		})
		cancel()
		if err != nil {
			return err
		}

		want := []types.Tag{
			{Key: getPtr("foo"), Value: getPtr("bar")},
			{Key: getPtr("baz"), Value: getPtr("quxx")},
		}
		if !areTagsSame(res.TagSet, want) {
			return fmt.Errorf("expected the tag set to be %v, instead got %v", want, res.TagSet)
		}
		return nil
	})
}
// CopyObject_should_reaplace_tagging verifies that CopyObject with the
// REPLACE tagging directive stores the provided tag set on the destination
// object instead of copying the source object's tags.
//
// NOTE(review): "reaplace" is a typo in the test name; this test also does
// not appear in the registered test lists — confirm whether it should be
// renamed and registered.
func CopyObject_should_reaplace_tagging(s *S3Conf) error {
	testName := "CopyObject_should_reaplace_tagging"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		srcObj, dstObj := "source-object", "dest-object"
		tagging := "foo=bar&baz=quxx"

		// source object carries its own tag set
		if _, err := putObjectWithData(100, &s3.PutObjectInput{
			Bucket:  &bucket,
			Key:     &srcObj,
			Tagging: &tagging,
		}, s3client); err != nil {
			return err
		}

		// copy with REPLACE: the tag set below must win over the source tags
		copyTagging := "key1=val1&key2=val2"
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err := s3client.CopyObject(ctx, &s3.CopyObjectInput{
			Bucket:           &bucket,
			Key:              &dstObj,
			CopySource:       getPtr(fmt.Sprintf("%v/%v", bucket, srcObj)),
			TaggingDirective: types.TaggingDirectiveReplace,
			Tagging:          &copyTagging,
		})
		cancel()
		if err != nil {
			return err
		}

		ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
		res, err := s3client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
			Bucket: &bucket,
			Key:    &dstObj,
		})
		cancel()
		if err != nil {
			return err
		}

		want := []types.Tag{
			{Key: getPtr("key1"), Value: getPtr("val1")},
			{Key: getPtr("key2"), Value: getPtr("val2")},
		}
		if !areTagsSame(res.TagSet, want) {
			return fmt.Errorf("expected the tag set to be %v, instead got %v", want, res.TagSet)
		}
		return nil
	})
}
func CopyObject_to_itself_with_new_metadata(s *S3Conf) error {
testName := "CopyObject_to_itself_with_new_metadata"
@@ -6055,6 +6180,220 @@ func CopyObject_non_existing_dir_object(s *S3Conf) error {
})
}
// CopyObject_should_copy_meta_props verifies the default (COPY) metadata
// directive: CopyObject must carry the source object's meta properties
// (Content-Type/Encoding/Disposition/Language, Cache-Control, Expires)
// and user metadata over to the destination object.
func CopyObject_should_copy_meta_props(s *S3Conf) error {
	testName := "CopyObject_should_copy_meta_props"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		srcObj, dstObj := "source-object", "dest-object"
		// meta properties to attach to the source object
		cType, cEnc, cDesp, cLang, cLength := "application/json", "base64", "test-desp", "us", int64(100)
		cacheControl, expires := "no-cache", time.Now().Add(time.Hour*10)
		meta := map[string]string{
			"foo": "bar",
			"baz": "quxx",
		}
		// create the source object carrying all the meta properties above
		_, err := putObjectWithData(cLength, &s3.PutObjectInput{
			Bucket:             &bucket,
			Key:                &srcObj,
			ContentDisposition: &cDesp,
			ContentEncoding:    &cEnc,
			ContentLanguage:    &cLang,
			ContentType:        &cType,
			CacheControl:       &cacheControl,
			Expires:            &expires,
			Metadata:           meta,
		}, s3client)
		if err != nil {
			return err
		}
		// copy with no MetadataDirective: COPY is the default, so the
		// destination must inherit all source properties
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err = s3client.CopyObject(ctx, &s3.CopyObjectInput{
			Bucket:     &bucket,
			Key:        &dstObj,
			CopySource: getPtr(bucket + "/" + srcObj),
		})
		cancel()
		if err != nil {
			return err
		}
		// the destination object must report the same length, headers,
		// Expires (formatted with the shared timefmt) and user metadata
		return checkObjectMetaProps(s3client, bucket, dstObj, ObjectMetaProps{
			ContentLength:      cLength,
			ContentType:        cType,
			ContentEncoding:    cEnc,
			ContentDisposition: cDesp,
			ContentLanguage:    cLang,
			CacheControl:       cacheControl,
			ExpiresString:      expires.UTC().Format(timefmt),
			Metadata:           meta,
		})
	})
}
// CopyObject_should_replace_meta_props verifies that CopyObject with the
// REPLACE metadata directive discards the source object's meta properties
// and applies the ones supplied in the request instead.
func CopyObject_should_replace_meta_props(s *S3Conf) error {
	testName := "CopyObject_should_replace_meta_props"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		srcObj, dstObj := "source-object", "dest-object"

		// Seed the source object with placeholder meta properties that the
		// copy is expected to overwrite.
		srcExpires := time.Now().Add(time.Minute * 10)
		dataLength := int64(200)
		if _, err := putObjectWithData(dataLength, &s3.PutObjectInput{
			Bucket:             &bucket,
			Key:                &srcObj,
			ContentDisposition: getPtr("test"),
			ContentEncoding:    getPtr("test"),
			ContentLanguage:    getPtr("test"),
			ContentType:        getPtr("test"),
			CacheControl:       getPtr("test"),
			Expires:            &srcExpires,
			Metadata: map[string]string{
				"key": "val",
			},
		}, s3client); err != nil {
			return err
		}

		// Replacement values supplied together with the REPLACE directive.
		contentType := "application/binary"
		contentEncoding := "hex"
		contentDisposition := "desp"
		contentLanguage := "mex"
		cacheControl := "no-cache"
		expires := time.Now().Add(time.Hour * 10)
		metadata := map[string]string{
			"foo": "bar",
			"baz": "quxx",
		}

		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err := s3client.CopyObject(ctx, &s3.CopyObjectInput{
			Bucket:             &bucket,
			Key:                &dstObj,
			CopySource:         getPtr(bucket + "/" + srcObj),
			MetadataDirective:  types.MetadataDirectiveReplace,
			ContentDisposition: &contentDisposition,
			ContentEncoding:    &contentEncoding,
			ContentLanguage:    &contentLanguage,
			ContentType:        &contentType,
			CacheControl:       &cacheControl,
			Expires:            &expires,
			Metadata:           metadata,
		})
		cancel()
		if err != nil {
			return err
		}

		// The destination must carry the replacement values, not the source's.
		return checkObjectMetaProps(s3client, bucket, dstObj, ObjectMetaProps{
			ContentLength:      dataLength,
			ContentType:        contentType,
			ContentEncoding:    contentEncoding,
			ContentDisposition: contentDisposition,
			ContentLanguage:    contentLanguage,
			CacheControl:       cacheControl,
			ExpiresString:      expires.UTC().Format(timefmt),
			Metadata:           metadata,
		})
	})
}
// CopyObject_with_legal_hold verifies that a legal-hold status requested on
// CopyObject is applied to the destination object, then disables the bucket
// object lock so the test bucket can be cleaned up.
func CopyObject_with_legal_hold(s *S3Conf) error {
	testName := "CopyObject_with_legal_hold"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		srcObj, dstObj := "source-object", "dst-object"
		_, err := putObjectWithData(100, &s3.PutObjectInput{
			Bucket: &bucket,
			Key:    &srcObj,
		}, s3client)
		if err != nil {
			return err
		}

		// Copy the object, requesting legal hold on the destination.
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err = s3client.CopyObject(ctx, &s3.CopyObjectInput{
			Bucket:                    &bucket,
			Key:                       &dstObj,
			CopySource:                getPtr(fmt.Sprintf("%v/%v", bucket, srcObj)),
			ObjectLockLegalHoldStatus: types.ObjectLockLegalHoldStatusOn,
		})
		cancel()
		if err != nil {
			return err
		}

		ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
		res, err := s3client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{
			Bucket: &bucket,
			Key:    &dstObj,
		})
		cancel()
		if err != nil {
			return err
		}
		// LegalHold is a pointer field: guard against nil before reading
		// Status so a misbehaving backend fails the test instead of panicking.
		if res.LegalHold == nil {
			return fmt.Errorf("expected the copied object legal hold status to be %v, instead got nil legal hold", types.ObjectLockLegalHoldStatusOn)
		}
		if res.LegalHold.Status != types.ObjectLockLegalHoldStatusOn {
			return fmt.Errorf("expected the copied object legal hold status to be %v, instead got %v", types.ObjectLockLegalHoldStatusOn, res.LegalHold.Status)
		}

		// Disable object lock so the bucket teardown can remove the objects.
		err = changeBucketObjectLockStatus(s3client, bucket, false)
		if err != nil {
			return err
		}

		return nil
	}, withLock())
}
// CopyObject_with_retention_lock verifies that an object-lock retention
// (mode + retain-until date) requested on CopyObject is applied to the
// destination object, then disables the bucket object lock for cleanup.
func CopyObject_with_retention_lock(s *S3Conf) error {
	testName := "CopyObject_with_retention_lock"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		srcObj, dstObj := "source-object", "dst-object"
		_, err := putObjectWithData(200, &s3.PutObjectInput{
			Bucket: &bucket,
			Key:    &srcObj,
		}, s3client)
		if err != nil {
			return err
		}

		// Copy the object, requesting GOVERNANCE retention on the destination.
		retDate := time.Now().Add(time.Hour * 7)
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err = s3client.CopyObject(ctx, &s3.CopyObjectInput{
			Bucket:                    &bucket,
			Key:                       &dstObj,
			CopySource:                getPtr(fmt.Sprintf("%v/%v", bucket, srcObj)),
			ObjectLockMode:            types.ObjectLockModeGovernance,
			ObjectLockRetainUntilDate: &retDate,
		})
		cancel()
		if err != nil {
			return err
		}

		ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
		res, err := s3client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
			Bucket: &bucket,
			Key:    &dstObj,
		})
		cancel()
		if err != nil {
			return err
		}
		// Retention and RetainUntilDate are pointer fields: guard against nil
		// before dereferencing so the test fails cleanly instead of panicking.
		if res.Retention == nil {
			return fmt.Errorf("expected the copied object retention mode to be %v, instead got nil retention", types.ObjectLockRetentionModeGovernance)
		}
		if res.Retention.Mode != types.ObjectLockRetentionModeGovernance {
			return fmt.Errorf("expected the copied object retention mode to be %v, instead got %v", types.ObjectLockRetentionModeGovernance, res.Retention.Mode)
		}
		if res.Retention.RetainUntilDate == nil {
			return fmt.Errorf("expected the retention date to be %v, instead got nil", retDate.Format(time.RFC1123))
		}
		// Compare at second precision: the round trip through the server may
		// drop sub-second components.
		if res.Retention.RetainUntilDate.UTC().Unix() != retDate.UTC().Unix() {
			return fmt.Errorf("expected the retention date to be %v, instead got %v", retDate.Format(time.RFC1123), res.Retention.RetainUntilDate.Format(time.RFC1123))
		}

		// Disable object lock so the bucket teardown can remove the objects.
		err = changeBucketObjectLockStatus(s3client, bucket, false)
		if err != nil {
			return err
		}

		return nil
	}, withLock())
}
func CopyObject_invalid_checksum_algorithm(s *S3Conf) error {
testName := "CopyObject_invalid_checksum_algorithm"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {

View File

@@ -1286,6 +1286,64 @@ func compareDelMarkers(d1, d2 []types.DeleteMarkerEntry) bool {
return true
}
// ObjectMetaProps holds the expected meta properties of an object for
// verification against a HeadObject response (see checkObjectMetaProps).
// String fields left empty and a nil Metadata map are skipped by the check;
// ContentLength is always compared.
type ObjectMetaProps struct {
	ContentLength int64
	ContentType string
	ContentEncoding string
	ContentDisposition string
	ContentLanguage string
	CacheControl string
	// ExpiresString is the expected Expires header value as a formatted
	// timestamp string (compare with time.Time.Format(timefmt)).
	ExpiresString string
	Metadata map[string]string
}
// checkObjectMetaProps issues a HeadObject for the given object and verifies
// the response against the expected meta properties in o. Metadata is only
// compared when o.Metadata is non-nil, and each string property is only
// compared when its expected value is non-empty; ContentLength and the
// STANDARD storage class are always verified. Returns a descriptive error on
// the first mismatch, nil when everything matches.
func checkObjectMetaProps(client *s3.Client, bucket, object string, o ObjectMetaProps) error {
	ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: &bucket,
		Key:    &object,
	})
	cancel()
	if err != nil {
		return err
	}

	if o.Metadata != nil {
		if !areMapsSame(out.Metadata, o.Metadata) {
			return fmt.Errorf("expected the object metadata to be %v, instead got %v", o.Metadata, out.Metadata)
		}
	}
	if out.ContentLength == nil {
		return fmt.Errorf("expected Content-Length %v, instead got nil", o.ContentLength)
	}
	if *out.ContentLength != o.ContentLength {
		return fmt.Errorf("expected Content-Length %v, instead got %v", o.ContentLength, *out.ContentLength)
	}

	// Table-driven comparison of the optional string properties; entries
	// with an empty expected value are skipped.
	props := []struct {
		name     string
		expected string
		actual   *string
	}{
		{"Content-Type", o.ContentType, out.ContentType},
		{"Content-Disposition", o.ContentDisposition, out.ContentDisposition},
		{"Content-Encoding", o.ContentEncoding, out.ContentEncoding},
		{"Content-Language", o.ContentLanguage, out.ContentLanguage},
		{"Cache-Control", o.CacheControl, out.CacheControl},
		{"Expires", o.ExpiresString, out.ExpiresString},
	}
	for _, p := range props {
		if p.expected != "" && getString(p.actual) != p.expected {
			return fmt.Errorf("expected %v %v, instead got %v", p.name, p.expected, getString(p.actual))
		}
	}

	if out.StorageClass != types.StorageClassStandard {
		return fmt.Errorf("expected the storage class to be %v, instead got %v", types.StorageClassStandard, out.StorageClass)
	}

	return nil
}
// getBoolPtr returns a pointer to a copy of the given boolean value,
// useful for populating optional *bool fields in SDK inputs.
func getBoolPtr(b bool) *bool {
	val := b
	return &val
}