fix: Fixes GetObject and UploadPartCopy range parsing.

Fixes #1004
Fixes #1122
Fixes #1120

Separates `GetObject` and `UploadPartCopy` range parsing/validation.

`GetObject` returns a successful response if the range header is invalid (a malformed range is ignored and the full object is served).
Adjusts the range upper limit for `GetObject` if it exceeds the actual object size.
Corrects the `ContentRange` in the `GetObject` response.
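
Not part of the commit, but for illustration: a minimal sketch of how the new `backend.ParseGetObjectRange` helper behaves, based on the diff below. The object size and range strings here are made up.

```go
package main

import (
	"fmt"

	"github.com/versity/versitygw/backend"
)

func main() {
	const objSize int64 = 1024

	// Valid open-ended range: read from offset 100 to the end of the object.
	offset, count, isValid, err := backend.ParseGetObjectRange(objSize, "bytes=100-")
	fmt.Println(offset, count, isValid, err) // 100 924 true <nil>

	// Malformed range: no error, isValid=false, so the full object is served.
	_, _, isValid, err = backend.ParseGetObjectRange(objSize, "bytes=abc-10")
	fmt.Println(isValid, err) // false <nil>

	// An upper limit past the end of the object is clamped to the object size.
	offset, count, _, _ = backend.ParseGetObjectRange(objSize, "bytes=100-99999999")
	fmt.Println(offset, count) // 100 924

	// Only a start offset at or past the object size is a hard error (InvalidRange).
	_, _, _, err = backend.ParseGetObjectRange(objSize, "bytes=2048-")
	fmt.Println(err)
}
```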

Fixes the `UploadPartCopy` action copy source range parsing/validation.
`UploadPartCopy` returns `InvalidArgument` if the copy source range is not valid.
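
Similarly, a minimal sketch (not part of the commit, values made up) of the new `backend.ParseCopySourceRange` helper based on the diff below:

```go
package main

import (
	"fmt"

	"github.com/versity/versitygw/backend"
)

func main() {
	const objSize int64 = 1000

	// Valid inclusive range: length is end-start+1.
	offset, length, err := backend.ParseCopySourceRange(objSize, "bytes=100-199")
	fmt.Println(offset, length, err) // 100 100 <nil>

	// Malformed range (end before start) -> InvalidArgument (ErrInvalidCopySourceRange).
	_, _, err = backend.ParseCopySourceRange(objSize, "bytes=200-100")
	fmt.Println(err)

	// Range past the end of the source object -> InvalidArgument naming the object size.
	_, _, err = backend.ParseCopySourceRange(objSize, "bytes=100-1005")
	fmt.Println(err)
}
```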
niksis02
2025-03-08 00:39:04 +04:00
parent ca1697f1f5
commit 96af2b6471
7 changed files with 383 additions and 167 deletions

View File

@@ -389,17 +389,29 @@ func (az *Azure) DeleteBucketTagging(ctx context.Context, bucket string) error {
}
func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
client, err := az.getBlobClient(*input.Bucket, *input.Key)
if err != nil {
return nil, err
}
resp, err := client.GetProperties(ctx, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
var opts *azblob.DownloadStreamOptions
if *input.Range != "" {
offset, count, err := backend.ParseRange(0, *input.Range)
offset, count, isValid, err := backend.ParseGetObjectRange(*resp.ContentLength, *input.Range)
if err != nil {
return nil, err
}
opts = &azblob.DownloadStreamOptions{
Range: blob.HTTPRange{
Count: count,
Offset: offset,
},
if isValid {
opts = &azblob.DownloadStreamOptions{
Range: blob.HTTPRange{
Count: count,
Offset: offset,
},
}
}
}
blobDownloadResponse, err := az.client.DownloadStream(ctx, *input.Bucket, *input.Key, opts)
@@ -418,7 +430,7 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.G
}
return &s3.GetObjectOutput{
AcceptRanges: input.Range,
AcceptRanges: backend.GetPtrFromString("bytes"),
ContentLength: blobDownloadResponse.ContentLength,
ContentEncoding: blobDownloadResponse.ContentEncoding,
ContentType: contentType,

View File

@@ -19,7 +19,6 @@ import (
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
@@ -80,44 +79,109 @@ func TrimEtag(etag *string) *string {
}
var (
errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
errInvalidCopySourceRange = s3err.GetAPIError(s3err.ErrInvalidCopySourceRange)
)
// ParseRange parses input range header and returns startoffset, length, and
// error. If no endoffset specified, then length is set to -1.
func ParseRange(size int64, acceptRange string) (int64, int64, error) {
// ParseGetObjectRange parses the input range header and returns the start offset, length, isValid,
// and an error. If no end offset is specified, the length extends to the end of the object.
// For invalid inputs it returns no error, but isValid=false.
// An `InvalidRange` error is returned only if the start offset is at or past the object size.
func ParseGetObjectRange(size int64, acceptRange string) (int64, int64, bool, error) {
if acceptRange == "" {
return 0, size, false, nil
}
rangeKv := strings.Split(acceptRange, "=")
if len(rangeKv) != 2 {
return 0, size, false, nil
}
if rangeKv[0] != "bytes" {
return 0, size, false, nil
}
bRange := strings.Split(rangeKv[1], "-")
if len(bRange) != 2 {
return 0, size, false, nil
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil {
return 0, size, false, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
if bRange[1] == "" {
return startOffset, size - startOffset, true, nil
}
endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, size, false, nil
}
if endOffset < startOffset {
return 0, size, false, nil
}
if endOffset >= size {
return startOffset, size - startOffset, true, nil
}
return startOffset, endOffset - startOffset + 1, true, nil
}
// ParseCopySourceRange parses the copy source range header and returns the start offset, length,
// and an error. If no end offset is specified, the length extends to the end of the source object.
func ParseCopySourceRange(size int64, acceptRange string) (int64, int64, error) {
if acceptRange == "" {
return 0, size, nil
}
rangeKv := strings.Split(acceptRange, "=")
if len(rangeKv) < 2 {
return 0, 0, errInvalidRange
if len(rangeKv) != 2 {
return 0, 0, errInvalidCopySourceRange
}
if rangeKv[0] != "bytes" {
return 0, 0, errInvalidCopySourceRange
}
bRange := strings.Split(rangeKv[1], "-")
if len(bRange) < 1 || len(bRange) > 2 {
return 0, 0, errInvalidRange
if len(bRange) != 2 {
return 0, 0, errInvalidCopySourceRange
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil {
return 0, 0, errInvalidRange
return 0, 0, errInvalidCopySourceRange
}
endOffset := int64(-1)
if len(bRange) == 1 || bRange[1] == "" {
return startOffset, endOffset, nil
if startOffset >= size {
return 0, 0, s3err.CreateExceedingRangeErr(size)
}
endOffset, err = strconv.ParseInt(bRange[1], 10, 64)
if bRange[1] == "" {
return startOffset, size - startOffset + 1, nil
}
endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, 0, errInvalidRange
return 0, 0, errInvalidCopySourceRange
}
if endOffset < startOffset {
return 0, 0, errInvalidRange
return 0, 0, errInvalidCopySourceRange
}
if endOffset >= size {
return 0, 0, s3err.CreateExceedingRangeErr(size)
}
return startOffset, endOffset - startOffset + 1, nil
@@ -147,14 +211,6 @@ func ParseCopySource(copySourceHeader string) (string, string, string, error) {
return srcBucket, srcObject, versionId, nil
}
func CreateExceedingRangeErr(objSize int64) s3err.APIError {
return s3err.APIError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Range specified is not valid for source object of size: %d", objSize),
HTTPStatusCode: http.StatusBadRequest,
}
}
func GetMultipartMD5(parts []types.CompletedPart) string {
var partsEtagBytes []byte
for _, part := range parts {

View File

@@ -2480,19 +2480,11 @@ func (p *Posix) UploadPartCopy(ctx context.Context, upi *s3.UploadPartCopyInput)
return s3response.CopyPartResult{}, fmt.Errorf("stat object: %w", err)
}
startOffset, length, err := backend.ParseRange(fi.Size(), *upi.CopySourceRange)
startOffset, length, err := backend.ParseCopySourceRange(fi.Size(), *upi.CopySourceRange)
if err != nil {
return s3response.CopyPartResult{}, err
}
if length == -1 {
length = fi.Size() - startOffset + 1
}
if startOffset+length > fi.Size()+1 {
return s3response.CopyPartResult{}, backend.CreateExceedingRangeErr(fi.Size())
}
f, err := p.openTmpFile(filepath.Join(*upi.Bucket, objdir),
*upi.Bucket, partPath, length, acct, doFalloc)
if err != nil {
@@ -3379,29 +3371,20 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
}
}
acceptRange := *input.Range
startOffset, length, err := backend.ParseRange(fi.Size(), acceptRange)
objSize := fi.Size()
startOffset, length, isValid, err := backend.ParseGetObjectRange(objSize, *input.Range)
if err != nil {
return nil, err
}
objSize := fi.Size()
if fi.IsDir() {
// directory objects are always 0 len
objSize = 0
length = 0
}
if length == -1 {
length = objSize - startOffset
}
if startOffset+length > objSize {
length = objSize - startOffset
}
var contentRange string
if acceptRange != "" {
if isValid {
contentRange = fmt.Sprintf("bytes %v-%v/%v",
startOffset, startOffset+length-1, objSize)
}
@@ -3429,7 +3412,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
}
return &s3.GetObjectOutput{
AcceptRanges: &acceptRange,
AcceptRanges: backend.GetPtrFromString("bytes"),
ContentLength: &length,
ContentEncoding: &contentEncoding,
ContentType: &contentType,
@@ -3504,7 +3487,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
}
return &s3.GetObjectOutput{
AcceptRanges: &acceptRange,
AcceptRanges: backend.GetPtrFromString("bytes"),
ContentLength: &length,
ContentEncoding: &contentEncoding,
ContentType: &contentType,

View File

@@ -628,28 +628,20 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.Ge
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
startOffset, length, err := backend.ParseRange(fi.Size(), acceptRange)
objSize := fi.Size()
startOffset, length, isValid, err := backend.ParseGetObjectRange(objSize, acceptRange)
if err != nil {
return nil, err
}
objSize := fi.Size()
if fi.IsDir() {
// directory objects are always 0 len
objSize = 0
length = 0
}
if length == -1 {
length = fi.Size() - startOffset + 1
}
if startOffset+length > fi.Size() {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
var contentRange string
if acceptRange != "" {
if isValid {
contentRange = fmt.Sprintf("bytes %v-%v/%v", startOffset, startOffset+length-1, objSize)
}
@@ -696,7 +688,7 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.Ge
tagCount := int32(len(tags))
return &s3.GetObjectOutput{
AcceptRanges: &acceptRange,
AcceptRanges: backend.GetPtrFromString("bytes"),
ContentLength: &length,
ContentEncoding: &contentEncoding,
ContentType: &contentType,

View File

@@ -83,6 +83,7 @@ const (
ErrInternalError
ErrInvalidCopyDest
ErrInvalidCopySource
ErrInvalidCopySourceRange
ErrInvalidTag
ErrAuthHeaderEmpty
ErrSignatureVersionNotSupported
@@ -296,6 +297,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCopySourceRange: {
Code: "InvalidArgument",
Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTag: {
Code: "InvalidArgument",
Description: "The Tag value you have provided is invalid",
@@ -791,3 +797,11 @@ func GetInvalidMpObjectSizeErr(val int64) APIError {
HTTPStatusCode: http.StatusBadRequest,
}
}
func CreateExceedingRangeErr(objSize int64) APIError {
return APIError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Range specified is not valid for source object of size: %d", objSize),
HTTPStatusCode: http.StatusBadRequest,
}
}

View File

@@ -57,7 +57,9 @@ func TestPresignedAuthentication(s *S3Conf) {
PresignedAuth_incorrect_secret_key(s)
PresignedAuth_PutObject_success(s)
PresignedAuth_Put_GetObject_with_data(s)
PresignedAuth_Put_GetObject_with_UTF8_chars(s)
if !s.azureTests {
PresignedAuth_Put_GetObject_with_UTF8_chars(s)
}
PresignedAuth_UploadPart(s)
}
@@ -186,7 +188,8 @@ func TestGetObjectAttributes(s *S3Conf) {
func TestGetObject(s *S3Conf) {
GetObject_non_existing_key(s)
GetObject_directory_object_noslash(s)
GetObject_invalid_ranges(s)
GetObject_should_succeed_for_invalid_ranges(s)
GetObject_content_ranges(s)
GetObject_invalid_parent(s)
GetObject_with_meta(s)
GetObject_large_object(s)
@@ -343,7 +346,8 @@ func TestUploadPartCopy(s *S3Conf) {
UploadPartCopy_non_existing_source_bucket(s)
UploadPartCopy_non_existing_source_object_key(s)
UploadPartCopy_success(s)
UploadPartCopy_by_range_invalid_range(s)
UploadPartCopy_by_range_invalid_ranges(s)
UploadPartCopy_exceeding_copy_source_range(s)
UploadPartCopy_greater_range_than_obj_size(s)
UploadPartCopy_by_range_success(s)
//TODO: remove the condition after implementing checksums in azure
@@ -858,7 +862,8 @@ func GetIntTests() IntTests {
"GetObjectAttributes_checksums": GetObjectAttributes_checksums,
"GetObject_non_existing_key": GetObject_non_existing_key,
"GetObject_directory_object_noslash": GetObject_directory_object_noslash,
"GetObject_invalid_ranges": GetObject_invalid_ranges,
"GetObject_should_succeed_for_invalid_ranges": GetObject_should_succeed_for_invalid_ranges,
"GetObject_content_ranges": GetObject_content_ranges,
"GetObject_invalid_parent": GetObject_invalid_parent,
"GetObject_with_meta": GetObject_with_meta,
"GetObject_large_object": GetObject_large_object,
@@ -958,7 +963,8 @@ func GetIntTests() IntTests {
"UploadPartCopy_non_existing_source_bucket": UploadPartCopy_non_existing_source_bucket,
"UploadPartCopy_non_existing_source_object_key": UploadPartCopy_non_existing_source_object_key,
"UploadPartCopy_success": UploadPartCopy_success,
"UploadPartCopy_by_range_invalid_range": UploadPartCopy_by_range_invalid_range,
"UploadPartCopy_by_range_invalid_ranges": UploadPartCopy_by_range_invalid_ranges,
"UploadPartCopy_exceeding_copy_source_range": UploadPartCopy_exceeding_copy_source_range,
"UploadPartCopy_greater_range_than_obj_size": UploadPartCopy_greater_range_than_obj_size,
"UploadPartCopy_by_range_success": UploadPartCopy_by_range_success,
"UploadPartCopy_should_copy_the_checksum": UploadPartCopy_should_copy_the_checksum,

View File

@@ -33,7 +33,6 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"golang.org/x/sync/errgroup"
)
@@ -1333,7 +1332,7 @@ func PresignedAuth_Put_GetObject_with_data(s *S3Conf) error {
}
func PresignedAuth_Put_GetObject_with_UTF8_chars(s *S3Conf) error {
testName := "PresignedAuth_Put_GetObject_with_data"
testName := "PresignedAuth_Put_GetObject_with_UTF8_chars"
return presignedAuthHandler(s, testName, func(client *s3.PresignClient) error {
bucket, obj := getBucketName(), "my-$%^&*;"
err := setup(s, bucket)
@@ -3942,8 +3941,51 @@ func GetObject_directory_object_noslash(s *S3Conf) error {
})
}
func GetObject_invalid_ranges(s *S3Conf) error {
testName := "GetObject_invalid_ranges"
func GetObject_invalid_range(s *S3Conf) error {
testName := "GetObject_invalid_range"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
dataLength, obj := int64(2500), "my-obj"
_, err := putObjectWithData(dataLength, &s3.PutObjectInput{
Bucket: &bucket,
Key: &obj,
}, s3client)
if err != nil {
return err
}
getobj := func(acceptRange string) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &obj,
Range: &acceptRange,
})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidRange)); err != nil {
return err
}
return nil
}
for _, rg := range []string{
"bytes=2500-3000",
"bytes=2501-4000",
"bytes=5000-",
} {
err := getobj(rg)
if err != nil {
return err
}
}
return nil
})
}
func GetObject_should_succeed_for_invalid_ranges(s *S3Conf) error {
testName := "GetObject_should_succeed_for_invalid_ranges"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
dataLength, obj := int64(1234567), "my-obj"
@@ -3955,42 +3997,92 @@ func GetObject_invalid_ranges(s *S3Conf) error {
return err
}
getObj := func(acceptRange string) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
res, err := s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &obj,
Range: &acceptRange,
})
cancel()
if err != nil {
return err
}
if *res.ContentLength != dataLength {
return fmt.Errorf("expected Content-Length to be %v, instead got %v", dataLength, *res.ContentLength)
}
if getString(res.ContentRange) != "" {
return fmt.Errorf("expected empty Content-Range, instead got %v", *res.ContentRange)
}
if getString(res.AcceptRanges) != "bytes" {
return fmt.Errorf("expected the accept ranges to be 'bytes', instead got %v", getString(res.AcceptRanges))
}
return nil
}
for _, rg := range []string{
"bytes=invalid-range",
"bytes=33-10",
"bytes-12-34",
"bytes=-2-5",
"byte=100-200",
"bytes=inv-300",
} {
err := getObj(rg)
if err != nil {
return err
}
}
return nil
})
}
func GetObject_content_ranges(s *S3Conf) error {
testName := "GetObject_should_adjust_range_upper_limit"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
dataLength, obj := int64(1024), "my-obj"
_, err := putObjectWithData(dataLength, &s3.PutObjectInput{
Bucket: &bucket,
Key: &obj,
}, s3client)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.GetObject(ctx, &s3.GetObjectInput{
res, err := s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &obj,
Range: getPtr("bytes=invalid-range"),
})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidRange)); err != nil {
return err
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &obj,
Range: getPtr("bytes=33-10"),
})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidRange)); err != nil {
return err
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
resp, err := s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &obj,
Range: getPtr("bytes=1500-999999999999"),
Range: getPtr("bytes=100-"),
})
cancel()
if err != nil {
return err
}
if *resp.ContentLength != dataLength-1500 {
return fmt.Errorf("expected content-length to be %v, instead got %v", dataLength-1500, *resp.ContentLength)
expectedRange := "bytes 100-1023/1024"
if getString(res.ContentRange) != expectedRange {
return fmt.Errorf("expected the accept ranges to be %v, instead got %v", expectedRange, getString(res.ContentRange))
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
res, err = s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &obj,
Range: getPtr("bytes=100-99999999"),
})
cancel()
if err != nil {
return err
}
if getString(res.ContentRange) != expectedRange {
return fmt.Errorf("expected the accept ranges to be %v, instead got %v", expectedRange, getString(res.ContentRange))
}
return nil
})
}
@@ -4271,58 +4363,45 @@ func GetObject_by_range_success(s *S3Conf) error {
return err
}
rangeString := "bytes=100-200"
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &obj,
Range: &rangeString,
})
defer cancel()
if err != nil {
return err
}
defer out.Body.Close()
for _, el := range []struct {
acceptRange string
contentRange string
startOffset int64
endOffset int64
}{
{"bytes=100-200", fmt.Sprintf("bytes 100-200/%v", dataLength), 100, 201},
{"bytes=100-", fmt.Sprintf("bytes 100-1234566/%v", dataLength), 100, dataLength},
{"bytes=100-1234567", fmt.Sprintf("bytes 100-1234566/%v", dataLength), 100, dataLength},
} {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &obj,
Range: &el.acceptRange,
})
defer cancel()
if err != nil {
return err
}
defer out.Body.Close()
if getString(out.ContentRange) != fmt.Sprintf("bytes 100-200/%v", dataLength) {
return fmt.Errorf("expected content range: %v, instead got: %v", fmt.Sprintf("bytes 100-200/%v", dataLength), getString(out.ContentRange))
}
if getString(out.AcceptRanges) != rangeString {
return fmt.Errorf("expected accept range: %v, instead got: %v", rangeString, getString(out.AcceptRanges))
}
b, err := io.ReadAll(out.Body)
if err != nil && !errors.Is(err, io.EOF) {
return err
if getString(out.ContentRange) != el.contentRange {
return fmt.Errorf("expected content range: %v, instead got: %v", el.contentRange, getString(out.ContentRange))
}
if getString(out.AcceptRanges) != "bytes" {
return fmt.Errorf("expected accept range: bytes, instead got: %v", getString(out.AcceptRanges))
}
b, err := io.ReadAll(out.Body)
if err != nil && !errors.Is(err, io.EOF) {
return err
}
// bytes range is inclusive, go range for second value is not
if !isEqual(b, r.data[el.startOffset:el.endOffset]) {
return fmt.Errorf("data mismatch of range")
}
}
// bytes range is inclusive, go range for second value is not
if !isEqual(b, r.data[100:201]) {
return fmt.Errorf("data mismatch of range")
}
rangeString = "bytes=100-"
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
out, err = s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &obj,
Range: &rangeString,
})
defer cancel()
if err != nil {
return err
}
defer out.Body.Close()
b, err = io.ReadAll(out.Body)
if err != nil && !errors.Is(err, io.EOF) {
return err
}
// bytes range is inclusive, go range for second value is not
if !isEqual(b, r.data[100:]) {
return fmt.Errorf("data mismatch of range")
}
return nil
})
}
@@ -7716,16 +7795,16 @@ func UploadPartCopy_success(s *S3Conf) error {
})
}
func UploadPartCopy_by_range_invalid_range(s *S3Conf) error {
testName := "UploadPartCopy_by_range_invalid_range"
func UploadPartCopy_by_range_invalid_ranges(s *S3Conf) error {
testName := "UploadPartCopy_by_range_invalid_ranges"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
obj, srcBucket, srcObj := "my-obj", getBucketName(), "src-obj"
err := setup(s, srcBucket)
if err != nil {
return err
}
objSize := 5 * 1024 * 1024
_, err = putObjectWithData(int64(objSize), &s3.PutObjectInput{
objSize := int64(5 * 1024 * 1024)
_, err = putObjectWithData(objSize, &s3.PutObjectInput{
Bucket: &srcBucket,
Key: &srcObj,
}, s3client)
@@ -7738,21 +7817,95 @@ func UploadPartCopy_by_range_invalid_range(s *S3Conf) error {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
partNumber := int32(1)
_, err = s3client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
Bucket: &bucket,
CopySource: getPtr(srcBucket + "/" + srcObj),
UploadId: out.UploadId,
Key: &obj,
PartNumber: &partNumber,
CopySourceRange: getPtr("invalid-range"),
})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidRange)); err != nil {
uploadPartCopy := func(csRange string, ptNumber int32) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
Bucket: &bucket,
CopySource: getPtr(srcBucket + "/" + srcObj),
UploadId: out.UploadId,
Key: &obj,
PartNumber: &ptNumber,
CopySourceRange: &csRange,
})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidCopySourceRange)); err != nil {
return err
}
return nil
}
for i, rg := range []string{
"byte=100-200",
"bytes=invalid-range",
"bytes=200-100",
"bytes=-2-300",
"bytes=aa-12",
"bytes=12-aa",
"bytes=bb-",
} {
err := uploadPartCopy(rg, int32(i+1))
if err != nil {
return err
}
}
err = teardown(s, srcBucket)
if err != nil {
return err
}
return nil
})
}
func UploadPartCopy_exceeding_copy_source_range(s *S3Conf) error {
testName := "UploadPartCopy_exceeding_copy_source_range"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
obj, srcBucket, srcObj := "my-obj", getBucketName(), "src-obj"
err := setup(s, srcBucket)
if err != nil {
return err
}
objSize := int64(1000)
_, err = putObjectWithData(objSize, &s3.PutObjectInput{
Bucket: &srcBucket,
Key: &srcObj,
}, s3client)
if err != nil {
return err
}
out, err := createMp(s3client, bucket, obj)
if err != nil {
return err
}
uploadPartCopy := func(csRange string, ptNumber int32) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
Bucket: &bucket,
CopySource: getPtr(srcBucket + "/" + srcObj),
UploadId: out.UploadId,
Key: &obj,
PartNumber: &ptNumber,
CopySourceRange: &csRange,
})
cancel()
return checkApiErr(err, s3err.CreateExceedingRangeErr(objSize))
}
for i, rg := range []string{
"bytes=100-1005",
"bytes=1250-3000",
"bytes=100-1000",
} {
err := uploadPartCopy(rg, int32(i+1))
if err != nil {
return err
}
}
err = teardown(s, srcBucket)
if err != nil {
return err
@@ -7795,7 +7948,7 @@ func UploadPartCopy_greater_range_than_obj_size(s *S3Conf) error {
PartNumber: &partNumber,
})
cancel()
if err := checkApiErr(err, backend.CreateExceedingRangeErr(int64(srcObjSize))); err != nil {
if err := checkApiErr(err, s3err.CreateExceedingRangeErr(int64(srcObjSize))); err != nil {
return err
}