Merge pull request #1031 from versity/fix/complete-mp-parts-sizing

fix: Adds the minimum allowed size (5 MiB) check for mp part sizes in…
Ben McClelland
2025-02-03 10:51:21 -08:00
committed by GitHub
7 changed files with 64 additions and 15 deletions
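For context on the rule this change enforces: S3 requires every part of a multipart upload except the last to be at least 5 MiB, and completing an upload with a smaller non-final part fails with EntityTooSmall. A minimal, self-contained Go sketch of that check (hypothetical names, not code from this commit):

```go
package main

import "fmt"

const minPartSize = 5 * 1024 * 1024 // 5 MiB, mirroring backend.MinPartSize

// validatePartSizes reports whether every part except the last
// meets the minimum allowed multipart part size.
func validatePartSizes(sizes []int64) bool {
	last := len(sizes) - 1
	for i, size := range sizes {
		if i < last && size < minPartSize {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validatePartSizes([]int64{6 << 20, 8 << 20, 1 << 20})) // true: only the last part is small
	fmt.Println(validatePartSizes([]int64{256, 256, 256, 256}))        // false: maps to ErrEntityTooSmall
}
```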

View File

@@ -1201,6 +1201,7 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete
return ptNumber - nextPtNumber
})
last := len(blockList.UncommittedBlocks) - 1
for i, block := range blockList.UncommittedBlocks {
ptNumber, err := decodeBlockId(*block.Name)
if err != nil {
@@ -1213,6 +1214,11 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete
if *input.MultipartUpload.Parts[i].PartNumber != int32(ptNumber) {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
// all parts except the last need to be greater than
// the minimum allowed size (5 MiB)
if i < last && *block.Size < backend.MinPartSize {
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
}
blockIds = append(blockIds, *block.Name)
}

View File

@@ -34,6 +34,9 @@ const (
// this is the media type for directories in AWS and Nextcloud
DirContentType = "application/x-directory"
DefaultContentType = "binary/octet-stream"
// this is the minimum allowed size for multipart upload parts
MinPartSize = 5 * 1024 * 1024
)
func IsValidBucketName(name string) bool { return true }

View File

@@ -1375,7 +1375,6 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
// check all parts ok
last := len(parts) - 1
partsize := int64(0)
var totalsize int64
for i, part := range parts {
if part.PartNumber == nil || *part.PartNumber < 1 {
@@ -1389,13 +1388,11 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if i == 0 {
partsize = fi.Size()
}
totalsize += fi.Size()
// all parts except the last need to be the same size
if i < last && partsize != fi.Size() {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
// all parts except the last need to be greater than
// the minimum allowed size (5 MiB)
if i < last && fi.Size() < backend.MinPartSize {
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
}
b, err := p.meta.RetrieveAttribute(nil, bucket, partObjPath, etagkey)

View File

@@ -321,6 +321,7 @@ func TestCompleteMultipartUpload(s *S3Conf) {
CompletedMultipartUpload_non_existing_bucket(s)
CompleteMultipartUpload_invalid_part_number(s)
CompleteMultipartUpload_invalid_ETag(s)
CompleteMultipartUpload_small_upload_size(s)
CompleteMultipartUpload_empty_parts(s)
CompleteMultipartUpload_success(s)
if !s.azureTests {
@@ -856,7 +857,6 @@ func GetIntTests() IntTests {
"CompletedMultipartUpload_non_existing_bucket": CompletedMultipartUpload_non_existing_bucket,
"CompleteMultipartUpload_invalid_part_number": CompleteMultipartUpload_invalid_part_number,
"CompleteMultipartUpload_invalid_ETag": CompleteMultipartUpload_invalid_ETag,
"CompleteMultipartUpload_empty_parts": CompleteMultipartUpload_empty_parts,
"CompleteMultipartUpload_success": CompleteMultipartUpload_success,
"CompleteMultipartUpload_racey_success": CompleteMultipartUpload_racey_success,
"PutBucketAcl_non_existing_bucket": PutBucketAcl_non_existing_bucket,

View File

@@ -7192,6 +7192,49 @@ func CompleteMultipartUpload_invalid_ETag(s *S3Conf) error {
})
}
func CompleteMultipartUpload_small_upload_size(s *S3Conf) error {
testName := "CompleteMultipartUpload_small_upload_size"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
obj := "my-obj"
mp, err := createMp(s3client, bucket, obj)
if err != nil {
return err
}
// Each uploaded part is 256 bytes, well below the minimum allowed size (5 MiB)
parts, _, err := uploadParts(s3client, 1024, 4, bucket, obj, *mp.UploadId)
if err != nil {
return err
}
cParts := []types.CompletedPart{}
for _, el := range parts {
cParts = append(cParts, types.CompletedPart{
PartNumber: el.PartNumber,
ETag: el.ETag,
})
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
Bucket: &bucket,
Key: &obj,
UploadId: mp.UploadId,
MultipartUpload: &types.CompletedMultipartUpload{
Parts: cParts,
},
})
cancel()
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrEntityTooSmall)); err != nil {
return err
}
return nil
})
}
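Clients avoid this error by choosing part boundaries so that only the final part may fall below the minimum. A minimal sketch of such a split (hypothetical helper, not part of this repository):

```go
package main

import "fmt"

const minPartSize = 5 * 1024 * 1024 // 5 MiB

// splitParts returns chunk lengths for a payload of totalSize bytes such
// that every chunk except the last is partSize bytes (>= minPartSize).
func splitParts(totalSize, partSize int64) []int64 {
	if partSize < minPartSize {
		partSize = minPartSize
	}
	var parts []int64
	remaining := totalSize
	for remaining > partSize {
		parts = append(parts, partSize)
		remaining -= partSize
	}
	if remaining > 0 {
		parts = append(parts, remaining)
	}
	return parts
}

func main() {
	// 25 MiB split into 5 MiB parts, matching the sizes the updated tests use.
	fmt.Println(splitParts(25*1024*1024, 5*1024*1024))
}
```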
func CompleteMultipartUpload_empty_parts(s *S3Conf) error {
testName := "CompleteMultipartUpload_empty_parts"
return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -7233,7 +7276,7 @@ func CompleteMultipartUpload_success(s *S3Conf) error {
return err
}
objSize := int64(5 * 1024 * 1024)
objSize := int64(25 * 1024 * 1024)
parts, csum, err := uploadParts(s3client, objSize, 5, bucket, obj, *out.UploadId)
if err != nil {
return err
@@ -7326,7 +7369,7 @@ func CompleteMultipartUpload_racey_success(s *S3Conf) error {
var mu sync.RWMutex
uploads := make([]mpinfo, 10)
sums := make([]string, 10)
objSize := int64(5 * 1024 * 1024)
objSize := int64(25 * 1024 * 1024)
eg := errgroup.Group{}
for i := 0; i < 10; i++ {
@@ -12990,7 +13033,7 @@ func Versioning_Multipart_Upload_success(s *S3Conf) error {
return err
}
objSize := int64(5 * 1024 * 1024)
objSize := int64(25 * 1024 * 1024)
parts, _, err := uploadParts(s3client, objSize, 5, bucket, obj, *out.UploadId)
if err != nil {
return err
@@ -13070,7 +13113,7 @@ func Versioning_Multipart_Upload_overwrite_an_object(s *S3Conf) error {
return err
}
objSize := int64(5 * 1024 * 1024)
objSize := int64(25 * 1024 * 1024)
parts, _, err := uploadParts(s3client, objSize, 5, bucket, obj, *out.UploadId)
if err != nil {
return err

View File

@@ -54,7 +54,7 @@ source ./tests/commands/list_multipart_uploads.sh
run create_test_files "$bucket_file"
assert_success
run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1
run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=20M count=1
assert_success
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
@@ -115,7 +115,7 @@ source ./tests/commands/list_multipart_uploads.sh
run create_test_file "$bucket_file"
assert_success
run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1
run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=20M count=1
assert_success
run setup_bucket "s3api" "$BUCKET_ONE_NAME"

View File

@@ -315,7 +315,7 @@ setup_multipart_upload_with_params() {
return 1
fi
if ! result=$(dd if=/dev/urandom of="$TEST_FILE_FOLDER/$2" bs=5M count=1 2>&1); then
if ! result=$(dd if=/dev/urandom of="$TEST_FILE_FOLDER/$2" bs=20M count=1 2>&1); then
log 2 "error creating large file: $result"
return 1
fi