Mirror of https://github.com/versity/versitygw.git

Compare commits (7 commits): d98ca9b034, 034feb746b, 86742997cc, 8fa2b58f8e, 2d82ef8463, 7ea386aec9, f0005a0047
@@ -25,6 +25,7 @@ import (
 	"io"
 	"math"
 	"os"
+	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -215,7 +216,17 @@ func (az *Azure) HeadBucket(ctx context.Context, input *s3.HeadBucketInput) (*s3
 }
 
 func (az *Azure) DeleteBucket(ctx context.Context, input *s3.DeleteBucketInput) error {
-	_, err := az.client.DeleteContainer(ctx, *input.Bucket, nil)
+	pager := az.client.NewListBlobsFlatPager(*input.Bucket, nil)
+
+	pg, err := pager.NextPage(ctx)
+	if err != nil {
+		return azureErrToS3Err(err)
+	}
+
+	if len(pg.Segment.BlobItems) > 0 {
+		return s3err.GetAPIError(s3err.ErrBucketNotEmpty)
+	}
+	_, err = az.client.DeleteContainer(ctx, *input.Bucket, nil)
 	return azureErrToS3Err(err)
 }
 
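With this change DeleteBucket first lists one page of blobs and returns BucketNotEmpty before the container is removed. As a rough client-side sketch (not part of this commit), assuming the gateway surfaces the standard S3 error code "BucketNotEmpty", an aws-sdk-go-v2 caller could detect the rejection like this; the helper name is illustrative:

package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// deleteBucketIfEmpty attempts to delete a bucket and reports whether the
// request was rejected because objects are still present.
func deleteBucketIfEmpty(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{Bucket: &bucket})
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) && apiErr.ErrorCode() == "BucketNotEmpty" {
		return fmt.Errorf("bucket %q still has objects: %w", bucket, err)
	}
	return err
}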
@@ -319,7 +330,7 @@ func (az *Azure) DeleteBucketTagging(ctx context.Context, bucket string) error {
 func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
 	var opts *azblob.DownloadStreamOptions
 	if *input.Range != "" {
-		offset, count, err := parseRange(*input.Range)
+		offset, count, err := backend.ParseRange(0, *input.Range)
 		if err != nil {
 			return nil, err
 		}
@@ -360,6 +371,37 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput, writer
 }
 
 func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
+	if input.PartNumber != nil {
+		client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
+		if err != nil {
+			return nil, err
+		}
+
+		res, err := client.GetBlockList(ctx, blockblob.BlockListTypeUncommitted, nil)
+		if err != nil {
+			return nil, azureErrToS3Err(err)
+		}
+
+		partsCount := int32(len(res.UncommittedBlocks))
+
+		for _, block := range res.UncommittedBlocks {
+			partNumber, err := decodeBlockId(*block.Name)
+			if err != nil {
+				return nil, err
+			}
+
+			if partNumber == int(*input.PartNumber) {
+				return &s3.HeadObjectOutput{
+					ContentLength: block.Size,
+					ETag:          block.Name,
+					PartsCount:    &partsCount,
+				}, nil
+			}
+		}
+
+		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
+	}
+
 	client, err := az.getBlobClient(*input.Bucket, *input.Key)
 	if err != nil {
 		return nil, err
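The part lookup above (and the part ordering in CompleteMultipartUpload further down) relies on decodeBlockId to turn an Azure block name back into an S3 part number. As a standalone illustration only, not the gateway's own helpers, and assuming block IDs are the base64 form of a 4-byte little-endian part number as implied by the binary.LittleEndian.Uint32 call in decodeBlockId later in this diff:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// encodePartID builds an Azure-style block ID from an S3 part number:
// the number is packed as 4 little-endian bytes and base64-encoded.
// Illustrative only; the gateway's actual helper may differ in padding or length.
func encodePartID(partNumber int) string {
	b := make([]byte, 4)
	binary.LittleEndian.PutUint32(b, uint32(partNumber))
	return base64.StdEncoding.EncodeToString(b)
}

// decodePartID reverses encodePartID, mirroring the
// binary.LittleEndian.Uint32 decoding shown in decodeBlockId.
func decodePartID(blockID string) (int, error) {
	b, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil || len(b) < 4 {
		return 0, fmt.Errorf("invalid block id %q", blockID)
	}
	return int(binary.LittleEndian.Uint32(b)), nil
}

func main() {
	id := encodePartID(3)
	n, _ := decodePartID(id)
	fmt.Println(id, n) // prints "AwAAAA== 3"; block names compare by their decoded part number
}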
@@ -370,7 +412,7 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
 		return nil, azureErrToS3Err(err)
 	}
 
-	return &s3.HeadObjectOutput{
+	result := &s3.HeadObjectOutput{
 		AcceptRanges:  resp.AcceptRanges,
 		ContentLength: resp.ContentLength,
 		ContentType:   resp.ContentType,
@@ -381,7 +423,27 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
 		LastModified:  resp.LastModified,
 		Metadata:      parseAzMetadata(resp.Metadata),
 		Expires:       resp.ExpiresOn,
-	}, nil
+	}
+
+	status, ok := resp.Metadata[string(keyObjLegalHold)]
+	if ok {
+		if *status == "1" {
+			result.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOn
+		} else {
+			result.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOff
+		}
+	}
+
+	retention, ok := resp.Metadata[string(keyObjRetention)]
+	if ok {
+		var config types.ObjectLockRetention
+		if err := json.Unmarshal([]byte(*retention), &config); err == nil {
+			result.ObjectLockMode = types.ObjectLockMode(config.Mode)
+			result.ObjectLockRetainUntilDate = config.RetainUntilDate
+		}
+	}
+
+	return result, nil
 }
 
 func (az *Azure) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
@@ -433,7 +495,7 @@ func (az *Azure) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAtt
 			IsTruncated:          resp.IsTruncated,
 			MaxParts:             resp.MaxParts,
 			PartNumberMarker:     resp.PartNumberMarker,
-			NextPartNumberMarker: resp.PartNumberMarker,
+			NextPartNumberMarker: resp.NextPartNumberMarker,
 			Parts:                parts,
 		},
 	}, nil
@@ -494,8 +556,14 @@ Pager:
 }
 
 func (az *Azure) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
+	marker := ""
+	if *input.ContinuationToken > *input.StartAfter {
+		marker = *input.ContinuationToken
+	} else {
+		marker = *input.StartAfter
+	}
 	pager := az.client.NewListBlobsFlatPager(*input.Bucket, &azblob.ListBlobsFlatOptions{
-		Marker:     input.ContinuationToken,
+		Marker:     &marker,
 		MaxResults: input.MaxKeys,
 		Prefix:     input.Prefix,
 	})
@@ -544,6 +612,7 @@ Pager:
 		NextContinuationToken: nextMarker,
 		Prefix:                input.Prefix,
 		IsTruncated:           &isTruncated,
+		Delimiter:             input.Delimiter,
 	}, nil
 }
 
@@ -760,20 +829,20 @@ func (az *Azure) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3res
 		if err != nil {
 			return s3response.ListPartsResult{}, err
 		}
-		if partNumberMarker != 0 && partNumberMarker < partNumber {
+		if partNumberMarker != 0 && partNumberMarker >= partNumber {
 			continue
 		}
-		if len(parts) >= int(maxParts) {
-			nextPartNumberMarker = partNumber
-			isTruncated = true
-			break
-		}
 		parts = append(parts, s3response.Part{
 			Size:         *el.Size,
 			ETag:         *el.Name,
 			PartNumber:   partNumber,
 			LastModified: time.Now().Format(backend.RFC3339TimeFormat),
 		})
+		if len(parts) >= int(maxParts) {
+			nextPartNumberMarker = partNumber
+			isTruncated = true
+			break
+		}
 	}
 	return s3response.ListPartsResult{
 		Bucket: *input.Bucket,
@@ -861,9 +930,37 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete
 		return nil, err
 	}
 	blockIds := []string{}
-	for _, el := range input.MultipartUpload.Parts {
-		blockIds = append(blockIds, *el.ETag)
+
+	blockList, err := client.GetBlockList(ctx, blockblob.BlockListTypeUncommitted, nil)
+	if err != nil {
+		return nil, azureErrToS3Err(err)
+	}
+
+	if len(blockList.UncommittedBlocks) != len(input.MultipartUpload.Parts) {
+		return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
+	}
+
+	slices.SortFunc(blockList.UncommittedBlocks, func(a *blockblob.Block, b *blockblob.Block) int {
+		ptNumber, _ := decodeBlockId(*a.Name)
+		nextPtNumber, _ := decodeBlockId(*b.Name)
+		return ptNumber - nextPtNumber
+	})
+
+	for i, block := range blockList.UncommittedBlocks {
+		ptNumber, err := decodeBlockId(*block.Name)
+		if err != nil {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
+		}
+
+		if *input.MultipartUpload.Parts[i].ETag != *block.Name {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
+		}
+		if *input.MultipartUpload.Parts[i].PartNumber != int32(ptNumber) {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
+		}
+		blockIds = append(blockIds, *block.Name)
 	}
+
 	resp, err := client.CommitBlockList(ctx, blockIds, nil)
 	if err != nil {
 		return nil, parseMpError(err)
@@ -954,7 +1051,7 @@ func (az *Azure) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, er
 
 	policyPtr, ok := props.Metadata[string(keyPolicy)]
 	if !ok {
-		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
+		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)
 	}
 
 	policy, err := base64.StdEncoding.DecodeString(*policyPtr)
@@ -1064,7 +1161,28 @@ func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, version
 			string(keyObjRetention): backend.GetStringPtr(string(retention)),
 		}
 	} else {
-		meta[string(keyObjRetention)] = backend.GetStringPtr(string(retention))
+		objLockCfg, ok := meta[string(keyObjRetention)]
+		if !ok {
+			meta[string(keyObjRetention)] = backend.GetStringPtr(string(retention))
+		} else {
+			var lockCfg types.ObjectLockRetention
+			if err := json.Unmarshal([]byte(*objLockCfg), &lockCfg); err != nil {
+				return fmt.Errorf("unmarshal object lock config: %w", err)
+			}
+
+			switch lockCfg.Mode {
+			// Compliance mode can't be overridden
+			case types.ObjectLockRetentionModeCompliance:
+				return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+			// To override governance mode user should have "s3:BypassGovernanceRetention" permission
+			case types.ObjectLockRetentionModeGovernance:
+				if !bypass {
+					return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
+				}
+			}
+
+			meta[string(keyObjRetention)] = backend.GetStringPtr(string(retention))
+		}
 	}
 
 	_, err = blobClient.SetMetadata(ctx, meta, nil)
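PutObjectRetention stores the retention configuration as JSON in blob metadata and re-reads it here to decide whether an existing COMPLIANCE or GOVERNANCE lock blocks the update. A minimal sketch of that round trip using the same SDK type; values are illustrative and this is not the gateway's own code:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Marshal a governance-mode retention the way the gateway would store it in metadata.
	until := time.Now().Add(24 * time.Hour)
	stored, _ := json.Marshal(types.ObjectLockRetention{
		Mode:            types.ObjectLockRetentionModeGovernance,
		RetainUntilDate: aws.Time(until),
	})

	// Later, unmarshal and apply the same mode checks as the diff above.
	var lockCfg types.ObjectLockRetention
	if err := json.Unmarshal(stored, &lockCfg); err != nil {
		panic(err)
	}
	switch lockCfg.Mode {
	case types.ObjectLockRetentionModeCompliance:
		fmt.Println("compliance: cannot be overridden")
	case types.ObjectLockRetentionModeGovernance:
		fmt.Println("governance: requires s3:BypassGovernanceRetention to override")
	}
}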
@@ -1356,39 +1474,6 @@ func decodeBlockId(blockID string) (int, error) {
 	return int(binary.LittleEndian.Uint32(slice)), nil
 }
 
-func parseRange(rg string) (offset, count int64, err error) {
-	rangeKv := strings.Split(rg, "=")
-
-	if len(rangeKv) < 2 {
-		return 0, 0, s3err.GetAPIError(s3err.ErrInvalidRange)
-	}
-
-	bRange := strings.Split(rangeKv[1], "-")
-	if len(bRange) < 1 || len(bRange) > 2 {
-		return 0, 0, s3err.GetAPIError(s3err.ErrInvalidRange)
-	}
-
-	offset, err = strconv.ParseInt(bRange[0], 10, 64)
-	if err != nil {
-		return 0, 0, s3err.GetAPIError(s3err.ErrInvalidRange)
-	}
-
-	if len(bRange) == 1 || bRange[1] == "" {
-		return offset, count, nil
-	}
-
-	count, err = strconv.ParseInt(bRange[1], 10, 64)
-	if err != nil {
-		return 0, 0, s3err.GetAPIError(s3err.ErrInvalidRange)
-	}
-
-	if count < offset {
-		return 0, 0, s3err.GetAPIError(s3err.ErrInvalidRange)
-	}
-
-	return offset, count - offset + 1, nil
-}
-
 func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
 	aclPtr, ok := meta[string(key)]
 	if !ok {
@@ -18,7 +18,6 @@ import (
 	"crypto/md5"
 	"encoding/hex"
 	"fmt"
-	"io/fs"
 	"strconv"
 	"strings"
 	"time"
@@ -61,9 +60,9 @@ var (
 
 // ParseRange parses input range header and returns startoffset, length, and
 // error. If no endoffset specified, then length is set to -1.
-func ParseRange(fi fs.FileInfo, acceptRange string) (int64, int64, error) {
+func ParseRange(size int64, acceptRange string) (int64, int64, error) {
 	if acceptRange == "" {
-		return 0, fi.Size(), nil
+		return 0, size, nil
 	}
 
 	rangeKv := strings.Split(acceptRange, "=")
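The new ParseRange takes the object size directly instead of an fs.FileInfo, which is what lets the Azure backend earlier in this diff call it with a size of 0. A minimal caller, assuming the exported package path github.com/versity/versitygw/backend and the behavior stated in the doc comment above (an open-ended range yields a length of -1); the expected values in the comments are illustrative, not test output:

package main

import (
	"fmt"

	"github.com/versity/versitygw/backend"
)

func main() {
	// Fully specified range: bytes 100 through 199 of a 1 MiB object.
	offset, length, err := backend.ParseRange(1<<20, "bytes=100-199")
	fmt.Println(offset, length, err) // expected: offset 100, length 100

	// Open-ended range: per the doc comment, length comes back as -1.
	offset, length, err = backend.ParseRange(1<<20, "bytes=512-")
	fmt.Println(offset, length, err) // expected: offset 512, length -1
}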
@@ -1199,7 +1199,7 @@ func (p *Posix) UploadPartCopy(ctx context.Context, upi *s3.UploadPartCopyInput)
 		return s3response.CopyObjectResult{}, fmt.Errorf("stat object: %w", err)
 	}
 
-	startOffset, length, err := backend.ParseRange(fi, *upi.CopySourceRange)
+	startOffset, length, err := backend.ParseRange(fi.Size(), *upi.CopySourceRange)
 	if err != nil {
 		return s3response.CopyObjectResult{}, err
 	}
@@ -1565,7 +1565,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
 	}
 
 	acceptRange := *input.Range
-	startOffset, length, err := backend.ParseRange(fi, acceptRange)
+	startOffset, length, err := backend.ParseRange(fi.Size(), acceptRange)
 	if err != nil {
 		return nil, err
 	}
@@ -611,7 +611,7 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer
 		return nil, fmt.Errorf("stat object: %w", err)
 	}
 
-	startOffset, length, err := backend.ParseRange(fi, acceptRange)
+	startOffset, length, err := backend.ParseRange(fi.Size(), acceptRange)
 	if err != nil {
 		return nil, err
 	}
@@ -50,12 +50,13 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
 	}
 
 	return &ScoutFS{
-		Posix:    p,
-		rootfd:   f,
-		rootdir:  rootdir,
-		meta:     metastore,
-		chownuid: opts.ChownUID,
-		chowngid: opts.ChownGID,
+		Posix:       p,
+		rootfd:      f,
+		rootdir:     rootdir,
+		meta:        metastore,
+		chownuid:    opts.ChownUID,
+		chowngid:    opts.ChownGID,
+		glaciermode: opts.GlacierMode,
 	}, nil
 }
 
go.mod (2 lines changed)
@@ -4,7 +4,7 @@ go 1.21.0
 
 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
 	github.com/DataDog/datadog-go/v5 v5.5.0
 	github.com/aws/aws-sdk-go-v2 v1.27.2
go.sum (4 lines changed)
@@ -1,7 +1,7 @@
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 h1:1nGuui+4POelzDwI7RG56yfQJHCnKvwfMoU7VsEp+Zg=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 h1:H+U3Gk9zY56G3u872L82bk4thcsy2Gghb9ExT4Zvm1o=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax62XQTd0N4aAqSE5E0DulSpVYK7vc=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
@@ -2548,23 +2548,20 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
 		key = key + "/"
 	}
 
-	var restoreRequest s3.RestoreObjectInput
 	if ctx.Request().URI().QueryArgs().Has("restore") {
-		err := xml.Unmarshal(ctx.Body(), &restoreRequest)
-		if err != nil {
-			if c.debug {
-				log.Printf("error unmarshalling restore object: %v", err)
+		var restoreRequest types.RestoreRequest
+		if err := xml.Unmarshal(ctx.Body(), &restoreRequest); err != nil {
+			if !errors.Is(io.EOF, err) {
+				return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
+					&MetaOpts{
+						Logger:      c.logger,
+						MetricsMng:  c.mm,
+						Action:      metrics.ActionRestoreObject,
+						BucketOwner: parsedAcl.Owner,
+					})
 			}
-			return SendResponse(ctx, err,
-				&MetaOpts{
-					Logger:      c.logger,
-					MetricsMng:  c.mm,
-					Action:      metrics.ActionRestoreObject,
-					BucketOwner: parsedAcl.Owner,
-				})
 		}
 
-		err = auth.VerifyAccess(ctx.Context(), c.be,
+		err := auth.VerifyAccess(ctx.Context(), c.be,
 			auth.AccessOptions{
 				Readonly: c.readonly,
 				Acl:      parsedAcl,
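The handler now tolerates an empty request body: xml.Unmarshal of zero bytes returns io.EOF, which is skipped, while any other parse failure maps to MalformedXML. A standalone sketch of that distinction, not the handler's own code; the XML payload shown is a hypothetical example rather than one taken from the tests:

package main

import (
	"encoding/xml"
	"errors"
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func parseRestore(body []byte) (types.RestoreRequest, error) {
	var rr types.RestoreRequest
	if err := xml.Unmarshal(body, &rr); err != nil {
		if !errors.Is(err, io.EOF) {
			// Non-empty but unparsable body: the handler would map this to ErrMalformedXML.
			return rr, err
		}
		// Empty body: io.EOF is ignored and the zero-value request is used.
	}
	return rr, nil
}

func main() {
	// An empty body behaves like the "Restore-object-error" test case that now expects 200.
	if _, err := parseRestore(nil); err == nil {
		fmt.Println("empty body accepted")
	}

	// Hypothetical RestoreRequest payload.
	body := []byte(`<RestoreRequest><Days>1</Days></RestoreRequest>`)
	rr, err := parseRestore(body)
	fmt.Printf("%+v %v\n", rr, err)
}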
@@ -2585,10 +2582,11 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
 				})
 		}
 
-		restoreRequest.Bucket = &bucket
-		restoreRequest.Key = &key
-
-		err = c.be.RestoreObject(ctx.Context(), &restoreRequest)
+		err = c.be.RestoreObject(ctx.Context(), &s3.RestoreObjectInput{
+			Bucket:         &bucket,
+			Key:            &key,
+			RestoreRequest: &restoreRequest,
+		})
 		return SendResponse(ctx, err,
 			&MetaOpts{
 				Logger: c.logger,
@@ -1646,20 +1646,11 @@ func TestS3ApiController_CreateActions(t *testing.T) {
-		{
-			name: "Restore-object-success",
-			app:  app,
-			args: args{
-				req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?restore", strings.NewReader(`<root><key>body</key></root>`)),
-			},
-			wantErr:    false,
-			statusCode: 200,
-		},
 		{
 			name: "Restore-object-error",
 			app:  app,
 			args: args{
 				req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?restore", nil),
 			},
 			wantErr:    false,
-			statusCode: 500,
+			statusCode: 200,
 		},
 		{
 			name: "Select-object-content-invalid-body",
@@ -236,6 +236,7 @@ func TestUploadPartCopy(s *S3Conf) {
 func TestListParts(s *S3Conf) {
 	ListParts_incorrect_uploadId(s)
 	ListParts_incorrect_object_key(s)
+	ListParts_truncated(s)
 	ListParts_success(s)
 }
 
@@ -605,6 +606,7 @@ func GetIntTests() IntTests {
 		"UploadPartCopy_by_range_success":          UploadPartCopy_by_range_success,
 		"ListParts_incorrect_uploadId":             ListParts_incorrect_uploadId,
 		"ListParts_incorrect_object_key":           ListParts_incorrect_object_key,
+		"ListParts_truncated":                      ListParts_truncated,
 		"ListParts_success":                        ListParts_success,
 		"ListMultipartUploads_non_existing_bucket": ListMultipartUploads_non_existing_bucket,
 		"ListMultipartUploads_empty_result":        ListMultipartUploads_empty_result,
@@ -5274,6 +5274,70 @@ func ListParts_incorrect_object_key(s *S3Conf) error {
 	})
 }
 
+func ListParts_truncated(s *S3Conf) error {
+	testName := "ListParts_truncated"
+	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
+		obj := "my-obj"
+		out, err := createMp(s3client, bucket, obj)
+		if err != nil {
+			return err
+		}
+
+		parts, err := uploadParts(s3client, 5*1024*1024, 5, bucket, obj, *out.UploadId)
+		if err != nil {
+			return err
+		}
+
+		maxParts := int32(3)
+
+		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
+		res, err := s3client.ListParts(ctx, &s3.ListPartsInput{
+			Bucket:   &bucket,
+			Key:      &obj,
+			UploadId: out.UploadId,
+			MaxParts: &maxParts,
+		})
+		cancel()
+		if err != nil {
+			return err
+		}
+
+		if !*res.IsTruncated {
+			return fmt.Errorf("expected the result to be truncated")
+		}
+		if *res.MaxParts != maxParts {
+			return fmt.Errorf("expected max-parts to be %v, instead got %v", maxParts, *res.MaxParts)
+		}
+		if *res.NextPartNumberMarker != fmt.Sprint(*parts[2].PartNumber) {
+			return fmt.Errorf("expected next part number marker to be %v, instead got %v", fmt.Sprint(*parts[2].PartNumber), *res.NextPartNumberMarker)
+		}
+		if ok := compareParts(res.Parts, parts[:3]); !ok {
+			return fmt.Errorf("expected the parts data to be %v, instead got %v", parts[:3], res.Parts)
+		}
+
+		ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
+		res2, err := s3client.ListParts(ctx, &s3.ListPartsInput{
+			Bucket:           &bucket,
+			Key:              &obj,
+			UploadId:         out.UploadId,
+			PartNumberMarker: res.NextPartNumberMarker,
+		})
+		cancel()
+		if err != nil {
+			return err
+		}
+
+		if *res2.PartNumberMarker != *res.NextPartNumberMarker {
+			return fmt.Errorf("expected part number marker to be %v, instead got %v", *res.NextPartNumberMarker, *res2.PartNumberMarker)
+		}
+		if ok := compareParts(parts[3:], res2.Parts); !ok {
+			return fmt.Errorf("expected the parts data to be %v, instead got %v", parts[3:], res2.Parts)
+		}
+
+		return nil
+	})
+}
+
 func ListParts_success(s *S3Conf) error {
 	testName := "ListParts_success"
 	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -5471,7 +5535,7 @@ func ListMultipartUploads_ignore_upload_id_marker(s *S3Conf) error {
 }
 
 func ListMultipartUploads_success(s *S3Conf) error {
-	testName := "ListMultipartUploads_max_uploads"
+	testName := "ListMultipartUploads_success"
 	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
 		obj1, obj2 := "my-obj-1", "my-obj-2"
 		out1, err := createMp(s3client, bucket, obj1)