Mirror of https://github.com/versity/versitygw.git (synced 2026-02-02 16:32:03 +00:00)

Compare commits: v1.0.6...ben/plugfe (40 commits)
| SHA1 |
|---|
| 7086579590 |
| dea5e0c0b2 |
| 16995acc17 |
| 1f41f91f2d |
| 21a51380f9 |
| 590295c3d1 |
| f43040d1da |
| ae1c566656 |
| 53a7abf82f |
| 9dbfaeed0c |
| 080fd0136c |
| 220819444f |
| cf6c1b97d1 |
| d50027419e |
| 2d9a7cc019 |
| de67b1d718 |
| 22a958bcc4 |
| cecf563d92 |
| 47d1a799f6 |
| 4ed54d9bd9 |
| 7127cdeee5 |
| a6fd1322f7 |
| 5b6f806829 |
| 90fb90d9a5 |
| f742a40ac2 |
| 8a46de8e3b |
| 448765ba04 |
| dc71365bab |
| 6ad1e25c2b |
| cae5535556 |
| bdc8324242 |
| e4bc3d51e5 |
| ccd4166b2e |
| 3bf8b296d8 |
| 66a7879b0a |
| 5321095de5 |
| 1adf3d9565 |
| 2823676aa2 |
| ddcc62ae0a |
| 151326b5d7 |
12 .github/workflows/docker-bats.yaml (vendored)
@@ -7,22 +7,22 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - name: Checkout code
-        uses: actions/checkout@v2
+      - name: Checkout
+        uses: actions/checkout@v4

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3

       - name: Build Docker Image
         run: |
           mv tests/.env.docker.default tests/.env.docker
           mv tests/.secrets.default tests/.secrets
-          docker build --build-arg="GO_LIBRARY=go1.21.7.linux-amd64.tar.gz" \
+          docker build --build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
             --build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" --build-arg="MC_FOLDER=linux-amd64" \
-            --progress=plain -f Dockerfile_test_bats -t bats_test .
+            --progress=plain -f tests/Dockerfile_test_bats -t bats_test .

       - name: Set up Docker Compose
         run: sudo apt-get install -y docker-compose

       - name: Run Docker Container
-        run: docker-compose -f docker-compose-bats.yml up posix_backend
+        run: docker-compose -f tests/docker-compose-bats.yml up --exit-code-from posix_backend posix_backend
57 .github/workflows/system.yml (vendored)
@@ -8,7 +8,7 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - set: 1
+          - set: "s3cmd, posix"
            LOCAL_FOLDER: /tmp/gw1
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-1
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-1
@@ -19,7 +19,7 @@ jobs:
            RECREATE_BUCKETS: "true"
            PORT: 7070
            BACKEND: "posix"
-          - set: 2
+          - set: "s3, posix"
            LOCAL_FOLDER: /tmp/gw2
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-2
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-2
@@ -30,7 +30,7 @@ jobs:
            RECREATE_BUCKETS: "true"
            PORT: 7071
            BACKEND: "posix"
-          - set: 3
+          - set: "s3api, posix"
            LOCAL_FOLDER: /tmp/gw3
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-3
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-3
@@ -41,7 +41,7 @@ jobs:
            RECREATE_BUCKETS: "true"
            PORT: 7072
            BACKEND: "posix"
-          - set: 4
+          - set: "mc, posix"
            LOCAL_FOLDER: /tmp/gw4
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-4
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-4
@@ -52,39 +52,72 @@ jobs:
            RECREATE_BUCKETS: "true"
            PORT: 7073
            BACKEND: "posix"
-          - set: 5
+          - set: "s3api-user, posix, s3 IAM"
            LOCAL_FOLDER: /tmp/gw5
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-5
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-5
            IAM_TYPE: s3
            USERS_BUCKET: versity-gwtest-iam
            AWS_ENDPOINT_URL: https://127.0.0.1:7074
-            RUN_SET: "aws-user"
+            RUN_SET: "s3api-user"
            RECREATE_BUCKETS: "true"
            PORT: 7074
            BACKEND: "posix"
-          - set: 6
+          - set: "s3api non-policy, static buckets"
            LOCAL_FOLDER: /tmp/gw6
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-6
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-6
            IAM_TYPE: folder
            USERS_FOLDER: /tmp/iam6
            AWS_ENDPOINT_URL: https://127.0.0.1:7075
-            RUN_SET: "aws"
+            RUN_SET: "s3api-non-policy"
            RECREATE_BUCKETS: "false"
            PORT: 7075
            BACKEND: "posix"
-          - set: 7
+          - set: "s3api, s3 backend"
            LOCAL_FOLDER: /tmp/gw7
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-7
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-7
            IAM_TYPE: folder
            USERS_FOLDER: /tmp/iam7
            AWS_ENDPOINT_URL: https://127.0.0.1:7076
-            RUN_SET: "aws"
+            RUN_SET: "s3api"
            RECREATE_BUCKETS: "true"
            PORT: 7076
            BACKEND: "s3"
+          - set: "REST, posix"
+            LOCAL_FOLDER: /tmp/gw8
+            BUCKET_ONE_NAME: versity-gwtest-bucket-one-7
+            BUCKET_TWO_NAME: versity-gwtest-bucket-two-7
+            IAM_TYPE: folder
+            USERS_FOLDER: /tmp/iam8
+            AWS_ENDPOINT_URL: https://127.0.0.1:7077
+            RUN_SET: "rest"
+            RECREATE_BUCKETS: "true"
+            PORT: 7077
+            BACKEND: "posix"
+          - set: "s3api policy, static buckets"
+            LOCAL_FOLDER: /tmp/gw9
+            BUCKET_ONE_NAME: versity-gwtest-bucket-one-8
+            BUCKET_TWO_NAME: versity-gwtest-bucket-two-8
+            IAM_TYPE: folder
+            USERS_FOLDER: /tmp/iam9
+            AWS_ENDPOINT_URL: https://127.0.0.1:7078
+            RUN_SET: "s3api-policy"
+            RECREATE_BUCKETS: "false"
+            PORT: 7078
+            BACKEND: "posix"
+          - set: "s3api user, static buckets"
+            LOCAL_FOLDER: /tmp/gw10
+            BUCKET_ONE_NAME: versity-gwtest-bucket-one-9
+            BUCKET_TWO_NAME: versity-gwtest-bucket-two-9
+            IAM_TYPE: folder
+            USERS_FOLDER: /tmp/iam10
+            AWS_ENDPOINT_URL: https://127.0.0.1:7079
+            RUN_SET: "s3api-user"
+            RECREATE_BUCKETS: "false"
+            PORT: 7079
+            BACKEND: "posix"
     steps:
       - name: Check out code into the Go module directory
         uses: actions/checkout@v4
@@ -115,6 +148,10 @@ jobs:
           curl https://dl.min.io/client/mc/release/linux-amd64/mc --create-dirs -o /usr/local/bin/mc
           chmod 755 /usr/local/bin/mc

+      - name: Install xmllint (for rest)
+        run: |
+          sudo apt-get install libxml2-utils
+
       - name: Build and run, posix backend
         env:
           LOCAL_FOLDER: ${{ matrix.LOCAL_FOLDER }}
1 .gitignore (vendored)
@@ -45,6 +45,7 @@ tests/.secrets*

 # IAM users files often created in testing
 users.json
+users.json.backup

 # env files for testing
 **/.env*
@@ -32,4 +32,4 @@ RUN mkdir -p $SETUP_DIR

 COPY --from=0 /app/cmd/versitygw/versitygw /app/versitygw

-ENTRYPOINT [ "/app/versitygw" ]
+ENTRYPOINT [ "/app/versitygw" ]
@@ -36,6 +36,7 @@ const (
 	ListBucketMultipartUploadsAction Action = "s3:ListBucketMultipartUploads"
 	PutObjectAction                  Action = "s3:PutObject"
 	GetObjectAction                  Action = "s3:GetObject"
+	GetObjectVersionAction           Action = "s3:GetObjectVersion"
 	DeleteObjectAction               Action = "s3:DeleteObject"
 	GetObjectAclAction               Action = "s3:GetObjectAcl"
 	GetObjectAttributesAction        Action = "s3:GetObjectAttributes"
@@ -75,6 +76,7 @@ var supportedActionList = map[Action]struct{}{
 	ListBucketMultipartUploadsAction: {},
 	PutObjectAction:                  {},
 	GetObjectAction:                  {},
+	GetObjectVersionAction:           {},
 	DeleteObjectAction:               {},
 	GetObjectAclAction:               {},
 	GetObjectAttributesAction:        {},
@@ -103,6 +105,7 @@ var supportedObjectActionList = map[Action]struct{}{
 	ListMultipartUploadPartsAction: {},
 	PutObjectAction:                {},
 	GetObjectAction:                {},
+	GetObjectVersionAction:         {},
 	DeleteObjectAction:             {},
 	GetObjectAclAction:             {},
 	GetObjectAttributesAction:      {},
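The new `GetObjectVersionAction` has to be registered in both lookup tables before policy evaluation will accept it. A minimal sketch of how a lookup against such a table might be exercised — the `Action` type and map mirror the diff above, but this standalone program and its `IsObjectAction` helper are illustrative, not the package's actual API:

```go
package main

import "fmt"

// Action mirrors the auth package's string-based action type from the diff.
type Action string

const GetObjectVersionAction Action = "s3:GetObjectVersion"

// supportedObjectActionList is a stand-in for the package-level map the diff extends.
var supportedObjectActionList = map[Action]struct{}{
	GetObjectVersionAction: {},
}

// IsObjectAction reports whether the action applies to individual objects.
// Hypothetical helper for illustration only.
func IsObjectAction(a Action) bool {
	_, ok := supportedObjectActionList[a]
	return ok
}

func main() {
	fmt.Println(IsObjectAction(GetObjectVersionAction)) // true
	fmt.Println(IsObjectAction(Action("s3:Unknown")))   // false
}
```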
@@ -17,6 +17,7 @@ package azure
 import (
 	"bytes"
 	"context"
+	"crypto/sha256"
 	"encoding/base64"
 	"encoding/binary"
 	"encoding/json"
@@ -25,7 +26,9 @@ import (
 	"io"
 	"math"
 	"os"
+	"path/filepath"
 	"slices"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
@@ -40,6 +43,7 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/google/uuid"
 	"github.com/versity/versitygw/auth"
 	"github.com/versity/versitygw/backend"
 	"github.com/versity/versitygw/s3err"
@@ -52,18 +56,33 @@ import (
 type key string

 const (
-	keyAclCapital   key = "Acl"
-	keyAclLower     key = "acl"
-	keyOwnership    key = "Ownership"
-	keyTags         key = "Tags"
-	keyPolicy       key = "Policy"
-	keyBucketLock   key = "Bucketlock"
-	keyObjRetention key = "Objectretention"
-	keyObjLegalHold key = "Objectlegalhold"
-
-	defaultContentType = "binary/octet-stream"
+	keyAclCapital          key = "Acl"
+	keyAclLower            key = "acl"
+	keyOwnership           key = "Ownership"
+	keyTags                key = "Tags"
+	keyPolicy              key = "Policy"
+	keyBucketLock          key = "Bucketlock"
+	keyObjRetention        key = "Objectretention"
+	keyObjLegalHold        key = "Objectlegalhold"
+	onameAttr              key = "Objname"
+	onameAttrLower         key = "objname"
+	metaTmpMultipartPrefix key = ".sgwtmp" + "/multipart"
 )

+func (key) Table() map[string]struct{} {
+	return map[string]struct{}{
+		"acl":               {},
+		"ownership":         {},
+		"tags":              {},
+		"policy":            {},
+		"bucketlock":        {},
+		"objectretention":   {},
+		"objectlegalhold":   {},
+		"objname":           {},
+		".sgwtmp/multipart": {},
+	}
+}
+
 type Azure struct {
 	backend.BackendUnsupported
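The new `key.Table()` method enumerates every metadata key the gateway reserves for its own bookkeeping; `parseAzMetadata` (later in this diff) consults it so those keys never leak into user-visible object metadata. A small self-contained sketch of that filtering pattern, assuming the same lowercase-key convention:

```go
package main

import (
	"fmt"
	"strings"
)

// reserved mimics key.Table() from the diff: the set of gateway-internal keys.
var reserved = map[string]struct{}{
	"acl": {}, "ownership": {}, "tags": {}, "policy": {},
	"bucketlock": {}, "objectretention": {}, "objectlegalhold": {},
	"objname": {}, ".sgwtmp/multipart": {},
}

// filterUserMeta drops reserved keys case-insensitively, as parseAzMetadata does.
func filterUserMeta(m map[string]string) map[string]string {
	out := make(map[string]string)
	for k, v := range m {
		if _, ok := reserved[strings.ToLower(k)]; ok {
			continue
		}
		out[k] = v
	}
	return out
}

func main() {
	meta := map[string]string{"Acl": "internal", "X-User": "42"}
	fmt.Println(filterUserMeta(meta)) // map[X-User:42]
}
```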
@@ -254,6 +273,9 @@ func (az *Azure) GetBucketOwnershipControls(ctx context.Context, bucket string)
 	if err != nil {
 		return ownship, err
 	}
+	if len(ownership) == 0 {
+		return ownship, s3err.GetAPIError(s3err.ErrOwnershipControlsNotFound)
+	}

 	return types.ObjectOwnership(ownership), nil
 }
@@ -262,10 +284,10 @@ func (az *Azure) DeleteBucketOwnershipControls(ctx context.Context, bucket strin
 	return az.deleteContainerMetaData(ctx, bucket, string(keyOwnership))
 }

-func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, error) {
+func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
 	tags, err := parseTags(po.Tagging)
 	if err != nil {
-		return "", err
+		return s3response.PutObjectOutput{}, err
 	}

 	opts := &blockblob.UploadStreamOptions{
@@ -277,22 +299,27 @@ func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (string,
 	opts.HTTPHeaders.BlobContentEncoding = po.ContentEncoding
 	opts.HTTPHeaders.BlobContentLanguage = po.ContentLanguage
 	opts.HTTPHeaders.BlobContentDisposition = po.ContentDisposition
-	opts.HTTPHeaders.BlobContentType = po.ContentType
+	if strings.HasSuffix(*po.Key, "/") {
+		// Hardcode "application/x-directory" for directory objects
+		opts.HTTPHeaders.BlobContentType = backend.GetStringPtr(backend.DirContentType)
+	} else {
+		opts.HTTPHeaders.BlobContentType = po.ContentType
+	}

 	if opts.HTTPHeaders.BlobContentType == nil {
-		opts.HTTPHeaders.BlobContentType = backend.GetStringPtr(string(defaultContentType))
+		opts.HTTPHeaders.BlobContentType = backend.GetStringPtr(backend.DefaultContentType)
 	}

 	uploadResp, err := az.client.UploadStream(ctx, *po.Bucket, *po.Key, po.Body, opts)
 	if err != nil {
-		return "", azureErrToS3Err(err)
+		return s3response.PutObjectOutput{}, azureErrToS3Err(err)
 	}

 	// Set object legal hold
 	if po.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn {
 		err := az.PutObjectLegalHold(ctx, *po.Bucket, *po.Key, "", true)
 		if err != nil {
-			return "", err
+			return s3response.PutObjectOutput{}, err
 		}
 	}

@@ -304,15 +331,17 @@ func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (string,
 		}
 		retParsed, err := json.Marshal(retention)
 		if err != nil {
-			return "", fmt.Errorf("parse object lock retention: %w", err)
+			return s3response.PutObjectOutput{}, fmt.Errorf("parse object lock retention: %w", err)
 		}
 		err = az.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", true, retParsed)
 		if err != nil {
-			return "", err
+			return s3response.PutObjectOutput{}, err
 		}
 	}

-	return string(*uploadResp.ETag), nil
+	return s3response.PutObjectOutput{
+		ETag: string(*uploadResp.ETag),
+	}, nil
 }

 func (az *Azure) PutBucketTagging(ctx context.Context, bucket string, tags map[string]string) error {
@@ -334,11 +363,11 @@ func (az *Azure) GetBucketTagging(ctx context.Context, bucket string) (map[stri
 		return nil, err
 	}

-	var tags map[string]string
 	if len(tagsJson) == 0 {
-		return tags, nil
+		return nil, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)
 	}

+	var tags map[string]string
 	err = json.Unmarshal(tagsJson, &tags)
 	if err != nil {
 		return nil, err
@@ -377,7 +406,7 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.G

 	contentType := blobDownloadResponse.ContentType
 	if contentType == nil {
-		contentType = backend.GetStringPtr(defaultContentType)
+		contentType = backend.GetStringPtr(backend.DefaultContentType)
 	}

 	return &s3.GetObjectOutput{
@@ -391,6 +420,7 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.G
 		TagCount:     &tagcount,
 		ContentRange: blobDownloadResponse.ContentRange,
 		Body:         blobDownloadResponse.Body,
+		StorageClass: types.StorageClassStandard,
 	}, nil
 }
@@ -419,6 +449,7 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
 			ContentLength: block.Size,
 			ETag:          block.Name,
 			PartsCount:    &partsCount,
+			StorageClass:  types.StorageClassStandard,
 		}, nil
 	}
 }
@@ -447,6 +478,7 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
 		LastModified: resp.LastModified,
 		Metadata:     parseAzMetadata(resp.Metadata),
 		Expires:      resp.ExpiresOn,
+		StorageClass: types.StorageClassStandard,
 	}

 	status, ok := resp.Metadata[string(keyObjLegalHold)]
@@ -475,64 +507,31 @@ func (az *Azure) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAtt
 		Bucket: input.Bucket,
 		Key:    input.Key,
 	})
+	if err == nil {
+		return s3response.GetObjectAttributesResult{
+			ETag:         data.ETag,
+			LastModified: data.LastModified,
+			ObjectSize:   data.ContentLength,
+			StorageClass: data.StorageClass,
+			VersionId:    data.VersionId,
+		}, nil
+	}
+	if !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
+		return s3response.GetObjectAttributesResult{}, err
+	}
-	resp, err := az.ListParts(ctx, &s3.ListPartsInput{
-		Bucket:           input.Bucket,
-		Key:              input.Key,
-		PartNumberMarker: input.PartNumberMarker,
-		MaxParts:         input.MaxParts,
-	})
-	if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchUpload)) {
-		return s3response.GetObjectAttributesResult{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
-	}
-	if err != nil {
-		return s3response.GetObjectAttributesResult{}, err
-	}
-
-	parts := []types.ObjectPart{}
-
-	for _, p := range resp.Parts {
-		partNumber := int32(p.PartNumber)
-		size := p.Size
-
-		parts = append(parts, types.ObjectPart{
-			Size:       &size,
-			PartNumber: &partNumber,
-		})
-	}
-
-	//TODO: handle PartsCount prop
-	return s3response.GetObjectAttributesResult{
-		ObjectParts: &s3response.ObjectParts{
-			IsTruncated:          resp.IsTruncated,
-			MaxParts:             resp.MaxParts,
-			PartNumberMarker:     resp.PartNumberMarker,
-			NextPartNumberMarker: resp.NextPartNumberMarker,
-			Parts:                parts,
-		},
-		ETag:         data.ETag,
-		LastModified: data.LastModified,
-		ObjectSize:   data.ContentLength,
-		StorageClass: data.StorageClass,
-	}, nil
 }

 func (az *Azure) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
-	pager := az.client.NewListBlobsFlatPager(*input.Bucket, &azblob.ListBlobsFlatOptions{
+	client, err := az.getContainerClient(*input.Bucket)
+	if err != nil {
+		return s3response.ListObjectsResult{}, nil
+	}
+	pager := client.NewListBlobsHierarchyPager(*input.Delimiter, &container.ListBlobsHierarchyOptions{
 		Marker:     input.Marker,
 		MaxResults: input.MaxKeys,
 		Prefix:     input.Prefix,
 	})

 	var objects []s3response.Object
+	var cPrefixes []types.CommonPrefix
 	var nextMarker *string
 	var isTruncated bool
 	var maxKeys int32 = math.MaxInt32
@@ -547,13 +546,10 @@ Pager:
 		if err != nil {
 			return s3response.ListObjectsResult{}, azureErrToS3Err(err)
 		}
-
 		for _, v := range resp.Segment.BlobItems {
-			if nextMarker == nil && *resp.NextMarker != "" {
-				nextMarker = resp.NextMarker
-			}
-			if len(objects) >= int(maxKeys) {
+			if len(objects)+len(cPrefixes) >= int(maxKeys) {
+				nextMarker = objects[len(objects)-1].Key
+				isTruncated = true
 				break Pager
 			}
 			objects = append(objects, s3response.Object{
@@ -561,7 +557,20 @@ Pager:
 				Key:          v.Name,
 				LastModified: v.Properties.LastModified,
 				Size:         v.Properties.ContentLength,
-				StorageClass: types.ObjectStorageClass(*v.Properties.AccessTier),
+				StorageClass: types.ObjectStorageClassStandard,
 			})
 		}
+		for _, v := range resp.Segment.BlobPrefixes {
+			if *v.Name <= *input.Marker {
+				continue
+			}
+			if len(objects)+len(cPrefixes) >= int(maxKeys) {
+				nextMarker = cPrefixes[len(cPrefixes)-1].Prefix
+				isTruncated = true
+				break Pager
+			}
+			cPrefixes = append(cPrefixes, types.CommonPrefix{
+				Prefix: v.Name,
+			})
+		}
 	}
@@ -569,14 +578,15 @@ Pager:
 	// TODO: generate common prefixes when appropriate

 	return s3response.ListObjectsResult{
-		Contents:    objects,
-		Marker:      input.Marker,
-		MaxKeys:     input.MaxKeys,
-		Name:        input.Bucket,
-		NextMarker:  nextMarker,
-		Prefix:      input.Prefix,
-		IsTruncated: &isTruncated,
-		Delimiter:   input.Delimiter,
+		Contents:       objects,
+		Marker:         input.Marker,
+		MaxKeys:        input.MaxKeys,
+		Name:           input.Bucket,
+		NextMarker:     nextMarker,
+		Prefix:         input.Prefix,
+		IsTruncated:    &isTruncated,
+		Delimiter:      input.Delimiter,
+		CommonPrefixes: cPrefixes,
 	}, nil
 }
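Note the rewritten pager counts objects and common prefixes together against MaxKeys, matching S3 semantics where a CommonPrefix consumes a key slot just like an object. A compact, self-contained sketch of that truncation rule (the helper below is illustrative, not gateway code):

```go
package main

import "fmt"

// truncate applies the combined-count rule from the diff: once objects plus
// common prefixes reach maxKeys, the listing is truncated and the last item
// returned becomes the next marker.
func truncate(keys, prefixes []string, maxKeys int) (out []string, nextMarker string, truncated bool) {
	for _, k := range append(append([]string{}, keys...), prefixes...) {
		if len(out) >= maxKeys {
			return out, out[len(out)-1], true
		}
		out = append(out, k)
	}
	return out, "", false
}

func main() {
	out, next, trunc := truncate([]string{"a", "b"}, []string{"c/"}, 2)
	fmt.Println(out, next, trunc) // [a b] b true
}
```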
@@ -587,13 +597,18 @@ func (az *Azure) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input
 	} else {
 		marker = *input.StartAfter
 	}
-	pager := az.client.NewListBlobsFlatPager(*input.Bucket, &azblob.ListBlobsFlatOptions{
+	client, err := az.getContainerClient(*input.Bucket)
+	if err != nil {
+		return s3response.ListObjectsV2Result{}, nil
+	}
+	pager := client.NewListBlobsHierarchyPager(*input.Delimiter, &container.ListBlobsHierarchyOptions{
 		Marker:     &marker,
 		MaxResults: input.MaxKeys,
 		Prefix:     input.Prefix,
 	})

 	var objects []s3response.Object
+	var cPrefixes []types.CommonPrefix
 	var nextMarker *string
 	var isTruncated bool
 	var maxKeys int32 = math.MaxInt32
@@ -609,26 +624,34 @@ Pager:
 			return s3response.ListObjectsV2Result{}, azureErrToS3Err(err)
 		}
 		for _, v := range resp.Segment.BlobItems {
-			if nextMarker == nil && *resp.NextMarker != "" {
-				nextMarker = resp.NextMarker
-			}
-			if len(objects) >= int(maxKeys) {
+			if len(objects)+len(cPrefixes) >= int(maxKeys) {
+				nextMarker = objects[len(objects)-1].Key
+				isTruncated = true
 				break Pager
 			}
-			nextMarker = resp.NextMarker
 			objects = append(objects, s3response.Object{
 				ETag:         (*string)(v.Properties.ETag),
 				Key:          v.Name,
 				LastModified: v.Properties.LastModified,
 				Size:         v.Properties.ContentLength,
-				StorageClass: types.ObjectStorageClass(*v.Properties.AccessTier),
+				StorageClass: types.ObjectStorageClassStandard,
 			})
 		}
+		for _, v := range resp.Segment.BlobPrefixes {
+			if *v.Name <= marker {
+				continue
+			}
+			if len(objects)+len(cPrefixes) >= int(maxKeys) {
+				nextMarker = cPrefixes[len(cPrefixes)-1].Prefix
+				isTruncated = true
+				break Pager
+			}
+			cPrefixes = append(cPrefixes, types.CommonPrefix{
+				Prefix: v.Name,
+			})
+		}
 	}

 	// TODO: generate common prefixes when appropriate

 	return s3response.ListObjectsV2Result{
 		Contents:          objects,
 		ContinuationToken: input.ContinuationToken,
@@ -638,25 +661,26 @@ Pager:
 		Prefix:         input.Prefix,
 		IsTruncated:    &isTruncated,
 		Delimiter:      input.Delimiter,
+		CommonPrefixes: cPrefixes,
 	}, nil
 }
-func (az *Azure) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) error {
+func (az *Azure) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
 	_, err := az.client.DeleteBlob(ctx, *input.Bucket, *input.Key, nil)
 	if err != nil {
 		azerr, ok := err.(*azcore.ResponseError)
 		if ok && azerr.StatusCode == 404 {
 			// if the object does not exist, S3 returns success
-			return nil
+			return &s3.DeleteObjectOutput{}, nil
 		}
 	}
-	return azureErrToS3Err(err)
+	return &s3.DeleteObjectOutput{}, azureErrToS3Err(err)
 }

 func (az *Azure) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
 	delResult, errs := []types.DeletedObject{}, []types.Error{}
 	for _, obj := range input.Delete.Objects {
-		err := az.DeleteObject(ctx, &s3.DeleteObjectInput{
+		_, err := az.DeleteObject(ctx, &s3.DeleteObjectInput{
 			Bucket: input.Bucket,
 			Key:    obj.Key,
 		})
@@ -687,13 +711,21 @@ func (az *Azure) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput
 }

 func (az *Azure) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
-	mdmap, err := az.getContainerMetaDataMap(ctx, *input.Bucket)
+	bclient, err := az.getBlobClient(*input.Bucket, *input.Key)
 	if err != nil {
 		return nil, err
 	}

-	if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource && isMetaSame(mdmap, input.Metadata) {
-		return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
+	if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource {
+		props, err := bclient.GetProperties(ctx, nil)
+		if err != nil {
+			return nil, azureErrToS3Err(err)
+		}
+
+		mdmap := props.Metadata
+		if isMetaSame(mdmap, input.Metadata) {
+			return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
+		}
 	}

 	tags, err := parseTags(input.Tagging)
@@ -701,11 +733,6 @@ func (az *Azure) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3
 		return nil, err
 	}

-	bclient, err := az.getBlobClient(*input.Bucket, *input.Key)
-	if err != nil {
-		return nil, err
-	}
-
 	resp, err := bclient.CopyFromURL(ctx, az.serviceURL+"/"+*input.CopySource, &blob.CopyFromURLOptions{
 		BlobTags: tags,
 		Metadata: parseMetadata(input.Metadata),
@@ -765,26 +792,97 @@ func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object string)
 }

 func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
-	// Multipart upload starts with UploadPart action so there is no
-	// correlating function for creating multipart uploads.
-	// TODO: since azure only allows for a single multipart upload
-	// for an object name at a time, we need to send an error back to
-	// the client if there is already an outstanding upload in progress
-	// for this object.
-	// Alternatively, is there something we can do with upload ids to
-	// keep concurrent uploads unique still? I haven't found an efficient
-	// way to rename final objects.
+	if input.ObjectLockLegalHoldStatus != "" || input.ObjectLockMode != "" {
+		bucketLock, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyBucketLock))
+		if err != nil {
+			return s3response.InitiateMultipartUploadResult{}, azureErrToS3Err(err)
+		}
+
+		if len(bucketLock) == 0 {
+			return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
+		}
+
+		var bucketLockConfig auth.BucketLockConfig
+		if err := json.Unmarshal(bucketLock, &bucketLockConfig); err != nil {
+			return s3response.InitiateMultipartUploadResult{}, fmt.Errorf("parse bucket lock config: %w", err)
+		}
+
+		if !bucketLockConfig.Enabled {
+			return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
+		}
+	}
+
+	meta := parseMetadata(input.Metadata)
+	meta[string(onameAttr)] = input.Key
+
+	// parse object tags
+	tagsStr := getString(input.Tagging)
+	tags := map[string]string{}
+	if tagsStr != "" {
+		tagParts := strings.Split(tagsStr, "&")
+		for _, prt := range tagParts {
+			p := strings.Split(prt, "=")
+			if len(p) != 2 {
+				return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidTag)
+			}
+			if len(p[0]) > 128 || len(p[1]) > 256 {
+				return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidTag)
+			}
+			tags[p[0]] = p[1]
+		}
+	}
+
+	// set blob legal hold status in metadata
+	if input.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn {
+		meta[string(keyObjLegalHold)] = backend.GetStringPtr("1")
+	}
+
+	// set blob retention date
+	if input.ObjectLockMode != "" {
+		retention := types.ObjectLockRetention{
+			Mode:            types.ObjectLockRetentionMode(input.ObjectLockMode),
+			RetainUntilDate: input.ObjectLockRetainUntilDate,
+		}
+		retParsed, err := json.Marshal(retention)
+		if err != nil {
+			return s3response.InitiateMultipartUploadResult{}, azureErrToS3Err(err)
+		}
+		meta[string(keyObjRetention)] = backend.GetStringPtr(string(retParsed))
+	}
+
+	uploadId := uuid.New().String()
+
+	tmpPath := createMetaTmpPath(*input.Key, uploadId)
+
+	opts := &blockblob.UploadBufferOptions{
+		Metadata: meta,
+		Tags:     tags,
+	}
+	if getString(input.ContentType) != "" {
+		opts.HTTPHeaders = &blob.HTTPHeaders{
+			BlobContentType:     input.ContentType,
+			BlobContentEncoding: input.ContentEncoding,
+		}
+	}
+
+	// Create an empty blob in .sgwtmp/multipart/<uploadId>/<object hash>.
+	// The blob indicates multipart upload initialization and holds the mp metadata,
+	// e.g. tagging, content-type, metadata, object lock status ...
+	_, err := az.client.UploadBuffer(ctx, *input.Bucket, tmpPath, []byte{}, opts)
+	if err != nil {
+		return s3response.InitiateMultipartUploadResult{}, azureErrToS3Err(err)
+	}
+
 	return s3response.InitiateMultipartUploadResult{
 		Bucket:   *input.Bucket,
 		Key:      *input.Key,
-		UploadId: *input.Key,
+		UploadId: uploadId,
 	}, nil
 }

 // Each part is translated into an uncommitted block in a newly created blob in staging area
 func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (etag string, err error) {
-	client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
-	if err != nil {
+	if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil {
 		return "", err
 	}

@@ -797,6 +895,11 @@ func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (eta
 		return "", err
 	}

+	client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
+	if err != nil {
+		return "", err
+	}
+
 	// block id serves as etag here
 	etag = blockIDInt32ToBase64(*input.PartNumber)
 	_, err = client.StageBlock(ctx, etag, rdr, nil)
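UploadPart derives both the Azure block ID and the part's ETag from the part number via `blockIDInt32ToBase64`, and ListParts reverses it with `decodeBlockId`. Neither helper's body appears in this hunk; the following is a plausible sketch under the assumption of a conventional fixed-width binary encoding (Azure requires all block IDs within a blob to have equal length), not the file's actual implementation:

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// blockIDInt32ToBase64 encodes a part number as a fixed-length base64 block id.
// Sketch only: the real helpers live elsewhere in backend/azure.
func blockIDInt32ToBase64(n int32) string {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, uint32(n))
	return base64.StdEncoding.EncodeToString(buf)
}

// decodeBlockId recovers the part number from a block id.
func decodeBlockId(id string) (int, error) {
	buf, err := base64.StdEncoding.DecodeString(id)
	if err != nil || len(buf) != 4 {
		return 0, fmt.Errorf("invalid block id %q", id)
	}
	return int(binary.LittleEndian.Uint32(buf)), nil
}

func main() {
	id := blockIDInt32ToBase64(7)
	n, _ := decodeBlockId(id)
	fmt.Println(id, n) // round-trips the part number, e.g. "BwAAAA==" 7
}
```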
@@ -813,10 +916,14 @@ func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInp
 		return s3response.CopyObjectResult{}, nil
 	}

+	if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil {
+		return s3response.CopyObjectResult{}, err
+	}
+
+	eTag := blockIDInt32ToBase64(*input.PartNumber)
 	//TODO: handle block copy by range
 	//TODO: the action returns not implemented on azurite, maybe in production this will work?
-	// UploadId here is the source block id
-	_, err = client.StageBlockFromURL(ctx, *input.UploadId, *input.CopySource, nil)
+	_, err = client.StageBlockFromURL(ctx, eTag, *input.CopySource, nil)
 	if err != nil {
 		return s3response.CopyObjectResult{}, parseMpError(err)
 	}
@@ -826,15 +933,14 @@ func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInp

 // Lists all uncommitted parts from the blob
 func (az *Azure) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
+	if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil {
+		return s3response.ListPartsResult{}, err
+	}
 	client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
 	if err != nil {
 		return s3response.ListPartsResult{}, nil
 	}

-	resp, err := client.GetBlockList(ctx, blockblob.BlockListTypeUncommitted, nil)
-	if err != nil {
-		return s3response.ListPartsResult{}, parseMpError(err)
-	}
 	var partNumberMarker int
 	var nextPartNumberMarker int
 	var maxParts int32 = math.MaxInt32
@@ -850,13 +956,28 @@ func (az *Azure) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3res
 		maxParts = *input.MaxParts
 	}

+	resp, err := client.GetBlockList(ctx, blockblob.BlockListTypeUncommitted, nil)
+	if err != nil {
+		// If the mp exists but the client returns 'NoSuchKey' error, return empty result
+		if errors.Is(azureErrToS3Err(err), s3err.GetAPIError(s3err.ErrNoSuchKey)) {
+			return s3response.ListPartsResult{
+				Bucket:           *input.Bucket,
+				Key:              *input.Key,
+				PartNumberMarker: partNumberMarker,
+				IsTruncated:      isTruncated,
+				MaxParts:         int(maxParts),
+				StorageClass:     types.StorageClassStandard,
+			}, nil
+		}
+	}
+
 	parts := []s3response.Part{}
 	for _, el := range resp.UncommittedBlocks {
 		partNumber, err := decodeBlockId(*el.Name)
 		if err != nil {
 			return s3response.ListPartsResult{}, err
 		}
-		if partNumberMarker != 0 && partNumberMarker >= partNumber {
+		if partNumberMarker >= partNumber {
 			continue
 		}
 		parts = append(parts, s3response.Part{
@@ -879,29 +1000,29 @@ func (az *Azure) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3res
 		PartNumberMarker: partNumberMarker,
 		IsTruncated:      isTruncated,
 		MaxParts:         int(maxParts),
+		StorageClass:     types.StorageClassStandard,
 	}, nil
 }

-// Lists all block blobs, which has uncommitted blocks
+// Lists all the multipart uploads initiated with .sgwtmp/multipart prefix
 func (az *Azure) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
 	client, err := az.getContainerClient(*input.Bucket)
 	if err != nil {
 		return s3response.ListMultipartUploadsResult{}, err
 	}
-	pager := client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
-		Include: container.ListBlobsInclude{UncommittedBlobs: true},
-		Marker:  input.KeyMarker,
-		Prefix:  input.Prefix,
-	})
-
-	var maxUploads int32
-	if input.MaxUploads != nil {
-		maxUploads = *input.MaxUploads
-	}
 	isTruncated := false
 	nextKeyMarker := ""
 	uploads := []s3response.Upload{}
-	breakFlag := false
+
+	var uploadIDMarker string
+	if input.UploadIdMarker != nil {
+		uploadIDMarker = *input.UploadIdMarker
+	}
+	uploadIdMarkerFound := false
+	prefix := string(metaTmpMultipartPrefix)
+
+	pager := client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
+		Prefix: &prefix,
+	})

 	for pager.More() {
 		resp, err := pager.NextPage(ctx)
@@ -909,49 +1030,131 @@ func (az *Azure) ListMultipartUploads(ctx context.Context, input *s3.ListMultipa
 			return s3response.ListMultipartUploadsResult{}, azureErrToS3Err(err)
 		}
 		for _, el := range resp.Segment.BlobItems {
-			if el.Properties.AccessTier == nil {
-				if len(uploads) >= int(*input.MaxUploads) && maxUploads != 0 {
-					breakFlag = true
-					nextKeyMarker = *el.Name
-					isTruncated = true
-					break
-				}
-				uploads = append(uploads, s3response.Upload{
-					Key:       *el.Name,
-					Initiated: *el.Properties.CreationTime,
-				})
+			key, ok := el.Metadata[string(onameAttrLower)]
+			if !ok {
+				continue
 			}
-		}
-		if breakFlag {
-			break
+			if *key <= *input.KeyMarker {
+				continue
+			}
+			if input.Prefix != nil && !strings.HasPrefix(*key, *input.Prefix) {
+				continue
+			}
+
+			path := filepath.Clean(*el.Name)
+			parts := strings.Split(path, "/")
+			uploadId := parts[2]
+
+			uploads = append(uploads, s3response.Upload{
+				Key:          *key,
+				Initiated:    *el.Properties.CreationTime,
+				UploadID:     uploadId,
+				StorageClass: types.StorageClassStandard,
+			})
 		}
 	}
-	return s3response.ListMultipartUploadsResult{
-		Uploads:       uploads,
-		Bucket:        *input.Bucket,
-		KeyMarker:     *input.KeyMarker,
-		NextKeyMarker: nextKeyMarker,
-		MaxUploads:    int(maxUploads),
-		Prefix:        *input.Prefix,
-		IsTruncated:   isTruncated,
-		Delimiter:     *input.Delimiter,
-	}, nil
+
+	maxUploads := 1000
+	if input.MaxUploads != nil {
+		maxUploads = int(*input.MaxUploads)
+	}
+	if *input.KeyMarker != "" && uploadIDMarker != "" && !uploadIdMarkerFound {
+		return s3response.ListMultipartUploadsResult{
+			Bucket:         *input.Bucket,
+			Delimiter:      *input.Delimiter,
+			KeyMarker:      *input.KeyMarker,
+			MaxUploads:     maxUploads,
+			Prefix:         *input.Prefix,
+			UploadIDMarker: *input.UploadIdMarker,
+			Uploads:        []s3response.Upload{},
+		}, nil
+	}
+
+	sort.SliceStable(uploads, func(i, j int) bool {
+		return uploads[i].Key < uploads[j].Key
+	})
+
+	if *input.KeyMarker != "" && *input.UploadIdMarker != "" {
+		// the uploads are already filtered by keymarker
+		// filter the uploads by uploadIdMarker
+		for i, upl := range uploads {
+			if upl.UploadID == uploadIDMarker {
+				uploads = uploads[i+1:]
+				break
+			}
+		}
+	}
+
+	if len(uploads) <= maxUploads {
+		return s3response.ListMultipartUploadsResult{
+			Bucket:         *input.Bucket,
+			Delimiter:      *input.Delimiter,
+			KeyMarker:      *input.KeyMarker,
+			MaxUploads:     maxUploads,
+			Prefix:         *input.Prefix,
+			UploadIDMarker: *input.UploadIdMarker,
+			Uploads:        uploads,
+		}, nil
+	} else {
+		resUploads := uploads[:maxUploads]
+		return s3response.ListMultipartUploadsResult{
+			Bucket:             *input.Bucket,
+			Delimiter:          *input.Delimiter,
+			KeyMarker:          *input.KeyMarker,
+			NextKeyMarker:      resUploads[len(resUploads)-1].Key,
+			MaxUploads:         maxUploads,
+			Prefix:             *input.Prefix,
+			UploadIDMarker:     *input.UploadIdMarker,
+			NextUploadIDMarker: resUploads[len(resUploads)-1].UploadID,
+			IsTruncated:        true,
+			Uploads:            resUploads,
+		}, nil
+	}
 }
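Each initiated upload is materialized as a blob at `.sgwtmp/multipart/<uploadId>/<object hash>`, so the listing code above can recover the upload ID purely from the blob path by splitting on "/" and taking index 2. A sketch of that round-trip with illustrative values:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Layout used by the diff: .sgwtmp/multipart/<uploadId>/<object hash>
	p := filepath.Clean(".sgwtmp/multipart/1b4e28ba-2fa1-11d2-883f-0016d3cca427/deadbeef")
	parts := strings.Split(p, "/")
	uploadId := parts[2]
	fmt.Println(uploadId) // 1b4e28ba-2fa1-11d2-883f-0016d3cca427
}
```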
-// Deletes the block blob with committed/uncommitted blocks
+// Cleans up the initiated multipart upload in .sgwtmp namespace
 func (az *Azure) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
-	// TODO: need to verify this blob has uncommitted blocks?
-	_, err := az.client.DeleteBlob(ctx, *input.Bucket, *input.Key, nil)
+	tmpPath := createMetaTmpPath(*input.Key, *input.UploadId)
+	_, err := az.client.DeleteBlob(ctx, *input.Bucket, tmpPath, nil)
 	if err != nil {
 		return parseMpError(err)
 	}

+	// Cleanup the uploaded parts
+	_, err = az.client.DeleteBlob(ctx, *input.Bucket, *input.Key, nil)
+	if err != nil {
+		err = azureErrToS3Err(err)
+		if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
+			return nil
+		}
+
+		return err
+	}
+
 	return nil
 }
 // Commits all the uncommitted blocks inside the block blob
-// And moves the block blob from staging area into the blobs list
+// And moves the block blob from staging area into the blobs list.
+// Copies the multipart metadata from .sgwtmp namespace into the newly created blob.
+// Deletes the multipart upload 'blob' from .sgwtmp namespace,
+// which indicates the end of the multipart upload.
 func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
+	tmpPath := createMetaTmpPath(*input.Key, *input.UploadId)
+	blobClient, err := az.getBlobClient(*input.Bucket, tmpPath)
+	if err != nil {
+		return nil, err
+	}
+
+	props, err := blobClient.GetProperties(ctx, nil)
+	if err != nil {
+		return nil, parseMpError(err)
+	}
+	tags, err := blobClient.GetTags(ctx, nil)
+	if err != nil {
+		return nil, parseMpError(err)
+	}
+
 	client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
 	if err != nil {
 		return nil, err
@@ -988,7 +1191,22 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete
 		blockIds = append(blockIds, *block.Name)
 	}

-	resp, err := client.CommitBlockList(ctx, blockIds, nil)
+	opts := &blockblob.CommitBlockListOptions{
+		Metadata: props.Metadata,
+		Tags:     parseAzTags(tags.BlobTagSet),
+	}
+	opts.HTTPHeaders = &blob.HTTPHeaders{
+		BlobContentType:     props.ContentType,
+		BlobContentEncoding: props.ContentEncoding,
+	}
+
+	resp, err := client.CommitBlockList(ctx, blockIds, opts)
 	if err != nil {
 		return nil, parseMpError(err)
 	}
+
+	// cleanup the multipart upload
+	_, err = blobClient.Delete(ctx, nil)
+	if err != nil {
+		return nil, parseMpError(err)
+	}
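Seen from the client side, the rewritten flow is the standard S3 sequence: initiate (which now creates the `.sgwtmp` marker blob), upload parts (staged blocks), complete (commit blocks, copy over metadata, delete the marker). A hedged sketch against a gateway endpoint using the standard aws-sdk-go-v2 client API; bucket, key, and the single tiny part are illustrative:

```go
package example

import (
	"context"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// uploadOneObject runs create -> upload part -> complete against the gateway.
func uploadOneObject(ctx context.Context, client *s3.Client, bucket, key string) error {
	cmu, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: &bucket, Key: &key,
	})
	if err != nil {
		return err
	}
	up, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket: &bucket, Key: &key, UploadId: cmu.UploadId,
		PartNumber: aws.Int32(1), Body: strings.NewReader("hello"),
	})
	if err != nil {
		return err
	}
	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket: &bucket, Key: &key, UploadId: cmu.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{ETag: up.ETag, PartNumber: aws.Int32(1)}},
		},
	})
	return err
}
```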
@@ -1313,9 +1531,15 @@ func parseAzMetadata(m map[string]*string) map[string]string {
 		return nil
 	}

+	keywords := keyTags.Table()
+
 	meta := make(map[string]string)

 	for k, v := range m {
+		_, ok := keywords[strings.ToLower(k)]
+		if ok {
+			continue
+		}
 		meta[k] = *v
 	}
 	return meta
@@ -1427,20 +1651,6 @@ func (az *Azure) getContainerMetaData(ctx context.Context, bucket, key string) (
 	return value, nil
 }

-func (az *Azure) getContainerMetaDataMap(ctx context.Context, bucket string) (map[string]*string, error) {
-	client, err := az.getContainerClient(bucket)
-	if err != nil {
-		return nil, err
-	}
-
-	props, err := client.GetProperties(ctx, nil)
-	if err != nil {
-		return nil, azureErrToS3Err(err)
-	}
-
-	return props.Metadata, nil
-}
-
 func (az *Azure) setContainerMetaData(ctx context.Context, bucket, key string, value []byte) error {
 	client, err := az.getContainerClient(bucket)
 	if err != nil {
@@ -1517,7 +1727,7 @@ func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
 }

 func isMetaSame(azMeta map[string]*string, awsMeta map[string]string) bool {
-	if len(azMeta) != len(awsMeta)+1 {
+	if len(azMeta) != len(awsMeta) {
 		return false
 	}

@@ -1533,3 +1743,24 @@ func isMetaSame(azMeta map[string]*string, awsMeta map[string]string) bool {

 	return true
 }
+
+func createMetaTmpPath(obj, uploadId string) string {
+	objNameSum := sha256.Sum256([]byte(obj))
+	return filepath.Join(string(metaTmpMultipartPrefix), uploadId, fmt.Sprintf("%x", objNameSum))
+}
+
+// Checks if the multipart upload exists with the given bucket, key and uploadId
+func (az *Azure) checkIfMpExists(ctx context.Context, bucket, obj, uploadId string) error {
+	tmpPath := createMetaTmpPath(obj, uploadId)
+	blobClient, err := az.getBlobClient(bucket, tmpPath)
+	if err != nil {
+		return err
+	}
+
+	_, err = blobClient.GetProperties(ctx, nil)
+	if err != nil {
+		return s3err.GetAPIError(s3err.ErrNoSuchUpload)
+	}
+
+	return nil
+}
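`createMetaTmpPath` hashes the object key so that arbitrary key names (slashes, unicode, very long keys) collapse to a fixed-width path segment under the upload's directory. The same recipe, repeated as a runnable snippet with illustrative values:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"path/filepath"
)

func main() {
	const metaTmpMultipartPrefix = ".sgwtmp/multipart"
	// illustrative object key and upload id
	obj := "photos/2024/cat.png"
	uploadId := "0f4cafe8-2f1e-4b87-9a3b-6bb1fb9c1a2e"

	sum := sha256.Sum256([]byte(obj))
	// .sgwtmp/multipart/<uploadId>/<hex sha256 of the key>
	fmt.Println(filepath.Join(metaTmpMultipartPrefix, uploadId, fmt.Sprintf("%x", sum)))
}
```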
@@ -38,7 +38,7 @@ type Backend interface {
 	CreateBucket(_ context.Context, _ *s3.CreateBucketInput, defaultACL []byte) error
 	PutBucketAcl(_ context.Context, bucket string, data []byte) error
 	DeleteBucket(context.Context, *s3.DeleteBucketInput) error
-	PutBucketVersioning(context.Context, *s3.PutBucketVersioningInput) error
+	PutBucketVersioning(_ context.Context, bucket string, status types.BucketVersioningStatus) error
 	GetBucketVersioning(_ context.Context, bucket string) (*s3.GetBucketVersioningOutput, error)
 	PutBucketPolicy(_ context.Context, bucket string, policy []byte) error
 	GetBucketPolicy(_ context.Context, bucket string) ([]byte, error)
@@ -57,7 +57,7 @@ type Backend interface {
 	UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)

 	// standard object operations
-	PutObject(context.Context, *s3.PutObjectInput) (string, error)
+	PutObject(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error)
 	HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
 	GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error)
 	GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
@@ -65,10 +65,10 @@ type Backend interface {
 	CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
 	ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error)
 	ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error)
-	DeleteObject(context.Context, *s3.DeleteObjectInput) error
+	DeleteObject(context.Context, *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
 	DeleteObjects(context.Context, *s3.DeleteObjectsInput) (s3response.DeleteResult, error)
 	PutObjectAcl(context.Context, *s3.PutObjectAclInput) error
-	ListObjectVersions(context.Context, *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)
+	ListObjectVersions(context.Context, *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error)

 	// special case object operations
 	RestoreObject(context.Context, *s3.RestoreObjectInput) error
@@ -126,7 +126,7 @@ func (BackendUnsupported) PutBucketAcl(_ context.Context, bucket string, data []
 func (BackendUnsupported) DeleteBucket(context.Context, *s3.DeleteBucketInput) error {
 	return s3err.GetAPIError(s3err.ErrNotImplemented)
 }
-func (BackendUnsupported) PutBucketVersioning(context.Context, *s3.PutBucketVersioningInput) error {
+func (BackendUnsupported) PutBucketVersioning(_ context.Context, bucket string, status types.BucketVersioningStatus) error {
 	return s3err.GetAPIError(s3err.ErrNotImplemented)
 }
 func (BackendUnsupported) GetBucketVersioning(_ context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
@@ -173,8 +173,8 @@ func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInpu
 	return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
 }

-func (BackendUnsupported) PutObject(context.Context, *s3.PutObjectInput) (string, error) {
-	return "", s3err.GetAPIError(s3err.ErrNotImplemented)
+func (BackendUnsupported) PutObject(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
+	return s3response.PutObjectOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
 }
 func (BackendUnsupported) HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
 	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -197,8 +197,8 @@ func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (s3
 func (BackendUnsupported) ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
 	return s3response.ListObjectsV2Result{}, s3err.GetAPIError(s3err.ErrNotImplemented)
 }
-func (BackendUnsupported) DeleteObject(context.Context, *s3.DeleteObjectInput) error {
-	return s3err.GetAPIError(s3err.ErrNotImplemented)
+func (BackendUnsupported) DeleteObject(context.Context, *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
+	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
 }
 func (BackendUnsupported) DeleteObjects(context.Context, *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
 	return s3response.DeleteResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -225,8 +225,8 @@ func (BackendUnsupported) SelectObjectContent(ctx context.Context, input *s3.Sel
 	}
 }

-func (BackendUnsupported) ListObjectVersions(context.Context, *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) {
-	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
+func (BackendUnsupported) ListObjectVersions(context.Context, *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
+	return s3response.ListVersionsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
 }

 func (BackendUnsupported) GetBucketTagging(_ context.Context, bucket string) (map[string]string, error) {
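Backends embed `BackendUnsupported` so that interface growth like the new `PutObject` and `DeleteObject` shapes only breaks backends that explicitly override those methods; everything else keeps inheriting the `ErrNotImplemented` stubs. A minimal sketch of a custom backend overriding just the new `PutObject` signature (the ETag value is illustrative):

```go
package mybackend

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3response"
)

// MyBackend inherits ErrNotImplemented stubs for everything it does not override.
type MyBackend struct {
	backend.BackendUnsupported
}

// PutObject overrides the stub with the new signature from this diff.
func (MyBackend) PutObject(ctx context.Context, po *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
	// ... store the body for *po.Key somewhere ...
	return s3response.PutObjectOutput{ETag: `"d41d8cd98f00b204e9800998ecf8427e"`}, nil
}
```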
@@ -30,6 +30,12 @@ import (
 	"github.com/versity/versitygw/s3response"
 )

+const (
+	// this is the media type for directories in AWS and Nextcloud
+	DirContentType     = "application/x-directory"
+	DefaultContentType = "binary/octet-stream"
+)
+
 func IsValidBucketName(name string) bool { return true }

 type ByBucketName []s3response.ListAllMyBucketsEntry
@@ -96,6 +102,32 @@ func ParseRange(size int64, acceptRange string) (int64, int64, error) {
 	return startOffset, endOffset - startOffset + 1, nil
 }

+// ParseCopySource parses x-amz-copy-source header and returns source bucket,
+// source object, versionId, error respectively
+func ParseCopySource(copySourceHeader string) (string, string, string, error) {
+	if copySourceHeader[0] == '/' {
+		copySourceHeader = copySourceHeader[1:]
+	}
+
+	cSplitted := strings.Split(copySourceHeader, "?")
+	copySource := cSplitted[0]
+	var versionId string
+	if len(cSplitted) > 1 {
+		versionIdParts := strings.Split(cSplitted[1], "=")
+		if len(versionIdParts) != 2 || versionIdParts[0] != "versionId" {
+			return "", "", "", s3err.GetAPIError(s3err.ErrInvalidRequest)
+		}
+		versionId = versionIdParts[1]
+	}
+
+	srcBucket, srcObject, ok := strings.Cut(copySource, "/")
+	if !ok {
+		return "", "", "", s3err.GetAPIError(s3err.ErrInvalidCopySource)
+	}
+
+	return srcBucket, srcObject, versionId, nil
+}
+
 func CreateExceedingRangeErr(objSize int64) s3err.APIError {
 	return s3err.APIError{
 		Code: "InvalidArgument",
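`ParseCopySource` accepts both the bare `bucket/key` form and the `/bucket/key?versionId=...` form of the x-amz-copy-source header. A short usage example (header value is illustrative):

```go
package main

import (
	"fmt"

	"github.com/versity/versitygw/backend"
)

func main() {
	bucket, object, versionId, err := backend.ParseCopySource("/srcbucket/dir/obj.txt?versionId=abc123")
	fmt.Println(bucket, object, versionId, err) // srcbucket dir/obj.txt abc123 <nil>
}
```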
(File diff suppressed because it is too large)
@@ -161,6 +161,48 @@ func (s *S3Proxy) DeleteBucketOwnershipControls(ctx context.Context, bucket stri
 	return handleError(err)
 }

+func (s *S3Proxy) PutBucketVersioning(ctx context.Context, bucket string, status types.BucketVersioningStatus) error {
+	_, err := s.client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
+		Bucket: &bucket,
+		VersioningConfiguration: &types.VersioningConfiguration{
+			Status: status,
+		},
+	})
+
+	return handleError(err)
+}
+
+func (s *S3Proxy) GetBucketVersioning(ctx context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
+	out, err := s.client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
+		Bucket: &bucket,
+	})
+
+	return out, handleError(err)
+}
+
+func (s *S3Proxy) ListObjectVersions(ctx context.Context, input *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
+	out, err := s.client.ListObjectVersions(ctx, input)
+	if err != nil {
+		return s3response.ListVersionsResult{}, handleError(err)
+	}
+
+	return s3response.ListVersionsResult{
+		CommonPrefixes:      out.CommonPrefixes,
+		DeleteMarkers:       out.DeleteMarkers,
+		Delimiter:           out.Delimiter,
+		EncodingType:        out.EncodingType,
+		IsTruncated:         out.IsTruncated,
+		KeyMarker:           out.KeyMarker,
+		MaxKeys:             out.MaxKeys,
+		Name:                out.Name,
+		NextKeyMarker:       out.NextKeyMarker,
+		NextVersionIdMarker: out.NextVersionIdMarker,
+		Prefix:              out.Prefix,
+		VersionIdMarker:     input.VersionIdMarker,
+		Versions:            out.Versions,
+	}, nil
+}
+
 func (s *S3Proxy) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
 	out, err := s.client.CreateMultipartUpload(ctx, input)
 	if err != nil {
@@ -304,17 +346,25 @@ func (s *S3Proxy) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyIn
 	}, nil
 }

-func (s *S3Proxy) PutObject(ctx context.Context, input *s3.PutObjectInput) (string, error) {
+func (s *S3Proxy) PutObject(ctx context.Context, input *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
 	// streaming backend is not seekable,
 	// use unsigned payload for streaming ops
 	output, err := s.client.PutObject(ctx, input, s3.WithAPIOptions(
 		v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware,
 	))
 	if err != nil {
-		return "", handleError(err)
+		return s3response.PutObjectOutput{}, handleError(err)
 	}

-	return *output.ETag, nil
+	var versionID string
+	if output.VersionId != nil {
+		versionID = *output.VersionId
+	}
+
+	return s3response.PutObjectOutput{
+		ETag:      *output.ETag,
+		VersionID: versionID,
+	}, nil
 }

 func (s *S3Proxy) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
@@ -416,9 +466,9 @@ func (s *S3Proxy) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Inpu
 	}, nil
 }

-func (s *S3Proxy) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) error {
-	_, err := s.client.DeleteObject(ctx, input)
-	return handleError(err)
+func (s *S3Proxy) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
+	res, err := s.client.DeleteObject(ctx, input)
+	return res, handleError(err)
 }

 func (s *S3Proxy) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
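With `PutObject` now returning a struct, callers can propagate the backend's version ID alongside the ETag. A hedged sketch of how a handler might consume it — the header names follow S3 conventions, but the writer shape is illustrative, not the gateway's actual router code:

```go
package main

import (
	"fmt"

	"github.com/versity/versitygw/s3response"
)

// writeHeader is a stand-in for whatever response writer the gateway uses.
func writeHeader(k, v string) { fmt.Printf("%s: %s\n", k, v) }

// respond emits the PutObject result as S3-style response headers.
func respond(out s3response.PutObjectOutput) {
	writeHeader("ETag", out.ETag)
	if out.VersionID != "" {
		writeHeader("x-amz-version-id", out.VersionID)
	}
}

func main() {
	respond(s3response.PutObjectOutput{ETag: `"abc"`, VersionID: "v1"})
}
```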
211 backend/walk.go
@@ -191,12 +191,19 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
|
||||
|
||||
// Common prefixes are a set, so should not have duplicates.
|
||||
// These are abstractly a "directory", so need to include the
|
||||
// delimiter at the end.
|
||||
// delimiter at the end when we add to the map.
|
||||
cprefNoDelim := prefix + before
|
||||
cpref := prefix + before + delimiter
|
||||
if cpref == marker {
|
||||
pastMarker = true
|
||||
return nil
|
||||
}
|
||||
|
||||
if marker != "" && strings.HasPrefix(marker, cprefNoDelim) {
|
||||
// skip common prefixes that are before the marker
|
||||
return nil
|
||||
}
|
||||
|
||||
cpmap[cpref] = struct{}{}
|
||||
if (len(objects) + len(cpmap)) == int(max) {
|
||||
newMarker = cpref
|
||||
@@ -239,3 +246,205 @@ func contains(a string, strs []string) bool {
	}
	return false
}

type WalkVersioningResults struct {
	CommonPrefixes      []types.CommonPrefix
	ObjectVersions      []types.ObjectVersion
	DelMarkers          []types.DeleteMarkerEntry
	Truncated           bool
	NextMarker          string
	NextVersionIdMarker string
}

type ObjVersionFuncResult struct {
	ObjectVersions      []types.ObjectVersion
	DelMarkers          []types.DeleteMarkerEntry
	NextVersionIdMarker string
	Truncated           bool
}

type GetVersionsFunc func(path, versionIdMarker string, pastVersionIdMarker *bool, availableObjCount int, d fs.DirEntry) (*ObjVersionFuncResult, error)

// WalkVersions walks the supplied fs.FS and returns results compatible with
// ListObjectVersions action response
func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyMarker, versionIdMarker string, max int, getObj GetVersionsFunc, skipdirs []string) (WalkVersioningResults, error) {
	cpmap := make(map[string]struct{})
	var objects []types.ObjectVersion
	var delMarkers []types.DeleteMarkerEntry

	var pastMarker bool
	if keyMarker == "" {
		pastMarker = true
	}
	var nextMarker string
	var nextVersionIdMarker string
	var truncated bool

	pastVersionIdMarker := versionIdMarker == ""

	err := fs.WalkDir(fileSystem, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
		// Ignore the root directory
		if path == "." {
			return nil
		}
		if contains(d.Name(), skipdirs) {
			return fs.SkipDir
		}

		if !pastMarker {
			if path == keyMarker {
				pastMarker = true
			}
			if path < keyMarker {
				return nil
			}
		}

		if d.IsDir() {
			// If prefix is defined and the directory does not match prefix,
			// do not descend into the directory because nothing will
			// match this prefix. Make sure to append the / at the end of
			// directories since this is implied as a directory path name.
			// If path is a prefix of prefix, then path could still be
			// building to match. So only skip if path isn't a prefix of prefix
			// and prefix isn't a prefix of path.
			if prefix != "" &&
				!strings.HasPrefix(path+string(os.PathSeparator), prefix) &&
				!strings.HasPrefix(prefix, path+string(os.PathSeparator)) {
				return fs.SkipDir
			}

			res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
			if err == ErrSkipObj {
				return nil
			}
			if err != nil {
				return fmt.Errorf("directory to object %q: %w", path, err)
			}
			objects = append(objects, res.ObjectVersions...)
			delMarkers = append(delMarkers, res.DelMarkers...)
			if res.Truncated {
				truncated = true
				nextMarker = path
				nextVersionIdMarker = res.NextVersionIdMarker
				return fs.SkipAll
			}

			return nil
		}

		// If object doesn't have prefix, don't include in results.
		if prefix != "" && !strings.HasPrefix(path, prefix) {
			return nil
		}

		if delimiter == "" {
			// If no delimiter specified, then all files with matching
			// prefix are included in results
			res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
			if err == ErrSkipObj {
				return nil
			}
			if err != nil {
				return fmt.Errorf("file to object %q: %w", path, err)
			}
			objects = append(objects, res.ObjectVersions...)
			delMarkers = append(delMarkers, res.DelMarkers...)
			if res.Truncated {
				truncated = true
				nextMarker = path
				nextVersionIdMarker = res.NextVersionIdMarker
				return fs.SkipAll
			}

			return nil
		}

		// Since delimiter is specified, we only want results that
		// do not contain the delimiter beyond the prefix. If the
		// delimiter exists past the prefix, then the substring
		// between the prefix and delimiter is part of common prefixes.
		//
		// For example:
		// prefix = A/
		// delimiter = /
		// and objects:
		// A/file
		// A/B/file
		// B/C
		// would return:
		// objects: A/file
		// common prefix: A/B/
		//
		// Note: No objects are included past the common prefix since
		// these are all rolled up into the common prefix.
		// Note: The delimiter can be anything, so we have to operate on
		// the full path without any assumptions on posix directory hierarchy
		// here. Usually the delimiter will be "/", but that's not required.
		suffix := strings.TrimPrefix(path, prefix)
		before, _, found := strings.Cut(suffix, delimiter)
		if !found {
			res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
			if err == ErrSkipObj {
				return nil
			}
			if err != nil {
				return fmt.Errorf("file to object %q: %w", path, err)
			}
			objects = append(objects, res.ObjectVersions...)
			delMarkers = append(delMarkers, res.DelMarkers...)

			if res.Truncated {
				truncated = true
				nextMarker = path
				nextVersionIdMarker = res.NextVersionIdMarker
				return fs.SkipAll
			}
			return nil
		}

		// Common prefixes are a set, so should not have duplicates.
		// These are abstractly a "directory", so need to include the
		// delimiter at the end.
		cpmap[prefix+before+delimiter] = struct{}{}
		if (len(objects) + len(cpmap)) == int(max) {
			nextMarker = path
			truncated = true

			return fs.SkipAll
		}

		return nil
	})
	if err != nil {
		return WalkVersioningResults{}, err
	}

	var commonPrefixStrings []string
	for k := range cpmap {
		commonPrefixStrings = append(commonPrefixStrings, k)
	}
	sort.Strings(commonPrefixStrings)
	commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
	for _, cp := range commonPrefixStrings {
		pfx := cp
		commonPrefixes = append(commonPrefixes, types.CommonPrefix{
			Prefix: &pfx,
		})
	}

	return WalkVersioningResults{
		CommonPrefixes:      commonPrefixes,
		ObjectVersions:      objects,
		DelMarkers:          delMarkers,
		Truncated:           truncated,
		NextMarker:          nextMarker,
		NextVersionIdMarker: nextVersionIdMarker,
	}, nil
}
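To make the new API concrete, here is a rough sketch of how a backend might drive WalkVersions. Each backend supplies its own GetVersionsFunc, so the lookupVersions helper, bucketRoot, and the marker variables below are placeholders, not part of the diff:

// Sketch only: driving backend.WalkVersions over an os.DirFS.
getObj := func(path, versionIdMarker string, pastVersionIdMarker *bool, availableObjCount int, d fs.DirEntry) (*backend.ObjVersionFuncResult, error) {
	// lookupVersions is a hypothetical helper standing in for a real
	// backend's per-object version lookup.
	versions, delMarkers, truncated, nextVid := lookupVersions(path, versionIdMarker, pastVersionIdMarker, availableObjCount)
	return &backend.ObjVersionFuncResult{
		ObjectVersions:      versions,
		DelMarkers:          delMarkers,
		Truncated:           truncated,
		NextVersionIdMarker: nextVid,
	}, nil
}

res, err := backend.WalkVersions(ctx, os.DirFS(bucketRoot), prefix, "/", keyMarker, versionIdMarker, 1000, getObj, nil)
if err != nil {
	return err
}
// res.ObjectVersions, res.DelMarkers, and res.CommonPrefixes map directly
// onto the ListObjectVersions response fields.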
@@ -25,6 +25,7 @@ import (
var (
	chownuid, chowngid bool
	bucketlinks        bool
	versioningDir      string
)

func posixCommand() *cli.Command {
@@ -61,6 +62,12 @@ will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
				EnvVars:     []string{"VGW_BUCKET_LINKS"},
				Destination: &bucketlinks,
			},
			&cli.StringFlag{
				Name:        "versioning-dir",
				Usage:       "the directory path to enable bucket versioning",
				EnvVars:     []string{"VGW_VERSIONING_DIR"},
				Destination: &versioningDir,
			},
		},
	}
}
@@ -77,9 +84,10 @@ func runPosix(ctx *cli.Context) error {
	}

	be, err := posix.New(gwroot, meta.XattrMeta{}, posix.PosixOpts{
		ChownUID:    chownuid,
		ChownGID:    chowngid,
		BucketLinks: bucketlinks,
		ChownUID:      chownuid,
		ChownGID:      chowngid,
		BucketLinks:   bucketlinks,
		VersioningDir: versioningDir,
	})
	if err != nil {
		return fmt.Errorf("init posix: %v", err)
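Operationally the new option is passed as `versitygw posix --versioning-dir /path /gwroot` (or via VGW_VERSIONING_DIR) and lands in posix.New exactly as above. A minimal programmatic sketch; the paths and the choice to keep the versioning directory outside the gateway root are assumptions:

// Sketch: enabling versioning on the posix backend directly.
be, err := posix.New("/mnt/fs/gwroot", meta.XattrMeta{}, posix.PosixOpts{
	BucketLinks:   true,
	VersioningDir: "/mnt/fs/versioning", // assumed to sit outside the gateway root
})
if err != nil {
	return fmt.Errorf("init posix: %v", err)
}
_ = be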
@@ -22,20 +22,21 @@ import (
)

var (
	awsID           string
	awsSecret       string
	endpoint        string
	prefix          string
	dstBucket       string
	partSize        int64
	objSize         int64
	concurrency     int
	files           int
	totalReqs       int
	upload          bool
	download        bool
	pathStyle       bool
	checksumDisable bool
	awsID             string
	awsSecret         string
	endpoint          string
	prefix            string
	dstBucket         string
	partSize          int64
	objSize           int64
	concurrency       int
	files             int
	totalReqs         int
	upload            bool
	download          bool
	pathStyle         bool
	checksumDisable   bool
	versioningEnabled bool
)

func testCommand() *cli.Command {
@@ -87,6 +88,14 @@ func initTestCommands() []*cli.Command {
			Usage:       "Tests the full flow of gateway.",
			Description: `Runs all the available tests to test the full flow of the gateway.`,
			Action:      getAction(integration.TestFullFlow),
			Flags: []cli.Flag{
				&cli.BoolFlag{
					Name:        "versioning-enabled",
					Usage:       "Test the bucket object versioning, if the versioning is enabled",
					Destination: &versioningEnabled,
					Aliases:     []string{"vs"},
				},
			},
		},
		{
			Name: "posix",
@@ -276,6 +285,9 @@ func getAction(tf testFunc) func(*cli.Context) error {
		if debug {
			opts = append(opts, integration.WithDebug())
		}
		if versioningEnabled {
			opts = append(opts, integration.WithVersioningEnabled())
		}

		s := integration.NewS3Conf(opts...)
		tf(s)
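WithVersioningEnabled follows the same functional-option shape as the existing WithDebug. A self-contained sketch of that pattern; the field names on S3Conf are assumptions, not taken from the diff:

package integration

// Sketch of the functional-option pattern used by NewS3Conf.
type S3Conf struct {
	debug             bool
	versioningEnabled bool // assumed field name
}

type Option func(*S3Conf)

func WithDebug() Option             { return func(c *S3Conf) { c.debug = true } }
func WithVersioningEnabled() Option { return func(c *S3Conf) { c.versioningEnabled = true } }

func NewS3Conf(opts ...Option) *S3Conf {
	c := &S3Conf{}
	for _, opt := range opts {
		opt(c)
	}
	return c
}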
go.mod (39 lines changed)
@@ -7,8 +7,8 @@ require (
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0
	github.com/DataDog/datadog-go/v5 v5.5.0
	github.com/aws/aws-sdk-go-v2 v1.30.4
	github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1
	github.com/aws/aws-sdk-go-v2 v1.30.5
	github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2
	github.com/aws/smithy-go v1.20.4
	github.com/go-ldap/ldap/v3 v3.4.8
	github.com/gofiber/fiber/v2 v2.52.5
@@ -16,13 +16,14 @@ require (
	github.com/google/uuid v1.6.0
	github.com/hashicorp/vault-client-go v0.4.3
	github.com/nats-io/nats.go v1.37.0
	github.com/oklog/ulid/v2 v2.1.0
	github.com/pkg/xattr v0.4.10
	github.com/segmentio/kafka-go v0.4.47
	github.com/smira/go-statsd v1.3.3
	github.com/urfave/cli/v2 v2.27.4
	github.com/valyala/fasthttp v1.55.0
	github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
	golang.org/x/sys v0.24.0
	golang.org/x/sys v0.25.0
)

require (
@@ -30,11 +31,11 @@ require (
	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect
	github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -48,25 +49,25 @@ require (
	github.com/pierrec/lz4/v4 v4.1.21 // indirect
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
	github.com/ryanuber/go-glob v1.0.0 // indirect
	golang.org/x/crypto v0.26.0 // indirect
	golang.org/x/net v0.28.0 // indirect
	golang.org/x/text v0.17.0 // indirect
	golang.org/x/crypto v0.27.0 // indirect
	golang.org/x/net v0.29.0 // indirect
	golang.org/x/text v0.18.0 // indirect
	golang.org/x/time v0.6.0 // indirect
)

require (
	github.com/andybalholm/brotli v1.1.0 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect
	github.com/aws/aws-sdk-go-v2/config v1.27.31
	github.com/aws/aws-sdk-go-v2/credentials v1.17.30
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 // indirect
	github.com/aws/aws-sdk-go-v2/config v1.27.33
	github.com/aws/aws-sdk-go-v2/credentials v1.17.32
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
	github.com/klauspost/compress v1.17.9 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
go.sum (79 lines changed)
@@ -21,42 +21,42 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8=
github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g=
github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw=
github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI=
github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM=
github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q=
github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15 h1:ijB7hr56MngOiELJe0C5aQRaBQ11LveNgWFyG02AUto=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15/go.mod h1:0QEmQSSWMVfiAk93l1/ayR9DQ9+jwni7gHS2NARZXB0=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs=
github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU=
github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks=
github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I=
github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18 h1:9DIp7vhmOPmueCDwpXa45bEbLHHTt1kcxChdTJWWxvI=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18/go.mod h1:aJv/Fwz8r56ozwYFRC4bzoeL1L17GYQYemfblOBux1M=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 h1:mimdLQkIX1zr8GIPY1ZtALdBQGxcASiBd2MOp8m/dMc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16/go.mod h1:YHk6owoSwrIsok+cAH9PENCOGoH5PU2EllX4vLtSrsY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 h1:Roo69qTpfu8OlJ2Tb7pAYVuF0CpuUMB0IYWwYP/4DZM=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17/go.mod h1:NcWPxQzGM1USQggaTVwz6VpqMZPX1CvDJLDh6jnOCa4=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 h1:GckUnpm4EJOAio1c8o25a+b3lVfwVzC9gnSBqiiNmZM=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18/go.mod h1:Br6+bxfG33Dk3ynmkhsW2Z/t9D4+lRqdLDNCKi85w0U=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHCiSH0jyd6gROjlJtNwov0eGYNz8s8nFcR0jQ=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 h1:mx2ucgtv+MWzJesJY9Ig/8AFHgoE5FwLXwUVgW/FGdI=
github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 h1:FLMkfEiRjhgeDTCjjLoc3URo/TBkgeQbocA78lfkzSI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19/go.mod h1:Vx+GucNSsdhaxs3aZIKfSUjKVGsxN25nX2SRcdhuw08=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 h1:u+EfGmksnJc/x5tq3A+OD7LrMbSSR/5TrKLvkdy/fhY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17/go.mod h1:VaMx6302JHax2vHJWgRo+5n9zvbacs3bLU/23DNQrTY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2 h1:Kp6PWAlXwP1UvIflkIP6MFZYBNDCa4mFCGtxrpICVOg=
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2/go.mod h1:5FmD/Dqq57gP+XwaUnd5WFPipAuzrf0HmupX27Gvjvc=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o=
github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4=
github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
@@ -129,6 +129,9 @@ github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
@@ -189,8 +192,8 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
@@ -206,8 +209,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -231,8 +234,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -248,8 +251,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -5,9 +5,11 @@ rm -rf /tmp/gw
mkdir /tmp/gw
rm -rf /tmp/covdata
mkdir /tmp/covdata
rm -rf /tmp/versioningdir
mkdir /tmp/versioningdir

# run server in background
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass --iam-dir /tmp/gw posix /tmp/gw &
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_PID=$!

# wait a second for server to start up
@@ -21,7 +23,7 @@ fi

# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow -vs; then
	echo "full flow tests failed"
	kill $GW_PID
	exit 1
@@ -53,7 +53,7 @@ var _ backend.Backend = &BackendMock{}
//			DeleteBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) error {
//				panic("mock out the DeleteBucketTagging method")
//			},
//			DeleteObjectFunc: func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error {
//			DeleteObjectFunc: func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
//				panic("mock out the DeleteObject method")
//			},
//			DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string) error {
@@ -113,7 +113,7 @@ var _ backend.Backend = &BackendMock{}
//			ListMultipartUploadsFunc: func(contextMoqParam context.Context, listMultipartUploadsInput *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
//				panic("mock out the ListMultipartUploads method")
//			},
//			ListObjectVersionsFunc: func(contextMoqParam context.Context, listObjectVersionsInput *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) {
//			ListObjectVersionsFunc: func(contextMoqParam context.Context, listObjectVersionsInput *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
//				panic("mock out the ListObjectVersions method")
//			},
//			ListObjectsFunc: func(contextMoqParam context.Context, listObjectsInput *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
@@ -137,10 +137,10 @@ var _ backend.Backend = &BackendMock{}
//			PutBucketTaggingFunc: func(contextMoqParam context.Context, bucket string, tags map[string]string) error {
//				panic("mock out the PutBucketTagging method")
//			},
//			PutBucketVersioningFunc: func(contextMoqParam context.Context, putBucketVersioningInput *s3.PutBucketVersioningInput) error {
//			PutBucketVersioningFunc: func(contextMoqParam context.Context, bucket string, status types.BucketVersioningStatus) error {
//				panic("mock out the PutBucketVersioning method")
//			},
//			PutObjectFunc: func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (string, error) {
//			PutObjectFunc: func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
//				panic("mock out the PutObject method")
//			},
//			PutObjectAclFunc: func(contextMoqParam context.Context, putObjectAclInput *s3.PutObjectAclInput) error {
@@ -214,7 +214,7 @@ type BackendMock struct {
	DeleteBucketTaggingFunc func(contextMoqParam context.Context, bucket string) error

	// DeleteObjectFunc mocks the DeleteObject method.
	DeleteObjectFunc func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error
	DeleteObjectFunc func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)

	// DeleteObjectTaggingFunc mocks the DeleteObjectTagging method.
	DeleteObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string) error
@@ -274,7 +274,7 @@ type BackendMock struct {
	ListMultipartUploadsFunc func(contextMoqParam context.Context, listMultipartUploadsInput *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)

	// ListObjectVersionsFunc mocks the ListObjectVersions method.
	ListObjectVersionsFunc func(contextMoqParam context.Context, listObjectVersionsInput *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)
	ListObjectVersionsFunc func(contextMoqParam context.Context, listObjectVersionsInput *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error)

	// ListObjectsFunc mocks the ListObjects method.
	ListObjectsFunc func(contextMoqParam context.Context, listObjectsInput *s3.ListObjectsInput) (s3response.ListObjectsResult, error)
@@ -298,10 +298,10 @@ type BackendMock struct {
	PutBucketTaggingFunc func(contextMoqParam context.Context, bucket string, tags map[string]string) error

	// PutBucketVersioningFunc mocks the PutBucketVersioning method.
	PutBucketVersioningFunc func(contextMoqParam context.Context, putBucketVersioningInput *s3.PutBucketVersioningInput) error
	PutBucketVersioningFunc func(contextMoqParam context.Context, bucket string, status types.BucketVersioningStatus) error

	// PutObjectFunc mocks the PutObject method.
	PutObjectFunc func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (string, error)
	PutObjectFunc func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error)

	// PutObjectAclFunc mocks the PutObjectAcl method.
	PutObjectAclFunc func(contextMoqParam context.Context, putObjectAclInput *s3.PutObjectAclInput) error
@@ -632,8 +632,10 @@ type BackendMock struct {
		PutBucketVersioning []struct {
			// ContextMoqParam is the contextMoqParam argument value.
			ContextMoqParam context.Context
			// PutBucketVersioningInput is the putBucketVersioningInput argument value.
			PutBucketVersioningInput *s3.PutBucketVersioningInput
			// Bucket is the bucket argument value.
			Bucket string
			// Status is the status argument value.
			Status types.BucketVersioningStatus
		}
		// PutObject holds details about calls to the PutObject method.
		PutObject []struct {
@@ -1154,7 +1156,7 @@ func (mock *BackendMock) DeleteBucketTaggingCalls() []struct {
}

// DeleteObject calls DeleteObjectFunc.
func (mock *BackendMock) DeleteObject(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error {
func (mock *BackendMock) DeleteObject(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
	if mock.DeleteObjectFunc == nil {
		panic("BackendMock.DeleteObjectFunc: method is nil but Backend.DeleteObject was just called")
	}
@@ -1898,7 +1900,7 @@ func (mock *BackendMock) ListMultipartUploadsCalls() []struct {
}

// ListObjectVersions calls ListObjectVersionsFunc.
func (mock *BackendMock) ListObjectVersions(contextMoqParam context.Context, listObjectVersionsInput *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) {
func (mock *BackendMock) ListObjectVersions(contextMoqParam context.Context, listObjectVersionsInput *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
	if mock.ListObjectVersionsFunc == nil {
		panic("BackendMock.ListObjectVersionsFunc: method is nil but Backend.ListObjectVersions was just called")
	}
@@ -2202,21 +2204,23 @@ func (mock *BackendMock) PutBucketTaggingCalls() []struct {
}

// PutBucketVersioning calls PutBucketVersioningFunc.
func (mock *BackendMock) PutBucketVersioning(contextMoqParam context.Context, putBucketVersioningInput *s3.PutBucketVersioningInput) error {
func (mock *BackendMock) PutBucketVersioning(contextMoqParam context.Context, bucket string, status types.BucketVersioningStatus) error {
	if mock.PutBucketVersioningFunc == nil {
		panic("BackendMock.PutBucketVersioningFunc: method is nil but Backend.PutBucketVersioning was just called")
	}
	callInfo := struct {
		ContextMoqParam          context.Context
		PutBucketVersioningInput *s3.PutBucketVersioningInput
		ContextMoqParam context.Context
		Bucket          string
		Status          types.BucketVersioningStatus
	}{
		ContextMoqParam:          contextMoqParam,
		PutBucketVersioningInput: putBucketVersioningInput,
		ContextMoqParam: contextMoqParam,
		Bucket:          bucket,
		Status:          status,
	}
	mock.lockPutBucketVersioning.Lock()
	mock.calls.PutBucketVersioning = append(mock.calls.PutBucketVersioning, callInfo)
	mock.lockPutBucketVersioning.Unlock()
	return mock.PutBucketVersioningFunc(contextMoqParam, putBucketVersioningInput)
	return mock.PutBucketVersioningFunc(contextMoqParam, bucket, status)
}

// PutBucketVersioningCalls gets all the calls that were made to PutBucketVersioning.
@@ -2224,12 +2228,14 @@ func (mock *BackendMock) PutBucketVersioning(contextMoqParam context.Context, pu
//
//	len(mockedBackend.PutBucketVersioningCalls())
func (mock *BackendMock) PutBucketVersioningCalls() []struct {
	ContextMoqParam          context.Context
	PutBucketVersioningInput *s3.PutBucketVersioningInput
	ContextMoqParam context.Context
	Bucket          string
	Status          types.BucketVersioningStatus
} {
	var calls []struct {
		ContextMoqParam          context.Context
		PutBucketVersioningInput *s3.PutBucketVersioningInput
		ContextMoqParam context.Context
		Bucket          string
		Status          types.BucketVersioningStatus
	}
	mock.lockPutBucketVersioning.RLock()
	calls = mock.calls.PutBucketVersioning
@@ -2238,7 +2244,7 @@ func (mock *BackendMock) PutBucketVersioningCalls() []struct {
}

// PutObject calls PutObjectFunc.
func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (string, error) {
func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
	if mock.PutObjectFunc == nil {
		panic("BackendMock.PutObjectFunc: method is nil but Backend.PutObject was just called")
	}
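Tests that consume the regenerated mock wire the new signatures like this (a sketch; the surrounding test scaffolding is omitted, and all names come straight from the mock above):

be := &BackendMock{
	PutBucketVersioningFunc: func(_ context.Context, bucket string, status types.BucketVersioningStatus) error {
		return nil
	},
	DeleteObjectFunc: func(_ context.Context, _ *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
		return &s3.DeleteObjectOutput{}, nil
	},
	PutObjectFunc: func(_ context.Context, _ *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
		return s3response.PutObjectOutput{ETag: "etag"}, nil
	},
}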
@@ -382,6 +382,11 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
			})
	}

	action := auth.GetObjectAction
	if versionId != "" {
		action = auth.GetObjectVersionAction
	}

	err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
		Readonly: c.readonly,
		Acl:      parsedAcl,
@@ -390,7 +395,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
		Acc:    acct,
		Bucket: bucket,
		Object: key,
		Action: auth.GetObjectAction,
		Action: action,
	})
	if err != nil {
		return SendResponse(ctx, err,
@@ -410,11 +415,23 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
		VersionId: &versionId,
	})
	if err != nil {
		if res != nil {
			utils.SetResponseHeaders(ctx, []utils.CustomHeader{
				{
					Key:   "x-amz-delete-marker",
					Value: "true",
				},
				{
					Key:   "Last-Modified",
					Value: res.LastModified.Format(timefmt),
				},
			})
		}
		return SendResponse(ctx, err,
			&MetaOpts{
				Logger:      c.logger,
				MetricsMng:  c.mm,
				Action:      metrics.ActionGetObject,
				Action:      metrics.ActionHeadObject,
				BucketOwner: parsedAcl.Owner,
			})
	}
@@ -478,6 +495,15 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
	utils.SetMetaHeaders(ctx, res.Metadata)
	// Set other response headers
	utils.SetResponseHeaders(ctx, hdrs)
	// Set version id header
	if getstring(res.VersionId) != "" {
		utils.SetResponseHeaders(ctx, []utils.CustomHeader{
			{
				Key:   "x-amz-version-id",
				Value: getstring(res.VersionId),
			},
		})
	}

	status := http.StatusOK
	if acceptRange != "" {
@@ -981,8 +1007,8 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
	objectOwnership := types.ObjectOwnership(
		ctx.Get("X-Amz-Object-Ownership", string(types.ObjectOwnershipBucketOwnerEnforced)),
	)
	mfa := ctx.Get("X-Amz-Mfa")
	contentMD5 := ctx.Get("Content-MD5")
	// mfa := ctx.Get("X-Amz-Mfa")
	// contentMD5 := ctx.Get("Content-MD5")
	acct := ctx.Locals("account").(auth.Account)
	isRoot := ctx.Locals("isRoot").(bool)

@@ -1136,13 +1162,21 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
			})
	}

	err = c.be.PutBucketVersioning(ctx.Context(),
		&s3.PutBucketVersioningInput{
			Bucket:                  &bucket,
			MFA:                     &mfa,
			VersioningConfiguration: &versioningConf,
			ContentMD5:              &contentMD5,
		})
	if versioningConf.Status != types.BucketVersioningStatusEnabled &&
		versioningConf.Status != types.BucketVersioningStatusSuspended {
		if c.debug {
			log.Printf("invalid versioning configuration status: %v\n", versioningConf.Status)
		}
		return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
			&MetaOpts{
				Logger:      c.logger,
				MetricsMng:  c.mm,
				Action:      metrics.ActionPutBucketVersioning,
				BucketOwner: parsedAcl.Owner,
			})
	}

	err = c.be.PutBucketVersioning(ctx.Context(), bucket, versioningConf.Status)
	return SendResponse(ctx, err,
		&MetaOpts{
			Logger: c.logger,
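From a client's perspective, only Enabled and Suspended now reach the backend; anything else is rejected up front with MalformedXML. A hedged sketch against a local gateway using aws-sdk-go-v2, assuming the usual aws, config, s3, and types imports; the endpoint and bucket are placeholders:

cfg, err := config.LoadDefaultConfig(context.TODO())
if err != nil {
	log.Fatal(err)
}
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
	o.BaseEndpoint = aws.String("http://127.0.0.1:7070")
	o.UsePathStyle = true
})
_, err = client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
	Bucket: aws.String("mybucket"),
	VersioningConfiguration: &types.VersioningConfiguration{
		// Any status other than Enabled/Suspended now fails validation.
		Status: types.BucketVersioningStatusEnabled,
	},
})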
@@ -1530,6 +1564,8 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
	versionId := ctx.Query("versionId")
	acct := ctx.Locals("account").(auth.Account)
	isRoot := ctx.Locals("isRoot").(bool)
	contentType := ctx.Get("Content-Type")
	contentEncoding := ctx.Get("Content-Encoding")
	parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
	tagging := ctx.Get("x-amz-tagging")

@@ -1814,6 +1850,14 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
			ExpectedBucketOwner: &bucketOwner,
			CopySourceRange:     &copySrcRange,
		})
	if err == nil && resp.CopySourceVersionId != "" {
		utils.SetResponseHeaders(ctx, []utils.CustomHeader{
			{
				Key:   "x-amz-copy-source-version-id",
				Value: resp.CopySourceVersionId,
			},
		})
	}
	return SendXMLResponse(ctx, resp, err,
		&MetaOpts{
			Logger: c.logger,
@@ -2139,6 +2183,21 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
			StorageClass: types.StorageClass(storageClass),
		})
	if err == nil {
		hdrs := []utils.CustomHeader{}
		if getstring(res.VersionId) != "" {
			hdrs = append(hdrs, utils.CustomHeader{
				Key:   "x-amz-version-id",
				Value: getstring(res.VersionId),
			})
		}
		if getstring(res.CopySourceVersionId) != "" {
			hdrs = append(hdrs, utils.CustomHeader{
				Key:   "x-amz-copy-source-version-id",
				Value: getstring(res.CopySourceVersionId),
			})
		}
		utils.SetResponseHeaders(ctx, hdrs)
	}

	return SendXMLResponse(ctx, res.CopyObjectResult, err,
		&MetaOpts{
			Logger: c.logger,
@@ -2230,11 +2289,13 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
	}

	ctx.Locals("logReqBody", false)
	etag, err := c.be.PutObject(ctx.Context(),
	res, err := c.be.PutObject(ctx.Context(),
		&s3.PutObjectInput{
			Bucket:          &bucket,
			Key:             &keyStart,
			ContentLength:   &contentLength,
			ContentType:     &contentType,
			ContentEncoding: &contentEncoding,
			Metadata:        metadata,
			Body:            body,
			Tagging:         &tagging,
@@ -2242,8 +2303,36 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
			ObjectLockMode:            objLock.ObjectLockMode,
			ObjectLockLegalHoldStatus: objLock.LegalHoldStatus,
		})
	ctx.Response().Header.Set("ETag", etag)
	return SendResponse(ctx, err,
	if err != nil {
		return SendResponse(ctx, err,
			&MetaOpts{
				Logger:        c.logger,
				MetricsMng:    c.mm,
				ContentLength: contentLength,
				EvSender:      c.evSender,
				Action:        metrics.ActionPutObject,
				BucketOwner:   parsedAcl.Owner,
				ObjectSize:    contentLength,
				EventName:     s3event.EventObjectCreatedPut,
			})
	}
	hdrs := []utils.CustomHeader{
		{
			Key:   "ETag",
			Value: res.ETag,
		},
	}

	if res.VersionID != "" {
		hdrs = append(hdrs, utils.CustomHeader{
			Key:   "x-amz-version-id",
			Value: res.VersionID,
		})
	}

	utils.SetResponseHeaders(ctx, hdrs)

	return SendResponse(ctx, nil,
		&MetaOpts{
			Logger:     c.logger,
			MetricsMng: c.mm,
@@ -2251,7 +2340,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
			EvSender:    c.evSender,
			Action:      metrics.ActionPutObject,
			BucketOwner: parsedAcl.Owner,
			ObjectETag:  &etag,
			ObjectETag:  &res.ETag,
			ObjectSize:  contentLength,
			EventName:   s3event.EventObjectCreatedPut,
		})
@@ -2565,6 +2654,8 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
			})
	}

	//TODO: check s3:DeleteObjectVersion policy in case a user tries to delete a version of an object

	err := auth.VerifyAccess(ctx.Context(), c.be,
		auth.AccessOptions{
			Readonly: c.readonly,
@@ -2600,13 +2691,42 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
			})
	}

	err = c.be.DeleteObject(ctx.Context(),
	res, err := c.be.DeleteObject(ctx.Context(),
		&s3.DeleteObjectInput{
			Bucket:    &bucket,
			Key:       &key,
			VersionId: &versionId,
		})
	return SendResponse(ctx, err,
	if err != nil {
		return SendResponse(ctx, err,
			&MetaOpts{
				Logger:      c.logger,
				MetricsMng:  c.mm,
				EvSender:    c.evSender,
				Action:      metrics.ActionDeleteObject,
				BucketOwner: parsedAcl.Owner,
				EventName:   s3event.EventObjectRemovedDelete,
				Status:      http.StatusNoContent,
			})
	}

	hdrs := []utils.CustomHeader{}
	if res.VersionId != nil && *res.VersionId != "" {
		hdrs = append(hdrs, utils.CustomHeader{
			Key:   "x-amz-version-id",
			Value: *res.VersionId,
		})
	}
	if res.DeleteMarker != nil && *res.DeleteMarker {
		hdrs = append(hdrs, utils.CustomHeader{
			Key:   "x-amz-delete-marker",
			Value: "true",
		})
	}

	utils.SetResponseHeaders(ctx, hdrs)

	return SendResponse(ctx, nil,
		&MetaOpts{
			Logger:     c.logger,
			MetricsMng: c.mm,
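With these headers set, SDK clients observe versioned deletes the same way they would against AWS. For example, a sketch reusing the client from the earlier versioning snippet (bucket and key are placeholders):

out, err := client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
	Bucket: aws.String("mybucket"),
	Key:    aws.String("myobject"),
})
if err != nil {
	log.Fatal(err)
}
// On a versioning-enabled bucket a plain delete creates a delete marker,
// which the gateway now reports along with its version id.
fmt.Println(aws.ToBool(out.DeleteMarker), aws.ToString(out.VersionId))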
@@ -2679,6 +2799,7 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
	isRoot := ctx.Locals("isRoot").(bool)
	parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
	partNumberQuery := int32(ctx.QueryInt("partNumber", -1))
	versionId := ctx.Query("versionId")
	key := ctx.Params("key")
	keyEnd := ctx.Params("*1")
	if keyEnd != "" {
@@ -2733,8 +2854,21 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
		Bucket:     &bucket,
		Key:        &key,
		PartNumber: partNumber,
		VersionId:  &versionId,
	})
	if err != nil {
		if res != nil {
			utils.SetResponseHeaders(ctx, []utils.CustomHeader{
				{
					Key:   "x-amz-delete-marker",
					Value: "true",
				},
				{
					Key:   "Last-Modified",
					Value: res.LastModified.Format(timefmt),
				},
			})
		}
		return SendResponse(ctx, err,
			&MetaOpts{
				Logger: c.logger,
@@ -2822,6 +2956,13 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
		Value: contentType,
	})

	if getstring(res.VersionId) != "" {
		headers = append(headers, utils.CustomHeader{
			Key:   "x-amz-version-id",
			Value: getstring(res.VersionId),
		})
	}

	utils.SetResponseHeaders(ctx, headers)

	return SendResponse(ctx, nil,
@@ -2842,6 +2983,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
	isRoot := ctx.Locals("isRoot").(bool)
	parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
	contentType := ctx.Get("Content-Type")
	contentEncoding := ctx.Get("Content-Encoding")
	tagging := ctx.Get("X-Amz-Tagging")

	if keyEnd != "" {
@@ -3010,6 +3152,14 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
			},
		})
	if err == nil {
		if getstring(res.VersionId) != "" {
			utils.SetResponseHeaders(ctx, []utils.CustomHeader{
				{
					Key:   "x-amz-version-id",
					Value: getstring(res.VersionId),
				},
			})
		}
	}
	return SendXMLResponse(ctx, res, err,
		&MetaOpts{
			Logger: c.logger,
@@ -3071,6 +3221,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
		Key:                       &key,
		Tagging:                   &tagging,
		ContentType:               &contentType,
		ContentEncoding:           &contentEncoding,
		ObjectLockRetainUntilDate: &objLockState.RetainUntilDate,
		ObjectLockMode:            objLockState.ObjectLockMode,
		ObjectLockLegalHoldStatus: objLockState.LegalHoldStatus,
@@ -385,8 +385,8 @@ func TestS3ApiController_ListActions(t *testing.T) {
		GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
			return &s3.GetBucketVersioningOutput{}, nil
		},
		ListObjectVersionsFunc: func(contextMoqParam context.Context, listObjectVersionsInput *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) {
			return &s3.ListObjectVersionsOutput{}, nil
		ListObjectVersionsFunc: func(contextMoqParam context.Context, listObjectVersionsInput *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
			return s3response.ListVersionsResult{}, nil
		},
		GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
			return []byte{}, nil
@@ -677,7 +677,7 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
		PutBucketTaggingFunc: func(contextMoqParam context.Context, bucket string, tags map[string]string) error {
			return nil
		},
		PutBucketVersioningFunc: func(contextMoqParam context.Context, putBucketVersioningInput *s3.PutBucketVersioningInput) error {
		PutBucketVersioningFunc: func(contextMoqParam context.Context, bucket string, status types.BucketVersioningStatus) error {
			return nil
		},
		PutBucketPolicyFunc: func(contextMoqParam context.Context, bucket string, policy []byte) error {
@@ -968,8 +968,8 @@ func TestS3ApiController_PutActions(t *testing.T) {
				CopyObjectResult: &types.CopyObjectResult{},
			}, nil
		},
		PutObjectFunc: func(context.Context, *s3.PutObjectInput) (string, error) {
			return "ETag", nil
		PutObjectFunc: func(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
			return s3response.PutObjectOutput{}, nil
		},
		UploadPartFunc: func(context.Context, *s3.UploadPartInput) (string, error) {
			return "hello", nil
@@ -1383,8 +1383,8 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
		GetBucketAclFunc: func(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
			return acldata, nil
		},
		DeleteObjectFunc: func(context.Context, *s3.DeleteObjectInput) error {
			return nil
		DeleteObjectFunc: func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
			return &s3.DeleteObjectOutput{}, nil
		},
		AbortMultipartUploadFunc: func(context.Context, *s3.AbortMultipartUploadInput) error {
			return nil
@@ -1414,8 +1414,8 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
		GetBucketAclFunc: func(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
			return acldata, nil
		},
		DeleteObjectFunc: func(context.Context, *s3.DeleteObjectInput) error {
			return s3err.GetAPIError(7)
		DeleteObjectFunc: func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		},
		GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
			return nil, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)
@@ -132,6 +132,8 @@ const (
	ErrMissingSecurityHeader
	ErrInvalidMetadataDirective
	ErrKeyTooLong
	ErrInvalidVersionId
	ErrNoSuchVersion

	// Non-AWS errors
	ErrExistingObjectIsDirectory
@@ -517,8 +519,12 @@ var errorCodeResponse = map[ErrorCode]APIError{
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrInvalidMetadataDirective: {
		Code:        "InvalidArgument",
		Description: "Unknown metadata directive.",
	},
	ErrInvalidVersionId: {
		Code:           "InvalidArgument",
		Description:    "Unknown metadata directive.",
		Description:    "Invalid version id specified",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrKeyTooLong: {
@@ -526,6 +532,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "Your key is too long.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoSuchVersion: {
		Code:           "NoSuchVersion",
		Description:    "The specified version does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},

	// non aws errors
	ErrExistingObjectIsDirectory: {
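The new codes surface through the existing GetAPIError path. For instance, a backend that cannot find a requested version might do the following (a sketch; findVersion is a hypothetical lookup helper):

// Sketch: surfacing the new NoSuchVersion error from a version lookup.
v, ok := findVersion(bucket, key, versionId)
if !ok {
	// serialized as HTTP 404 with code "NoSuchVersion"
	return nil, s3err.GetAPIError(s3err.ErrNoSuchVersion)
}
return v, nil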
@@ -23,6 +23,11 @@ import (

const RFC3339TimeFormat = "2006-01-02T15:04:05.999Z"

type PutObjectOutput struct {
	ETag      string
	VersionID string
}

// Part describes part metadata.
type Part struct {
	PartNumber int
@@ -40,7 +45,7 @@ func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
		Alias: (*Alias)(&p),
	}

	aux.LastModified = p.LastModified.Format(RFC3339TimeFormat)
	aux.LastModified = p.LastModified.UTC().Format(RFC3339TimeFormat)

	return e.EncodeElement(aux, start)
}
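The RFC3339TimeFormat layout ends in a literal "Z", so formatting a non-UTC time without converting first stamps a local wall-clock value with a UTC suffix; the .UTC() calls added throughout this file avoid that. A self-contained demonstration:

package main

import (
	"fmt"
	"time"
)

const RFC3339TimeFormat = "2006-01-02T15:04:05.999Z"

func main() {
	loc, _ := time.LoadLocation("America/Los_Angeles")
	t := time.Date(2024, 9, 1, 16, 0, 0, 0, loc) // 23:00 UTC

	fmt.Println(t.Format(RFC3339TimeFormat))       // 2024-09-01T16:00:00Z (wrong instant)
	fmt.Println(t.UTC().Format(RFC3339TimeFormat)) // 2024-09-01T23:00:00Z (correct)
}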
@@ -77,6 +82,23 @@ type GetObjectAttributesResult struct {
	ObjectParts *ObjectParts
}

func (r GetObjectAttributesResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type Alias GetObjectAttributesResult
	aux := &struct {
		LastModified *string `xml:"LastModified"`
		*Alias
	}{
		Alias: (*Alias)(&r),
	}

	if r.LastModified != nil {
		formattedTime := r.LastModified.UTC().Format(RFC3339TimeFormat)
		aux.LastModified = &formattedTime
	}

	return e.EncodeElement(aux, start)
}

type ObjectParts struct {
	PartNumberMarker     int
	NextPartNumberMarker int
@@ -157,7 +179,7 @@ func (o Object) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	}

	if o.LastModified != nil {
		formattedTime := o.LastModified.Format(RFC3339TimeFormat)
		formattedTime := o.LastModified.UTC().Format(RFC3339TimeFormat)
		aux.LastModified = &formattedTime
	}

@@ -183,7 +205,7 @@ func (u Upload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
		Alias: (*Alias)(&u),
	}

	aux.Initiated = u.Initiated.Format(RFC3339TimeFormat)
	aux.Initiated = u.Initiated.UTC().Format(RFC3339TimeFormat)

	return e.EncodeElement(aux, start)
}
@@ -261,6 +283,20 @@ type ListAllMyBucketsEntry struct {
	CreationDate time.Time
}

func (r ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type Alias ListAllMyBucketsEntry
	aux := &struct {
		CreationDate string `xml:"CreationDate"`
		*Alias
	}{
		Alias: (*Alias)(&r),
	}

	aux.CreationDate = r.CreationDate.UTC().Format(RFC3339TimeFormat)

	return e.EncodeElement(aux, start)
}

type ListAllMyBucketsList struct {
	Bucket []ListAllMyBucketsEntry
}
@@ -271,9 +307,24 @@ type CanonicalUser struct {
}

type CopyObjectResult struct {
	XMLName      xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
	LastModified time.Time
	ETag         string
	XMLName             xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
	LastModified        time.Time
	ETag                string
	CopySourceVersionId string `xml:"-"`
}

func (r CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type Alias CopyObjectResult
	aux := &struct {
		LastModified string `xml:"LastModified"`
		*Alias
	}{
		Alias: (*Alias)(&r),
	}

	aux.LastModified = r.LastModified.UTC().Format(RFC3339TimeFormat)

	return e.EncodeElement(aux, start)
}

type AccessControlPolicy struct {
@@ -315,3 +366,21 @@ type InitiateMultipartUploadResult struct {
	Key      string
	UploadId string
}

type ListVersionsResult struct {
	XMLName             xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"`
	CommonPrefixes      []types.CommonPrefix
	DeleteMarkers       []types.DeleteMarkerEntry `xml:"DeleteMarker"`
	Delimiter           *string
	EncodingType        types.EncodingType
	IsTruncated         *bool
	KeyMarker           *string
	MaxKeys             *int32
	Name                *string
	NextKeyMarker       *string
	NextVersionIdMarker *string
	Prefix              *string
	RequestCharged      types.RequestCharged
	VersionIdMarker     *string
	Versions            []types.ObjectVersion `xml:"Version"`
}
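ListVersionsResult is the shape WalkVersions ultimately feeds; marshaled, it produces the namespaced ListObjectVersions response body. A sketch with placeholder values, assuming encoding/xml plus the aws and types imports:

res := s3response.ListVersionsResult{
	Name:        aws.String("mybucket"),
	IsTruncated: aws.Bool(false),
	Versions: []types.ObjectVersion{{
		Key:       aws.String("myobject"),
		VersionId: aws.String("null"),
		IsLatest:  aws.Bool(true),
	}},
}
out, err := xml.MarshalIndent(res, "", "  ")
if err != nil {
	log.Fatal(err)
}
fmt.Println(string(out)) // <ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">...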
@@ -26,3 +26,4 @@ PASSWORD_ONE=HIJKLMN
USERNAME_TWO=HIJKLMN
PASSWORD_TWO=OPQRSTU
TEST_FILE_FOLDER=$PWD/versity-gwtest-files
REMOVE_TEST_FILE_FOLDER=true

@@ -24,4 +24,5 @@ PASSWORD_ONE=HIJKLMN
USERNAME_TWO=HIJKLMN
PASSWORD_TWO=OPQRSTU
TEST_FILE_FOLDER=$PWD/versity-gwtest-files
RECREATE_BUCKETS=true
RECREATE_BUCKETS=true
REMOVE_TEST_FILE_FOLDER=true
73
tests/Dockerfile_direct
Normal file
@@ -0,0 +1,73 @@
FROM ubuntu:latest

ARG DEBIAN_FRONTEND=noninteractive
ARG SECRETS_FILE=tests/.secrets.direct
ARG CONFIG_FILE=tests/.env.direct
ARG AWS_CLI=awscli-exe-linux-aarch64.zip
ARG MC_FOLDER=linux-arm64

ENV TZ=Etc/UTC
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    git \
    wget \
    curl \
    unzip \
    tzdata \
    s3cmd \
    jq \
    bc \
    libxml2-utils \
    ca-certificates && \
    update-ca-certificates && \
    rm -rf /var/lib/apt/lists/*

# Set working directory
WORKDIR /tmp

# Install AWS cli
RUN curl "https://awscli.amazonaws.com/${AWS_CLI}" -o "awscliv2.zip" && unzip awscliv2.zip && ./aws/install

# Install mc
RUN curl https://dl.min.io/client/mc/release/${MC_FOLDER}/mc \
    --create-dirs \
    -o /usr/local/minio-binaries/mc && \
    chmod -R 755 /usr/local/minio-binaries
ENV PATH=/usr/local/minio-binaries:${PATH}

# Create tester user
RUN groupadd -r tester && useradd -r -g tester tester
RUN mkdir /home/tester && chown tester:tester /home/tester
ENV HOME=/home/tester

# install bats
RUN git clone https://github.com/bats-core/bats-core.git && \
    cd bats-core && \
    ./install.sh /home/tester

USER tester
RUN mkdir -p /home/tester/tests
COPY --chown=tester:tester . /home/tester/tests

# add bats support libraries
RUN git clone https://github.com/bats-core/bats-support.git && rm -rf /home/tester/tests/bats-support && mv bats-support /home/tester/tests
RUN git clone https://github.com/ztombol/bats-assert.git && rm -rf /home/tester/tests/bats-assert && mv bats-assert /home/tester/tests

WORKDIR /home/tester

RUN . $SECRETS_FILE && \
    export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_REGION AWS_PROFILE && \
    aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile $AWS_PROFILE && \
    aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile $AWS_PROFILE && \
    aws configure set aws_region $AWS_REGION --profile $AWS_PROFILE

RUN mkdir /tmp/gw

RUN openssl genpkey -algorithm RSA -out versitygw-docker.pem -pkeyopt rsa_keygen_bits:2048 && \
    openssl req -new -x509 -key versitygw-docker.pem -out cert-docker.pem -days 365 \
    -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"

ENV WORKSPACE=.
ENV VERSITYGW_TEST_ENV=$CONFIG_FILE

CMD ["tests/run_all.sh"]

@@ -3,7 +3,7 @@ FROM ubuntu:latest

ARG DEBIAN_FRONTEND=noninteractive
ARG SECRETS_FILE=tests/.secrets
ARG CONFIG_FILE=tests/.env.docker
ARG GO_LIBRARY=go1.21.7.linux-arm64.tar.gz
ARG GO_LIBRARY=go1.23.1.linux-arm64.tar.gz
ARG AWS_CLI=awscli-exe-linux-aarch64.zip
ARG MC_FOLDER=linux-arm64

@@ -19,6 +19,7 @@ RUN apt-get update && \
    s3cmd \
    jq \
    bc \
    libxml2-utils \
    ca-certificates && \
    update-ca-certificates && \
    rm -rf /var/lib/apt/lists/*
@@ -34,7 +35,7 @@ RUN curl https://dl.min.io/client/mc/release/${MC_FOLDER}/mc \
    --create-dirs \
    -o /usr/local/minio-binaries/mc && \
    chmod -R 755 /usr/local/minio-binaries
ENV PATH="/usr/local/minio-binaries":${PATH}
ENV PATH=/usr/local/minio-binaries:${PATH}

# Download Go (adjust the version and platform as needed)
RUN wget https://golang.org/dl/${GO_LIBRARY}

@@ -21,7 +21,7 @@ delete_bucket_policy() {
    return 1
  fi
  local delete_result=0
  if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
  if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3api delete-bucket-policy --bucket "$2" 2>&1) || delete_result=$?
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate delpolicy "s3://$2" 2>&1) || delete_result=$?

@@ -15,6 +15,11 @@
# under the License.

get_bucket_ownership_controls() {
  if [[ -n "$SKIP_BUCKET_OWNERSHIP_CONTROLS" ]]; then
    log 5 "Skipping get bucket ownership controls"
    return 0
  fi

  record_command "get-bucket-ownership-controls" "client:s3api"
  if [[ $# -ne 1 ]]; then
    log 2 "'get bucket ownership controls' command requires bucket name"
@@ -32,6 +37,11 @@ get_bucket_ownership_controls() {
}

get_object_ownership_rule() {
  if [[ -n "$SKIP_BUCKET_OWNERSHIP_CONTROLS" ]]; then
    log 5 "Skipping get object ownership rule check"
    return 0
  fi

  if [[ $# -ne 1 ]]; then
    log 2 "'get object ownership rule' command requires bucket name"
    return 1

@@ -15,13 +15,13 @@
# under the License.

get_object() {
  log 6 "get_object"
  record_command "get-object" "client:$1"
  if [ $# -ne 4 ]; then
    log 2 "get object command requires command type, bucket, key, destination"
    return 1
  fi
  local exit_code=0
  local error
  if [[ $1 == 's3' ]]; then
    get_object_error=$(aws --no-verify-ssl s3 mv "s3://$2/$3" "$4" 2>&1) || exit_code=$?
  elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
@@ -37,7 +37,6 @@ get_object() {
  log 5 "get object exit code: $exit_code"
  if [ $exit_code -ne 0 ]; then
    log 2 "error getting object: $get_object_error"
    export get_object_error
    return 1
  fi
  return 0
@@ -49,28 +48,35 @@ get_object_with_range() {
    log 2 "'get object with range' requires bucket, key, range, outfile"
    return 1
  fi
  error=$(aws --no-verify-ssl s3api get-object --bucket "$1" --key "$2" --range "$3" "$4" 2>&1) || local exit_code=$?
  get_object_error=$(aws --no-verify-ssl s3api get-object --bucket "$1" --key "$2" --range "$3" "$4" 2>&1) || local exit_code=$?
  if [[ $exit_code -ne 0 ]]; then
    log 2 "error getting object with range: $error"
    log 2 "error getting object with range: $get_object_error"
    return 1
  fi
  return 0
}

get_object_with_user() {
  log 6 "get_object_with_user"
  record_command "get-object" "client:$1"
  if [ $# -ne 6 ]; then
    log 2 "'get object with user' command requires command type, bucket, key, save location, aws ID, aws secret key"
    return 1
  fi
  local exit_code=0
  if [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
  if [[ $1 == 's3' ]] || [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
    get_object_error=$(AWS_ACCESS_KEY_ID="$5" AWS_SECRET_ACCESS_KEY="$6" aws --no-verify-ssl s3api get-object --bucket "$2" --key "$3" "$4" 2>&1) || exit_code=$?
  elif [[ $1 == "s3cmd" ]]; then
    log 5 "s3cmd filename: $3"
    get_object_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate --access_key="$5" --secret_key="$6" get "s3://$2/$3" "$4" 2>&1) || exit_code=$?
  elif [[ $1 == "mc" ]]; then
    log 5 "save location: $4"
    get_object_error=$(mc --insecure get "$MC_ALIAS/$2/$3" "$4" 2>&1) || exit_code=$?
  else
    log 2 "'get object with user' command not implemented for '$1'"
    log 2 "'get_object_with_user' not implemented for client '$1'"
    return 1
  fi
  log 5 "put object exit code: $exit_code"
  log 5 "get object exit code: $exit_code"
  if [ $exit_code -ne 0 ]; then
    log 2 "error getting object: $get_object_error"
    return 1

@@ -24,7 +24,10 @@ source ./tests/report.sh
head_bucket() {
  log 6 "head_bucket"
  record_command "head-bucket" "client:$1"
  assert [ $# -eq 2 ]
  if [ $# -ne 2 ]; then
    log 2 "'head_bucket' command requires client, bucket name"
    return 1
  fi
  local exit_code=0
  if [[ $1 == "aws" ]] || [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
    bucket_info=$(aws --no-verify-ssl s3api head-bucket --bucket "$2" 2>&1) || exit_code=$?

@@ -31,6 +31,8 @@ list_buckets() {
    buckets=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3:// 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    buckets=$(mc --insecure ls "$MC_ALIAS" 2>&1) || exit_code=$?
  elif [[ $1 == 'rest' ]]; then
    list_buckets_rest || exit_code=$?
  else
    echo "list buckets command not implemented for '$1'"
    return 1
@@ -40,7 +42,7 @@ list_buckets() {
    return 1
  fi

  if [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
  if [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]] || [[ $1 == 'rest' ]]; then
    return 0
  fi

@@ -112,4 +114,33 @@ list_buckets_s3api() {
  IFS=$'\n' read -rd '' -a bucket_array <<<"$names"

  return 0
}
}

list_buckets_rest() {
  generate_hash_for_payload ""

  current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
  # shellcheck disable=SC2154
  canonical_request="GET
/

host:${AWS_ENDPOINT_URL#*//}
x-amz-content-sha256:$payload_hash
x-amz-date:$current_date_time

host;x-amz-content-sha256;x-amz-date
$payload_hash"

  if ! generate_sts_string "$current_date_time" "$canonical_request"; then
    log 2 "error generating sts string"
    return 1
  fi

  get_signature
  # shellcheck disable=SC2034,SC2154
  reply=$(curl -ks "$AWS_ENDPOINT_URL" \
    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
    -H "x-amz-content-sha256: $payload_hash" \
    -H "x-amz-date: $current_date_time" 2>&1)
  parse_bucket_list
}
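The REST helper above leans on generate_sts_string and get_signature (not shown in this diff) to turn the canonical request into a SigV4 signature. The signing chain those helpers must implement is fixed by the AWS SigV4 spec; a standalone Go sketch for reference:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data)
	return mac.Sum(nil)
}

// sign derives the SigV4 signature. The string to sign is
// "AWS4-HMAC-SHA256\n<x-amz-date>\n<ymd>/<region>/s3/aws4_request\n<hex sha256 of canonical request>",
// which is what generate_sts_string assembles in the shell scripts.
func sign(secret, ymd, region, stringToSign string) string {
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(ymd))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte("s3"))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))
	return hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign)))
}

func main() {
	fmt.Println(sign("secretKey", "20240101", "us-east-1",
		"AWS4-HMAC-SHA256\n20240101T000000Z\n20240101/us-east-1/s3/aws4_request\nabc123"))
}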

@@ -1,5 +1,7 @@
#!/usr/bin/env bash

source ./tests/util_list_objects.sh

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
@@ -35,6 +37,9 @@ list_objects() {
    output=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3://"$2" 2>&1) || result=$?
  elif [[ $1 == 'mc' ]]; then
    output=$(mc --insecure ls "$MC_ALIAS"/"$2" 2>&1) || result=$?
  elif [[ $1 == 'rest' ]]; then
    list_objects_rest "$2" || result=$?
    return $result
  else
    fail "invalid command type $1"
    return 1
@@ -126,3 +131,41 @@ list_objects_with_prefix() {
  export objects
  return 0
}

list_objects_rest() {
  if [ $# -ne 1 ]; then
    log 2 "'list_objects_rest' requires bucket name"
    return 1
  fi

  generate_hash_for_payload ""

  current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
  aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
  header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
  # shellcheck disable=SC2154
  canonical_request="GET
/$1

host:$aws_endpoint_url_address
x-amz-content-sha256:$payload_hash
x-amz-date:$current_date_time

host;x-amz-content-sha256;x-amz-date
$payload_hash"

  log 5 "canonical request: $canonical_request"

  if ! generate_sts_string "$current_date_time" "$canonical_request"; then
    log 2 "error generating sts string"
    return 1
  fi
  get_signature
  # shellcheck disable=SC2154
  reply=$(curl -ks "$header://$aws_endpoint_url_address/$1" \
    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
    -H "x-amz-content-sha256: $payload_hash" \
    -H "x-amz-date: $current_date_time" 2>&1)
  log 5 "reply: $reply"
  parse_objects_list_rest
}

@@ -57,7 +57,7 @@ reset_bucket_acl() {
    return 1
  fi
  # shellcheck disable=SC2154
  cat <<EOF > "$test_file_folder/$acl_file"
  cat <<EOF > "$TEST_FILE_FOLDER/$acl_file"
{
  "Grants": [
    {
@@ -73,7 +73,7 @@ reset_bucket_acl() {
  }
}
EOF
  if ! put_bucket_acl_s3api "$BUCKET_ONE_NAME" "$test_file_folder/$acl_file"; then
  if ! put_bucket_acl_s3api "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$acl_file"; then
    log 2 "error putting bucket acl (s3api)"
    return 1
  fi

@@ -16,6 +16,11 @@

# fail if unable to put bucket ownership controls
put_bucket_ownership_controls() {
  if [[ -n "$SKIP_BUCKET_OWNERSHIP_CONTROLS" ]]; then
    log 5 "Skipping put bucket ownership controls"
    return 0
  fi

  log 6 "put_bucket_ownership_controls"
  record_command "put-bucket-ownership-controls" "client:s3api"
  assert [ $# -eq 2 ]

@@ -1,5 +1,3 @@
version: '3'

services:
  no_certs:
    build:
@@ -30,7 +28,8 @@ services:
  direct:
    build:
      context: .
      dockerfile: Dockerfile_test_bats
      args:
        - CONFIG_FILE=tests/.env.direct
        - SECRETS_FILE=tests/.secrets.direct
      dockerfile: Dockerfile_direct
    volumes:
      - ./.env.direct:/home/tester/tests/.env.direct
      - ./.secrets.direct:/home/tester/tests/.secrets.direct

@@ -1,4 +1,3 @@
version: "3"
services:
  posix:
    build:
158
tests/env.sh
@@ -1,4 +1,4 @@
#!/usr/bin/env bats
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
@@ -14,24 +14,32 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load
source ./tests/versity.sh

base_setup() {
  check_env_vars
  if [ "$RUN_VERSITYGW" == "true" ]; then
    run_versity_app
  fi
}

check_env_vars() {
  check_universal_vars
  #if ! check_universal_vars; then
  #  log 2 "error checking universal params"
  #  return 1
  #fi
  if [[ $RUN_VERSITYGW == "true" ]]; then
    check_versity_vars
  fi
  if [[ $RUN_S3CMD == "true" ]]; then
    assert [ -n "$S3CMD_CONFIG" ]
    if [ -z "$S3CMD_CONFIG" ]; then
      log 1 "S3CMD_CONFIG param missing"
      exit 1
    fi
    export S3CMD_CONFIG
  fi
  if [[ $RUN_MC == "true" ]]; then
    assert [ -n "$MC_ALIAS" ]
    if [ -z "$MC_ALIAS" ]; then
      log 1 "MC_ALIAS param missing"
      exit 1
    fi
    export MC_ALIAS
  fi
  return 0
@@ -66,37 +74,98 @@ check_universal_vars() {
    export LOG_LEVEL_INT=$LOG_LEVEL
  fi

  assert [ -n "$AWS_ACCESS_KEY_ID" ]
  assert [ -n "$AWS_SECRET_ACCESS_KEY" ]
  assert [ -n "$AWS_REGION" ]
  assert [ -n "$AWS_PROFILE" ]
  if [ -z "$AWS_ACCESS_KEY_ID" ]; then
    log 1 "AWS_ACCESS_KEY_ID missing"
    exit 1
  fi
  if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
    log 1 "AWS_SECRET_ACCESS_KEY missing"
    exit 1
  fi
  if [ -z "$AWS_REGION" ]; then
    log 1 "AWS_REGION missing"
    exit 1
  fi
  if [ -z "$AWS_PROFILE" ]; then
    log 1 "AWS_PROFILE missing"
    exit 1
  fi
  if [ "$DIRECT" != "true" ]; then
    assert [ -n "$AWS_ENDPOINT_URL" ]
    if [ -z "$AWS_ENDPOINT_URL" ]; then
      log 1 "AWS_ENDPOINT_URL missing"
      exit 1
    fi
  fi
  if [ "$RUN_VERSITYGW" != "true" ] && [ "$RUN_VERSITYGW" != "false" ]; then
    fail "RUN_VERSITYGW must be 'true' or 'false'"
  fi

  assert [ -n "$BUCKET_ONE_NAME" ]
  assert [ -n "$BUCKET_TWO_NAME" ]
  assert [ -n "$RECREATE_BUCKETS" ]
  if [ "$RECREATE_BUCKETS" != "true" ] && [ "$RECREATE_BUCKETS" != "false" ]; then
    fail "RECREATE_BUCKETS must be 'true' or 'false'"
  fi
  if [ -z "$BUCKET_ONE_NAME" ]; then
    log 1 "BUCKET_ONE_NAME missing"
    exit 1
  fi
  if [ -z "$BUCKET_TWO_NAME" ]; then
    log 1 "BUCKET_TWO_NAME missing"
    exit 1
  fi
  if [ -z "$RECREATE_BUCKETS" ]; then
    log 1 "RECREATE_BUCKETS missing"
    exit 1
  fi
  if [ "$RECREATE_BUCKETS" != "true" ] && [ "$RECREATE_BUCKETS" != "false" ]; then
    log 1 "RECREATE_BUCKETS must be 'true' or 'false'"
    exit 1
  fi
  if [ -z "$TEST_FILE_FOLDER" ]; then
    log 1 "TEST_FILE_FOLDER missing"
    exit 1
  fi
  if [[ -n "$VERSITY_LOG_FILE" ]]; then
    export VERSITY_LOG_FILE
  fi
  if [[ -n "$DIRECT" ]]; then
    export DIRECT
  fi
  if [[ -n "$DIRECT_DISPLAY_NAME" ]]; then
    export DIRECT_DISPLAY_NAME
  fi
  if [[ -n "$COVERAGE_DB" ]]; then
    export COVERAGE_DB
  fi
  assert [ -n "$TEST_FILE_FOLDER" ]
  # exporting these since they're needed for subshells
  export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_REGION AWS_PROFILE AWS_ENDPOINT_URL
}

check_versity_vars() {
  assert [ -n "$LOCAL_FOLDER" ]
  assert [ -n "$VERSITY_EXE" ]
  assert [ -n "$BACKEND" ]
  if [ -z "$LOCAL_FOLDER" ]; then
    log 1 "LOCAL_FOLDER missing"
    exit 1
  fi
  if [ -z "$VERSITY_EXE" ]; then
    log 1 "VERSITY_EXE missing"
    exit 1
  fi
  if [ -z "$BACKEND" ]; then
    log 1 "BACKEND missing"
    exit 1
  fi
  export LOCAL_FOLDER VERSITY_EXE BACKEND

  if [ "$BACKEND" == 's3' ]; then
    assert [ -n "$AWS_ACCESS_KEY_ID_TWO" ]
    assert [ -n "$AWS_SECRET_ACCESS_KEY_TWO" ]
    if [ -z "$AWS_ACCESS_KEY_ID_TWO" ]; then
      log 1 "AWS_ACCESS_KEY_ID_TWO missing"
      exit 1
    fi
    if [ -z "$AWS_SECRET_ACCESS_KEY_TWO" ]; then
      log 1 "AWS_SECRET_ACCESS_KEY_TWO missing"
      exit 1
    fi
    export AWS_ACCESS_KEY_ID_TWO AWS_SECRET_ACCESS_KEY_TWO
  fi

@@ -110,31 +179,52 @@ check_versity_vars() {
}

check_user_vars() {
  assert [ -n "$USERNAME_ONE" ]
  assert [ -n "$PASSWORD_ONE" ]
  assert [ -n "$USERNAME_TWO" ]
  assert [ -n "$PASSWORD_TWO" ]
  if [ -z "$USERNAME_ONE" ]; then
    log 1 "USERNAME_ONE missing"
    exit 1
  fi
  if [ -z "$PASSWORD_ONE" ]; then
    log 1 "PASSWORD_ONE missing"
    exit 1
  fi
  if [ -z "$USERNAME_TWO" ]; then
    log 1 "USERNAME_TWO missing"
    exit 1
  fi
  if [ -z "$PASSWORD_TWO" ]; then
    log 1 "PASSWORD_TWO missing"
    exit 1
  fi

  if [[ -z "$IAM_TYPE" ]]; then
    export IAM_TYPE="folder"
  fi
  if [[ "$IAM_TYPE" == "folder" ]]; then
    assert [ -n "$USERS_FOLDER" ]
    if [ -z "$USERS_FOLDER" ]; then
      log 1 "USERS_FOLDER missing"
      exit 1
    fi
    if [ ! -d "$USERS_FOLDER" ]; then
      mkdir_error=$(mkdir "$USERS_FOLDER" 2>&1)
      assert_success "error creating users folder: $mkdir_error"
      if ! mkdir_error=$(mkdir "$USERS_FOLDER" 2>&1); then
        log 1 "error creating users folder: $mkdir_error"
        exit 1
      fi
    fi
    IAM_PARAMS="--iam-dir=$USERS_FOLDER"
    export IAM_PARAMS
    return 0
  fi
  if [[ $IAM_TYPE == "s3" ]]; then
    assert [ -n "$USERS_BUCKET" ]
    if [ -z "$USERS_BUCKET" ]; then
      log 1 "USERS_BUCKET missing"
      exit 1
    fi
    IAM_PARAMS="--s3-iam-access $AWS_ACCESS_KEY_ID --s3-iam-secret $AWS_SECRET_ACCESS_KEY \
      --s3-iam-region us-east-1 --s3-iam-bucket $USERS_BUCKET --s3-iam-endpoint $AWS_ENDPOINT_URL \
      --s3-iam-noverify"
    export IAM_PARAMS
    return 0
  fi
  fail "unrecognized IAM_TYPE value: $IAM_TYPE"
  log 1 "unrecognized IAM_TYPE value: $IAM_TYPE"
  exit 1
}

@@ -132,7 +132,6 @@ func TestPutObject(s *S3Conf) {
	PutObject_special_chars(s)
	PutObject_invalid_long_tags(s)
	PutObject_missing_object_lock_retention_config(s)
	PutObject_name_too_long(s)
	PutObject_with_object_lock(s)
	PutObject_success(s)
	PutObject_invalid_credentials(s)
@@ -144,7 +143,7 @@ func TestHeadObject(s *S3Conf) {
	HeadObject_non_existing_mp(s)
	HeadObject_mp_success(s)
	HeadObject_non_existing_dir_object(s)
	HeadObject_name_too_long(s)
	HeadObject_with_contenttype(s)
	HeadObject_success(s)
}

@@ -152,8 +151,6 @@ func TestGetObjectAttributes(s *S3Conf) {
	GetObjectAttributes_non_existing_bucket(s)
	GetObjectAttributes_non_existing_object(s)
	GetObjectAttributes_existing_object(s)
	GetObjectAttributes_multipart_upload(s)
	GetObjectAttributes_multipart_upload_truncated(s)
}

func TestGetObject(s *S3Conf) {
@@ -161,6 +158,7 @@ func TestGetObject(s *S3Conf) {
	GetObject_invalid_ranges(s)
	GetObject_with_meta(s)
	GetObject_success(s)
	GetObject_directory_success(s)
	GetObject_by_range_success(s)
	GetObject_by_range_resp_status(s)
	GetObject_non_existing_dir_object(s)
@@ -190,7 +188,6 @@ func TestListObjectsV2(s *S3Conf) {

func TestDeleteObject(s *S3Conf) {
	DeleteObject_non_existing_object(s)
	DeleteObject_name_too_long(s)
	DeleteObject_non_existing_dir_object(s)
	DeleteObject_success(s)
	DeleteObject_success_status_code(s)
@@ -471,6 +468,9 @@ func TestFullFlow(s *S3Conf) {
	TestGetObjectLegalHold(s)
	TestWORMProtection(s)
	TestAccessControl(s)
	if s.versioningEnabled {
		TestVersioning(s)
	}
}

func TestPosix(s *S3Conf) {
@@ -478,6 +478,9 @@ func TestPosix(s *S3Conf) {
	PutObject_overwrite_file_obj(s)
	PutObject_dir_obj_with_data(s)
	CreateMultipartUpload_dir_obj(s)
	PutObject_name_too_long(s)
	HeadObject_name_too_long(s)
	DeleteObject_name_too_long(s)
}

func TestIAM(s *S3Conf) {
@@ -502,6 +505,45 @@ func TestAccessControl(s *S3Conf) {
	AccessControl_copy_object_with_starting_slash_for_user(s)
}

func TestVersioning(s *S3Conf) {
	// PutBucketVersioning action
	PutBucketVersioning_non_existing_bucket(s)
	PutBucketVersioning_invalid_status(s)
	PutBucketVersioning_success(s)
	// GetBucketVersioning action
	GetBucketVersioning_non_existing_bucket(s)
	GetBucketVersioning_success(s)
	Versioning_PutObject_success(s)
	// CopyObject action
	Versioning_CopyObject_success(s)
	Versioning_CopyObject_non_existing_version_id(s)
	Versioning_CopyObject_from_an_object_version(s)
	// HeadObject action
	Versioning_HeadObject_invalid_versionId(s)
	Versioning_HeadObject_success(s)
	Versioning_HeadObject_delete_marker(s)
	// GetObject action
	Versioning_GetObject_invalid_versionId(s)
	Versioning_GetObject_success(s)
	Versioning_GetObject_delete_marker(s)
	// DeleteObject(s) actions
	Versioning_DeleteObject_delete_object_version(s)
	Versioning_DeleteObject_delete_a_delete_marker(s)
	Versioning_DeleteObjects_success(s)
	Versioning_DeleteObjects_delete_deleteMarkers(s)
	// ListObjectVersions
	ListObjectVersions_non_existing_bucket(s)
	ListObjectVersions_list_single_object_versions(s)
	ListObjectVersions_list_multiple_object_versions(s)
	ListObjectVersions_multiple_object_versions_truncated(s)
	ListObjectVersions_with_delete_markers(s)
	// Multipart upload
	Versioning_Multipart_Upload_success(s)
	Versioning_Multipart_Upload_overwrite_an_object(s)
	Versioning_UploadPartCopy_non_existing_versionId(s)
	Versioning_UploadPartCopy_from_an_object_version(s)
}

type IntTests map[string]func(s *S3Conf) error

func GetIntTests() IntTests {
@@ -596,16 +638,16 @@ func GetIntTests() IntTests {
	"HeadObject_mp_success":                           HeadObject_mp_success,
	"HeadObject_non_existing_dir_object":              HeadObject_non_existing_dir_object,
	"HeadObject_name_too_long":                        HeadObject_name_too_long,
	"HeadObject_with_contenttype":                     HeadObject_with_contenttype,
	"HeadObject_success":                              HeadObject_success,
	"GetObjectAttributes_non_existing_bucket":         GetObjectAttributes_non_existing_bucket,
	"GetObjectAttributes_non_existing_object":         GetObjectAttributes_non_existing_object,
	"GetObjectAttributes_existing_object":             GetObjectAttributes_existing_object,
	"GetObjectAttributes_multipart_upload":            GetObjectAttributes_multipart_upload,
	"GetObjectAttributes_multipart_upload_truncated":  GetObjectAttributes_multipart_upload_truncated,
	"GetObject_non_existing_key":                      GetObject_non_existing_key,
	"GetObject_invalid_ranges":                        GetObject_invalid_ranges,
	"GetObject_with_meta":                             GetObject_with_meta,
	"GetObject_success":                               GetObject_success,
	"GetObject_directory_success":                     GetObject_directory_success,
	"GetObject_by_range_success":                      GetObject_by_range_success,
	"GetObject_by_range_resp_status":                  GetObject_by_range_resp_status,
	"GetObject_non_existing_dir_object":               GetObject_non_existing_dir_object,
@@ -810,5 +852,33 @@ func GetIntTests() IntTests {
	"AccessControl_root_PutBucketAcl":                         AccessControl_root_PutBucketAcl,
	"AccessControl_user_PutBucketAcl_with_policy_access":      AccessControl_user_PutBucketAcl_with_policy_access,
	"AccessControl_copy_object_with_starting_slash_for_user":  AccessControl_copy_object_with_starting_slash_for_user,
	"PutBucketVersioning_non_existing_bucket":                 PutBucketVersioning_non_existing_bucket,
	"PutBucketVersioning_invalid_status":                      PutBucketVersioning_invalid_status,
	"PutBucketVersioning_success":                             PutBucketVersioning_success,
	"GetBucketVersioning_non_existing_bucket":                 GetBucketVersioning_non_existing_bucket,
	"GetBucketVersioning_success":                             GetBucketVersioning_success,
	"Versioning_PutObject_success":                            Versioning_PutObject_success,
	"Versioning_CopyObject_success":                           Versioning_CopyObject_success,
	"Versioning_CopyObject_non_existing_version_id":           Versioning_CopyObject_non_existing_version_id,
	"Versioning_CopyObject_from_an_object_version":            Versioning_CopyObject_from_an_object_version,
	"Versioning_HeadObject_invalid_versionId":                 Versioning_HeadObject_invalid_versionId,
	"Versioning_HeadObject_success":                           Versioning_HeadObject_success,
	"Versioning_HeadObject_delete_marker":                     Versioning_HeadObject_delete_marker,
	"Versioning_GetObject_invalid_versionId":                  Versioning_GetObject_invalid_versionId,
	"Versioning_GetObject_success":                            Versioning_GetObject_success,
	"Versioning_GetObject_delete_marker":                      Versioning_GetObject_delete_marker,
	"Versioning_DeleteObject_delete_object_version":           Versioning_DeleteObject_delete_object_version,
	"Versioning_DeleteObject_delete_a_delete_marker":          Versioning_DeleteObject_delete_a_delete_marker,
	"Versioning_DeleteObjects_success":                        Versioning_DeleteObjects_success,
	"Versioning_DeleteObjects_delete_deleteMarkers":           Versioning_DeleteObjects_delete_deleteMarkers,
	"ListObjectVersions_non_existing_bucket":                  ListObjectVersions_non_existing_bucket,
	"ListObjectVersions_list_single_object_versions":          ListObjectVersions_list_single_object_versions,
	"ListObjectVersions_list_multiple_object_versions":        ListObjectVersions_list_multiple_object_versions,
	"ListObjectVersions_multiple_object_versions_truncated":   ListObjectVersions_multiple_object_versions_truncated,
	"ListObjectVersions_with_delete_markers":                  ListObjectVersions_with_delete_markers,
	"Versioning_Multipart_Upload_success":                     Versioning_Multipart_Upload_success,
	"Versioning_Multipart_Upload_overwrite_an_object":         Versioning_Multipart_Upload_overwrite_an_object,
	"Versioning_UploadPartCopy_non_existing_versionId":        Versioning_UploadPartCopy_non_existing_versionId,
	"Versioning_UploadPartCopy_from_an_object_version":        Versioning_UploadPartCopy_from_an_object_version,
}
}
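The exported IntTests map makes every check addressable by name, which is what lets a caller run a single test. A self-contained sketch of that lookup shape (the stub types here are illustrative; the real map is built by GetIntTests above):

package main

import "fmt"

type S3Conf struct{}

type IntTests map[string]func(s *S3Conf) error

func main() {
	tests := IntTests{
		"GetObject_success": func(s *S3Conf) error { return nil },
	}
	name := "GetObject_success"
	fn, ok := tests[name]
	if !ok {
		fmt.Println("unknown test:", name)
		return
	}
	fmt.Println(name, "->", fn(&S3Conf{})) // nil error means the test passed
}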

@@ -31,15 +31,16 @@ import (
)

type S3Conf struct {
	awsID           string
	awsSecret       string
	awsRegion       string
	endpoint        string
	checksumDisable bool
	pathStyle       bool
	PartSize        int64
	Concurrency     int
	debug           bool
	awsID             string
	awsSecret         string
	awsRegion         string
	endpoint          string
	checksumDisable   bool
	pathStyle         bool
	PartSize          int64
	Concurrency       int
	debug             bool
	versioningEnabled bool
}

func NewS3Conf(opts ...Option) *S3Conf {
@@ -80,6 +81,9 @@ func WithConcurrency(c int) Option {
func WithDebug() Option {
	return func(s *S3Conf) { s.debug = true }
}
func WithVersioningEnabled() Option {
	return func(s *S3Conf) { s.versioningEnabled = true }
}

func (c *S3Conf) getCreds() credentials.StaticCredentialsProvider {
	// TODO support token/IAM
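WithVersioningEnabled follows the same functional-options pattern as the existing WithDebug and WithConcurrency. A self-contained sketch of that pattern and a hypothetical call site (the defaults and the trimmed-down struct here are illustrative):

package main

import "fmt"

type S3Conf struct {
	Concurrency       int
	debug             bool
	versioningEnabled bool
}

type Option func(*S3Conf)

func WithConcurrency(c int) Option  { return func(s *S3Conf) { s.Concurrency = c } }
func WithDebug() Option             { return func(s *S3Conf) { s.debug = true } }
func WithVersioningEnabled() Option { return func(s *S3Conf) { s.versioningEnabled = true } }

func NewS3Conf(opts ...Option) *S3Conf {
	s := &S3Conf{Concurrency: 1} // defaults first, options override
	for _, opt := range opts {
		opt(s)
	}
	return s
}

func main() {
	conf := NewS3Conf(WithConcurrency(4), WithDebug(), WithVersioningEnabled())
	fmt.Printf("%+v\n", *conf) // versioningEnabled is what gates TestVersioning in TestFullFlow
}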

File diff suppressed because it is too large
@@ -24,11 +24,13 @@ import (
	"errors"
	"fmt"
	"io"
	"math/big"
	rnd "math/rand"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"sort"
	"strings"
	"time"

@@ -69,7 +71,25 @@ func setup(s *S3Conf, bucket string, opts ...setupOpt) error {
		ObjectOwnership: cfg.Ownership,
	})
	cancel()
	return err
	if err != nil {
		return err
	}

	if cfg.VersioningEnabled {
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err := s3client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
			Bucket: &bucket,
			VersioningConfiguration: &types.VersioningConfiguration{
				Status: types.BucketVersioningStatusEnabled,
			},
		})
		cancel()
		if err != nil {
			return err
		}
	}

	return nil
}

func teardown(s *S3Conf, bucket string) error {
@@ -89,24 +109,31 @@ func teardown(s *S3Conf, bucket string) error {
		return nil
	}

	in := &s3.ListObjectsV2Input{Bucket: &bucket}
	in := &s3.ListObjectVersionsInput{Bucket: &bucket}
	for {
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		out, err := s3client.ListObjectsV2(ctx, in)
		out, err := s3client.ListObjectVersions(ctx, in)
		cancel()
		if err != nil {
			return fmt.Errorf("failed to list objects: %w", err)
		}

		for _, item := range out.Contents {
			err = deleteObject(&bucket, item.Key, nil)
		for _, item := range out.Versions {
			err = deleteObject(&bucket, item.Key, item.VersionId)
			if err != nil {
				return err
			}
		}
		for _, item := range out.DeleteMarkers {
			err = deleteObject(&bucket, item.Key, item.VersionId)
			if err != nil {
				return err
			}
		}

		if out.IsTruncated != nil && *out.IsTruncated {
			in.ContinuationToken = out.ContinuationToken
			in.KeyMarker = out.NextKeyMarker // advance to the next page; out.KeyMarker echoes the request and would loop forever
			in.VersionIdMarker = out.NextVersionIdMarker
		} else {
			break
		}
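A compact restatement of the loop teardown now runs, written as a standalone helper against aws-sdk-go-v2 (the per-call timeouts and the repo's deleteObject wrapper are trimmed for brevity; this is a sketch, not the suite's code):

package versionutil

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// drainBucket removes every object version and delete marker from a versioned
// bucket, paging with NextKeyMarker/NextVersionIdMarker until the listing is
// no longer truncated.
func drainBucket(ctx context.Context, client *s3.Client, bucket string) error {
	in := &s3.ListObjectVersionsInput{Bucket: &bucket}
	for {
		out, err := client.ListObjectVersions(ctx, in)
		if err != nil {
			return err
		}
		for _, v := range out.Versions {
			if _, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
				Bucket: &bucket, Key: v.Key, VersionId: v.VersionId,
			}); err != nil {
				return err
			}
		}
		for _, m := range out.DeleteMarkers {
			if _, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
				Bucket: &bucket, Key: m.Key, VersionId: m.VersionId,
			}); err != nil {
				return err
			}
		}
		if out.IsTruncated == nil || !*out.IsTruncated {
			return nil
		}
		in.KeyMarker = out.NextKeyMarker
		in.VersionIdMarker = out.NextVersionIdMarker
	}
}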

@@ -121,8 +148,9 @@ func teardown(s *S3Conf, bucket string) error {
}

type setupCfg struct {
	LockEnabled bool
	Ownership   types.ObjectOwnership
	LockEnabled       bool
	VersioningEnabled bool
	Ownership         types.ObjectOwnership
}

type setupOpt func(*setupCfg)
@@ -133,6 +161,9 @@ func withLock() setupOpt {
func withOwnership(o types.ObjectOwnership) setupOpt {
	return func(s *setupCfg) { s.Ownership = o }
}
func withVersioning() setupOpt {
	return func(s *setupCfg) { s.VersioningEnabled = true }
}

func actionHandler(s *S3Conf, testName string, handler func(s3client *s3.Client, bucket string) error, opts ...setupOpt) error {
	runF(testName)
@@ -282,33 +313,61 @@ func checkSdkApiErr(err error, code string) error {
	return err
}

func putObjects(client *s3.Client, objs []string, bucket string) error {
func putObjects(client *s3.Client, objs []string, bucket string) ([]types.Object, error) {
	var contents []types.Object
	var size int64
	for _, key := range objs {
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err := client.PutObject(ctx, &s3.PutObjectInput{
		res, err := client.PutObject(ctx, &s3.PutObjectInput{
			Key:    &key,
			Bucket: &bucket,
		})
		cancel()
		if err != nil {
			return err
			return nil, err
		}
		k := key
		etag := strings.Trim(*res.ETag, `"`)
		contents = append(contents, types.Object{
			Key:          &k,
			ETag:         &etag,
			StorageClass: types.ObjectStorageClassStandard,
			Size:         &size,
		})
	}
	return nil

	sort.SliceStable(contents, func(i, j int) bool {
		return *contents[i].Key < *contents[j].Key
	})

	return contents, nil
}

func putObjectWithData(lgth int64, input *s3.PutObjectInput, client *s3.Client) (csum [32]byte, data []byte, err error) {
	data = make([]byte, lgth)
type putObjectOutput struct {
	csum [32]byte
	data []byte
	res  *s3.PutObjectOutput
}

func putObjectWithData(lgth int64, input *s3.PutObjectInput, client *s3.Client) (*putObjectOutput, error) {
	data := make([]byte, lgth)
	rand.Read(data)
	csum = sha256.Sum256(data)
	csum := sha256.Sum256(data)
	r := bytes.NewReader(data)
	input.Body = r

	ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
	_, err = client.PutObject(ctx, input)
	res, err := client.PutObject(ctx, input)
	cancel()
	if err != nil {
		return nil, err
	}

	return
	return &putObjectOutput{
		csum: csum,
		data: data,
		res:  res,
	}, nil
}

func createMp(s3client *s3.Client, bucket, key string) (*s3.CreateMultipartUploadOutput, error) {
@@ -428,12 +487,15 @@ func getPtr(str string) *string {
	return &str
}

// mp1 needs to be the response from the server
// mp2 needs to be the expected values
// The keys from the server are always converted to lowercase
func areMapsSame(mp1, mp2 map[string]string) bool {
	if len(mp1) != len(mp2) {
		return false
	}
	for key, val := range mp1 {
		if mp2[key] != val {
	for key, val := range mp2 {
		if mp1[strings.ToLower(key)] != val {
			return false
		}
	}
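Why areMapsSame now lowercases on lookup: S3 user metadata travels as x-amz-meta-* HTTP headers, which are case-insensitive, so the server hands keys back lowercased while tests declare them in mixed case. A runnable illustration of the comparison direction:

package main

import (
	"fmt"
	"strings"
)

func main() {
	received := map[string]string{"mykey": "val"} // keys as the server returns them
	expected := map[string]string{"MyKey": "val"} // keys as the test declared them
	match := len(received) == len(expected)
	for k, v := range expected {
		if received[strings.ToLower(k)] != v {
			match = false
		}
	}
	fmt.Println(match) // true
}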

@@ -483,22 +545,6 @@ func compareObjects(list1, list2 []types.Object) bool {
	return true
}

// Creates a list of types.Object with the provided objects keys: objs []string
func createEmptyObjectsList(objs []string) (result []types.Object) {
	size := int64(0)
	for _, obj := range objs {
		o := obj
		result = append(result, types.Object{
			Key:          &o,
			Size:         &size,
			StorageClass: types.ObjectStorageClassStandard,
			ETag:         &emptyObjETag,
		})
	}

	return
}

func comparePrefixes(list1 []string, list2 []types.CommonPrefix) bool {
	if len(list1) != len(list2) {
		return false
@@ -519,21 +565,40 @@ func comparePrefixes(list1 []string, list2 []types.CommonPrefix) bool {
	return true
}

func compareDelObjects(list1 []string, list2 []types.DeletedObject) bool {
func compareDelObjects(list1, list2 []types.DeletedObject) bool {
	if len(list1) != len(list2) {
		return false
	}

	elementMap := make(map[string]bool)

	for _, elem := range list1 {
		elementMap[elem] = true
	}

	for _, elem := range list2 {
		if _, found := elementMap[*elem.Key]; !found {
	for i, obj := range list1 {
		if *obj.Key != *list2[i].Key {
			return false
		}

		if obj.VersionId != nil {
			if list2[i].VersionId == nil {
				return false
			}
			if *obj.VersionId != *list2[i].VersionId {
				return false
			}
		}
		if obj.DeleteMarkerVersionId != nil {
			if list2[i].DeleteMarkerVersionId == nil {
				return false
			}
			if *obj.DeleteMarkerVersionId != *list2[i].DeleteMarkerVersionId {
				return false
			}
		}
		if obj.DeleteMarker != nil {
			if list2[i].DeleteMarker == nil {
				return false
			}
			if *obj.DeleteMarker != *list2[i].DeleteMarker {
				return false
			}
		}
	}

	return true
@@ -771,3 +836,124 @@ func checkWORMProtection(client *s3.Client, bucket, object string) error {

	return nil
}

func createObjVersions(client *s3.Client, bucket, object string, count int) ([]types.ObjectVersion, error) {
	versions := []types.ObjectVersion{}
	for i := 0; i < count; i++ {
		rNumber, err := rand.Int(rand.Reader, big.NewInt(100000))
		if err != nil {
			return nil, err
		}
		dataLength := rNumber.Int64() // read only after the error check: rNumber is nil on error

		r, err := putObjectWithData(dataLength, &s3.PutObjectInput{
			Bucket: &bucket,
			Key:    &object,
		}, client)
		if err != nil {
			return nil, err
		}

		isLatest := i == count-1

		versions = append(versions, types.ObjectVersion{
			ETag:      r.res.ETag,
			IsLatest:  &isLatest,
			Key:       &object,
			Size:      &dataLength,
			VersionId: r.res.VersionId,
		})
	}

	versions = reverseSlice(versions)

	return versions, nil
}

// reverseSlice reverses a slice of any type in place
func reverseSlice[T any](s []T) []T {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
	return s
}
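createObjVersions appends versions oldest-first as it uploads, while ListObjectVersions returns them newest-first, hence the reverse before returning. A standalone demo of the generic helper (requires Go 1.18+; the slice is reversed in place and returned for chaining):

package main

import "fmt"

func reverseSlice[T any](s []T) []T {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
	return s
}

func main() {
	fmt.Println(reverseSlice([]string{"v1", "v2", "v3"})) // [v3 v2 v1]
}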

func compareVersions(v1, v2 []types.ObjectVersion) bool {
	if len(v1) != len(v2) {
		return false
	}

	for i, version := range v1 {
		if version.Key == nil || v2[i].Key == nil {
			return false
		}
		if *version.Key != *v2[i].Key {
			return false
		}

		if version.VersionId == nil || v2[i].VersionId == nil {
			return false
		}
		if *version.VersionId != *v2[i].VersionId {
			return false
		}

		if version.IsLatest == nil || v2[i].IsLatest == nil {
			return false
		}
		if *version.IsLatest != *v2[i].IsLatest {
			return false
		}

		if version.Size == nil || v2[i].Size == nil {
			return false
		}
		if *version.Size != *v2[i].Size {
			return false
		}

		if version.ETag == nil || v2[i].ETag == nil {
			return false
		}
		if *version.ETag != *v2[i].ETag {
			return false
		}
	}

	return true
}

func compareDelMarkers(d1, d2 []types.DeleteMarkerEntry) bool {
	if len(d1) != len(d2) {
		return false
	}

	for i, dEntry := range d1 {
		if dEntry.Key == nil || d2[i].Key == nil {
			return false
		}
		if *dEntry.Key != *d2[i].Key {
			return false
		}

		if dEntry.IsLatest == nil || d2[i].IsLatest == nil {
			return false
		}
		if *dEntry.IsLatest != *d2[i].IsLatest {
			return false
		}

		if dEntry.VersionId == nil || d2[i].VersionId == nil {
			return false
		}
		if *dEntry.VersionId != *d2[i].VersionId {
			return false
		}
	}

	return true
}

func getBoolPtr(b bool) *bool {
	return &b
}
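getBoolPtr exists because Go forbids taking the address of a literal, yet SDK fields like IsTruncated are *bool; aws.Bool in the SDK plays the same role. A tiny demonstration:

package main

import "fmt"

func getBoolPtr(b bool) *bool { return &b }

func main() {
	// p := &true would not compile; a small helper function is the idiom
	p := getBoolPtr(true)
	fmt.Println(*p) // true
}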

32
tests/run.sh
@@ -19,12 +19,14 @@ show_help() {
  echo "Usage: $0 [option...]"
  echo " -h, --help        Display this help message and exit"
  echo " -s, --static      Don't remove buckets between tests"
  echo " aws               Run tests with aws (s3api) cli"
  echo " s3api             Run tests with s3api cli"
  echo " s3api-non-policy  Run non-policy tests with s3api cli"
  echo " s3api-policy      Run policy tests with s3api cli"
  echo " s3                Run tests with s3 cli"
  echo " s3cmd             Run tests with s3cmd utility"
  echo " mc                Run tests with mc utility"
  echo " aws-user          Run user tests with aws cli"
  echo " rest              Run tests with rest cli"
  echo " s3api-user        Run user tests with aws cli"
}

handle_param() {
@@ -33,7 +35,7 @@ handle_param() {
      show_help
      exit 0
      ;;
    s3|s3api|aws|s3cmd|mc|aws-user)
    s3|s3api|s3cmd|mc|s3api-user|rest|s3api-policy|s3api-non-policy)
      set_command_type "$1"
      ;;
    *) # Handle unrecognized options or positional arguments
@@ -62,14 +64,26 @@ if [[ -z "$VERSITYGW_TEST_ENV" ]] && [[ $BYPASS_ENV_FILE != "true" ]]; then
  exit 1
fi

exit_code=0
case $command_type in
  s3api|aws)
    echo "Running aws tests ..."
  s3api)
    echo "Running all s3api tests ..."
    "$HOME"/bin/bats ./tests/test_s3api.sh || exit_code=$?
    if [[ $exit_code -eq 0 ]]; then
      "$HOME"/bin/bats ./tests/test_s3api_policy.sh || exit_code=$?
    fi
    if [[ $exit_code -eq 0 ]]; then
      "$HOME"/bin/bats ./tests/test_user_aws.sh || exit_code=$?
    fi
    ;;
  s3api-policy)
    echo "Running s3api policy tests ..."
    "$HOME"/bin/bats ./tests/test_s3api_policy.sh || exit_code=$?
    ;;
  s3api-non-policy)
    echo "Running s3api non-policy tests ..."
    "$HOME"/bin/bats ./tests/test_s3api.sh || exit_code=$?
    ;;
  s3)
    echo "Running s3 tests ..."
    "$HOME"/bin/bats ./tests/test_s3.sh || exit_code=$?
@@ -85,8 +99,12 @@ case $command_type in
    echo "Running mc tests ..."
    "$HOME"/bin/bats ./tests/test_mc.sh || exit_code=$?
    ;;
  aws-user)
    echo "Running aws user tests ..."
  rest)
    echo "Running rest tests ..."
    "$HOME"/bin/bats ./tests/test_rest.sh || exit_code=$?
    ;;
  s3api-user)
    echo "Running s3api user tests ..."
    "$HOME"/bin/bats ./tests/test_user_aws.sh || exit_code=$?
esac

@@ -19,16 +19,15 @@ if [[ -z "$VERSITYGW_TEST_ENV" ]] && [[ $BYPASS_ENV_FILE != "true" ]]; then
  exit 1
fi

if ! ./tests/run.sh aws; then
  exit 1
fi
if ! ./tests/run.sh s3; then
  exit 1
fi
if ! ./tests/run.sh s3cmd; then
  exit 1
fi
if ! ./tests/run.sh mc; then
  exit 1
fi
exit 0
# print config for test results info
grep -v ^# "$VERSITYGW_TEST_ENV"

status=0

for cmd in s3api s3 s3cmd mc rest; do
  if ! ./tests/run.sh "$cmd"; then
    status=1
  fi
done

exit $status

@@ -14,20 +14,18 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/env.sh
source ./tests/report.sh
source ./tests/setup_mc.sh
source ./tests/util.sh
source ./tests/versity.sh

# bats setup function
setup() {
  check_env_vars
  if [ "$RUN_VERSITYGW" == "true" ]; then
    if ! run_versity_app; then
      log 2 "error starting versity apps"
      return 1
    fi
  fi
  base_setup

  log 4 "Running test $BATS_TEST_NAME"
  if [[ $LOG_LEVEL -ge 5 ]]; then
@@ -44,10 +42,7 @@ setup() {
  fi

  if [[ $RUN_MC == true ]]; then
    if ! check_add_mc_alias; then
      log 2 "mc alias check/add failed"
      return 1
    fi
    check_add_mc_alias
  fi

  export AWS_PROFILE
@@ -62,6 +57,19 @@ setup() {

# bats teardown function
teardown() {
  # shellcheck disable=SC2154
  if ! delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME"; then
    log 3 "error deleting bucket $BUCKET_ONE_NAME or contents"
  fi
  if ! delete_bucket_or_contents_if_exists "s3api" "$BUCKET_TWO_NAME"; then
    log 3 "error deleting bucket $BUCKET_TWO_NAME or contents"
  fi
  if [ "$REMOVE_TEST_FILE_FOLDER" == "true" ]; then
    log 6 "removing test file folder"
    if ! error=$(rm -rf "${TEST_FILE_FOLDER:?}" 2>&1); then
      log 3 "unable to remove test file folder: $error"
    fi
  fi
  stop_versity
  if [[ $LOG_LEVEL -ge 5 ]]; then
    end_time=$(date +%s)

@@ -14,7 +14,7 @@
# specific language governing permissions and limitations
# under the License.

source ./tests/setup.sh
source ./tests/env.sh
source ./tests/util.sh
source ./tests/commands/create_bucket.sh

@@ -40,15 +40,15 @@ create_bucket_if_not_exists() {
  return 0
}

if ! setup; then
  log 2 "error starting versity to set up static buckets"
  exit 1
fi
base_setup
if ! create_bucket_if_not_exists "s3api" "$BUCKET_ONE_NAME"; then
  log 2 "error creating static bucket one"
elif ! create_bucket_if_not_exists "s3api" "$BUCKET_TWO_NAME"; then
  log 2 "error creating static bucket two"
fi
if ! teardown; then

# shellcheck disable=SC2034
RECREATE_BUCKETS=false
if ! stop_versity; then
  log 2 "error stopping versity"
fi

@@ -17,7 +17,7 @@
source ./tests/setup.sh
source ./tests/util.sh

if ! setup; then
if ! base_setup; then
  log 2 "error starting versity to set up static buckets"
  exit 1
fi
@@ -27,6 +27,6 @@ elif ! delete_bucket_recursive "s3" "$BUCKET_TWO_NAME"; then
  log 2 "error deleting static bucket two"
fi
log 4 "buckets deleted successfully"
if ! teardown; then
if ! stop_versity; then
  log 2 "error stopping versity"
fi
@@ -17,41 +17,48 @@
source ./tests/commands/delete_objects.sh
source ./tests/commands/list_objects_v2.sh
source ./tests/commands/list_parts.sh
source ./tests/util_get_bucket_acl.sh
source ./tests/util_get_object_attributes.sh
source ./tests/util_get_object_retention.sh
source ./tests/util_head_object.sh
source ./tests/util_legal_hold.sh
source ./tests/util_list_objects.sh

test_abort_multipart_upload_aws_root() {
  local bucket_file="bucket-file"

  create_test_files "$bucket_file"
  run create_test_file "$bucket_file"
  assert_success
  # shellcheck disable=SC2154
  run dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1
  assert_success "error creating file"
  run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1
  assert_success

  setup_bucket "aws" "$BUCKET_ONE_NAME"
  run setup_bucket "aws" "$BUCKET_ONE_NAME"
  assert_success

  run_then_abort_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || fail "abort failed"
  run run_then_abort_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER"/"$bucket_file" 4
  assert_success

  if object_exists "aws" "$BUCKET_ONE_NAME" "$bucket_file"; then
    fail "Upload file exists after abort"
  fi

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
  run object_exists "aws" "$BUCKET_ONE_NAME" "$bucket_file"
  assert_failure 1
}

test_complete_multipart_upload_aws_root() {
  local bucket_file="bucket-file"
  run create_test_files "$bucket_file"
  assert_success

  create_test_files "$bucket_file" || fail "error creating test files"
  dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1 || fail "error creating test file"
  run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1
  assert_success

  setup_bucket "aws" "$BUCKET_ONE_NAME"
  run setup_bucket "aws" "$BUCKET_ONE_NAME"
  assert_success

  multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || fail "error performing multipart upload"
  run multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER"/"$bucket_file" 4
  assert_success

  download_and_compare_file "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder/$bucket_file-copy" || fail "error downloading and comparing file"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
  run download_and_compare_file "s3api" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER/$bucket_file-copy"
  assert_success
}

test_create_multipart_upload_properties_aws_root() {
@@ -64,7 +71,6 @@ test_create_multipart_upload_properties_aws_root() {
  local expected_retention_mode="GOVERNANCE"
  local expected_tag_key="TestTag"
  local expected_tag_val="TestTagVal"
  local five_seconds_later

  os_name="$(uname)"
  if [[ "$os_name" == "Darwin" ]]; then
@@ -75,84 +81,70 @@ test_create_multipart_upload_properties_aws_root() {
    later=$(date -d "$now 15 seconds" +"%Y-%m-%dT%H:%M:%S")
  fi

  create_test_files "$bucket_file" || fail "error creating test file"
  dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1 || fail "error creating test file"
  run create_test_files "$bucket_file"
  assert_success

  delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME" || fail "error deleting bucket, or checking for existence"
  run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1
  assert_success

  run delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME"
  assert_success
  # in static bucket config, bucket will still exist
  bucket_exists "s3api" "$BUCKET_ONE_NAME" || local exists_result=$?
  [[ $exists_result -ne 2 ]] || fail "error checking for bucket existence"
  if [[ $exists_result -eq 1 ]]; then
    create_bucket_object_lock_enabled "$BUCKET_ONE_NAME" || fail "error creating bucket"
  if ! bucket_exists "s3api" "$BUCKET_ONE_NAME"; then
    run create_bucket_object_lock_enabled "$BUCKET_ONE_NAME"
    assert_success
  fi
  get_object_lock_configuration "$BUCKET_ONE_NAME" || fail "error getting lock config"
  # shellcheck disable=SC2154
  log 5 "LOG CONFIG: $log_config"

  log 5 "LATER: $later"
  multipart_upload_with_params "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 \
  run multipart_upload_with_params "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER"/"$bucket_file" 4 \
    "$expected_content_type" \
    "{\"$expected_meta_key\": \"$expected_meta_val\"}" \
    "$expected_hold_status" \
    "$expected_retention_mode" \
    "$later" \
    "$expected_tag_key=$expected_tag_val" || fail "error performing multipart upload"
  assert_success

  head_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error getting metadata"
  # shellcheck disable=SC2154
  raw_metadata=$(echo "$metadata" | grep -v "InsecureRequestWarning")
  log 5 "raw metadata: $raw_metadata"
  run get_and_verify_metadata "$bucket_file" "$expected_content_type" "$expected_meta_key" "$expected_meta_val" \
    "$expected_hold_status" "$expected_retention_mode" "$later"
  assert_success

  content_type=$(echo "$raw_metadata" | jq -r ".ContentType")
  [[ $content_type == "$expected_content_type" ]] || fail "content type mismatch ($content_type, $expected_content_type)"
  meta_val=$(echo "$raw_metadata" | jq -r ".Metadata.$expected_meta_key")
  [[ $meta_val == "$expected_meta_val" ]] || fail "metadata val mismatch ($meta_val, $expected_meta_val)"
  hold_status=$(echo "$raw_metadata" | jq -r ".ObjectLockLegalHoldStatus")
  [[ $hold_status == "$expected_hold_status" ]] || fail "hold status mismatch ($hold_status, $expected_hold_status)"
  retention_mode=$(echo "$raw_metadata" | jq -r ".ObjectLockMode")
  [[ $retention_mode == "$expected_retention_mode" ]] || fail "retention mode mismatch ($retention_mode, $expected_retention_mode)"
  retain_until_date=$(echo "$raw_metadata" | jq -r ".ObjectLockRetainUntilDate")
  [[ $retain_until_date == "$later"* ]] || fail "retention date mismatch ($retain_until_date, $later)"
  run get_and_check_bucket_tags "$BUCKET_ONE_NAME" "$expected_tag_key" "$expected_tag_val"
  assert_success

  get_object_tagging "aws" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error getting tagging"
  # shellcheck disable=SC2154
  log 5 "tags: $tags"
  tag_key=$(echo "$tags" | jq -r ".TagSet[0].Key")
  [[ $tag_key == "$expected_tag_key" ]] || fail "tag mismatch ($tag_key, $expected_tag_key)"
  tag_val=$(echo "$tags" | jq -r ".TagSet[0].Value")
  [[ $tag_val == "$expected_tag_val" ]] || fail "tag mismatch ($tag_val, $expected_tag_val)"
  run put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "OFF"
  assert_success

  put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "OFF" || fail "error disabling legal hold"
  head_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error getting metadata"
  run get_and_check_legal_hold "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "OFF"
  assert_success

  get_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder/$bucket_file-copy" || fail "error getting object"
  compare_files "$test_file_folder/$bucket_file" "$test_file_folder/$bucket_file-copy" || fail "files not equal"

  sleep 15

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
  run download_and_compare_file "s3api" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER/$bucket_file-copy"
  assert_success
}

test_delete_objects_aws_root() {
|
||||
local object_one="test-file-one"
|
||||
local object_two="test-file-two"
|
||||
|
||||
create_test_files "$object_one" "$object_two" || fail "error creating test files"
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME"
|
||||
run create_test_files "$object_one" "$object_two"
|
||||
assert_success
|
||||
|
||||
put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || fail "error adding object one"
|
||||
put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || fail "error adding object two"
|
||||
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
|
||||
assert_success
|
||||
|
||||
delete_objects "$BUCKET_ONE_NAME" "$object_one" "$object_two" || fail "error deleting objects"
|
||||
run put_object "s3api" "$TEST_FILE_FOLDER"/"$object_one" "$BUCKET_ONE_NAME" "$object_one"
|
||||
assert_success
|
||||
|
||||
object_exists "s3api" "$BUCKET_ONE_NAME" "$object_one" || local object_one_exists_result=$?
|
||||
[[ $object_one_exists_result -eq 1 ]] || fail "object $object_one not deleted"
|
||||
object_exists "s3api" "$BUCKET_ONE_NAME" "$object_two" || local object_two_exists_result=$?
|
||||
[[ $object_two_exists_result -eq 1 ]] || fail "object $object_two not deleted"
|
||||
run put_object "s3api" "$TEST_FILE_FOLDER"/"$object_two" "$BUCKET_ONE_NAME" "$object_two"
|
||||
assert_success
|
||||
|
||||
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$object_one" "$object_two"
|
||||
run delete_objects "$BUCKET_ONE_NAME" "$object_one" "$object_two"
assert_success

run object_exists "s3api" "$BUCKET_ONE_NAME" "$object_one"
assert_failure 1

run object_exists "s3api" "$BUCKET_ONE_NAME" "$object_two"
assert_failure 1
}

test_get_bucket_acl_aws_root() {
@@ -160,63 +152,64 @@ test_get_bucket_acl_aws_root() {
if [[ $RECREATE_BUCKETS == "false" ]]; then
skip
fi
setup_bucket "aws" "$BUCKET_ONE_NAME"
run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success

get_bucket_acl "s3api" "$BUCKET_ONE_NAME" || fail "error retrieving ACL"

# shellcheck disable=SC2154
log 5 "ACL: $acl"
id=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq -r '.Owner.ID')
[[ $id == "$AWS_ACCESS_KEY_ID" ]] || fail "ACL mismatch"

delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
run get_bucket_acl_and_check_owner "s3api" "$BUCKET_ONE_NAME"
assert_success
}

test_get_object_full_range_aws_root() {
bucket_file="bucket_file"

create_test_files "$bucket_file" || local created=$?
[[ $created -eq 0 ]] || fail "Error creating test files"
echo -n "0123456789" > "$test_file_folder/$bucket_file"
setup_bucket "s3api" "$BUCKET_ONE_NAME"
run create_test_files "$bucket_file" 0
assert_success
echo -n "0123456789" > "$TEST_FILE_FOLDER/$bucket_file"

put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error putting object"
get_object_with_range "$BUCKET_ONE_NAME" "$bucket_file" "bytes=9-15" "$test_file_folder/$bucket_file-range" || fail "error getting range"
[[ "$(cat "$test_file_folder/$bucket_file-range")" == "9" ]] || fail "byte range not copied properly"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

run put_object "s3api" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file"
assert_success

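# "bytes=9-15" extends past the end of the 10-byte object; the range is truncated to the available data, so only the final byte, "9", is returned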
run get_object_with_range "$BUCKET_ONE_NAME" "$bucket_file" "bytes=9-15" "$TEST_FILE_FOLDER/$bucket_file-range"
assert_success

assert [ "$(cat "$TEST_FILE_FOLDER/$bucket_file-range")" == "9" ]
}

test_get_object_invalid_range_aws_root() {
bucket_file="bucket_file"
run create_test_files "$bucket_file"
assert_success

create_test_files "$bucket_file" || local created=$?
[[ $created -eq 0 ]] || fail "Error creating test files"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

setup_bucket "s3api" "$BUCKET_ONE_NAME"
put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error putting object"
get_object_with_range "$BUCKET_ONE_NAME" "$bucket_file" "bytes=0-0" "$test_file_folder/$bucket_file-range" || local get_result=$?
[[ $get_result -ne 0 ]] || fail "Get object with zero range returned no error"
run put_object "s3api" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file"
assert_success

run get_object_with_range "$BUCKET_ONE_NAME" "$bucket_file" "bytes=0-0" "$TEST_FILE_FOLDER/$bucket_file-range"
assert_failure
}

test_put_object_aws_root() {
bucket_file="bucket_file"

create_test_files "$bucket_file" || local created=$?
[[ $created -eq 0 ]] || fail "Error creating test files"
run create_test_files "$bucket_file"
assert_success

setup_bucket "s3api" "$BUCKET_ONE_NAME"
setup_bucket "s3api" "$BUCKET_TWO_NAME"
put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local copy_result=$?
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
copy_error=$(aws --no-verify-ssl s3api copy-object --copy-source "$BUCKET_ONE_NAME/$bucket_file" --key "$bucket_file" --bucket "$BUCKET_TWO_NAME" 2>&1) || local copy_result=$?
[[ $copy_result -eq 0 ]] || fail "Error copying file: $copy_error"
copy_file "s3://$BUCKET_TWO_NAME/$bucket_file" "$test_file_folder/${bucket_file}_copy" || local copy_result=$?
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
compare_files "$test_file_folder/$bucket_file" "$test_file_folder/${bucket_file}_copy" || local compare_result=$?
[[ $compare_result -eq 0 ]] || fail "files don't match"
run setup_buckets "s3api" "$BUCKET_ONE_NAME" "$BUCKET_TWO_NAME"
assert_success

delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_bucket_or_contents "aws" "$BUCKET_TWO_NAME"
delete_test_files "$bucket_file"
run put_object "s3api" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file"
assert_success

run copy_object "s3api" "$BUCKET_ONE_NAME/$bucket_file" "$BUCKET_TWO_NAME" "$bucket_file"
assert_success

run download_and_compare_file "s3api" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER/${bucket_file}_copy"
assert_success
}

test_create_bucket_invalid_name_aws_root() {
@@ -224,29 +217,23 @@ test_create_bucket_invalid_name_aws_root() {
return
fi

create_bucket_invalid_name "aws" || local create_result=$?
[[ $create_result -eq 0 ]] || fail "Invalid name test failed"

# shellcheck disable=SC2154
[[ "$bucket_create_error" == *"Invalid bucket name "* ]] || fail "unexpected error: $bucket_create_error"
run create_and_check_bucket_invalid_name "aws"
assert_success
}

test_get_object_attributes_aws_root() {
bucket_file="bucket_file"
run create_test_file "$bucket_file"
assert_success

create_test_files "$bucket_file" || fail "error creating test files"
setup_bucket "s3api" "$BUCKET_ONE_NAME"
put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "failed to add object to bucket"
get_object_attributes "$BUCKET_ONE_NAME" "$bucket_file" || fail "failed to get object attributes"
# shellcheck disable=SC2154
has_object_size=$(echo "$attributes" | jq -e '.ObjectSize' 2>&1) || fail "error checking for ObjectSize parameters: $has_object_size"
if [[ $has_object_size -eq 0 ]]; then
object_size=$(echo "$attributes" | jq -r ".ObjectSize")
[[ $object_size == 0 ]] || fail "Incorrect object size: $object_size"
else
fail "ObjectSize parameter missing: $attributes"
fi
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

run put_object "s3api" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file"
assert_success

run get_and_check_object_size "$BUCKET_ONE_NAME" "$bucket_file" 10
assert_success
}

test_get_put_object_legal_hold_aws_root() {
@@ -259,37 +246,34 @@ test_get_put_object_legal_hold_aws_root() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

legal_hold_retention_setup "$username" "$password" "$bucket_file"
run legal_hold_retention_setup "$username" "$password" "$bucket_file"
assert_success

get_object_lock_configuration "$BUCKET_ONE_NAME" || fail "error getting lock configuration"
# shellcheck disable=SC2154
log 5 "$lock_config"
enabled=$(echo "$lock_config" | jq -r ".ObjectLockConfiguration.ObjectLockEnabled")
[[ $enabled == "Enabled" ]] || fail "ObjectLockEnabled should be 'Enabled', is '$enabled'"
run get_check_object_lock_config_enabled "$BUCKET_ONE_NAME"
assert_success

put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "ON" || fail "error putting legal hold on object"
get_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" || fail "error getting object legal hold status"
# shellcheck disable=SC2154
log 5 "$legal_hold"
hold_status=$(echo "$legal_hold" | grep -v "InsecureRequestWarning" | jq -r ".LegalHold.Status" 2>&1) || fail "error obtaining hold status: $hold_status"
[[ $hold_status == "ON" ]] || fail "Status should be 'ON', is '$hold_status'"
run put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "ON"
assert_success

echo "fdkljafajkfs" > "$test_file_folder/$bucket_file"
if put_object_with_user "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$password"; then
fail "able to overwrite object with hold"
fi
run get_and_check_legal_hold "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "ON"
assert_success

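# with the legal hold ON, overwriting or deleting the object should be rejected as WORM-protected until the hold is released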
echo "fdkljafajkfs" > "$TEST_FILE_FOLDER/$bucket_file"
|
||||
run put_object_with_user "s3api" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$password"
|
||||
assert_failure 1
|
||||
# shellcheck disable=SC2154
|
||||
#[[ $put_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $put_object_error"
|
||||
|
||||
if delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$password"; then
|
||||
fail "able to delete object with hold"
|
||||
fi
|
||||
run delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$password"
|
||||
assert_failure 1
|
||||
# shellcheck disable=SC2154
|
||||
[[ $delete_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $delete_object_error"
|
||||
put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "OFF" || fail "error removing legal hold on object"
|
||||
delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$password" || fail "error deleting object after removing legal hold"
|
||||
assert_output --partial "Object is WORM protected and cannot be overwritten"
|
||||
|
||||
delete_bucket_recursive "s3api" "$BUCKET_ONE_NAME"
|
||||
run put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "OFF"
|
||||
assert_success
|
||||
|
||||
run delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$password"
|
||||
assert_success
|
||||
}
|
||||
|
||||
test_get_put_object_retention_aws_root() {
|
||||
@@ -302,12 +286,11 @@ test_get_put_object_retention_aws_root() {
|
||||
skip
|
||||
fi
|
||||
|
||||
legal_hold_retention_setup "$username" "$secret_key" "$bucket_file"
|
||||
run legal_hold_retention_setup "$username" "$secret_key" "$bucket_file"
|
||||
assert_success
|
||||
|
||||
get_object_lock_configuration "$BUCKET_ONE_NAME" || fail "error getting lock configuration"
|
||||
log 5 "$lock_config"
|
||||
enabled=$(echo "$lock_config" | jq -r ".ObjectLockConfiguration.ObjectLockEnabled")
|
||||
[[ $enabled == "Enabled" ]] || fail "ObjectLockEnabled should be 'Enabled', is '$enabled'"
|
||||
run get_check_object_lock_config_enabled "$BUCKET_ONE_NAME"
|
||||
assert_success
|
||||
|
||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
retention_date=$(TZ="UTC" date -v+5S +"%Y-%m-%dT%H:%M:%S")
|
||||
@@ -315,30 +298,23 @@ test_get_put_object_retention_aws_root() {
|
||||
retention_date=$(TZ="UTC" date -d "+5 seconds" +"%Y-%m-%dT%H:%M:%S")
|
||||
fi
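# BSD (macOS) date offsets with -v+5S, while GNU date takes -d "+5 seconds"; both branches produce the same UTC timestamp format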
log 5 "retention date: $retention_date"
put_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "GOVERNANCE" "$retention_date" || fail "failed to add object retention"
get_object_retention "$BUCKET_ONE_NAME" "$bucket_file" || fail "failed to get object retention"
log 5 "$retention"
retention=$(echo "$retention" | grep -v "InsecureRequestWarning")
mode=$(echo "$retention" | jq -r ".Retention.Mode")
retain_until_date=$(echo "$retention" | jq -r ".Retention.RetainUntilDate")
[[ $mode == "GOVERNANCE" ]] || fail "retention mode should be governance, is $mode"
[[ $retain_until_date == "$retention_date"* ]] || fail "retain until date should be $retention_date, is $retain_until_date"

echo "fdkljafajkfs" > "$test_file_folder/$bucket_file"
put_object_with_user "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || local put_result=$?
[[ $put_result -ne 0 ]] || fail "able to overwrite object with hold"
run put_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "GOVERNANCE" "$retention_date"
assert_success

run get_check_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "$retention_date"
assert_success

echo "fdkljafajkfs" > "$TEST_FILE_FOLDER/$bucket_file"
run put_object_with_user "s3api" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key"
assert_failure 1
# shellcheck disable=SC2154
[[ $put_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $put_object_error"
assert_output --partial "Object is WORM protected and cannot be overwritten"

delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || local delete_result=$?
[[ $delete_result -ne 0 ]] || fail "able to delete object with hold"
[[ $delete_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $delete_object_error"

sleep 5

delete_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error deleting object"
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
delete_test_files "$bucket_file"
run delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key"
assert_failure 1
# shellcheck disable=SC2154
assert_output --partial "Object is WORM protected and cannot be overwritten"
}

test_retention_bypass_aws_root() {
@@ -351,12 +327,11 @@ test_retention_bypass_aws_root() {
secret_key=$PASSWORD_ONE
policy_file="policy_file"

legal_hold_retention_setup "$username" "$secret_key" "$bucket_file"
run legal_hold_retention_setup "$username" "$secret_key" "$bucket_file"
assert_success

get_object_lock_configuration "$BUCKET_ONE_NAME" || fail "error getting lock configuration"
log 5 "$lock_config"
enabled=$(echo "$lock_config" | jq -r ".ObjectLockConfiguration.ObjectLockEnabled")
[[ $enabled == "Enabled" ]] || fail "ObjectLockEnabled should be 'Enabled', is '$enabled'"
run get_check_object_lock_config_enabled "$BUCKET_ONE_NAME"
assert_success

if [[ "$OSTYPE" == "darwin"* ]]; then
retention_date=$(TZ="UTC" date -v+30S +"%Y-%m-%dT%H:%M:%S")
@@ -364,149 +339,103 @@ test_retention_bypass_aws_root() {
retention_date=$(TZ="UTC" date -d "+30 seconds" +"%Y-%m-%dT%H:%M:%S")
fi
log 5 "retention date: $retention_date"
put_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "GOVERNANCE" "$retention_date" || fail "failed to add object retention"
if delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file"; then
log 2 "able to delete object despite retention"
return 1
fi
cat <<EOF > "$test_file_folder/$policy_file"
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "$username",
"Action": ["s3:BypassGovernanceRetention","s3:DeleteObject"],
"Resource": "arn:aws:s3:::$BUCKET_ONE_NAME/*"
}
]
}
EOF
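# GOVERNANCE-mode retention can only be bypassed by a principal granted s3:BypassGovernanceRetention, so the policy pairs that action with s3:DeleteObject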
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting bucket policy"
delete_object_bypass_retention "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || fail "error deleting object and bypassing retention"
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
delete_test_files "$bucket_file" "$policy_file"

run put_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "GOVERNANCE" "$retention_date"
assert_success

run delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file"
assert_failure 1

run setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "2012-10-17" "Allow" "$username" \
"[\"s3:BypassGovernanceRetention\",\"s3:DeleteObject\"]" "arn:aws:s3:::$BUCKET_ONE_NAME/*"
assert_success

run put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file"
assert_success

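# with the bypass policy in place, the delete should now succeed, presumably by sending the x-amz-bypass-governance-retention header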
run delete_object_bypass_retention "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key"
assert_success
}

legal_hold_retention_setup() {
[[ $# -eq 3 ]] || fail "legal hold or retention setup requires username, secret key, bucket file"
assert [ $# -eq 3 ]

delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME" || fail "error deleting bucket, or checking for existence"
setup_user "$1" "$2" "user" || fail "error creating user if nonexistent"
create_test_files "$3" || fail "error creating test files"
run delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME"
assert_success

run setup_user "$1" "$2" "user"
assert_success

run create_test_file "$3"
assert_success

#create_bucket "s3api" "$BUCKET_ONE_NAME" || fail "error creating bucket"
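# object lock can only be enabled when a bucket is created, so the bucket is recreated with it here rather than reconfigured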
if [[ $RECREATE_BUCKETS == "true" ]]; then
create_bucket_object_lock_enabled "$BUCKET_ONE_NAME" || fail "error creating bucket"
run create_bucket_object_lock_enabled "$BUCKET_ONE_NAME"
assert_success
fi
change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$1" || fail "error changing bucket ownership"
get_bucket_policy "s3api" "$BUCKET_ONE_NAME" || fail "error getting bucket policy"
# shellcheck disable=SC2154
log 5 "POLICY: $bucket_policy"
get_bucket_owner "$BUCKET_ONE_NAME"
# shellcheck disable=SC2154
log 5 "owner: $bucket_owner"
#put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred" || fail "error putting bucket ownership controls"
put_object_with_user "s3api" "$test_file_folder/$3" "$BUCKET_ONE_NAME" "$3" "$1" "$2" || fail "failed to add object to bucket"

run change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$1"
assert_success

run put_object_with_user "s3api" "$TEST_FILE_FOLDER/$3" "$BUCKET_ONE_NAME" "$3" "$1" "$2"
assert_success
}

test_s3api_list_objects_v1_aws_root() {
local object_one="test-file-one"
local object_two="test-file-two"
local object_two_data="test data\n"

create_test_files "$object_one" "$object_two" || local created=$?
[[ $created -eq 0 ]] || fail "Error creating test files"
printf "%s" "$object_two_data" > "$test_file_folder"/"$object_two"
setup_bucket "aws" "$BUCKET_ONE_NAME"
[[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || local copy_result_one=$?
[[ $copy_result_one -eq 0 ]] || fail "Failed to add object $object_one"
put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local copy_result_two=$?
[[ $copy_result_two -eq 0 ]] || fail "Failed to add object $object_two"
run create_test_files "$object_one" "$object_two"
assert_success

list_objects_s3api_v1 "$BUCKET_ONE_NAME"
# shellcheck disable=SC2154
key_one=$(echo "$objects" | jq -r '.Contents[0].Key')
[[ $key_one == "$object_one" ]] || fail "Object one mismatch ($key_one, $object_one)"
size_one=$(echo "$objects" | jq -r '.Contents[0].Size')
[[ $size_one -eq 0 ]] || fail "Object one size mismatch ($size_one, 0)"
key_two=$(echo "$objects" | jq -r '.Contents[1].Key')
[[ $key_two == "$object_two" ]] || fail "Object two mismatch ($key_two, $object_two)"
size_two=$(echo "$objects" | jq '.Contents[1].Size')
[[ $size_two -eq ${#object_two_data} ]] || fail "Object two size mismatch ($size_two, ${#object_two_data})"
run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success

delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files "$object_one" "$object_two"
run put_object "s3api" "$TEST_FILE_FOLDER"/"$object_one" "$BUCKET_ONE_NAME" "$object_one"
assert_success

run put_object "s3api" "$TEST_FILE_FOLDER"/"$object_two" "$BUCKET_ONE_NAME" "$object_two"
assert_success

run list_check_objects_v1 "$BUCKET_ONE_NAME" "$object_one" 10 "$object_two" 10
assert_success
}

test_s3api_list_objects_v2_aws_root() {
local object_one="test-file-one"
local object_two="test-file-two"
local object_two_data="test data\n"

create_test_files "$object_one" "$object_two" || local created=$?
[[ $created -eq 0 ]] || fail "Error creating test files"
printf "%s" "$object_two_data" > "$test_file_folder"/"$object_two"
setup_bucket "aws" "$BUCKET_ONE_NAME"
[[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || local copy_object_one=$?
[[ $copy_object_one -eq 0 ]] || fail "Failed to add object $object_one"
put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local copy_object_two=$?
[[ $copy_object_two -eq 0 ]] || fail "Failed to add object $object_two"
run create_test_files "$object_one" "$object_two"
assert_success

list_objects_v2 "$BUCKET_ONE_NAME" || fail "error listing objects (v2)"
key_one=$(echo "$objects" | jq -r '.Contents[0].Key')
[[ $key_one == "$object_one" ]] || fail "Object one mismatch ($key_one, $object_one)"
size_one=$(echo "$objects" | jq -r '.Contents[0].Size')
[[ $size_one -eq 0 ]] || fail "Object one size mismatch ($size_one, 0)"
key_two=$(echo "$objects" | jq -r '.Contents[1].Key')
[[ $key_two == "$object_two" ]] || fail "Object two mismatch ($key_two, $object_two)"
size_two=$(echo "$objects" | jq -r '.Contents[1].Size')
[[ $size_two -eq ${#object_two_data} ]] || fail "Object two size mismatch ($size_two, ${#object_two_data})"
run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success

delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files "$object_one" "$object_two"
run put_object "s3api" "$TEST_FILE_FOLDER"/"$object_one" "$BUCKET_ONE_NAME" "$object_one"
assert_success

run put_object "s3api" "$TEST_FILE_FOLDER"/"$object_two" "$BUCKET_ONE_NAME" "$object_two"
assert_success

run list_check_objects_v2 "$BUCKET_ONE_NAME" "$object_one" 10 "$object_two" 10
assert_success
}

test_multipart_upload_list_parts_aws_root() {
local bucket_file="bucket-file"

create_test_files "$bucket_file" || fail "error creating test file"
dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1 || fail "error creating test file"
setup_bucket "aws" "$BUCKET_ONE_NAME"
run create_test_file "$bucket_file" 0
assert_success
run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1
assert_success

start_multipart_upload_and_list_parts "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || fail "listing multipart upload parts failed"
run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success

declare -a parts_map
# shellcheck disable=SC2154
log 5 "parts: $parts"
for i in {0..3}; do
local part_number
local etag
# shellcheck disable=SC2154
part=$(echo "$parts" | grep -v "InsecureRequestWarning" | jq -r ".[$i]" 2>&1) || fail "error getting part: $part"
part_number=$(echo "$part" | jq ".PartNumber" 2>&1) || fail "error parsing part number: $part_number"
[[ $part_number != "" ]] || fail "error: blank part number"
run start_multipart_upload_list_check_parts "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER"/"$bucket_file"
assert_success

etag=$(echo "$part" | jq ".ETag" 2>&1) || fail "error parsing etag: $etag"
[[ $etag != "" ]] || fail "error: blank etag"
# shellcheck disable=SC2004
parts_map[$part_number]=$etag
done
[[ ${#parts_map[@]} -ne 0 ]] || fail "error loading multipart upload parts to check"

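# verify that each part returned by the list operation carries the same ETag recorded when that part was uploaded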
for i in {0..3}; do
local part_number
local etag
# shellcheck disable=SC2154
listed_part=$(echo "$listed_parts" | grep -v "InsecureRequestWarning" | jq -r ".Parts[$i]" 2>&1) || fail "error parsing listed part: $listed_part"
part_number=$(echo "$listed_part" | jq ".PartNumber" 2>&1) || fail "error parsing listed part number: $part_number"
etag=$(echo "$listed_part" | jq ".ETag" 2>&1) || fail "error getting listed etag: $etag"
[[ ${parts_map[$part_number]} == "$etag" ]] || fail "error: etags don't match (part number: $part_number, etags ${parts_map[$part_number]},$etag)"
done

run_then_abort_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder/$bucket_file" 4
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files $bucket_file
run run_then_abort_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER/$bucket_file" 4
assert_success
}

@@ -33,21 +33,33 @@ source ./tests/commands/put_object_tagging.sh
source ./tests/commands/put_object.sh
source ./tests/commands/put_public_access_block.sh

# param: command type
# fail on test failure
test_common_multipart_upload() {
if [[ $# -ne 1 ]]; then
echo "multipart upload command missing command type"
return 1
fi
assert [ $# -eq 1 ]

bucket_file="largefile"
run create_large_file "$bucket_file"
assert_success

create_large_file "$bucket_file" || local created=$?
[[ $created -eq 0 ]] || fail "Error creating test file for multipart upload"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

setup_bucket "$1" "$BUCKET_ONE_NAME"
[[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
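# the s3 client removes the local file on upload (see test_common_put_object), so keep a copy for the later comparison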
if [ "$1" == 's3' ]; then
|
||||
run copy_file_locally "$TEST_FILE_FOLDER/$bucket_file" "$TEST_FILE_FOLDER/$bucket_file-copy"
|
||||
assert_success
|
||||
fi
|
||||
|
||||
put_object "$1" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local put_result=$?
|
||||
[[ $put_result -eq 0 ]] || fail "failed to copy file"
|
||||
run put_object "$1" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file"
|
||||
assert_success
|
||||
|
||||
if [ "$1" == 's3' ]; then
|
||||
run move_file_locally "$TEST_FILE_FOLDER/$bucket_file-copy" "$TEST_FILE_FOLDER/$bucket_file"
|
||||
assert_success
|
||||
fi
|
||||
|
||||
run download_and_compare_file "$1" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER/$bucket_file-copy"
|
||||
assert_success
|
||||
|
||||
delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
|
||||
delete_test_files $bucket_file
|
||||
@@ -63,7 +75,8 @@ test_common_create_delete_bucket() {
|
||||
|
||||
assert [ $# -eq 1 ]
|
||||
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME"
|
||||
run setup_bucket "$1" "$BUCKET_ONE_NAME"
|
||||
assert_success
|
||||
|
||||
bucket_exists "$1" "$BUCKET_ONE_NAME" || fail "failed bucket existence check"
|
||||
|
||||
@@ -74,108 +87,125 @@ test_common_copy_object() {
|
||||
if [[ $# -ne 1 ]]; then
|
||||
fail "copy object test requires command type"
|
||||
fi
|
||||
local object_name="test-object"
|
||||
create_test_files "$object_name" || fail "error creating test file"
|
||||
echo "test data" > "$test_file_folder/$object_name"
|
||||
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME"
|
||||
setup_bucket "$1" "$BUCKET_TWO_NAME"
|
||||
local object_name="test-object"
|
||||
run create_test_file "$object_name"
|
||||
assert_success
|
||||
|
||||
run setup_buckets "$1" "$BUCKET_ONE_NAME" "$BUCKET_TWO_NAME"
|
||||
assert_success
|
||||
|
||||
if [[ $1 == 's3' ]]; then
|
||||
copy_object "$1" "$test_file_folder/$object_name" "$BUCKET_ONE_NAME" "$object_name" || fail "failed to copy object to bucket one"
|
||||
copy_object "$1" "$TEST_FILE_FOLDER/$object_name" "$BUCKET_ONE_NAME" "$object_name" || fail "failed to copy object to bucket one"
|
||||
else
|
||||
put_object "$1" "$test_file_folder/$object_name" "$BUCKET_ONE_NAME" "$object_name" || fail "failed to put object to bucket one"
|
||||
put_object "$1" "$TEST_FILE_FOLDER/$object_name" "$BUCKET_ONE_NAME" "$object_name" || fail "failed to put object to bucket one"
|
||||
fi
|
||||
if [[ $1 == 's3' ]]; then
|
||||
copy_object "$1" "s3://$BUCKET_ONE_NAME/$object_name" "$BUCKET_TWO_NAME" "$object_name" || fail "object not copied to bucket two"
|
||||
else
|
||||
copy_object "$1" "$BUCKET_ONE_NAME/$object_name" "$BUCKET_TWO_NAME" "$object_name" || fail "object not copied to bucket two"
|
||||
fi
|
||||
get_object "$1" "$BUCKET_TWO_NAME" "$object_name" "$test_file_folder/$object_name-copy" || fail "failed to retrieve object"
|
||||
|
||||
compare_files "$test_file_folder/$object_name" "$test_file_folder/$object_name-copy" || fail "files not the same"
|
||||
run download_and_compare_file "$1" "$TEST_FILE_FOLDER/$object_name" "$BUCKET_TWO_NAME" "$object_name" "$TEST_FILE_FOLDER/$object_name-copy"
|
||||
assert_success
|
||||
|
||||
delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
|
||||
delete_bucket_or_contents "$1" "$BUCKET_TWO_NAME"
|
||||
delete_test_files "$object_name" "$object_name-copy"
|
||||
}
|
||||
|
||||
# param: client
|
||||
# fail on error
|
||||
test_common_put_object_with_data() {
|
||||
if [[ $# -ne 1 ]]; then
|
||||
fail "put object test requires command type"
|
||||
fi
|
||||
assert [ $# -eq 1 ]
|
||||
|
||||
local object_name="test-object"
|
||||
create_test_files "$object_name" || local create_result=$?
|
||||
[[ $create_result -eq 0 ]] || fail "Error creating test file"
|
||||
echo "test data" > "$test_file_folder"/"$object_name"
|
||||
run create_test_file "$object_name"
|
||||
assert_success
|
||||
|
||||
test_common_put_object "$1" "$object_name"
|
||||
}
|
||||
|
||||
# param: client
|
||||
# fail on error
|
||||
test_common_put_object_no_data() {
|
||||
if [[ $# -ne 1 ]]; then
|
||||
fail "put object test requires command type"
|
||||
fi
|
||||
assert [ $# -eq 1 ]
|
||||
|
||||
local object_name="test-object"
|
||||
create_test_files "$object_name" || local create_result=$?
|
||||
[[ $create_result -eq 0 ]] || fail "Error creating test file"
|
||||
run create_test_file "$object_name" 0
|
||||
assert_success
|
||||
|
||||
test_common_put_object "$1" "$object_name"
|
||||
}
|
||||
|
||||
# params: client, filename
|
||||
# fail on test failure
|
||||
test_common_put_object() {
|
||||
if [[ $# -ne 2 ]]; then
|
||||
fail "put object test requires command type, file"
|
||||
assert [ $# -eq 2 ]
|
||||
|
||||
run setup_bucket "$1" "$BUCKET_ONE_NAME"
|
||||
assert_success
|
||||
|
||||
# s3 erases file locally, so we need to copy it first
|
||||
if [ "$1" == 's3' ]; then
|
||||
run copy_file_locally "$TEST_FILE_FOLDER/$2" "$TEST_FILE_FOLDER/${2}-copy"
|
||||
assert_success
|
||||
fi
|
||||
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME"
|
||||
run put_object "$1" "$TEST_FILE_FOLDER/$2" "$BUCKET_ONE_NAME" "$2"
|
||||
assert_success
|
||||
|
||||
put_object "$1" "$test_file_folder/$2" "$BUCKET_ONE_NAME" "$2" || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
object_exists "$1" "$BUCKET_ONE_NAME" "$2" || local exists_result_one=$?
|
||||
[[ $exists_result_one -eq 0 ]] || fail "Object not added to bucket"
|
||||
if [ "$1" == 's3' ]; then
|
||||
run move_file_locally "$TEST_FILE_FOLDER/${2}-copy" "$TEST_FILE_FOLDER/$2"
|
||||
assert_success
|
||||
fi
|
||||
|
||||
delete_object "$1" "$BUCKET_ONE_NAME" "$2" || local delete_result=$?
|
||||
[[ $delete_result -eq 0 ]] || fail "Failed to delete object"
|
||||
object_exists "$1" "$BUCKET_ONE_NAME" "$2" || local exists_result_two=$?
|
||||
[[ $exists_result_two -eq 1 ]] || fail "Object not removed from bucket"
|
||||
run download_and_compare_file "$1" "$TEST_FILE_FOLDER/$2" "$BUCKET_ONE_NAME" "$2" "$TEST_FILE_FOLDER/${2}-copy"
|
||||
assert_success
|
||||
|
||||
run delete_object "$1" "$BUCKET_ONE_NAME" "$2"
|
||||
assert_success
|
||||
|
||||
run object_exists "$1" "$BUCKET_ONE_NAME" "$2"
|
||||
assert_failure 1
|
||||
|
||||
delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$2"
|
||||
delete_test_files "$2" "${2}-copy"
|
||||
}
|
||||
|
||||
test_common_put_get_object() {
|
||||
if [[ $# -ne 1 ]]; then
|
||||
fail "put, get object test requires command type"
|
||||
fail "put, get object test requires client"
|
||||
fi
|
||||
|
||||
local object_name="test-object"
|
||||
run create_test_files "$object_name"
|
||||
assert_success
|
||||
|
||||
create_test_files "$object_name" || fail "error creating test file"
|
||||
echo "test data" > "$test_file_folder"/"$object_name"
|
||||
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME"
|
||||
run setup_bucket "$1" "$BUCKET_ONE_NAME"
|
||||
assert_success
|
||||
|
||||
if [[ $1 == 's3' ]]; then
|
||||
copy_object "$1" "$test_file_folder/$object_name" "$BUCKET_ONE_NAME" "$object_name" || fail "failed to add object to bucket"
|
||||
copy_object "$1" "$TEST_FILE_FOLDER/$object_name" "$BUCKET_ONE_NAME" "$object_name" || fail "failed to add object to bucket"
|
||||
else
|
||||
put_object "$1" "$test_file_folder/$object_name" "$BUCKET_ONE_NAME" "$object_name" || fail "failed to add object to bucket"
|
||||
put_object "$1" "$TEST_FILE_FOLDER/$object_name" "$BUCKET_ONE_NAME" "$object_name" || fail "failed to add object to bucket"
|
||||
fi
|
||||
object_exists "$1" "$BUCKET_ONE_NAME" "$object_name" || fail "object not added to bucket"
|
||||
|
||||
get_object "$1" "$BUCKET_ONE_NAME" "$object_name" "$test_file_folder/${object_name}_copy" || fail "failed to get object"
|
||||
compare_files "$test_file_folder"/"$object_name" "$test_file_folder/${object_name}_copy" || fail "objects are different"
|
||||
run download_and_compare_file "$1" "$TEST_FILE_FOLDER/$object_name" "$BUCKET_ONE_NAME" "$object_name" "$TEST_FILE_FOLDER/${2}-copy"
|
||||
assert_success
|
||||
|
||||
delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$object_name" "${object_name}_copy"
|
||||
delete_test_files "$object_name" "${object_name}-copy"
|
||||
}

test_common_get_set_versioning() {
local object_name="test-object"
create_test_files "$object_name" || local create_result=$?
[[ $create_result -eq 0 ]] || fail "Error creating test file"

setup_bucket "$1" "$BUCKET_ONE_NAME"
run create_test_files "$object_name"
assert_success

run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

get_bucket_versioning "$1" "$BUCKET_ONE_NAME" || local get_result=$?
[[ $get_result -eq 0 ]] || fail "error getting bucket versioning"
@@ -197,8 +227,8 @@ test_common_list_buckets() {
fail "List buckets test requires one argument"
fi

setup_bucket "$1" "$BUCKET_ONE_NAME"
setup_bucket "$1" "$BUCKET_TWO_NAME"
run setup_buckets "$1" "$BUCKET_ONE_NAME" "$BUCKET_TWO_NAME"
assert_success

list_buckets "$1"
local bucket_one_found=false
@@ -235,13 +265,18 @@ test_common_list_objects() {
object_one="test-file-one"
object_two="test-file-two"

create_test_files $object_one $object_two
echo "test data" > "$test_file_folder"/"$object_one"
echo "test data 2" > "$test_file_folder"/"$object_two"
setup_bucket "$1" "$BUCKET_ONE_NAME"
put_object "$1" "$test_file_folder"/$object_one "$BUCKET_ONE_NAME" "$object_one" || local result_two=$?
run create_test_files $object_one $object_two
assert_success

echo "test data" > "$TEST_FILE_FOLDER"/"$object_one"
echo "test data 2" > "$TEST_FILE_FOLDER"/"$object_two"

run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

put_object "$1" "$TEST_FILE_FOLDER"/$object_one "$BUCKET_ONE_NAME" "$object_one" || local result_two=$?
[[ $result_two -eq 0 ]] || fail "Error adding object one"
put_object "$1" "$test_file_folder"/$object_two "$BUCKET_ONE_NAME" "$object_two" || local result_three=$?
put_object "$1" "$TEST_FILE_FOLDER"/$object_two "$BUCKET_ONE_NAME" "$object_two" || local result_three=$?
[[ $result_three -eq 0 ]] || fail "Error adding object two"

list_objects "$1" "$BUCKET_ONE_NAME"
@@ -272,7 +307,8 @@ test_common_set_get_delete_bucket_tags() {
local key="test_key"
local value="test_value"

setup_bucket "$1" "$BUCKET_ONE_NAME"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || fail "Error getting bucket tags first time"

@@ -294,7 +330,8 @@ test_common_set_get_delete_bucket_tags() {
[[ $tag_set_key == "$key" ]] || fail "Key mismatch"
[[ $tag_set_value == "$value" ]] || fail "Value mismatch"
fi
delete_bucket_tagging "$1" "$BUCKET_ONE_NAME"
run delete_bucket_tagging "$1" "$BUCKET_ONE_NAME"
assert_success

get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || fail "Error getting bucket tags third time"

@@ -312,9 +349,13 @@ test_common_set_get_object_tags() {
local key="test_key"
local value="test_value"

create_test_files "$bucket_file" || fail "error creating test files"
setup_bucket "$1" "$BUCKET_ONE_NAME"
put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "Failed to add object to bucket '$BUCKET_ONE_NAME'"
run create_test_files "$bucket_file"
assert_success

run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

put_object "$1" "$TEST_FILE_FOLDER"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "Failed to add object to bucket '$BUCKET_ONE_NAME'"

get_object_tagging "$1" "$BUCKET_ONE_NAME" $bucket_file || fail "Error getting object tags"
if [[ $1 == 'aws' ]]; then
@@ -350,25 +391,27 @@ test_common_presigned_url_utf8_chars() {
local bucket_file="my-$%^&*;"
local bucket_file_copy="bucket-file-copy"

create_test_files "$bucket_file" || local created=$?
dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1 || fail "error creating test file"
setup_bucket "$1" "$BUCKET_ONE_NAME"
[[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
run create_test_file "$bucket_file"
assert_success
dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1 || fail "error creating test file"

put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || put_result=$?
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

put_object "$1" "$TEST_FILE_FOLDER"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || put_result=$?
[[ $put_result -eq 0 ]] || fail "Failed to add object $bucket_file"

create_presigned_url "$1" "$BUCKET_ONE_NAME" "$bucket_file" || presigned_result=$?
[[ $presigned_result -eq 0 ]] || fail "presigned url creation failure"

error=$(curl -k -v "$presigned_url" -o "$test_file_folder"/"$bucket_file_copy") || curl_result=$?
error=$(curl -k -v "$presigned_url" -o "$TEST_FILE_FOLDER"/"$bucket_file_copy") || curl_result=$?
if [[ $curl_result -ne 0 ]]; then
fail "error downloading file with curl: $error"
fi
compare_files "$test_file_folder"/"$bucket_file" "$test_file_folder"/"$bucket_file_copy" || compare_result=$?
compare_files "$TEST_FILE_FOLDER"/"$bucket_file" "$TEST_FILE_FOLDER"/"$bucket_file_copy" || compare_result=$?
if [[ $compare_result -ne 0 ]]; then
echo "file one: $(cat "$test_file_folder"/"$bucket_file")"
echo "file two: $(cat "$test_file_folder"/"$bucket_file_copy")"
echo "file one: $(cat "$TEST_FILE_FOLDER"/"$bucket_file")"
echo "file two: $(cat "$TEST_FILE_FOLDER"/"$bucket_file_copy")"
fail "files don't match"
fi

@@ -381,11 +424,13 @@ test_common_list_objects_file_count() {
echo "list objects greater than 1000 missing command type"
return 1
fi
create_test_file_count 1001 || local create_result=$?
[[ $create_result -eq 0 ]] || fail "error creating test files"
setup_bucket "$1" "$BUCKET_ONE_NAME"
[[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
put_object_multiple "$1" "$test_file_folder/file_*" "$BUCKET_ONE_NAME" || local put_result=$?
run create_test_file_count 1001
assert_success

run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

put_object_multiple "$1" "$TEST_FILE_FOLDER/file_*" "$BUCKET_ONE_NAME" || local put_result=$?
[[ $put_result -eq 0 ]] || fail "Failed to copy files to bucket"
list_objects "$1" "$BUCKET_ONE_NAME"
if [[ $LOG_LEVEL -ge 5 ]]; then
@@ -403,11 +448,13 @@ test_common_delete_object_tagging() {
tag_key="key"
tag_value="value"

create_test_files "$bucket_file" || fail "Error creating test files"
run create_test_files "$bucket_file"
assert_success

setup_bucket "$1" "$BUCKET_ONE_NAME"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "Failed to add object to bucket"
put_object "$1" "$TEST_FILE_FOLDER"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "Failed to add object to bucket"

put_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || fail "failed to add tags to object"

@@ -422,8 +469,11 @@ test_common_delete_object_tagging() {
}

test_common_get_bucket_location() {
[[ $# -eq 1 ]] || fail "test common get bucket location missing command type"
setup_bucket "$1" "$BUCKET_ONE_NAME"
assert [ $# -eq 1 ]

run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

get_bucket_location "$1" "$BUCKET_ONE_NAME"
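# us-east-1 buckets report a null LocationConstraint, so either value below is acceptable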
# shellcheck disable=SC2154
[[ $bucket_location == "null" ]] || [[ $bucket_location == "us-east-1" ]] || fail "wrong location: '$bucket_location'"
@@ -434,7 +484,9 @@ test_put_bucket_acl_s3cmd() {
# https://github.com/versity/versitygw/issues/695
skip
fi
setup_bucket "s3cmd" "$BUCKET_ONE_NAME"
run setup_bucket "s3cmd" "$BUCKET_ONE_NAME"
assert_success

put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred" || fail "error putting bucket ownership controls"

username=$USERNAME_ONE
@@ -482,8 +534,11 @@ test_common_put_bucket_acl() {
# https://github.com/versity/versitygw/issues/716
skip
fi
[[ $# -eq 1 ]] || fail "test common put bucket acl missing command type"
setup_bucket "$1" "$BUCKET_ONE_NAME"
assert [ $# -eq 1 ]

run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred" || fail "error putting bucket ownership controls"

username=$USERNAME_ONE
@@ -508,7 +563,7 @@ test_common_put_bucket_acl() {
grantee="{\"ID\": \"$username\", \"Type\": \"CanonicalUser\"}"
fi

cat <<EOF > "$test_file_folder"/"$acl_file"
cat <<EOF > "$TEST_FILE_FOLDER"/"$acl_file"
{
"Grants": [
{
@@ -523,7 +578,7 @@ cat <<EOF > "$test_file_folder"/"$acl_file"
EOF

log 6 "before 1st put acl"
put_bucket_acl_s3api "$BUCKET_ONE_NAME" "$test_file_folder"/"$acl_file" || fail "error putting first acl"
put_bucket_acl_s3api "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER"/"$acl_file" || fail "error putting first acl"
get_bucket_acl "$1" "$BUCKET_ONE_NAME" || fail "error retrieving second ACL"

log 5 "Acls after 1st put: $acl"
@@ -531,7 +586,7 @@ EOF
permission=$(echo "$public_grants" | jq -r '.Permission' 2>&1) || fail "error getting permission: $permission"
[[ $permission == "READ" ]] || fail "incorrect permission ($permission)"

cat <<EOF > "$test_file_folder"/"$acl_file"
cat <<EOF > "$TEST_FILE_FOLDER"/"$acl_file"
{
"Grants": [
{
@@ -548,7 +603,7 @@ cat <<EOF > "$test_file_folder"/"$acl_file"
}
EOF

put_bucket_acl_s3api "$BUCKET_ONE_NAME" "$test_file_folder"/"$acl_file" || fail "error putting second acl"
put_bucket_acl_s3api "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER"/"$acl_file" || fail "error putting second acl"
get_bucket_acl "$1" "$BUCKET_ONE_NAME" || fail "error retrieving second ACL"

log 5 "Acls after 2nd put: $acl"
@@ -566,7 +621,8 @@ test_common_get_put_delete_bucket_policy() {

policy_file="policy_file"

create_test_files "$policy_file" || fail "error creating policy file"
run create_test_file "$policy_file"
assert_success

effect="Allow"
#principal="*"
@@ -578,7 +634,7 @@ test_common_get_put_delete_bucket_policy() {
action="s3:GetObject"
resource="arn:aws:s3:::$BUCKET_ONE_NAME/*"

cat <<EOF > "$test_file_folder"/$policy_file
cat <<EOF > "$TEST_FILE_FOLDER"/$policy_file
{
"Version": "2012-10-17",
"Statement": [
@@ -591,13 +647,14 @@ test_common_get_put_delete_bucket_policy() {
]
}
EOF
log 5 "POLICY: $(cat "$test_file_folder/$policy_file")"
log 5 "POLICY: $(cat "$TEST_FILE_FOLDER/$policy_file")"

setup_bucket "$1" "$BUCKET_ONE_NAME"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success

check_for_empty_policy "$1" "$BUCKET_ONE_NAME" || fail "policy not empty"

put_bucket_policy "$1" "$BUCKET_ONE_NAME" "$test_file_folder"/"$policy_file" || fail "error putting bucket policy"
put_bucket_policy "$1" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER"/"$policy_file" || fail "error putting bucket policy"

get_bucket_policy "$1" "$BUCKET_ONE_NAME" || fail "error getting bucket policy after setting"

@@ -629,11 +686,11 @@ EOF
test_common_ls_directory_object() {
test_file="a"

run create_test_files "$test_file"
assert_success "error creating file"
run create_test_file "$test_file" 0
assert_success

run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success "error setting up bucket"
assert_success

if [ "$1" == 's3cmd' ]; then
put_object_client="s3api"

@@ -16,7 +16,7 @@

source ./tests/test_common.sh
source ./tests/setup.sh
source ./tests/util_bucket_create.sh
source ./tests/util_create_bucket.sh
source ./tests/commands/delete_bucket_policy.sh
source ./tests/commands/get_bucket_policy.sh
source ./tests/commands/put_bucket_policy.sh
@@ -43,12 +43,17 @@ export RUN_MC=true
if [[ $RECREATE_BUCKETS == "false" ]]; then
skip "will not test bucket deletion in static bucket test config"
fi
setup_bucket "mc" "$BUCKET_ONE_NAME"
run setup_bucket "mc" "$BUCKET_ONE_NAME"
assert_success

delete_bucket "mc" "$BUCKET_ONE_NAME" || fail "error deleting bucket"
}

# delete-bucket-policy
@test "test_get_put_delete_bucket_policy" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_common_get_put_delete_bucket_policy "mc"
}

@@ -122,14 +127,18 @@ export RUN_MC=true
}

@test "test_get_bucket_info_mc" {
setup_bucket "mc" "$BUCKET_ONE_NAME"
run setup_bucket "mc" "$BUCKET_ONE_NAME"
assert_success

head_bucket "mc" "$BUCKET_ONE_NAME"
[[ $bucket_info == *"$BUCKET_ONE_NAME"* ]] || fail "failure to retrieve correct bucket info: $bucket_info"
delete_bucket_or_contents "mc" "$BUCKET_ONE_NAME"
}

@test "test_get_bucket_info_doesnt_exist_mc" {
setup_bucket "mc" "$BUCKET_ONE_NAME"
run setup_bucket "mc" "$BUCKET_ONE_NAME"
assert_success

head_bucket "mc" "$BUCKET_ONE_NAME"a || local info_result=$?
[[ $info_result -eq 1 ]] || fail "bucket info for non-existent bucket returned"
[[ $bucket_info == *"does not exist"* ]] || fail "404 not returned for non-existent bucket info"

33
tests/test_rest.sh
Executable file
@@ -0,0 +1,33 @@
#!/usr/bin/env bats

source ./tests/commands/list_buckets.sh
source ./tests/commands/put_object.sh
source ./tests/logger.sh
source ./tests/setup.sh
source ./tests/util.sh
source ./tests/util_rest.sh
source ./tests/util_list_buckets.sh
source ./tests/util_list_objects.sh

@test "test_rest_list_objects" {
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

test_file="test_file"
run create_test_files "$test_file"
assert_success

run put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
assert_success

run list_check_objects_rest "$BUCKET_ONE_NAME"
assert_success
}

@test "test_authorization_list_buckets" {
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

run list_check_buckets_rest
assert_success
}
@@ -59,7 +59,9 @@ source ./tests/util_file.sh
if [[ $RECREATE_BUCKETS == "false" ]]; then
skip "will not test bucket deletion in static bucket test config"
fi
setup_bucket "s3" "$BUCKET_ONE_NAME"
run setup_bucket "s3" "$BUCKET_ONE_NAME"
assert_success

delete_bucket "s3" "$BUCKET_ONE_NAME" || fail "error deleting bucket"
}

@@ -14,19 +14,17 @@

# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/setup.sh
source ./tests/util.sh
source ./tests/util_aws.sh
source ./tests/util_bucket_create.sh
source ./tests/util_create_bucket.sh
source ./tests/util_file.sh
source ./tests/util_lock_config.sh
source ./tests/util_multipart.sh
source ./tests/util_tags.sh
source ./tests/util_users.sh
source ./tests/test_aws_root_inner.sh
source ./tests/test_common.sh
source ./tests/test_s3api_policy.sh
source ./tests/commands/copy_object.sh
source ./tests/commands/delete_bucket_policy.sh
source ./tests/commands/delete_object_tagging.sh
@@ -90,6 +88,9 @@ export RUN_USERS=true

# delete-bucket-policy
@test "test_get_put_delete_bucket_policy" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_common_get_put_delete_bucket_policy "aws"
}

@@ -208,10 +209,13 @@ export RUN_USERS=true
abort_all_multipart_uploads "$BUCKET_ONE_NAME" || fail "error aborting all uploads"
fi

create_test_files "$bucket_file_one" "$bucket_file_two" || fail "error creating test files"
setup_bucket "aws" "$BUCKET_ONE_NAME"
run create_test_files "$bucket_file_one" "$bucket_file_two"
assert_success

create_and_list_multipart_uploads "$BUCKET_ONE_NAME" "$test_file_folder"/"$bucket_file_one" "$test_file_folder"/"$bucket_file_two" || fail "failed to list multipart uploads"
run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success

create_and_list_multipart_uploads "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER"/"$bucket_file_one" "$TEST_FILE_FOLDER"/"$bucket_file_two" || fail "failed to list multipart uploads"

local key_one
local key_two
@@ -222,8 +226,8 @@ export RUN_USERS=true
key_two=$(echo "$raw_uploads" | jq -r '.Uploads[1].Key' 2>&1) || fail "error getting key two: $key_two"
key_one=${key_one//\"/}
key_two=${key_two//\"/}
[[ "$test_file_folder/$bucket_file_one" == *"$key_one" ]] || fail "Key mismatch ($test_file_folder/$bucket_file_one, $key_one)"
[[ "$test_file_folder/$bucket_file_two" == *"$key_two" ]] || fail "Key mismatch ($test_file_folder/$bucket_file_two, $key_two)"
[[ "$TEST_FILE_FOLDER/$bucket_file_one" == *"$key_one" ]] || fail "Key mismatch ($TEST_FILE_FOLDER/$bucket_file_one, $key_one)"
[[ "$TEST_FILE_FOLDER/$bucket_file_two" == *"$key_two" ]] || fail "Key mismatch ($TEST_FILE_FOLDER/$bucket_file_two, $key_two)"

delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files "$bucket_file_one" "$bucket_file_two"
@@ -232,14 +236,17 @@ export RUN_USERS=true
@test "test-multipart-upload-from-bucket" {
local bucket_file="bucket-file"

create_test_files "$bucket_file" || fail "error creating test files"
dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1 || fail "error adding data to test file"
setup_bucket "aws" "$BUCKET_ONE_NAME"
run create_test_file "$bucket_file"
assert_success
dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1 || fail "error adding data to test file"

multipart_upload_from_bucket "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || fail "error performing multipart upload"
run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success

get_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file-copy" "$test_file_folder/$bucket_file-copy" || fail "error getting object"
compare_files "$test_file_folder"/$bucket_file-copy "$test_file_folder"/$bucket_file || fail "data doesn't match"
multipart_upload_from_bucket "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER"/"$bucket_file" 4 || fail "error performing multipart upload"

get_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file-copy" "$TEST_FILE_FOLDER/$bucket_file-copy" || fail "error getting object"
compare_files "$TEST_FILE_FOLDER"/$bucket_file-copy "$TEST_FILE_FOLDER"/$bucket_file || fail "data doesn't match"

delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files $bucket_file
@@ -247,11 +254,13 @@ export RUN_USERS=true

@test "test_multipart_upload_from_bucket_range_too_large" {
local bucket_file="bucket-file"
run create_large_file "$bucket_file"
assert_success

create_large_file "$bucket_file"
setup_bucket "aws" "$BUCKET_ONE_NAME"
run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success

multipart_upload_from_bucket_range "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 "bytes=0-1000000000" || local upload_result=$?
multipart_upload_from_bucket_range "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER"/"$bucket_file" 4 "bytes=0-1000000000" || local upload_result=$?
[[ $upload_result -eq 1 ]] || fail "multipart upload with overly large range should have failed"
log 5 "error: $upload_part_copy_error"
[[ $upload_part_copy_error == *"Range specified is not valid"* ]] || [[ $upload_part_copy_error == *"InvalidRange"* ]] || fail "unexpected error: $upload_part_copy_error"
@@ -262,18 +271,20 @@ export RUN_USERS=true

@test "test_multipart_upload_from_bucket_range_valid" {
local bucket_file="bucket-file"
run create_large_file "$bucket_file"
assert_success

create_large_file "$bucket_file"
setup_bucket "aws" "$BUCKET_ONE_NAME"
run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success

range_max=$((5*1024*1024-1))
multipart_upload_from_bucket_range "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 "bytes=0-$range_max" || fail "upload failure"
multipart_upload_from_bucket_range "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER"/"$bucket_file" 4 "bytes=0-$range_max" || fail "upload failure"

get_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file-copy" "$test_file_folder/$bucket_file-copy" || fail "error retrieving object after upload"
get_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file-copy" "$TEST_FILE_FOLDER/$bucket_file-copy" || fail "error retrieving object after upload"
if [[ $(uname) == 'Darwin' ]]; then
object_size=$(stat -f%z "$test_file_folder/$bucket_file-copy")
object_size=$(stat -f%z "$TEST_FILE_FOLDER/$bucket_file-copy")
else
object_size=$(stat --format=%s "$test_file_folder/$bucket_file-copy")
object_size=$(stat --format=%s "$TEST_FILE_FOLDER/$bucket_file-copy")
fi
|
||||
[[ object_size -eq $((range_max*4+4)) ]] || fail "object size mismatch ($object_size, $((range_max*4+4)))"
|
||||
|
||||
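Note: the expected size range_max*4+4 is just the four copied ranges added up; the inclusive range bytes=0-$range_max is range_max+1 bytes per part, so:

range_max=$((5*1024*1024-1))   # 5242879, the last byte index of a 5 MiB range
part_size=$((range_max+1))     # inclusive range 0..range_max is 5242880 bytes
echo $((part_size*4))          # 20971520 bytes across four parts
echo $((range_max*4+4))        # 20971520, the same total written the test's way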
@@ -288,12 +299,17 @@ export RUN_USERS=true
@test "test-list-objects-delimiter" {
folder_name="two"
object_name="three"
create_test_folder "$folder_name"
create_test_files "$folder_name"/"$object_name"

setup_bucket "aws" "$BUCKET_ONE_NAME"
run create_test_folder "$folder_name"
assert_success

put_object "aws" "$test_file_folder/$folder_name/$object_name" "$BUCKET_ONE_NAME" "$folder_name/$object_name" || fail "failed to add object to bucket"
run create_test_file "$folder_name"/"$object_name"
assert_success

run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success

put_object "aws" "$TEST_FILE_FOLDER/$folder_name/$object_name" "$BUCKET_ONE_NAME" "$folder_name/$object_name" || fail "failed to add object to bucket"

list_objects_s3api_v1 "$BUCKET_ONE_NAME" "/"
prefix=$(echo "${objects[@]}" | jq -r ".CommonPrefixes[0].Prefix" 2>&1) || fail "error getting object prefix from object list: $prefix"
@@ -307,62 +323,6 @@ export RUN_USERS=true
delete_test_files $folder_name
}
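Note: with a "/" delimiter, keys that share a prefix up to the delimiter collapse into CommonPrefixes instead of Contents; a sketch of the underlying v1 call the helper presumably wraps (helper internals are an assumption):

aws s3api list-objects --bucket "$BUCKET_ONE_NAME" --delimiter "/" \
  | jq -r '.CommonPrefixes[0].Prefix'
# with only the object "two/three" in the bucket, this prints: two/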

@test "test_put_policy_invalid_action" {
test_s3api_policy_invalid_action
}

@test "test_policy_get_object_with_user" {
test_s3api_policy_get_object_with_user
}

@test "test_policy_get_object_specific_file" {
test_s3api_policy_get_object_specific_file
}

@test "test_policy_get_object_file_wildcard" {
test_s3api_policy_get_object_file_wildcard
}

@test "test_policy_get_object_folder_wildcard" {
test_s3api_policy_get_object_folder_wildcard
}

@test "test_policy_allow_deny" {
test_s3api_policy_allow_deny
}

@test "test_policy_deny" {
test_s3api_policy_deny
}

@test "test_policy_put_wildcard" {
test_s3api_policy_put_wildcard
}

@test "test_policy_delete" {
test_s3api_policy_delete
}

@test "test_policy_get_bucket_policy" {
test_s3api_policy_get_bucket_policy
}

@test "test_policy_list_multipart_uploads" {
test_s3api_policy_list_multipart_uploads
}

@test "test_policy_put_bucket_policy" {
test_s3api_policy_put_bucket_policy
}

@test "test_policy_delete_bucket_policy" {
test_s3api_policy_delete_bucket_policy
}

@test "test_policy_get_bucket_acl" {
test_s3api_policy_get_bucket_acl
}

# ensure that lists of files greater than a size of 1000 (pagination) are returned properly
#@test "test_list_objects_file_count" {
# test_common_list_objects_file_count "aws"
@@ -383,12 +343,14 @@ export RUN_USERS=true
# setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
# [[ $setup_result -eq 0 ]] || fail "error setting up bucket"

# put_object "aws" "$test_file_folder"/"$file_name" "$BUCKET_ONE_NAME"/"$file_name" || local put_object=$?
# put_object "aws" "$TEST_FILE_FOLDER"/"$file_name" "$BUCKET_ONE_NAME"/"$file_name" || local put_object=$?
# [[ $put_object -eq 0 ]] || fail "Failed to add object to bucket"
#}
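Note: the commented-out pagination test targets listings larger than the 1000-key page limit; a hedged sketch of walking the pages manually with list-objects-v2 continuation tokens (variable names here are illustrative):

token=""
count=0
while :; do
  if [[ -n "$token" ]]; then
    page=$(aws s3api list-objects-v2 --bucket "$BUCKET_ONE_NAME" \
      --max-keys 1000 --continuation-token "$token")
  else
    page=$(aws s3api list-objects-v2 --bucket "$BUCKET_ONE_NAME" --max-keys 1000)
  fi
  count=$((count + $(echo "$page" | jq '.KeyCount')))
  token=$(echo "$page" | jq -r '.NextContinuationToken // empty')
  [[ -z "$token" ]] && break
done
echo "total keys listed: $count"   # should equal the number of objects uploaded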
@test "test_head_bucket" {
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME"
|
||||
run setup_bucket "aws" "$BUCKET_ONE_NAME"
|
||||
assert_success
|
||||
|
||||
head_bucket "aws" "$BUCKET_ONE_NAME" || fail "error getting bucket info"
|
||||
log 5 "INFO: $bucket_info"
|
||||
region=$(echo "$bucket_info" | grep -v "InsecureRequestWarning" | jq -r ".BucketRegion" 2>&1) || fail "error getting bucket region: $region"
|
||||
@@ -401,7 +363,9 @@ export RUN_USERS=true
|
||||
}
|
||||
|
||||
@test "test_head_bucket_doesnt_exist" {
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME"
|
||||
run setup_bucket "aws" "$BUCKET_ONE_NAME"
|
||||
assert_success
|
||||
|
||||
head_bucket "aws" "$BUCKET_ONE_NAME"a || local info_result=$?
|
||||
[[ $info_result -eq 1 ]] || fail "bucket info for non-existent bucket returned"
|
||||
[[ $bucket_info == *"404"* ]] || fail "404 not returned for non-existent bucket info"
|
||||
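Note: both head-bucket tests reduce to this CLI behavior (a sketch; exact output fields vary by AWS CLI version, though recent versions include BucketRegion):

aws s3api head-bucket --bucket "$BUCKET_ONE_NAME"      # exit 0; JSON with "BucketRegion"
aws s3api head-bucket --bucket "${BUCKET_ONE_NAME}a"   # non-zero exit; error mentions 404 / Not Found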
@@ -413,11 +377,13 @@ export RUN_USERS=true
test_key="x-test-data"
test_value="test-value"

create_test_files "$object_one" || fail "error creating test files"
run create_test_files "$object_one"
assert_success

setup_bucket "aws" "$BUCKET_ONE_NAME"
run setup_bucket "aws" "$BUCKET_ONE_NAME"
assert_success

object="$test_file_folder"/"$object_one"
object="$TEST_FILE_FOLDER"/"$object_one"
put_object_with_metadata "aws" "$object" "$BUCKET_ONE_NAME" "$object_one" "$test_key" "$test_value" || fail "failed to add object to bucket"
object_exists "aws" "$BUCKET_ONE_NAME" "$object_one" || fail "object not found after being added to bucket"

@@ -431,30 +397,6 @@ export RUN_USERS=true
delete_test_files "$object_one"
}
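Note: put_object_with_metadata presumably wraps put-object with user metadata; a sketch of the round trip (helper internals are an assumption):

aws s3api put-object --bucket "$BUCKET_ONE_NAME" --key "$object_one" \
  --body "$TEST_FILE_FOLDER/$object_one" --metadata "$test_key=$test_value"
aws s3api head-object --bucket "$BUCKET_ONE_NAME" --key "$object_one" \
  | jq -r '.Metadata'   # => { "x-test-data": "test-value" }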

@test "test_policy_abort_multipart_upload" {
test_s3api_policy_abort_multipart_upload
}

@test "test_policy_two_principals" {
test_s3api_policy_two_principals
}

@test "test_policy_put_bucket_tagging" {
test_s3api_policy_put_bucket_tagging
}

@test "test_policy_get_bucket_tagging" {
test_s3api_policy_get_bucket_tagging
}

@test "test_policy_list_upload_parts" {
test_s3api_policy_list_upload_parts
}

@test "test_policy_put_acl" {
test_s3api_policy_put_acl
}

@test "test_put_object_lock_configuration" {
bucket_name=$BUCKET_ONE_NAME
if [[ $RECREATE_BUCKETS == "true" ]]; then

456
tests/test_s3api_policy.sh
Normal file → Executable file
@@ -14,15 +14,167 @@
# specific language governing permissions and limitations
# under the License.

source ./tests/logger.sh
source ./tests/setup.sh
source ./tests/util_multipart.sh
source ./tests/util_file.sh
source ./tests/util_policy.sh
source ./tests/util_tags.sh
source ./tests/util_users.sh
source ./tests/commands/get_bucket_policy.sh
source ./tests/commands/get_bucket_tagging.sh
source ./tests/commands/get_object.sh
source ./tests/commands/put_bucket_policy.sh
source ./tests/commands/put_bucket_tagging.sh
source ./tests/commands/put_object.sh

export RUN_USERS=true

@test "test_put_policy_invalid_action" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_invalid_action
}

@test "test_policy_get_object_with_user" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_get_object_with_user
}

@test "test_policy_get_object_specific_file" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_get_object_specific_file
}

@test "test_policy_get_object_file_wildcard" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_get_object_file_wildcard
}

@test "test_policy_get_object_folder_wildcard" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_get_object_folder_wildcard
}

@test "test_policy_allow_deny" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_allow_deny
}

@test "test_policy_deny" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_deny
}

@test "test_policy_put_wildcard" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_put_wildcard
}

@test "test_policy_delete" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_delete
}

@test "test_policy_get_bucket_policy" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_get_bucket_policy
}

@test "test_policy_list_multipart_uploads" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_list_multipart_uploads
}

@test "test_policy_put_bucket_policy" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_put_bucket_policy
}

@test "test_policy_delete_bucket_policy" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_delete_bucket_policy
}

@test "test_policy_get_bucket_acl" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_get_bucket_acl
}

@test "test_policy_abort_multipart_upload" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_abort_multipart_upload
}

@test "test_policy_two_principals" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_two_principals
}

@test "test_policy_put_bucket_tagging" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_put_bucket_tagging
}

@test "test_policy_get_bucket_tagging" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_get_bucket_tagging
}

@test "test_policy_list_upload_parts" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_list_upload_parts
}

@test "test_policy_put_acl" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi
test_s3api_policy_put_acl
}
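Note: every test in this file now gates on SKIP_POLICY via [[ -n $SKIP_POLICY ]], so a backend without bucket-policy support can skip the whole suite by exporting any non-empty value:

bats ./tests/test_s3api_policy.sh                    # run the policy suite normally
SKIP_POLICY=true bats ./tests/test_s3api_policy.sh   # skip every policy test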
test_s3api_policy_invalid_action() {
policy_file="policy_file"

create_test_files "$policy_file" || fail "error creating policy file"
run create_test_file "$policy_file"
assert_success

effect="Allow"
principal="*"
@@ -30,13 +182,14 @@ test_s3api_policy_invalid_action() {
resource="arn:aws:s3:::$BUCKET_ONE_NAME/*"

# shellcheck disable=SC2154
setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

check_for_empty_policy "s3api" "$BUCKET_ONE_NAME" || fail "policy not empty"

if put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file"; then
if put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file"; then
fail "put succeeded despite malformed policy"
fi
# shellcheck disable=SC2154
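Note: setup_policy_with_single_statement takes a version, effect, principal, action and resource; assuming it simply serializes them into a standard bucket-policy document (an assumption about the helper), the file it writes should look roughly like:

{
  "Version": "dummy",
  "Statement": [
    { "Effect": "$effect", "Principal": "$principal", "Action": "$action", "Resource": "$resource" }
  ]
}
# with the bogus version/action filled in, PutBucketPolicy must reject the
# document, which is what the failing put_bucket_policy above asserts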
@@ -52,18 +205,20 @@ test_s3api_policy_get_object_with_user() {
test_file="test_file"

log 5 "username: $USERNAME_ONE, password: $PASSWORD_ONE"
create_test_files "$test_file" "$policy_file" || fail "error creating policy file"
echo "$BATS_TEST_NAME" >> "$test_file_folder/$test_file"
run create_test_files "$test_file" "$policy_file"
assert_success

effect="Allow"
principal="$username"
action="s3:GetObject"
resource="arn:aws:s3:::$BUCKET_ONE_NAME/$test_file"

setup_policy_with_single_statement "$test_file_folder/$policy_file" "2012-10-17" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "2012-10-17" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
put_object "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "error copying object"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "error copying object"

if ! check_for_empty_policy "s3api" "$BUCKET_ONE_NAME"; then
delete_bucket_policy "s3api" "$BUCKET_ONE_NAME" || fail "error deleting policy"
@@ -71,15 +226,16 @@ test_s3api_policy_get_object_with_user()
fi

setup_user "$username" "$password" "user" || fail "error creating user"
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password"; then
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password"; then
fail "get object with user succeeded despite lack of permissions"
fi
# shellcheck disable=SC2154
[[ "$get_object_error" == *"Access Denied"* ]] || fail "invalid get object error: $get_object_error"

put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password" || fail "error getting object after permissions"
compare_files "$test_file_folder/$test_file" "$test_file_folder/$test_file-copy" || fail "files not equal"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
run download_and_compare_file_with_user "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password"
assert_success

delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
}
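Note: get_object_with_user presumably issues the same s3api call under the named user's credentials; one common way to do that, assuming the test user's access key and secret map to the username and password (an assumption about the helper):

AWS_ACCESS_KEY_ID="$username" AWS_SECRET_ACCESS_KEY="$password" \
  aws s3api get-object --bucket "$BUCKET_ONE_NAME" --key "$test_file" \
  "$TEST_FILE_FOLDER/$test_file-copy"
# without a matching Allow statement this exits non-zero with Access Denied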

@@ -90,9 +246,8 @@ test_s3api_policy_get_object_specific_file() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$policy_file" "$test_file" "$test_file_two" || fail "error creating policy file"
echo "$BATS_TEST_NAME" >> "$test_file_folder/$test_file"
echo "$BATS_TEST_NAME-2" >> "$test_file_folder/$test_file_two"
run create_test_files "$policy_file" "$test_file" "$test_file_two"
assert_success

effect="Allow"
principal="$username"
@@ -101,15 +256,19 @@ test_s3api_policy_get_object_specific_file() {

setup_user "$username" "$password" "user" || fail "error creating user"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

put_object "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "error copying object"
put_object "s3api" "$test_file_folder/$test_file_two" "$BUCKET_ONE_NAME" "$test_file_two" || fail "error copying object"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"

get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password" || fail "error getting object after permissions"
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file_two" "$test_file_folder/$test_file_two-copy" "$username" "$password"; then
put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "error copying object"
put_object "s3api" "$TEST_FILE_FOLDER/$test_file_two" "$BUCKET_ONE_NAME" "$test_file_two" || fail "error copying object"

run download_and_compare_file_with_user "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password"
assert_success

if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file_two" "$TEST_FILE_FOLDER/$test_file_two-copy" "$username" "$password"; then
fail "get object with user succeeded despite lack of permissions"
fi
# shellcheck disable=SC2154
@@ -124,8 +283,8 @@ test_s3api_policy_get_object_file_wildcard() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$policy_file" "$policy_file_two" "$policy_file_three" || fail "error creating policy file"
echo "$BATS_TEST_NAME" >> "$test_file_folder/$policy_file"
run create_test_files "$policy_file" "$policy_file_two" "$policy_file_three"
assert_success

effect="Allow"
principal="$username"
@@ -134,20 +293,27 @@ test_s3api_policy_get_object_file_wildcard() {

setup_user "$username" "$password" "user" || fail "error creating user account"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

put_object "s3api" "$test_file_folder/$policy_file" "$BUCKET_ONE_NAME" "$policy_file" || fail "error copying object one"
put_object "s3api" "$test_file_folder/$policy_file_two" "$BUCKET_ONE_NAME" "$policy_file_two" || fail "error copying object two"
put_object "s3api" "$test_file_folder/$policy_file_three" "$BUCKET_ONE_NAME" "$policy_file_three" || fail "error copying object three"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"

get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$policy_file" "$test_file_folder/$policy_file" "$username" "$password" || fail "error getting object one after permissions"
get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$policy_file_two" "$test_file_folder/$policy_file_two" "$username" "$password" || fail "error getting object two after permissions"
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$policy_file_three" "$test_file_folder/$policy_file_three" "$username" "$password"; then
put_object "s3api" "$TEST_FILE_FOLDER/$policy_file" "$BUCKET_ONE_NAME" "$policy_file" || fail "error copying object one"
put_object "s3api" "$TEST_FILE_FOLDER/$policy_file_two" "$BUCKET_ONE_NAME" "$policy_file_two" || fail "error copying object two"
put_object "s3api" "$TEST_FILE_FOLDER/$policy_file_three" "$BUCKET_ONE_NAME" "$policy_file_three" || fail "error copying object three"

run download_and_compare_file_with_user "s3api" "$TEST_FILE_FOLDER/$policy_file" "$BUCKET_ONE_NAME" "$policy_file" "$TEST_FILE_FOLDER/$policy_file-copy" "$username" "$password"
assert_success

run download_and_compare_file_with_user "s3api" "$TEST_FILE_FOLDER/$policy_file_two" "$BUCKET_ONE_NAME" "$policy_file_two" "$TEST_FILE_FOLDER/$policy_file_two-copy" "$username" "$password"
assert_success

if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$policy_file_three" "$TEST_FILE_FOLDER/$policy_file_three" "$username" "$password"; then
fail "get object three with user succeeded despite lack of permissions"
fi
[[ "$get_object_error" == *"Access Denied"* ]] || fail "invalid get object error: $get_object_error"

delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
}

@@ -158,9 +324,11 @@ test_s3api_policy_get_object_folder_wildcard() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_folder "$test_folder" || fail "error creating test folder"
create_test_files "$test_folder/$test_file" "$policy_file" || fail "error creating policy file, test file"
echo "$BATS_TEST_NAME" >> "$test_file_folder/$test_folder/$test_file"
run create_test_folder "$test_folder"
assert_success

run create_test_files "$test_folder/$test_file" "$policy_file"
assert_success

effect="Allow"
principal="$username"
@@ -170,12 +338,12 @@ test_s3api_policy_get_object_folder_wildcard() {
setup_user "$username" "$password" "user" || fail "error creating user"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"

put_object "s3api" "$test_file_folder/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" || fail "error copying object to bucket"
put_object "s3api" "$TEST_FILE_FOLDER/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" || fail "error copying object to bucket"

download_and_compare_file_with_user "s3api" "$test_file_folder/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" "$test_file_folder/$test_file-copy" "$username" "$password" || fail "error downloading and comparing file"
download_and_compare_file_with_user "s3api" "$TEST_FILE_FOLDER/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password" || fail "error downloading and comparing file"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files "$test_folder/$test_file" "$policy_file"
}
@@ -186,18 +354,22 @@ test_s3api_policy_allow_deny() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$policy_file" "$test_file" || fail "error creating policy file"
setup_user "$username" "$password" "user" || fail "error creating user"
setup_bucket "s3api" "$BUCKET_ONE_NAME"
run create_test_files "$policy_file" "$test_file"
assert_success

setup_policy_with_double_statement "$test_file_folder/$policy_file" "dummy" \
setup_user "$username" "$password" "user" || fail "error creating user"

run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

setup_policy_with_double_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" \
"Deny" "$username" "s3:GetObject" "arn:aws:s3:::$BUCKET_ONE_NAME/$test_file" \
"Allow" "$username" "s3:GetObject" "arn:aws:s3:::$BUCKET_ONE_NAME/$test_file"

put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
put_object "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "error copying object to bucket"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "error copying object to bucket"

if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password"; then
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password"; then
fail "able to get object despite deny statement"
fi
[[ "$get_object_error" == *"Access Denied"* ]] || fail "invalid get object error: $get_object_error"
@@ -213,20 +385,24 @@ test_s3api_policy_deny() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$test_file_one" "$test_file_two" "$policy_file" || fail "error creating policy file, test file"
setup_user "$username" "$password" "user" || fail "error creating user"
setup_bucket "s3api" "$BUCKET_ONE_NAME"
run create_test_files "$test_file_one" "$test_file_two" "$policy_file"
assert_success

setup_policy_with_double_statement "$test_file_folder/$policy_file" "dummy" \
setup_user "$username" "$password" "user" || fail "error creating user"

run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

setup_policy_with_double_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" \
"Deny" "$username" "s3:GetObject" "arn:aws:s3:::$BUCKET_ONE_NAME/$test_file_two" \
"Allow" "$username" "s3:GetObject" "arn:aws:s3:::$BUCKET_ONE_NAME/*"

log 5 "Policy: $(cat "$test_file_folder/$policy_file")"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
put_object "s3api" "$test_file_folder/$test_file_one" "$BUCKET_ONE_NAME" "$test_file_one" || fail "error copying object one"
put_object "s3api" "$test_file_folder/$test_file_one" "$BUCKET_ONE_NAME" "$test_file_two" || fail "error copying object two"
get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file_one" "$test_file_folder/$test_file_one-copy" "$username" "$password" || fail "error getting object"
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file_two" "$test_file_folder/$test_file_two-copy" "$username" "$password"; then
log 5 "Policy: $(cat "$TEST_FILE_FOLDER/$policy_file")"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
put_object "s3api" "$TEST_FILE_FOLDER/$test_file_one" "$BUCKET_ONE_NAME" "$test_file_one" || fail "error copying object one"
put_object "s3api" "$TEST_FILE_FOLDER/$test_file_one" "$BUCKET_ONE_NAME" "$test_file_two" || fail "error copying object two"
get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file_one" "$TEST_FILE_FOLDER/$test_file_one-copy" "$username" "$password" || fail "error getting object"
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file_two" "$TEST_FILE_FOLDER/$test_file_two-copy" "$username" "$password"; then
fail "able to get object despite deny statement"
fi
[[ "$get_object_error" == *"Access Denied"* ]] || fail "invalid get object error: $get_object_error"
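Note: both deny tests lean on the standard S3/IAM evaluation rule that an explicit Deny always wins over an Allow, regardless of statement order. Assuming the double-statement helper serializes its arguments into a standard two-statement policy, the allow_deny document reduces to:

{
  "Version": "dummy",
  "Statement": [
    { "Effect": "Deny",  "Principal": "$username", "Action": "s3:GetObject", "Resource": "arn:aws:s3:::$BUCKET_ONE_NAME/$test_file" },
    { "Effect": "Allow", "Principal": "$username", "Action": "s3:GetObject", "Resource": "arn:aws:s3:::$BUCKET_ONE_NAME/$test_file" }
  ]
}
# the Deny takes precedence, so get_object_with_user must fail with Access Denied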
@@ -241,9 +417,11 @@ test_s3api_policy_put_wildcard() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_folder "$test_folder" || fail "error creating test folder"
create_test_files "$test_folder/$test_file" "$policy_file" || fail "error creating policy file, test file"
echo "$BATS_TEST_NAME" >> "$test_file_folder/$test_folder/$test_file"
run create_test_folder "$test_folder"
assert_success

run create_test_files "$test_folder/$test_file" "$policy_file"
assert_success

effect="Allow"
principal="$username"
@@ -253,20 +431,20 @@ test_s3api_policy_put_wildcard() {
setup_user "$username" "$password" "user" || fail "error creating user"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
log 5 "Policy: $(cat "$test_file_folder/$policy_file")"
setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
if put_object_with_user "s3api" "$test_file_folder/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password"; then
log 5 "Policy: $(cat "$TEST_FILE_FOLDER/$policy_file")"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
if put_object_with_user "s3api" "$TEST_FILE_FOLDER/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password"; then
fail "able to put object despite not being allowed"
fi
# shellcheck disable=SC2154
[[ "$put_object_error" == *"Access Denied"* ]] || fail "invalid put object error: $put_object_error"
put_object_with_user "s3api" "$test_file_folder/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" "$username" "$password" || fail "error putting file despite policy permissions"
put_object_with_user "s3api" "$TEST_FILE_FOLDER/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" "$username" "$password" || fail "error putting file despite policy permissions"
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_folder/$test_file" "$test_folder/$test_file-copy" "$username" "$password"; then
fail "able to get object without permissions"
fi
[[ "$get_object_error" == *"Access Denied"* ]] || fail "invalid get object error: $get_object_error"
download_and_compare_file "s3api" "$test_file_folder/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" "$test_file_folder/$test_file-copy" || fail "files don't match"
download_and_compare_file "s3api" "$TEST_FILE_FOLDER/$test_folder/$test_file" "$BUCKET_ONE_NAME" "$test_folder/$test_file" "$TEST_FILE_FOLDER/$test_file-copy" || fail "files don't match"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files "$test_folder/$test_file" "$test_file-copy" "$policy_file"
}
@@ -278,9 +456,8 @@ test_s3api_policy_delete() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$test_file_one" "$test_file_two" "$policy_file" || fail "error creating policy file, test files"
echo "$BATS_TEST_NAME" >> "$test_file_folder/$test_file_one"
echo "$BATS_TEST_NAME" >> "$test_file_folder/$test_file_two"
run create_test_files "$test_file_one" "$test_file_two" "$policy_file"
assert_success

effect="Allow"
principal="$username"
@@ -289,13 +466,15 @@ test_s3api_policy_delete() {

setup_user "$username" "$password" "user" || fail "error creating user"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
log 5 "Policy: $(cat "$test_file_folder/$policy_file")"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

put_object "s3api" "$test_file_folder/$test_file_one" "$BUCKET_ONE_NAME" "$test_file_one" || fail "error copying object one"
put_object "s3api" "$test_file_folder/$test_file_two" "$BUCKET_ONE_NAME" "$test_file_two" || fail "error copying object two"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
log 5 "Policy: $(cat "$TEST_FILE_FOLDER/$policy_file")"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"

put_object "s3api" "$TEST_FILE_FOLDER/$test_file_one" "$BUCKET_ONE_NAME" "$test_file_one" || fail "error copying object one"
put_object "s3api" "$TEST_FILE_FOLDER/$test_file_two" "$BUCKET_ONE_NAME" "$test_file_two" || fail "error copying object two"
if delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file_one" "$username" "$password"; then
fail "able to delete object despite lack of permissions"
fi
@@ -311,7 +490,8 @@ test_s3api_policy_get_bucket_policy() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$policy_file" || fail "error creating policy file, test files"
run create_test_file "$policy_file"
assert_success

effect="Allow"
principal="$username"
@@ -320,19 +500,21 @@ test_s3api_policy_get_bucket_policy() {

setup_user "$username" "$password" "user" || fail "error creating user"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
if get_bucket_policy_with_user "$BUCKET_ONE_NAME" "$username" "$password"; then
fail "able to retrieve bucket policy despite lack of permissions"
fi

put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
get_bucket_policy_with_user "$BUCKET_ONE_NAME" "$username" "$password" || fail "error getting bucket policy despite permissions"
# shellcheck disable=SC2154
echo "$bucket_policy" > "$test_file_folder/$policy_file-copy"
log 5 "ORIG: $(cat "$test_file_folder/$policy_file")"
log 5 "COPY: $(cat "$test_file_folder/$policy_file-copy")"
compare_files "$test_file_folder/$policy_file" "$test_file_folder/$policy_file-copy" || fail "policies not equal"
echo "$bucket_policy" > "$TEST_FILE_FOLDER/$policy_file-copy"
log 5 "ORIG: $(cat "$TEST_FILE_FOLDER/$policy_file")"
log 5 "COPY: $(cat "$TEST_FILE_FOLDER/$policy_file-copy")"
compare_files "$TEST_FILE_FOLDER/$policy_file" "$TEST_FILE_FOLDER/$policy_file-copy" || fail "policies not equal"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file" "$policy_file-copy"
}
@@ -343,8 +525,11 @@ test_s3api_policy_list_multipart_uploads() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$policy_file" || fail "error creating policy file, test files"
create_large_file "$test_file"
run create_test_file "$policy_file"
assert_success

run create_large_file "$test_file"
assert_success

effect="Allow"
principal="$username"
@@ -352,13 +537,15 @@ test_s3api_policy_list_multipart_uploads() {
resource="arn:aws:s3:::$BUCKET_ONE_NAME"
setup_user "$username" "$password" "user" || fail "error creating user"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

get_bucket_policy "s3api" "$BUCKET_ONE_NAME" || fail "error getting bucket policy"
log 5 "BUCKET POLICY: $bucket_policy"
get_bucket_acl "s3api" "$BUCKET_ONE_NAME" || fail "error getting bucket ACL"
# shellcheck disable=SC2154
log 5 "ACL: $acl"
run setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource"
run setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource"
assert_success "failed to set up policy"
run create_multipart_upload "$BUCKET_ONE_NAME" "$test_file"
assert_success "failed to create multipart upload"
@@ -367,7 +554,7 @@ test_s3api_policy_list_multipart_uploads() {
fi
# shellcheck disable=SC2154
[[ "$list_multipart_uploads_error" == *"Access Denied"* ]] || fail "invalid list multipart uploads error: $list_multipart_uploads_error"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
list_multipart_uploads_with_user "$BUCKET_ONE_NAME" "$username" "$password" || fail "error listing multipart uploads"
# shellcheck disable=SC2154
log 5 "$uploads"
@@ -383,7 +570,8 @@ test_s3api_policy_put_bucket_policy() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$policy_file" || fail "error creating policy file, test files"
run create_test_file "$policy_file" 0
assert_success

effect="Allow"
principal="$username"
@@ -392,21 +580,23 @@ test_s3api_policy_put_bucket_policy() {

setup_user "$username" "$password" "user" || fail "error creating user"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
if put_bucket_policy_with_user "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" "$username" "$password"; then
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
if put_bucket_policy_with_user "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" "$username" "$password"; then
fail "able to retrieve bucket policy despite lack of permissions"
fi

put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
setup_policy_with_single_statement "$test_file_folder/$policy_file_two" "dummy" "$effect" "$principal" "s3:GetBucketPolicy" "$resource" || fail "failed to set up policy"
put_bucket_policy_with_user "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file_two" "$username" "$password" || fail "error putting bucket policy despite permissions"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file_two" "dummy" "$effect" "$principal" "s3:GetBucketPolicy" "$resource" || fail "failed to set up policy"
put_bucket_policy_with_user "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file_two" "$username" "$password" || fail "error putting bucket policy despite permissions"
get_bucket_policy_with_user "$BUCKET_ONE_NAME" "$username" "$password" || fail "error getting bucket policy despite permissions"
# shellcheck disable=SC2154
echo "$bucket_policy" > "$test_file_folder/$policy_file-copy"
log 5 "ORIG: $(cat "$test_file_folder/$policy_file_two")"
log 5 "COPY: $(cat "$test_file_folder/$policy_file-copy")"
compare_files "$test_file_folder/$policy_file_two" "$test_file_folder/$policy_file-copy" || fail "policies not equal"
echo "$bucket_policy" > "$TEST_FILE_FOLDER/$policy_file-copy"
log 5 "ORIG: $(cat "$TEST_FILE_FOLDER/$policy_file_two")"
log 5 "COPY: $(cat "$TEST_FILE_FOLDER/$policy_file-copy")"
compare_files "$TEST_FILE_FOLDER/$policy_file_two" "$TEST_FILE_FOLDER/$policy_file-copy" || fail "policies not equal"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file" "$policy_file_two" "$policy_file-copy"
}
@@ -416,7 +606,8 @@ test_s3api_policy_delete_bucket_policy() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$policy_file" || fail "error creating policy file, test files"
run create_test_file "$policy_file" 0
assert_success

effect="Allow"
principal="$username"
@@ -425,12 +616,14 @@ test_s3api_policy_delete_bucket_policy() {

setup_user "$username" "$password" "user" || fail "error creating user"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

if delete_bucket_policy_with_user "$BUCKET_ONE_NAME" "$username" "$password"; then
fail "able to delete bucket policy with user $username without right permissions"
fi
setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
delete_bucket_policy_with_user "$BUCKET_ONE_NAME" "$username" "$password" || fail "unable to delete bucket policy"
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
delete_test_files "$policy_file"
@@ -441,7 +634,8 @@ test_s3api_policy_get_bucket_acl() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$policy_file" || fail "error creating policy file, test files"
run create_test_file "$policy_file" 0
assert_success

effect="Allow"
principal="$username"
@@ -450,12 +644,14 @@ test_s3api_policy_get_bucket_acl() {

setup_user "$username" "$password" "user" || fail "error creating user"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

if get_bucket_acl_with_user "$BUCKET_ONE_NAME" "$username" "$password"; then
fail "user able to get bucket ACLs despite permissions"
fi
setup_policy_with_single_statement "$test_file_folder/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource" || fail "failed to set up policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
get_bucket_acl_with_user "$BUCKET_ONE_NAME" "$username" "$password" || fail "error getting bucket ACL despite permissions"
}

@@ -464,9 +660,15 @@ test_s3api_policy_abort_multipart_upload() {
test_file="test_file"
username=$USERNAME_ONE

create_test_files "$policy_file" || fail "error creating policy file"
create_large_file "$test_file"
setup_bucket "s3api" "$BUCKET_ONE_NAME"
run create_test_file "$policy_file"
assert_success

run create_large_file "$test_file"
assert_success

run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

if [[ $DIRECT == "true" ]]; then
setup_user_direct "$username" "user" "$BUCKET_ONE_NAME" || fail "error setting up direct user $username"
principal="{\"AWS\": \"arn:aws:iam::$DIRECT_AWS_USER_ID:user/$username\"}"
@@ -480,10 +682,10 @@ test_s3api_policy_abort_multipart_upload() {
principal="\"$username\""
fi

setup_policy_with_double_statement "$test_file_folder/$policy_file" "2012-10-17" \
setup_policy_with_double_statement "$TEST_FILE_FOLDER/$policy_file" "2012-10-17" \
"Allow" "$principal" "s3:PutObject" "arn:aws:s3:::$BUCKET_ONE_NAME/*" \
"Deny" "$principal" "s3:AbortMultipartUpload" "arn:aws:s3:::$BUCKET_ONE_NAME/*"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting first policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting first policy"

create_multipart_upload_with_user "$BUCKET_ONE_NAME" "$test_file" "$username" "$password" || fail "error creating multipart upload"
# shellcheck disable=SC2154
@@ -493,9 +695,9 @@ test_s3api_policy_abort_multipart_upload() {
# shellcheck disable=SC2154
[[ "$abort_multipart_upload_error" == *"AccessDenied"* ]] || fail "unexpected abort error: $abort_multipart_upload_error"

setup_policy_with_single_statement "$test_file_folder/$policy_file" "2012-10-17" "Allow" "$principal" "s3:AbortMultipartUpload" "arn:aws:s3:::$BUCKET_ONE_NAME/*"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "2012-10-17" "Allow" "$principal" "s3:AbortMultipartUpload" "arn:aws:s3:::$BUCKET_ONE_NAME/*"

put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"
abort_multipart_upload_with_user "$BUCKET_ONE_NAME" "$test_file" "$upload_id" "$username" "$password" || fail "error aborting multipart upload despite permissions"

delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
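Note: the abort path itself maps onto the s3api abort/list commands; a sketch under the test user's credentials, assuming the user's access key and secret map to the username and password (an assumption about the helper):

# denied under the first policy, succeeds once AbortMultipartUpload is re-allowed
AWS_ACCESS_KEY_ID="$username" AWS_SECRET_ACCESS_KEY="$password" \
  aws s3api abort-multipart-upload --bucket "$BUCKET_ONE_NAME" \
  --key "$test_file" --upload-id "$upload_id"
aws s3api list-multipart-uploads --bucket "$BUCKET_ONE_NAME"   # should show no leftover upload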
@@ -549,11 +751,11 @@ test_s3api_policy_put_bucket_tagging() {
run setup_user "$USERNAME_ONE" "$PASSWORD_ONE" "user"
assert_success "error setting up user"

run setup_policy_with_single_statement "$test_file_folder/$policy_file" "2012-10-17" "Allow" "$USERNAME_ONE" "s3:PutBucketTagging" "arn:aws:s3:::$BUCKET_ONE_NAME"
run setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "2012-10-17" "Allow" "$USERNAME_ONE" "s3:PutBucketTagging" "arn:aws:s3:::$BUCKET_ONE_NAME"
assert_success "error setting up policy"
run put_bucket_tagging_with_user "$BUCKET_ONE_NAME" "$tag_key" "$tag_value" "$USERNAME_ONE" "$PASSWORD_ONE"
assert_failure
run put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file"
run put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file"
assert_success "error putting policy"
run put_bucket_tagging_with_user "$BUCKET_ONE_NAME" "$tag_key" "$tag_value" "$USERNAME_ONE" "$PASSWORD_ONE"
assert_success "unable to put bucket tagging despite user permissions"
@@ -569,20 +771,23 @@ test_s3api_policy_put_acl() {
username=$USERNAME_ONE
password=$PASSWORD_ONE

create_test_files "$policy_file" || fail "error creating policy file"
create_large_file "$test_file"
setup_bucket "s3api" "$BUCKET_ONE_NAME"
run create_test_file "$policy_file" 0
assert_success
run create_large_file "$test_file"
assert_success
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred" || fail "error putting bucket ownership controls"

setup_user "$username" "$password" "user" || fail "error setting up user $username"

setup_policy_with_single_statement "$test_file_folder/$policy_file" "2012-10-17" "Allow" "$username" "s3:PutBucketAcl" "arn:aws:s3:::$BUCKET_ONE_NAME"
setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "2012-10-17" "Allow" "$username" "s3:PutBucketAcl" "arn:aws:s3:::$BUCKET_ONE_NAME"
if [[ $DIRECT == "true" ]]; then
put_public_access_block_enable_public_acls "$BUCKET_ONE_NAME" || fail "error enabling public ACLs"
fi

put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting policy"
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file" || fail "error putting policy"

put_bucket_canned_acl_with_user "$BUCKET_ONE_NAME" "public-read" "$username" "$password" || fail "error putting canned acl"
get_bucket_acl "s3api" "$BUCKET_ONE_NAME" || fail "error getting bucket acl"
@@ -613,12 +818,13 @@ test_s3api_policy_get_bucket_tagging() {
run create_test_files "$policy_file"
assert_success "error creating test files"

setup_bucket "s3api" "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success

run setup_user "$USERNAME_ONE" "$PASSWORD_ONE" "user"
assert_success "error creating user '$USERNAME_ONE'"

run setup_policy_with_single_statement "$test_file_folder/$policy_file" "2012-10-17" "Allow" "$USERNAME_ONE" "s3:GetBucketTagging" "arn:aws:s3:::$BUCKET_ONE_NAME"
run setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "2012-10-17" "Allow" "$USERNAME_ONE" "s3:GetBucketTagging" "arn:aws:s3:::$BUCKET_ONE_NAME"
assert_success "error setting up policy"

run put_bucket_tagging "s3api" "$BUCKET_ONE_NAME" "$tag_key" "$tag_value"
@@ -627,7 +833,7 @@ test_s3api_policy_get_bucket_tagging() {
run get_bucket_tagging_with_user "$USERNAME_ONE" "$PASSWORD_ONE" "$BUCKET_ONE_NAME"
assert_failure

run put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file"
run put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file"
assert_success "error putting policy"
run get_and_check_bucket_tags_with_user "$USERNAME_ONE" "$PASSWORD_ONE" "$BUCKET_ONE_NAME" "$tag_key" "$tag_value"
assert_success "get and check bucket tags failed"
@@ -17,7 +17,7 @@
source ./tests/setup.sh
source ./tests/test_common.sh
source ./tests/util.sh
source ./tests/util_bucket_create.sh
source ./tests/util_create_bucket.sh
source ./tests/util_users.sh
source ./tests/commands/delete_bucket_policy.sh
source ./tests/commands/get_bucket_policy.sh
@@ -58,6 +58,10 @@ export RUN_USERS=true

# delete-bucket-policy
@test "test_get_put_delete_bucket_policy" {
if [[ -n $SKIP_POLICY ]]; then
skip "will not test policy actions with SKIP_POLICY set"
fi

test_common_get_put_delete_bucket_policy "s3cmd"
}

@@ -109,14 +113,18 @@ export RUN_USERS=true
}

@test "test_get_bucket_info_s3cmd" {
setup_bucket "s3cmd" "$BUCKET_ONE_NAME"
run setup_bucket "s3cmd" "$BUCKET_ONE_NAME"
assert_success

head_bucket "s3cmd" "$BUCKET_ONE_NAME"
[[ $bucket_info == *"s3://$BUCKET_ONE_NAME"* ]] || fail "failure to retrieve correct bucket info: $bucket_info"
delete_bucket_or_contents "s3cmd" "$BUCKET_ONE_NAME"
}

@test "test_get_bucket_info_doesnt_exist_s3cmd" {
setup_bucket "s3cmd" "$BUCKET_ONE_NAME"
run setup_bucket "s3cmd" "$BUCKET_ONE_NAME"
assert_success

head_bucket "s3cmd" "$BUCKET_ONE_NAME"a || local info_result=$?
[[ $info_result -eq 1 ]] || fail "bucket info for non-existent bucket returned"
[[ $bucket_info == *"404"* ]] || fail "404 not returned for non-existent bucket info"

@@ -49,15 +49,19 @@ export RUN_USERS=true
  test_file="test_file"

  setup_user "$username" "$password" "user" || fail "error creating user if nonexistent"
  create_test_files "$test_file" || fail "error creating test files"

  setup_bucket "s3api" "$BUCKET_ONE_NAME"
  if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password"; then
  run create_test_file "$test_file"
  assert_success

  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password"; then
    fail "able to get object despite not being bucket owner"
  fi
  change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$username" || fail "error changing bucket ownership"
  put_object "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "failed to add object to bucket"
  get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password" || fail "error getting object"
  put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "failed to add object to bucket"
  get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password" || fail "error getting object"
}

@test "test_userplus_get_object" {
@@ -66,15 +70,19 @@ export RUN_USERS=true
  test_file="test_file"

  setup_user "$username" "$password" "admin" || fail "error creating user if nonexistent"
  create_test_files "$test_file" || fail "error creating test files"

  setup_bucket "s3api" "$BUCKET_ONE_NAME"
  if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password"; then
  run create_test_file "$test_file"
  assert_success

  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password"; then
    fail "able to get object despite not being bucket owner"
  fi
  change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$username" || fail "error changing bucket ownership"
  put_object "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "failed to add object to bucket"
  get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password" || fail "error getting object"
  put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "failed to add object to bucket"
  get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password" || fail "error getting object"
}

@test "test_user_delete_object" {
@@ -83,14 +91,18 @@ export RUN_USERS=true
  test_file="test_file"

  setup_user "$username" "$password" "user" || fail "error creating user if nonexistent"
  create_test_files "$test_file" || fail "error creating test files"

  setup_bucket "s3api" "$BUCKET_ONE_NAME"
  if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password"; then
  run create_test_file "$test_file"
  assert_success

  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password"; then
    fail "able to get object despite not being bucket owner"
  fi
  change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$username" || fail "error changing bucket ownership"
  put_object "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "failed to add object to bucket"
  put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "failed to add object to bucket"
  delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password" || fail "error deleting object"
}

@@ -100,14 +112,18 @@ export RUN_USERS=true
  test_file="test_file"

  setup_user "$username" "$password" "admin" || fail "error creating user if nonexistent"
  create_test_file_with_size "$test_file" 10 || fail "error creating test file"

  setup_bucket "s3api" "$BUCKET_ONE_NAME"
  put_object_with_user "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password" || fail "failed to add object to bucket"
  get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password" || fail "error getting object"
  compare_files "$test_file_folder/$test_file" "$test_file_folder/$test_file-copy" || fail "files don't match"
  run create_test_file "$test_file"
  assert_success

  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  put_object_with_user "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password" || fail "failed to add object to bucket"
  get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy" "$username" "$password" || fail "error getting object"
  compare_files "$TEST_FILE_FOLDER/$test_file" "$TEST_FILE_FOLDER/$test_file-copy" || fail "files don't match"
  delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password" || fail "error deleting object"
  if get_object "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy"; then
  if get_object "s3api" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy"; then
    fail "file not successfully deleted"
  fi
  # shellcheck disable=SC2154
@@ -122,8 +138,13 @@ export RUN_USERS=true
  test_file="test_file"

  setup_user "$username" "$password" "user" || fail "error creating user if nonexistent"
  create_large_file "$test_file" || fail "error creating test file"
  setup_bucket "s3api" "$BUCKET_ONE_NAME"

  run create_large_file "$test_file"
  assert_success

  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$username" || fail "error changing bucket ownership"
  create_multipart_upload_with_user "$BUCKET_ONE_NAME" "dummy" "$username" "$password" || fail "unable to create multipart upload"
}

@@ -17,7 +17,7 @@
source ./tests/setup.sh
source ./tests/util_users.sh
source ./tests/util.sh
source ./tests/util_bucket_create.sh
source ./tests/util_create_bucket.sh
source ./tests/commands/list_buckets.sh

test_admin_user() {
@@ -37,7 +37,9 @@ test_admin_user() {
  fi
  create_user_with_user "$admin_username" "$admin_password" "$user_username" "$user_password" "user" || fail "failed to create user '$user_username'"

  setup_bucket "aws" "$BUCKET_ONE_NAME"
  run setup_bucket "aws" "$BUCKET_ONE_NAME"
  assert_success

  delete_bucket_or_contents_if_exists "aws" "versity-gwtest-admin-bucket"
  create_bucket_with_user "aws" "versity-gwtest-admin-bucket" "$admin_username" "$admin_password" || fail "error creating bucket with admin user"

@@ -93,7 +95,9 @@ test_user_user() {

  setup_user "$username" "$password" "user" || fail "error setting up user"
  delete_bucket_or_contents_if_exists "aws" "versity-gwtest-user-bucket"
  setup_bucket "aws" "$BUCKET_ONE_NAME"

  run setup_bucket "aws" "$BUCKET_ONE_NAME"
  assert_success

  if create_bucket_with_user "aws" "versity-gwtest-user-bucket" "$username" "$password"; then
    fail "creating bucket with 'user' account failed to return error"
@@ -136,7 +140,9 @@ test_userplus_operation() {

  delete_bucket_or_contents_if_exists "aws" "versity-gwtest-userplus-bucket"
  setup_user "$username" "$password" "userplus" || fail "error creating user '$username'"
  setup_bucket "aws" "$BUCKET_ONE_NAME"

  run setup_bucket "aws" "$BUCKET_ONE_NAME"
  assert_success

  create_bucket_with_user "aws" "versity-gwtest-userplus-bucket" "$username" "$password" || fail "error creating bucket with user '$username'"

226
tests/util.sh
@@ -14,7 +14,7 @@
# specific language governing permissions and limitations
# under the License.

source ./tests/util_bucket_create.sh
source ./tests/util_create_bucket.sh
source ./tests/util_mc.sh
source ./tests/logger.sh
source ./tests/commands/abort_multipart_upload.sh
@@ -43,26 +43,32 @@ source ./tests/commands/upload_part.sh
source ./tests/util_users.sh

# recursively delete an AWS bucket
# param: bucket name
# param: client, bucket name
# fail if error
delete_bucket_recursive() {
  log 6 "delete_bucket_recursive"
  assert [ $# -eq 2 ]
  if [ $# -ne 2 ]; then
    log 2 "'delete_bucket_recursive' requires client, bucket name"
    return 1
  fi

  local exit_code=0
  local error
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 rb s3://"$2" --force 2>&1) || exit_code="$?"
  elif [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then
    delete_bucket_recursive_s3api "$2"
    if ! delete_bucket_recursive_s3api "$2"; then
      log 2 "error deleting bucket recursively (s3api)"
      return 1
    fi
    return 0
  elif [[ $1 == "s3cmd" ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rb s3://"$2" --recursive 2>&1) || exit_code="$?"
  elif [[ $1 == "mc" ]]; then
    error=$(delete_bucket_recursive_mc "$2") || exit_code="$?"
    error=$(delete_bucket_recursive_mc "$2" 2>&1) || exit_code="$?"
  else
    log 2 "invalid command type '$1'"
    assert [ 1 ]
    log 2 "invalid client '$1'"
    return 1
  fi

  if [ $exit_code -ne 0 ]; then
@@ -70,22 +76,26 @@ delete_bucket_recursive() {
    return 0
  else
    log 2 "error deleting bucket recursively: $error"
    assert [ 1 ]
    return 1
  fi
  fi
  return 0
}

# params: bucket name
# return 0 for success, 1 for error
add_governance_bypass_policy() {
  if [[ $# -ne 1 ]]; then
    log 2 "'add governance bypass policy' command requires command ID"
    log 2 "'add governance bypass policy' command requires bucket name"
    return 1
  fi
  test_file_folder=$PWD
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    create_test_file_folder
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
      return 1
    fi
  fi
  cat <<EOF > "$test_file_folder/policy-bypass-governance.txt"
  cat <<EOF > "$TEST_FILE_FOLDER/policy-bypass-governance.txt"
{
  "Version": "dummy",
  "Statement": [
@@ -98,14 +108,18 @@ add_governance_bypass_policy() {
  ]
}
EOF
  if ! put_bucket_policy "s3api" "$1" "$test_file_folder/policy-bypass-governance.txt"; then
  if ! put_bucket_policy "s3api" "$1" "$TEST_FILE_FOLDER/policy-bypass-governance.txt"; then
    log 2 "error putting governance bypass policy"
    return 1
  fi
  return 0
}

log_bucket_policy() {
  assert [ $# -eq 1 ]
  if [ $# -ne 1 ]; then
    log 2 "'log_bucket_policy' requires bucket name"
    return
  fi
  if ! get_bucket_policy "s3api" "$1"; then
    log 2 "error getting bucket policy"
    return
@@ -183,28 +197,38 @@ check_and_disable_object_lock_config() {

# restore bucket to pre-test state (or prep for deletion)
# param: bucket name
# fail on error
# return 0 on success, 1 on error
clear_bucket_s3api() {
  log 6 "clear_bucket_s3api"

  assert [ $# -eq 1 ]

  if [[ $LOG_LEVEL_INT -ge 5 ]]; then
    run log_bucket_policy "$1"
    assert_success "error logging bucket policy"
  if [ $# -ne 1 ]; then
    log 2 "'clear_bucket_s3api' requires bucket name"
    return 1
  fi

  run list_and_delete_objects "$1"
  assert_success "error listing and delete objects"
  if [[ $LOG_LEVEL_INT -ge 5 ]]; then
    if ! log_bucket_policy "$1"; then
      log 2 "error logging bucket policy"
      return 1
    fi
  fi

  run delete_bucket_policy "s3api" "$1"
  assert_success "error deleting bucket policy"
  if ! list_and_delete_objects "$1"; then
    log 2 "error listing and deleting objects"
    return 1
  fi

  if ! delete_bucket_policy "s3api" "$1"; then
    log 2 "error deleting bucket policy"
    return 1
  fi

  #run check_ownership_rule_and_reset_acl "$1"
  #assert_success "error checking ownership rule and resetting acl"

  run check_and_disable_object_lock_config "$1"
  assert_success "error checking and disabling object lock config"
  if ! check_and_disable_object_lock_config "$1"; then
    log 2 "error checking and disabling object lock config"
    return 1
  fi

  #if ! change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$1" "$AWS_ACCESS_KEY_ID"; then
  #  log 2 "error changing bucket owner back to root"
@@ -281,38 +305,53 @@ log_worm_protection() {
}

# params: bucket name
# fail if unable to delete bucket
# return 0 if able to delete recursively, 1 if not
delete_bucket_recursive_s3api() {
  log 6 "delete_bucket_recursive_s3api"
  assert [ $# -eq 1 ]
  if [ $# -ne 1 ]; then
    log 2 "'delete_bucket_recursive_s3api' requires bucket name"
    return 1
  fi

  clear_bucket_s3api "$1"

  run delete_bucket 's3api' "$1"
  assert_success "error deleting bucket"
  if ! clear_bucket_s3api "$1"; then
    log 2 "error clearing bucket (s3api)"
    return 1
  fi

  if ! delete_bucket 's3api' "$1"; then
    log 2 "error deleting bucket"
    return 1
  fi
  return 0
}

# params: client, bucket name
# fail if error
# return 0 on success, 1 on error
delete_bucket_contents() {
  log 6 "delete_bucket_contents"
  assert [ $# -eq 2 ]
  if [ $# -ne 2 ]; then
    log 2 "'delete_bucket_contents' requires client, bucket name"
    return 1
  fi

  local exit_code=0
  local error
  if [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then
    clear_bucket_s3api "$2"
    return 0
    if ! clear_bucket_s3api "$2"; then
      log 2 "error clearing bucket (s3api)"
      return 1
    fi
  elif [[ $1 == "s3cmd" ]]; then
    delete_bucket_recursive "s3cmd" "$2"
    return 0
  elif [[ $1 == "mc" ]]; then
    delete_bucket_recursive "mc" "$2"
    return 0
  elif [[ $1 == "s3" ]]; then
    delete_bucket_recursive "s3" "$2"
  else
    log 2 "unrecognized client: '$1'"
    return 1
  fi
  assert [ 1 ]
  return 0
}

# check if bucket exists
@@ -396,57 +435,73 @@ get_object_ownership_rule_and_update_acl() {
}

# params: client, bucket name
# fail if error
# return 0 for success, 1 for error
delete_bucket_or_contents() {
  log 6 "delete_bucket_or_contents"
  assert [ $# -eq 2 ]
  if [ $# -ne 2 ]; then
    log 2 "'delete_bucket_or_contents' requires client, bucket name"
    return 1
  fi
  if [[ $RECREATE_BUCKETS == "false" ]]; then
    delete_bucket_contents "$1" "$2"
    if ! delete_bucket_contents "$1" "$2"; then
      log 2 "error deleting bucket contents"
      return 1
    fi

    run delete_bucket_policy "$1" "$2"
    assert_success "error deleting bucket policies"
    if ! delete_bucket_policy "$1" "$2"; then
      log 2 "error deleting bucket policy"
      return 1
    fi

    run get_object_ownership_rule_and_update_acl "$2"
    assert_success "error getting object ownership rule and updating acl"

    run abort_all_multipart_uploads "$2"
    assert_success "error aborting multipart uploads"
    if ! get_object_ownership_rule_and_update_acl "$2"; then
      log 2 "error getting object ownership rule and updating ACL"
      return 1
    fi

    if ! abort_all_multipart_uploads "$2"; then
      log 2 "error aborting all multipart uploads"
      return 1
    fi
    log 5 "bucket contents, policy, ACL deletion success"
    return 0
  fi
  run delete_bucket_recursive "$1" "$2"
  assert_success "error with recursive bucket delete"
  if ! delete_bucket_recursive "$1" "$2"; then
    log 2 "error with recursive bucket delete"
    return 1
  fi
  log 5 "bucket deletion success"
  return 0
}

# params: client, bucket name
# fail if unable to delete bucket (RECREATE_BUCKETS=true) or contents (RECREATE_BUCKETS=false)
# return 0 for success, 1 for error
delete_bucket_or_contents_if_exists() {
  log 6 "delete_bucket_or_contents_if_exists"

  assert [ $# -eq 2 ]
  if [ $# -ne 2 ]; then
    log 2 "'delete_bucket_or_contents_if_exists' requires client, bucket name"
    return 1
  fi

  if bucket_exists "$1" "$2"; then
    delete_bucket_or_contents "$1" "$2"
    if ! delete_bucket_or_contents "$1" "$2"; then
      log 2 "error deleting bucket and/or contents"
      return 1
    fi
    log 5 "bucket and/or bucket data deletion success"
    return 0
  fi
  if [[ $RECREATE_BUCKETS == "false" ]]; then
    log 2 "When RECREATE_BUCKETS isn't set to \"true\", buckets should be pre-created by user"
    assert [ 1 ]
  fi
  return 0
}

# params: client, bucket name(s)
# return 0 for success, 1 for failure
setup_buckets() {
  if [ $# -lt 1 ]; then
    log 2 "'setup_buckets' command requires bucket names"
  if [ $# -lt 2 ]; then
    log 2 "'setup_buckets' command requires client, bucket names"
    return 1
  fi
  for name in "$@"; do
    if ! setup_bucket "$name"; then
  for name in "${@:2}"; do
    if ! setup_bucket "$1" "$name"; then
      log 2 "error setting up bucket $name"
      return 1
    fi
@@ -455,36 +510,41 @@ setup_buckets() {
}

# params: client, bucket name
# fail if bucket is not properly set up
# return 0 on successful setup, 1 on error
setup_bucket() {
  log 6 "setup_bucket"

  assert [ $# -eq 2 ]

  if [[ $1 == "s3cmd" ]]; then
    log 5 "putting bucket ownership controls"
    if bucket_exists "s3cmd" "$2"; then
      run put_bucket_ownership_controls "$2" "BucketOwnerPreferred"
      assert_success "error putting bucket ownership controls"
    fi
  if [ $# -ne 2 ]; then
    log 2 "'setup_bucket' requires client, bucket name"
    return 1
  fi

  delete_bucket_or_contents_if_exists "$1" "$2"
  if ! bucket_exists "$1" "$2" && [[ $RECREATE_BUCKETS == "false" ]]; then
    log 2 "When RECREATE_BUCKETS isn't set to \"true\", buckets should be pre-created by user"
    return 1
  fi

  if ! delete_bucket_or_contents_if_exists "$1" "$2"; then
    log 2 "error deleting bucket or contents if they exist"
    return 1
  fi

  log 5 "util.setup_bucket: command type: $1, bucket name: $2"
  if [[ $RECREATE_BUCKETS == "true" ]]; then
    run create_bucket "$1" "$2"
    assert_success "error creating bucket"
    log 5 "bucket creation success"

    if [[ $1 == "s3cmd" ]]; then
      log 5 "putting bucket ownership controls"
      run put_bucket_ownership_controls "$2" "BucketOwnerPreferred"
      assert_success "error putting bucket ownership controls"
    if ! create_bucket "$1" "$2"; then
      log 2 "error creating bucket"
      return 1
    fi
  else
    log 5 "skipping bucket re-creation"
  fi

  if [[ $1 == "s3cmd" ]]; then
    log 5 "putting bucket ownership controls"
    if bucket_exists "s3cmd" "$2" && ! put_bucket_ownership_controls "$2" "BucketOwnerPreferred"; then
      log 2 "error putting bucket ownership controls"
      return 1
    fi
  fi
  return 0
}

@@ -41,3 +41,21 @@ create_bucket_invalid_name() {
  fi
  export bucket_create_error
}

create_and_check_bucket_invalid_name() {
  if [ $# -ne 1 ]; then
    log 2 "'create_and_check_bucket_invalid_name' requires client"
    return 1
  fi
  if ! create_bucket_invalid_name "$1"; then
    log 2 "error creating bucket with invalid name"
    return 1
  fi

  # shellcheck disable=SC2154
  if [[ "$bucket_create_error" != *"Invalid bucket name "* ]]; then
    log 2 "unexpected error: $bucket_create_error"
    return 1
  fi
  return 0
}
@@ -18,14 +18,13 @@ source ./tests/logger.sh

# create a test file and export folder. do so in temp folder
# params: filenames
# return 0 for success, 1 for failure
# fail if error
create_test_files() {
  log 6 "create_test_files"
  if [ $# -lt 1 ]; then
    log 2 "'create_test_files' requires minimum of one file name"
    log 2 "'create_test_files' requires file names"
    return 1
  fi
  #test_file_folder=$PWD
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
@@ -38,50 +37,53 @@ create_test_files() {
      return 1
    fi
  done
  #export test_file_folder
  return 0
}

# params: filename, size (optional, defaults to 10)
create_test_file() {
  if [ $# -ne 1 ]; then
    log 2 "'create_test_file' requires name"
  if [[ ( $# -lt 1 ) || ( $# -gt 2 ) ]]; then
    log 2 "'create_test_file' requires filename, size (optional)"
    return 1
  fi
  if [[ -e "$TEST_FILE_FOLDER/$name" ]]; then
    if ! error=$(rm "$TEST_FILE_FOLDER/$name" 2>&1); then
      log 2 "error removing old test file: $error"
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
      return 1
    fi
  fi
  if ! error=$(touch "$TEST_FILE_FOLDER/$name"); then
    log 2 "error creating new test file: $error"
    return 1
  fi
  return 0
}

create_test_file_with_size() {
  if [ $# -ne 2 ]; then
    log 2 "'create test file with size' function requires name, size"
    return 1
  fi
  if ! create_test_file_folder "$1"; then
    log 2 "error creating test file"
    return 1
  fi
  if ! error=$(dd if=/dev/urandom of="$TEST_FILE_FOLDER"/"$1" bs=1 count="$2" 2>&1); then
    log 2 "error writing file data: $error"
  if [[ -e "$TEST_FILE_FOLDER/$1" ]]; then
    if ! error=$(rm "$TEST_FILE_FOLDER/$1" 2>&1); then
      log 2 "error removing existing file: $error"
      return 1
    fi
  fi
  if ! error=$(touch "$TEST_FILE_FOLDER/$1"); then
    log 2 "error creating new file: $error"
    return 1
  fi
  if [ -z "$2" ]; then
    file_size=10
  else
    file_size="$2"
  fi
  if [ "$file_size" -eq 0 ]; then
    return 0
  fi
  if ! error=$(dd if=/dev/urandom of="$TEST_FILE_FOLDER/$1" bs=1 count="$file_size" 2>&1); then
    log 2 "error adding data to file: $error"
    return 1
  fi
  return 0
}
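
# Illustrative usage sketch (assumes the harness exports TEST_FILE_FOLDER):
# create a 1KiB random file, and a second file with the 10-byte default size.
#   create_test_file "large_test_file" 1024 || fail "error creating 1KiB file"
#   create_test_file "small_test_file" || fail "error creating default-size file"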

# params: folder name
# fail if error
create_test_folder() {
  if [ $# -lt 1 ]; then
    log 2 "'create_test_folder' command requires at least one folder"
    log 2 "'create_test_folder' requires folder names"
    return 1
  fi
  #test_file_folder=$PWD
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
@@ -89,8 +91,8 @@ create_test_folder() {
    fi
  fi
  for name in "$@"; do
    if ! error=$(run mkdir -p "$TEST_FILE_FOLDER"/"$name" 2>&1); then
      log 2 "error creating test folder $name: $error"
    if ! error=$(mkdir -p "$TEST_FILE_FOLDER"/"$name" 2>&1); then
      log 2 "error creating folder $name: $error"
      return 1
    fi
  done
@@ -161,60 +163,59 @@ compare_files() {
  return 1
}

# return 0 on success, 1 on failure
# return 0 on success, 1 on error
create_test_file_folder() {
  log 6 "create_test_file_folder"
  if ! error=$(mkdir -p "$TEST_FILE_FOLDER" 2>&1); then
    # shellcheck disable=SC2035
    if [[ "$error" != *"File exists"* ]]; then
      log 2 "error creating test file folder: $error"
      log 2 "error making test file folder: $error"
      return 1
    fi
  fi
  export test_file_folder=$TEST_FILE_FOLDER
  return 0
}

# generate 160MB file
# input: filename
# return 0 on success, 1 on failure
# fail on error
create_large_file() {
  log 6 "create_large_file"
  if [ $# -ne 1 ]; then
    log 2 "'create_large_file' requires filename"
    log 2 "'create_large_file' requires file name"
    return 1
  fi

  #test_file_folder=$PWD/versity-gwtest-files
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    if ! create_test_file_folder; then
      log 2 "error creating test file"
      log 2 "error creating test file folder"
      return 1
    fi
  fi

  filesize=$((160*1024*1024))
  if ! error=$(dd if=/dev/urandom of="$TEST_FILE_FOLDER"/"$1" bs=1024 count=$((filesize/1024)) 2>&1); then
    log 2 "error creating large file: $error"
    log 2 "error adding data to large file: $error"
    return 1
  fi
  return 0
}

# param: number of files
# fail on error
create_test_file_count() {
  if [[ $# -ne 1 ]]; then
    echo "create test file count function missing bucket name, count"
  if [ $# -ne 1 ]; then
    log 2 "'create_test_file_count' requires number of files"
    return 1
  fi
  #test_file_folder=$PWD
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    create_test_file_folder
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
      return 1
    fi
  fi
  local touch_result
  for ((i=1;i<=$1;i++)) {
    error=$(touch "$TEST_FILE_FOLDER/file_$i") || touch_result=$?
    if [[ $touch_result -ne 0 ]]; then
      echo "error creating file_$i: $error"
    if ! error=$(touch "$TEST_FILE_FOLDER/file_$i" 2>&1); then
      log 2 "error creating file_$i: $error"
      return 1
    fi
  }
@@ -227,6 +228,7 @@ create_test_file_count() {
}

download_and_compare_file() {
  log 6 "download_and_compare_file"
  if [[ $# -ne 5 ]]; then
    log 2 "'download and compare file' requires command type, original file, bucket, key, local file"
    return 1
@@ -236,6 +238,7 @@ download_and_compare_file() {
}

download_and_compare_file_with_user() {
  log 6 "download_and_compare_file_with_user"
  if [[ $# -ne 7 ]]; then
    log 2 "'download and compare file with user' command requires command type, original file, bucket, key, local file, user, password"
    return 1
@@ -245,9 +248,42 @@ download_and_compare_file_with_user() {
    return 1
  fi
  log 5 "files: $2, $5"
  if ! compare_files "$2" "$5"; then
  #if [ "$1" == 'mc' ]; then
  #  file_to_compare="$5/$(basename "$2")"
  #else
  file_to_compare="$5"
  #fi
  if ! compare_files "$2" "$file_to_compare"; then
    log 2 "files don't match"
    return 1
  fi
  return 0
}

# params: src, dst
# fail if error
copy_file_locally() {
  if [ $# -ne 2 ]; then
    log 2 "'copy_file_locally' requires src, dst"
    return 1
  fi
  if ! error=$(cp "$1" "$2" 2>&1); then
    log 2 "error copying file: $error"
    return 1
  fi
  return 0
}

# params: src, dst
# fail if error
move_file_locally() {
  if [ $# -ne 2 ]; then
    log 2 "'move_file_locally' requires src, dst"
    return 1
  fi
  if ! error=$(mv "$1" "$2" 2>&1); then
    log 2 "error moving file: $error"
    return 1
  fi
  return 0
}

31
tests/util_get_bucket_acl.sh
Normal file
@@ -0,0 +1,31 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

get_bucket_acl_and_check_owner() {
  if [ $# -ne 2 ]; then
    log 2 "'get_bucket_acl_and_check_owner' requires client, bucket name"
    return 1
  fi
  if ! get_bucket_acl "$1" "$2"; then
    log 2 "error getting bucket acl"
    return 1
  fi

  # shellcheck disable=SC2154
  log 5 "ACL: $acl"
  id=$(echo "$acl" | jq -r '.Owner.ID')
  [[ $id == "$AWS_ACCESS_KEY_ID" ]] || fail "Acl mismatch"
}
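
# Illustrative usage sketch: in a test, verify the root account still owns a
# bucket after ownership-sensitive operations (client and bucket are examples):
#   get_bucket_acl_and_check_owner "s3api" "$BUCKET_ONE_NAME"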
44
tests/util_get_object_attributes.sh
Normal file
@@ -0,0 +1,44 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

get_and_check_object_size() {
  if [ $# -ne 3 ]; then
    log 2 "'get_and_check_object_size' requires bucket, key, object size"
    return 1
  fi
  if ! get_object_attributes "$1" "$2"; then
    log 2 "failed to get object attributes"
    return 1
  fi
  # shellcheck disable=SC2154
  if ! has_object_size=$(echo "$attributes" | jq 'has("ObjectSize")' 2>&1); then
    log 2 "error checking for ObjectSize parameters: $has_object_size"
    return 1
  fi
  if [[ $has_object_size != "true" ]]; then
    log 2 "ObjectSize parameter missing: $attributes"
    return 1
  fi
  if ! object_size=$(echo "$attributes" | jq -r ".ObjectSize" 2>&1); then
    log 2 "error getting object size: $object_size"
    return 1
  fi
  if [[ $object_size != "$3" ]]; then
    log 2 "Incorrect object size: $object_size"
    return 1
  fi
  return 0
}
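
# Illustrative usage sketch: after uploading a 10-byte object, confirm that
# GetObjectAttributes reports a matching ObjectSize (names are examples):
#   put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "error adding object"
#   get_and_check_object_size "$BUCKET_ONE_NAME" "$test_file" 10 || fail "object size mismatch"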
46
tests/util_get_object_retention.sh
Normal file
@@ -0,0 +1,46 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

get_check_object_retention() {
  if [ $# -ne 3 ]; then
    log 2 "'get_check_object_retention' requires bucket, file, expected retention date"
    return 1
  fi
  # shellcheck disable=SC2154
  if ! get_object_retention "$1" "$2"; then
    log 2 "failed to get object retention"
    return 1
  fi
  log 5 "RETENTION: $retention"
  retention=$(echo "$retention" | grep -v "InsecureRequestWarning")
  if ! mode=$(echo "$retention" | jq -r ".Retention.Mode" 2>&1); then
    log 2 "error getting retention mode: $mode"
    return 1
  fi
  if ! retain_until_date=$(echo "$retention" | jq -r ".Retention.RetainUntilDate" 2>&1); then
    log 2 "error getting retain until date: $retain_until_date"
    return 1
  fi
  if [[ $mode != "GOVERNANCE" ]]; then
    log 2 "retention mode should be governance, is $mode"
    return 1
  fi
  if [[ $retain_until_date != "$3"* ]]; then
    log 2 "retain until date should be $3, is $retain_until_date"
    return 1
  fi
  return 0
}
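
# Illustrative usage sketch, assuming a matching put_object_retention command
# exists in tests/commands and GNU date is available for the expiration math:
#   retention_date=$(date -u -d "+5 minutes" +"%Y-%m-%dT%H:%M:%S")
#   put_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "GOVERNANCE" "$retention_date" || fail "error setting retention"
#   get_check_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "$retention_date" || fail "retention check failed"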
72
tests/util_head_object.sh
Normal file
@@ -0,0 +1,72 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

get_and_verify_metadata() {
  if [ $# -ne 7 ]; then
    log 2 "'get_and_verify_metadata' requires bucket file, expected content type,
expected metadata key, expected metadata val, expected hold status, expected retention mode, expected retention date"
    return 1
  fi
  if ! head_object "s3api" "$BUCKET_ONE_NAME" "$1"; then
    log 2 "error retrieving metadata"
    return 1
  fi
  # shellcheck disable=SC2154
  raw_metadata=$(echo "$metadata" | grep -v "InsecureRequestWarning")
  log 5 "raw metadata: $raw_metadata"

  if ! content_type=$(echo "$raw_metadata" | jq -r ".ContentType" 2>&1); then
    log 2 "error retrieving content type: $content_type"
    return 1
  fi
  if [[ $content_type != "$2" ]]; then
    log 2 "content type mismatch ($content_type, $2)"
    return 1
  fi
  if ! meta_val=$(echo "$raw_metadata" | jq -r ".Metadata.$3" 2>&1); then
    log 2 "error retrieving metadata val: $meta_val"
    return 1
  fi
  if [[ $meta_val != "$4" ]]; then
    log 2 "metadata val mismatch ($meta_val, $4)"
    return 1
  fi
  if ! hold_status=$(echo "$raw_metadata" | jq -r ".ObjectLockLegalHoldStatus" 2>&1); then
    log 2 "error retrieving hold status: $hold_status"
    return 1
  fi
  if [[ $hold_status != "$5" ]]; then
    log 2 "hold status mismatch ($hold_status, $5)"
    return 1
  fi
  if ! retention_mode=$(echo "$raw_metadata" | jq -r ".ObjectLockMode" 2>&1); then
    log 2 "error retrieving retention mode: $retention_mode"
    return 1
  fi
  if [[ $retention_mode != "$6" ]]; then
    log 2 "retention mode mismatch ($retention_mode, $6)"
    return 1
  fi
  if ! retain_until_date=$(echo "$raw_metadata" | jq -r ".ObjectLockRetainUntilDate" 2>&1); then
    log 2 "error retrieving retain until date: $retain_until_date"
    return 1
  fi
  if [[ $retain_until_date != "$7"* ]]; then
    log 2 "retention date mismatch ($retain_until_date, $7)"
    return 1
  fi
  return 0
}
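
# Illustrative usage sketch: check content type, custom metadata, legal hold,
# and GOVERNANCE retention in one call (all values below are example data):
#   get_and_verify_metadata "$bucket_file" "application/octet-stream" "testkey" "testval" \
#     "ON" "GOVERNANCE" "$retention_date" || fail "error verifying metadata"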
38
tests/util_legal_hold.sh
Normal file
@@ -0,0 +1,38 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

get_and_check_legal_hold() {
  if [ $# -ne 4 ]; then
    log 2 "'get_and_check_legal_hold' requires client, bucket, key, expected status"
    return 1
  fi
  if ! head_object "$1" "$2" "$3"; then
    log 2 "error getting object metadata"
    return 1
  fi
  # shellcheck disable=SC2154
  raw_metadata=$(echo "$metadata" | grep -v "InsecureRequestWarning")
  log 5 "raw metadata: $raw_metadata"
  if ! hold_status=$(echo "$raw_metadata" | jq -r ".ObjectLockLegalHoldStatus" 2>&1); then
    log 2 "error retrieving hold status: $hold_status"
    return 1
  fi
  if [[ "$hold_status" != "$4" ]]; then
    log 2 "hold status mismatch ($hold_status, $4)"
    return 1
  fi
  return 0
}
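
# Illustrative usage sketch, assuming a matching legal-hold setter command
# exists in tests/commands:
#   put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "ON" || fail "error applying legal hold"
#   get_and_check_legal_hold "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "ON" || fail "legal hold check failed"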
36
tests/util_list_buckets.sh
Normal file
@@ -0,0 +1,36 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

list_check_buckets_rest() {
  if ! list_buckets "rest"; then
    log 2 "error listing buckets"
    return 1
  fi
  bucket_found=false
  # shellcheck disable=SC2154
  for bucket in "${bucket_array[@]}"; do
    log 5 "bucket: $bucket"
    if [[ $bucket == "$BUCKET_ONE_NAME" ]]; then
      bucket_found=true
      break
    fi
  done
  if [[ $bucket_found == "false" ]]; then
    log 2 "bucket not found"
    return 1
  fi
  return 0
}

121
tests/util_list_objects.sh
Normal file
@@ -0,0 +1,121 @@
#!/usr/bin/env bash

source ./tests/commands/list_objects_v2.sh

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

parse_objects_list_rest() {
  # shellcheck disable=SC2154
  object_list=$(echo "$reply" | xmllint --xpath '//*[local-name()="Key"]/text()' -)
  object_array=()
  while read -r object; do
    object_array+=("$object")
  done <<< "$object_list"
  log 5 "object array: ${object_array[*]}"
}

list_check_objects_v1() {
  if [ $# -ne 5 ]; then
    log 2 "'list_check_objects_v1' requires bucket, expected key one, expected size one, expected key two, expected size two"
    return 1
  fi
  if ! list_objects_s3api_v1 "$1"; then
    log 2 "error listing objects (s3api, v1)"
    return 1
  fi
  if ! check_listed_objects "$2" "$3" "$4" "$5"; then
    log 2 "error checking listed objects"
    return 1
  fi
  return 0
}
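
# Illustrative usage sketch: with two objects of known sizes uploaded, the v1
# listing should report both keys and sizes (sizes here are example values):
#   list_check_objects_v1 "$BUCKET_ONE_NAME" "$test_file-one" 10 "$test_file-two" 20 || fail "v1 listing check failed"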

check_listed_objects() {
  if [ $# -ne 4 ]; then
    log 2 "'check_listed_objects' requires expected key one, expected size one, expected key two, expected size two"
    return 1
  fi
  # shellcheck disable=SC2154
  if ! key_one=$(echo "$objects" | jq -r '.Contents[0].Key' 2>&1); then
    log 2 "error obtaining key one: $key_one"
    return 1
  fi
  if [[ $key_one != "$1" ]]; then
    log 2 "Object one mismatch ($key_one, $1)"
    return 1
  fi
  if ! size_one=$(echo "$objects" | jq -r '.Contents[0].Size' 2>&1); then
    log 2 "error obtaining size one: $size_one"
    return 1
  fi
  if [[ $size_one -ne "$2" ]]; then
    log 2 "Object one size mismatch ($size_one, $2)"
    return 1
  fi
  if ! key_two=$(echo "$objects" | jq -r '.Contents[1].Key' 2>&1); then
    log 2 "error obtaining key two: $key_two"
    return 1
  fi
  if [[ $key_two != "$3" ]]; then
    log 2 "Object two mismatch ($key_two, $3)"
    return 1
  fi
  if ! size_two=$(echo "$objects" | jq '.Contents[1].Size' 2>&1); then
    log 2 "error obtaining size two: $size_two"
    return 1
  fi
  if [[ $size_two -ne "$4" ]]; then
    log 2 "Object two size mismatch ($size_two, $4)"
    return 1
  fi
}

list_check_objects_v2() {
  if [ $# -ne 5 ]; then
    log 2 "'list_check_objects_v2' requires bucket, expected key one, expected size one, expected key two, expected size two"
    return 1
  fi
  if ! list_objects_v2 "$1"; then
    log 2 "error listing objects (s3api, v2)"
    return 1
  fi
  if ! check_listed_objects "$2" "$3" "$4" "$5"; then
    log 2 "error checking listed objects"
    return 1
  fi
  return 0
}

list_check_objects_rest() {
  if [ $# -ne 1 ]; then
    log 2 "'list_check_objects_rest' requires bucket name"
    return 1
  fi
  list_objects "rest" "$1"
  object_found=false
  # shellcheck disable=SC2154
  for object in "${object_array[@]}"; do
    log 5 "object: $object"
    if [[ $object == "$test_file" ]]; then
      object_found=true
      break
    fi
  done
  if [[ $object_found == "false" ]]; then
    log 2 "object not found"
    return 1
  fi
  return 0
}

@@ -61,4 +61,26 @@ get_and_check_object_lock_config() {
    return 1
  fi
  return 0
}

get_check_object_lock_config_enabled() {
  if [ $# -ne 1 ]; then
    log 2 "'get_check_object_lock_config_enabled' requires bucket name"
    return 1
  fi
  if ! get_object_lock_configuration "$1"; then
    log 2 "error getting lock configuration"
    return 1
  fi
  # shellcheck disable=SC2154
  log 5 "Lock config: $lock_config"
  if ! enabled=$(echo "$lock_config" | jq -r ".ObjectLockConfiguration.ObjectLockEnabled" 2>&1); then
    log 2 "error parsing enabled value: $enabled"
    return 1
  fi
  if [[ $enabled != "Enabled" ]]; then
    log 2 "ObjectLockEnabled should be 'Enabled', is '$enabled'"
    return 1
  fi
  return 0
}

@@ -46,3 +46,94 @@ create_upload_and_test_parts_listing() {
  fi
  return 0
}

start_multipart_upload_list_check_parts() {
  if [ $# -ne 3 ]; then
    log 2 "'start_multipart_upload_list_check_parts' requires bucket, key, original source"
    return 1
  fi
  if ! start_multipart_upload_and_list_parts "$1" "$2" "$3" 4; then
    log 2 "error starting upload"
    return 1
  fi

  declare -a parts_map
  # shellcheck disable=SC2154
  log 5 "parts: $parts"
  for i in {0..3}; do
    if ! parse_parts_and_etags "$i"; then
      log 2 "error parsing part $i"
      return 1
    fi
  done
  if [[ ${#parts_map[@]} -eq 0 ]]; then
    log 2 "error loading multipart upload parts to check"
    return 1
  fi

  for i in {0..3}; do
    if ! compare_parts_to_listed_parts "$i"; then
      log 2 "error comparing parts to listed parts"
      return 1
    fi
  done
  return 0
}

parse_parts_and_etags() {
  if [ $# -ne 1 ]; then
    log 2 "'parse_parts_and_etags' requires part id"
    return 1
  fi
  local part_number
  local etag
  # shellcheck disable=SC2154
  if ! part=$(echo "$parts" | grep -v "InsecureRequestWarning" | jq -r ".[$1]" 2>&1); then
    log 2 "error getting part: $part"
    return 1
  fi
  if ! part_number=$(echo "$part" | jq ".PartNumber" 2>&1); then
    log 2 "error parsing part number: $part_number"
    return 1
  fi
  if [[ $part_number == "" ]]; then
    log 2 "error: blank part number"
    return 1
  fi
  if ! etag=$(echo "$part" | jq ".ETag" 2>&1); then
    log 2 "error parsing etag: $etag"
    return 1
  fi
  if [[ $etag == "" ]]; then
    log 2 "error: blank etag"
    return 1
  fi
  # shellcheck disable=SC2004
  parts_map[$part_number]=$etag
}

compare_parts_to_listed_parts() {
  if [ $# -ne 1 ]; then
    log 2 "'compare_parts_to_listed_parts' requires part number"
    return 1
  fi
  local part_number
  local etag
  # shellcheck disable=SC2154
  if ! listed_part=$(echo "$listed_parts" | grep -v "InsecureRequestWarning" | jq -r ".Parts[$1]" 2>&1); then
    log 2 "error parsing listed part: $listed_part"
    return 1
  fi
  if ! part_number=$(echo "$listed_part" | jq ".PartNumber" 2>&1); then
    log 2 "error parsing listed part number: $part_number"
    return 1
  fi
  if ! etag=$(echo "$listed_part" | jq ".ETag" 2>&1); then
    log 2 "error getting listed etag: $etag"
    return 1
  fi
  if [[ ${parts_map[$part_number]} != "$etag" ]]; then
    log 2 "error: etags don't match (part number: $part_number, etags ${parts_map[$part_number]},$etag)"
    return 1
  fi
}
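
# Sketch of the JSON shapes these helpers assume (example data): "$parts" is
# the array recorded at upload time, "$listed_parts" is the ListParts reply,
# and parts_map keys part numbers to ETags so the two can be compared:
#   parts='[{"PartNumber":1,"ETag":"\"etag-one\""}]'
#   listed_parts='{"Parts":[{"PartNumber":1,"ETag":"\"etag-one\""}]}'
#   declare -a parts_map
#   parse_parts_and_etags 0 && compare_parts_to_listed_parts 0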

@@ -44,7 +44,10 @@ check_for_empty_policy() {

get_modified_principal() {
  log 6 "get_modified_principal"
  assert [ $# -eq 1 ]
  if [ $# -ne 1 ]; then
    log 2 "'get_modified_principal' requires principal"
    return 1
  fi
  local first_char="${1:0:1}"
  if [ "$first_char" != '{' ] && [ "$first_char" != '[' ] && [ "$first_char" != '"' ]; then
    # shellcheck disable=SC2089
@@ -52,16 +55,40 @@ get_modified_principal() {
  else
    modified_principal=$1
  fi
  export modified_principal
}

get_modified_action() {
  log 6 "get_modified_action"
  if [ $# -ne 1 ]; then
    log 2 "'get_modified_action' requires action"
    return 1
  fi
  local first_char="${1:0:1}"
  if [ "$first_char" != '{' ] && [ "$first_char" != '[' ] && [ "$first_char" != '"' ]; then
    # shellcheck disable=SC2089
    modified_action="\"$1\""
  else
    modified_action=$1
  fi
}

# params: file, version, effect, principal, action, resource
# fail on error
setup_policy_with_single_statement() {
  log 6 "setup_policy_with_single_statement"
  assert [ $# -eq 6 ]
  if [ $# -ne 6 ]; then
    log 2 "'setup_policy_with_single_statement' requires policy file, version, effect, principal, action, resource"
    return 1
  fi
  log 5 "policy file: $1"
  get_modified_principal "$4"
  if ! get_modified_principal "$4"; then
    log 2 "error getting modified principal"
    return 1
  fi
  if ! get_modified_action "$5"; then
    log 2 "error getting modified action"
    return 1
  fi
  bash -c "cat <<EOF > $1
{
  \"Version\": \"$2\",
@@ -69,7 +96,7 @@ setup_policy_with_single_statement() {
    {
      \"Effect\": \"$3\",
      \"Principal\": $modified_principal,
      \"Action\": \"$5\",
      \"Action\": $modified_action,
      \"Resource\": \"$6\"
    }
  ]
@@ -81,15 +108,24 @@ EOF"
}

# params: file, version, two sets: effect, principal, action, resource
# fail on error
# return 0 on success, 1 on error
setup_policy_with_double_statement() {
  log 6 "setup_policy_with_double_statement"
  assert [ $# -eq 10 ]
  get_modified_principal "$4"
  if [ $# -ne 10 ]; then
    log 2 "invalid number of parameters"
    return 1
  fi
  if ! get_modified_principal "$4"; then
    log 2 "error getting first modified principal"
    return 1
  fi
  principal_one=$modified_principal
  get_modified_principal "$8"
  if ! get_modified_principal "$8"; then
    log 2 "error getting second modified principal"
    return 1
  fi
  principal_two=$modified_principal
  run bash -c "cat <<EOF > $1
  bash -c "cat <<EOF > $1
{
  \"Version\": \"$2\",
  \"Statement\": [
@@ -109,6 +145,5 @@ setup_policy_with_double_statement() {
}
EOF"
  # shellcheck disable=SC2154
  assert_success "failed to set up policy: $output"
  log 5 "policy data: $(cat "$1")"
}
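
# Illustrative sketch: with arguments like
#   setup_policy_with_single_statement "$TEST_FILE_FOLDER/policy_file" "2012-10-17" \
#     "Allow" "$username" "s3:GetObject" "arn:aws:s3:::$BUCKET_ONE_NAME/*"
# the helpers quote the bare principal and action, so the generated file would
# contain a statement of the form (values are example data):
#   {"Effect": "Allow", "Principal": "someuser", "Action": "s3:GetObject",
#    "Resource": "arn:aws:s3:::bucket-one/*"}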
145
tests/util_rest.sh
Normal file
@@ -0,0 +1,145 @@
#!/usr/bin/env bash

parse_bucket_list() {
  # shellcheck disable=SC2154
  bucket_list=$(echo "$reply" | xmllint --xpath '//*[local-name()="Bucket"]/*[local-name()="Name"]/text()' -)
  bucket_array=()
  while read -r bucket; do
    bucket_array+=("$bucket")
  done <<< "$bucket_list"
  log 5 "bucket array: ${bucket_array[*]}"
}
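
# Illustrative sketch of the reply this parses (abridged ListAllMyBuckets XML):
#   reply='<ListAllMyBucketsResult><Buckets>
#     <Bucket><Name>bucket-one</Name></Bucket>
#     <Bucket><Name>bucket-two</Name></Bucket>
#   </Buckets></ListAllMyBucketsResult>'
# parse_bucket_list would load "bucket-one" and "bucket-two" into bucket_array.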

parse_object_list() {
  object_list=$(echo "$reply" | xmllint --xpath '//*[local-name()="Key"]/text()' -)
  object_array=()
  while read -r object; do
    object_array+=("$object")
  done <<< "$object_list"
  log 5 "object array: ${object_array[*]}"
}

get_signature() {
  date_key=$(echo -n "$ymd" | openssl dgst -sha256 -mac HMAC -macopt key:"AWS4${AWS_SECRET_ACCESS_KEY}" | awk '{print $2}')
  date_region_key=$(echo -n "$AWS_REGION" | openssl dgst -sha256 -mac HMAC -macopt hexkey:"$date_key" | awk '{print $2}')
  date_region_service_key=$(echo -n "s3" | openssl dgst -sha256 -mac HMAC -macopt hexkey:"$date_region_key" | awk '{print $2}')
  signing_key=$(echo -n "aws4_request" | openssl dgst -sha256 -mac HMAC -macopt hexkey:"$date_region_service_key" | awk '{print $2}')
  # shellcheck disable=SC2034
  signature=$(echo -n "$sts_data" | openssl dgst -sha256 \
    -mac HMAC \
    -macopt hexkey:"$signing_key" | awk '{print $2}')
}

hmac_sha256() {
  key="$1"
  data="$2"
  #echo "key: $1"
  echo -n "$data" | openssl dgst -sha256 -mac HMAC -macopt "$key" | sed 's/^.* //'
}
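
# Illustrative sketch: the SigV4 key derivation in get_signature, restated with
# the hmac_sha256 helper (the first step uses a raw key, later steps hex keys):
#   date_key=$(hmac_sha256 key:"AWS4${AWS_SECRET_ACCESS_KEY}" "$ymd")
#   date_region_key=$(hmac_sha256 hexkey:"$date_key" "$AWS_REGION")
#   date_region_service_key=$(hmac_sha256 hexkey:"$date_region_key" "s3")
#   signing_key=$(hmac_sha256 hexkey:"$date_region_service_key" "aws4_request")
#   signature=$(hmac_sha256 hexkey:"$signing_key" "$sts_data")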

send_rest_command_no_payload_no_bucket() {
  generate_hash_for_payload ""
  get_creq_string
}

send_rest_command_no_payload() {
  if [ $# -ne 1 ]; then
    log 2 "'send_rest_command_no_payload' requires payload"
    return 1
  fi
}

generate_hash_for_payload() {
  if [ $# -ne 1 ]; then
    log 2 "'generate_hash_for_payload' requires payload string"
    return 1
  fi
  payload_hash="$(echo -n "$1" | sha256sum | awk '{print $1}')"
}
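
# Example: for a request with no body, the payload hash is the SHA-256 of the
# empty string, the well-known constant below:
#   generate_hash_for_payload ""
#   # payload_hash == e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855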

get_creq_string_list_buckets() {

  current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
  canonical_request="GET
/

host:${AWS_ENDPOINT_URL#*//}
x-amz-content-sha256:$payload_hash
x-amz-date:$current_date_time

host;x-amz-content-sha256;x-amz-date
$payload_hash"
}

generate_creq_file() {
  if [ $# -ne 3 ]; then
    log 2 "'generate_creq_file' command requires bucket name, creq file name, hash"
    return 1
  fi
  current_time=$(date -u +"%Y%m%dT%H%M%SZ")
  cat <<EOF > "$2"
GET
/

host:$1.s3.amazonaws.com
x-amz-content-sha256:$3
x-amz-date:$current_time

host;x-amz-content-sha256;x-amz-date
$3
EOF

  canonical_request="GET
/

host:$1.s3.amazonaws.com
x-amz-content-sha256:$3
x-amz-date:$current_time

host;x-amz-content-sha256;x-amz-date
$3"
  echo "canonical: $canonical_request"

  echo "TEST CREQ"
  cat test.creq
}

generate_sts_string() {
  if [ $# -ne 2 ]; then
    log 2 "'generate_sts_string' requires current date and time, canonical request string"
    return 1
  fi

  ymd=$(echo "$1" | cut -c1-8)
  creq_hash="$(echo -n "$2" | openssl dgst -sha256 | awk '{print $2}')"
  sts_data="AWS4-HMAC-SHA256
$1
$ymd/$AWS_REGION/s3/aws4_request
$creq_hash"

  return 0
}

generate_sts_file() {
  if [ $# -ne 3 ]; then
    log 2 "'generate_sts_file' requires date, hash, file name"
    return 1
  fi
  ymd=$(echo "$current_time" | cut -c1-8)
  creq_hash="$(echo -n "$canonical_request" | openssl dgst -sha256 | awk '{print $2}')"
  echo "creq hash: $creq_hash"
  cat <<EOF > "$3"
AWS4-HMAC-SHA256
$1
$ymd/us-west-2/s3/aws4_request
$creq_hash
EOF
  sts_data="AWS4-HMAC-SHA256
$1
$ymd/us-west-2/s3/aws4_request
$creq_hash"

  echo "TEST STS"
  cat test.sts
}

@@ -86,13 +86,13 @@ put_user_policy_userplus() {
|
||||
log 2 "'put user policy userplus' function requires username"
|
||||
return 1
|
||||
fi
|
||||
if [[ -z "$test_file_folder" ]] && [[ -z "$GITHUB_ACTIONS" ]] && ! create_test_file_folder; then
|
||||
if [[ -z "$TEST_FILE_FOLDER" ]] && [[ -z "$GITHUB_ACTIONS" ]] && ! create_test_file_folder; then
|
||||
log 2 "unable to create test file folder"
|
||||
return 1
|
||||
fi
|
||||
#"Resource": "arn:aws:s3:::${aws:username}-*"
|
||||
|
||||
cat <<EOF > "$test_file_folder"/user_policy_file
|
||||
cat <<EOF > "$TEST_FILE_FOLDER"/user_policy_file
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
@@ -118,7 +118,7 @@ cat <<EOF > "$test_file_folder"/user_policy_file
|
||||
]
|
||||
}
|
||||
EOF
|
||||
if ! error=$(aws iam put-user-policy --user-name "$1" --policy-name "UserPolicy" --policy-document "file://$test_file_folder/user_policy_file" 2>&1); then
|
||||
if ! error=$(aws iam put-user-policy --user-name "$1" --policy-name "UserPolicy" --policy-document "file://$TEST_FILE_FOLDER/user_policy_file" 2>&1); then
|
||||
log 2 "error putting user policy: $error"
|
||||
return 1
|
||||
fi
|
||||
@@ -131,7 +131,7 @@ put_user_policy() {
|
||||
log 2 "attaching user policy requires user ID, role, bucket name"
|
||||
return 1
|
||||
fi
|
||||
if [[ -z "$test_file_folder" ]] && [[ -z "$GITHUB_ACTIONS" ]] && ! create_test_file_folder; then
|
||||
if [[ -z "$TEST_FILE_FOLDER" ]] && [[ -z "$GITHUB_ACTIONS" ]] && ! create_test_file_folder; then
|
||||
log 2 "unable to create test file folder"
|
||||
return 1
|
||||
fi
|
||||
|
||||
@@ -18,12 +18,12 @@ source ./tests/util_file.sh
|
||||
|
||||
start_versity_process() {
|
||||
if [[ $# -ne 1 ]]; then
|
||||
log 2 "start versity process function requires number"
|
||||
return 1
|
||||
log 1 "start versity process function requires number"
|
||||
exit 1
|
||||
fi
|
||||
if ! create_test_file_folder; then
|
||||
log 2 "error creating test log folder"
|
||||
return 1
|
||||
log 1 "error creating test log folder"
|
||||
exit 1
|
||||
fi
|
||||
IFS=' ' read -r -a full_command <<< "${base_command[@]}"
|
||||
log 5 "versity command: ${full_command[*]}"
|
||||
@@ -36,9 +36,10 @@ start_versity_process() {
|
||||
if [[ $? -ne 0 ]]; then
|
||||
sleep 1
|
||||
if [ -n "$VERSITY_LOG_FILE" ]; then
|
||||
log 2 "error running versitygw command: $(cat "$VERSITY_LOG_FILE")"
|
||||
log 1 "error running versitygw command: $(cat "$VERSITY_LOG_FILE")"
|
||||
exit 1
|
||||
fi
|
||||
return 1
|
||||
exit 1
|
||||
fi
|
||||
eval versitygw_pid_"$1"=$!
|
||||
if [ -n "$VERSITY_LOG_FILE" ]; then
|
||||
@@ -51,19 +52,19 @@ start_versity_process() {
   sleep 1

   if ! check_result=$(kill -0 "$pid" 2>&1); then
-    log 2 "versitygw failed to start: $check_result"
+    log 1 "versitygw failed to start: $check_result"
     if [ -n "$VERSITY_LOG_FILE" ]; then
-      log 2 "log data: $(cat "$VERSITY_LOG_FILE")"
+      log 1 "log data: $(cat "$VERSITY_LOG_FILE")"
     fi
-    return 1
+    exit 1
   fi
   export versitygw_pid_"$1"
 }

 run_versity_app_posix() {
   if [[ $# -ne 3 ]]; then
-    log 2 "run versity app w/posix command requires access ID, secret key, process number"
-    return 1
+    log 1 "run versity app w/posix command requires access ID, secret key, process number"
+    exit 1
   fi
   base_command=("$VERSITY_EXE" --access="$1" --secret="$2" --region="$AWS_REGION")
   if [ -n "$RUN_USERS" ]; then
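kill -0 delivers no signal; it only asks whether the PID still exists, which is why it serves as the liveness probe after the one-second grace period above. A more patient variant would poll rather than check once, for example this hypothetical helper (not part of the suite):

# return 0 if the process survives the whole startup window, 1 if it exits early
wait_for_startup() {
  local pid=$1 attempts=${2:-5}
  for ((i = 0; i < attempts; i++)); do
    kill -0 "$pid" 2>/dev/null || return 1
    sleep 1
  done
  return 0
}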
@@ -80,17 +81,14 @@ run_versity_app_posix() {
   base_command+=(posix "$LOCAL_FOLDER")
   export base_command

-  if ! start_versity_process "$3"; then
-    log 2 "error starting versity process"
-    return 1
-  fi
+  start_versity_process "$3"
   return 0
 }

 run_versity_app_scoutfs() {
   if [[ $# -ne 3 ]]; then
-    echo "run versity app w/scoutfs command requires access ID, secret key, process number"
-    return 1
+    log 1 "run versity app w/scoutfs command requires access ID, secret key, process number"
+    exit 1
   fi
   base_command=("$VERSITY_EXE" --access="$1" --secret="$2" --region="$AWS_REGION" --iam-dir="$USERS_FOLDER")
   if [ -n "$CERT" ] && [ -n "$KEY" ]; then
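run_versity_app_posix and run_versity_app_scoutfs assemble nearly identical invocations, differing in the backend subcommand and the scoutfs variant's --iam-dir flag. Once the array is expanded, the resulting command lines look roughly like this (keys and paths are placeholders):

versitygw --access=AKIAEXAMPLE --secret=secretexample --region=us-west-2 posix /tmp/gw
versitygw --access=AKIAEXAMPLE --secret=secretexample --region=us-west-2 --iam-dir=/tmp/users scoutfs /tmp/gw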
@@ -102,19 +100,14 @@ run_versity_app_scoutfs() {
   base_command+=(scoutfs "$LOCAL_FOLDER")
   export base_command

-  local versity_result
-  start_versity_process "$3" || versity_result=$?
-  if [[ $versity_result -ne 0 ]]; then
-    echo "error starting versity process"
-    return 1
-  fi
+  start_versity_process "$3"
   return 0
 }

 run_versity_app_s3() {
   if [[ $# -ne 1 ]]; then
-    log 2 "run versity app w/s3 command requires process number"
-    return 1
+    log 1 "run versity app w/s3 command requires process number"
+    exit 1
   fi
   base_command=("$VERSITY_EXE" --access="$AWS_ACCESS_KEY_ID" --secret="$AWS_SECRET_ACCESS_KEY")
   if [ -n "$CERT" ] && [ -n "$KEY" ]; then
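With the s3 backend the gateway fronts a real S3 endpoint, which is why two credential pairs appear: the pair before the subcommand authenticates clients to the gateway, and the pair after it authenticates the gateway to https://s3.amazonaws.com. Roughly, with placeholder keys:

versitygw --access=GATEWAY_KEY --secret=GATEWAY_SECRET s3 --access=UPSTREAM_KEY --secret=UPSTREAM_SECRET --region=us-west-2 --endpoint=https://s3.amazonaws.com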
@@ -128,43 +121,28 @@ run_versity_app_s3() {
   base_command+=(s3 --access="$AWS_ACCESS_KEY_ID_TWO" --secret="$AWS_SECRET_ACCESS_KEY_TWO" --region="$AWS_REGION" --endpoint=https://s3.amazonaws.com)
   export base_command

-  if ! start_versity_process "$1"; then
-    log 2 "error starting versity process"
-    return 1
-  fi
+  start_versity_process "$1"
   return 0
 }

 run_versity_app() {
   if [[ $BACKEND == 'posix' ]]; then
-    if ! run_versity_app_posix "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "1"; then
-      log 2 "error starting versity app"
-      return 1
-    fi
+    run_versity_app_posix "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "1"
   elif [[ $BACKEND == 'scoutfs' ]]; then
-    run_versity_app_scoutfs "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "1" || result_one=$?
-    if [[ $result_one -ne 0 ]]; then
-      echo "error starting versity app"
-      return 1
-    fi
+    run_versity_app_scoutfs "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "1"
   elif [[ $BACKEND == 's3' ]]; then
-    if ! run_versity_app_posix "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "1"; then
-      log 2 "error starting versity app"
-      return 1
-    fi
-    if ! run_versity_app_s3 "2"; then
-      log 2 "error starting second versity app"
-      return 1
-    fi
+    run_versity_app_posix "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "1"
+    run_versity_app_s3 "2"
   else
-    log 2 "unrecognized backend type $BACKEND"
-    return 1
+    log 1 "unrecognized backend type $BACKEND"
+    exit 1
   fi
   if [[ $IAM_TYPE == "s3" ]]; then
     if ! bucket_exists "s3api" "$USERS_BUCKET"; then
       if ! create_bucket "s3api" "$USERS_BUCKET"; then
-        log 2 "error creating IAM bucket"
-        return 1
+        log 1 "error creating IAM bucket"
+        teardown
+        exit 1
       fi
     fi
   fi
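The pattern running through this file, log 2 / return 1 becoming log 1 / exit 1 with the caller-side if ! ...; then blocks dropped, moves failure handling from "report status to the caller" into the helpers themselves. The two halves only work together: once callers invoke the helpers bare, a return 1 would be silently discarded, while exit 1 (preceded by teardown where cleanup is needed, as in the IAM-bucket branch) stops the sourcing script immediately. A two-line illustration of the difference, hypothetical rather than from the suite:

helper_return() { return 1; }   # caller must check: helper_return || exit 1
helper_exit()   { exit 1; }     # aborts the sourcing script by itself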