Mirror of https://github.com/versity/versitygw.git (synced 2026-01-27 13:32:02 +00:00)
Compare commits
23 Commits
| SHA1 |
|---|
| 33b7116aab |
| 0009845acd |
| a912980173 |
| 096f370322 |
| b4cd35f60b |
| aba8d03ddf |
| 4a7e2296b9 |
| 2c165a632c |
| 3fc8956baf |
| acf69ab03d |
| 60e4a07e65 |
| ba8e1f7910 |
| 864bbf81ff |
| 259a385aea |
| 0c3771ae2d |
| af469cd279 |
| 6f9c6fde37 |
| dd7de194f9 |
| ec53605ea3 |
| 47ed2d65c1 |
| 5126aedeff |
| a780f89ff0 |
| 4a56d570ad |
```diff
@@ -17,6 +17,7 @@ package auth
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"strings"

@@ -292,13 +293,13 @@ func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) e
 		return nil
 	}

-	policy, err := be.GetBucketPolicy(ctx, opts.Bucket)
-	if err != nil {
-		return err
+	policy, policyErr := be.GetBucketPolicy(ctx, opts.Bucket)
+	if policyErr != nil && !errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
+		return policyErr
 	}

 	// If bucket policy is not set and the ACL is default, only the owner has access
-	if len(policy) == 0 && opts.Acl.ACL == "" && len(opts.Acl.Grantees) == 0 {
+	if errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) && opts.Acl.ACL == "" && len(opts.Acl.Grantees) == 0 {
 		return s3err.GetAPIError(s3err.ErrAccessDenied)
 	}
```
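A note on the hunk above: `GetBucketPolicy` now signals a missing policy with the typed `ErrNoSuchBucketPolicy` API error instead of an empty byte slice, so callers branch with `errors.Is` rather than `len(policy) == 0`. A minimal caller-side sketch of the pattern (the `hasPolicy` helper is hypothetical; `be` is any initialized `backend.Backend`):

```go
package main

import (
	"context"
	"errors"

	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3err"
)

// hasPolicy is a hypothetical helper showing the new sentinel-error pattern:
// "no policy document" is a normal outcome, everything else is a failure.
func hasPolicy(ctx context.Context, be backend.Backend, bucket string) (bool, error) {
	_, err := be.GetBucketPolicy(ctx, bucket)
	if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
```

The rewritten `VerifyAccess` above relies on the same `errors.Is` comparison, which works because `s3err.GetAPIError` returns a comparable `APIError` value.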
```diff
@@ -117,7 +117,7 @@ func ValidatePolicyDocument(policyBin []byte, bucket string, iam IAMService) err

 func verifyBucketPolicy(policy []byte, access, bucket, object string, action Action) error {
 	// If bucket policy is not set
-	if len(policy) == 0 {
+	if policy == nil {
 		return nil
 	}
```
```diff
@@ -33,11 +33,6 @@ type BucketLockConfig struct {
 	CreatedAt *time.Time
 }

-type ObjectLockConfig struct {
-	LegalHoldEnabled bool
-	Retention        *types.ObjectLockRetention
-}
-
 func ParseBucketLockConfigurationInput(input []byte) ([]byte, error) {
 	var lockConfig types.ObjectLockConfiguration
 	if err := xml.Unmarshal(input, &lockConfig); err != nil {

@@ -172,12 +167,12 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
 		case types.ObjectLockRetentionModeGovernance:
 			if !isAdminOrRoot {
 				policy, err := be.GetBucketPolicy(ctx, bucket)
+				if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
+					return s3err.GetAPIError(s3err.ErrObjectLocked)
+				}
 				if err != nil {
 					return err
 				}
-				if len(policy) == 0 {
-					return s3err.GetAPIError(s3err.ErrObjectLocked)
-				}
 				err = verifyBucketPolicy(policy, userAccess, bucket, obj, BypassGovernanceRetentionAction)
 				if err != nil {
 					return s3err.GetAPIError(s3err.ErrObjectLocked)
```
```diff
@@ -20,6 +20,7 @@ import (
 	"encoding/base64"
 	"encoding/binary"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"math"

@@ -48,10 +49,13 @@ import (
 type key string

 const (
-	keyAclCapital key = "Acl"
-	keyAclLower   key = "acl"
-	keyTags       key = "Tags"
-	keyPolicy     key = "Policy"
+	keyAclCapital   key = "Acl"
+	keyAclLower     key = "acl"
+	keyTags         key = "Tags"
+	keyPolicy       key = "Policy"
+	keyBucketLock   key = "Bucket-Lock"
+	keyObjRetention key = "Object_retention"
+	keyObjLegalHold key = "Object_legal_hold"
 )

 type Azure struct {
```
```diff
@@ -122,6 +126,21 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
 	meta := map[string]*string{
 		string(keyAclCapital): backend.GetStringPtr(string(acl)),
 	}
+
+	if input.ObjectLockEnabledForBucket != nil && *input.ObjectLockEnabledForBucket {
+		now := time.Now()
+		defaultLock := auth.BucketLockConfig{
+			Enabled:   true,
+			CreatedAt: &now,
+		}
+
+		defaultLockParsed, err := json.Marshal(defaultLock)
+		if err != nil {
+			return fmt.Errorf("parse default bucket lock state: %w", err)
+		}
+
+		meta[string(keyBucketLock)] = backend.GetStringPtr(string(defaultLockParsed))
+	}
 	_, err := az.client.CreateContainer(ctx, *input.Bucket, &container.CreateOptions{Metadata: meta})
 	return azureErrToS3Err(err)
 }
```
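When Object Lock is requested at bucket creation, the Azure backend marshals a default `auth.BucketLockConfig` and stores the JSON under the `Bucket-Lock` metadata key. A small sketch of what that metadata value looks like (field names taken from this diff; the exact JSON keys are whatever `encoding/json` derives from them):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/versity/versitygw/auth"
)

func main() {
	now := time.Now()
	cfg := auth.BucketLockConfig{
		Enabled:   true,
		CreatedAt: &now,
	}

	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// The printed JSON string is what CreateBucket stores as the
	// container's Bucket-Lock metadata value.
	fmt.Println(string(b))
}
```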
```diff
@@ -185,6 +204,28 @@ func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (string,
 		return "", azureErrToS3Err(err)
 	}

+	// Set object legal hold
+	if po.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn {
+		if err := az.PutObjectLegalHold(ctx, *po.Bucket, *po.Key, "", true); err != nil {
+			return "", err
+		}
+	}
+
+	// Set object retention
+	if po.ObjectLockMode != "" {
+		retention := types.ObjectLockRetention{
+			Mode:            types.ObjectLockRetentionMode(po.ObjectLockMode),
+			RetainUntilDate: po.ObjectLockRetainUntilDate,
+		}
+		retParsed, err := json.Marshal(retention)
+		if err != nil {
+			return "", fmt.Errorf("parse object lock retention: %w", err)
+		}
+		if err := az.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", retParsed); err != nil {
+			return "", err
+		}
+	}
+
 	return string(*uploadResp.ETag), nil
 }
```
```diff
@@ -231,7 +272,7 @@ func (az *Azure) GetBucketTagging(ctx context.Context, bucket string) (map[strin

 	tagsJson, ok := resp.Metadata[string(keyTags)]
 	if !ok {
-		return map[string]string{}, nil
+		return nil, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)
 	}

 	var tags map[string]string
```
```diff
@@ -314,6 +355,61 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
 	}, nil
 }

+func (az *Azure) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
+	data, err := az.HeadObject(ctx, &s3.HeadObjectInput{
+		Bucket: input.Bucket,
+		Key:    input.Key,
+	})
+	if err == nil {
+		return s3response.GetObjectAttributesResult{
+			ETag:         data.ETag,
+			LastModified: data.LastModified,
+			ObjectSize:   data.ContentLength,
+			StorageClass: &data.StorageClass,
+			VersionId:    data.VersionId,
+		}, nil
+	}
+	if !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
+		return s3response.GetObjectAttributesResult{}, err
+	}
+
+	resp, err := az.ListParts(ctx, &s3.ListPartsInput{
+		Bucket:           input.Bucket,
+		Key:              input.Key,
+		PartNumberMarker: input.PartNumberMarker,
+		MaxParts:         input.MaxParts,
+	})
+	if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchUpload)) {
+		return s3response.GetObjectAttributesResult{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
+	}
+	if err != nil {
+		return s3response.GetObjectAttributesResult{}, err
+	}
+
+	parts := []types.ObjectPart{}
+
+	for _, p := range resp.Parts {
+		partNumber := int32(p.PartNumber)
+		size := p.Size
+
+		parts = append(parts, types.ObjectPart{
+			Size:       &size,
+			PartNumber: &partNumber,
+		})
+	}
+
+	//TODO: handle PartsCount prop
+	return s3response.GetObjectAttributesResult{
+		ObjectParts: &s3response.ObjectParts{
+			IsTruncated:          resp.IsTruncated,
+			MaxParts:             resp.MaxParts,
+			PartNumberMarker:     resp.PartNumberMarker,
+			NextPartNumberMarker: resp.PartNumberMarker,
+			Parts:                parts,
+		},
+	}, nil
+}
+
 func (az *Azure) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
 	pager := az.client.NewListBlobsFlatPager(*input.Bucket, &azblob.ListBlobsFlatOptions{
 		Marker: input.Marker,
```
```diff
@@ -829,7 +925,7 @@ func (az *Azure) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, er

 	policyPtr, ok := props.Metadata[string(keyPolicy)]
 	if !ok {
-		return []byte{}, nil
+		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
 	}

 	policy, err := base64.StdEncoding.DecodeString(*policyPtr)
```
```diff
@@ -844,6 +940,194 @@ func (az *Azure) DeleteBucketPolicy(ctx context.Context, bucket string) error {
 	return az.PutBucketPolicy(ctx, bucket, nil)
 }

+func (az *Azure) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
+	client, err := az.getContainerClient(bucket)
+	if err != nil {
+		return err
+	}
+
+	props, err := client.GetProperties(ctx, nil)
+	if err != nil {
+		return azureErrToS3Err(err)
+	}
+
+	props.Metadata[string(keyBucketLock)] = backend.GetStringPtr(string(config))
+
+	_, err = client.SetMetadata(ctx, &container.SetMetadataOptions{
+		Metadata: props.Metadata,
+	})
+	if err != nil {
+		return azureErrToS3Err(err)
+	}
+
+	return nil
+}
+
+func (az *Azure) GetObjectLockConfiguration(ctx context.Context, bucket string) ([]byte, error) {
+	client, err := az.getContainerClient(bucket)
+	if err != nil {
+		return nil, err
+	}
+	props, err := client.GetProperties(ctx, nil)
+	if err != nil {
+		return nil, azureErrToS3Err(err)
+	}
+
+	config, ok := props.Metadata[string(keyBucketLock)]
+	if !ok {
+		return nil, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)
+	}
+
+	return []byte(*config), nil
+}
+
+func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, versionId string, retention []byte) error {
+	contClient, err := az.getContainerClient(bucket)
+	if err != nil {
+		return err
+	}
+	contProps, err := contClient.GetProperties(ctx, nil)
+	if err != nil {
+		return azureErrToS3Err(err)
+	}
+
+	contCfg, ok := contProps.Metadata[string(keyBucketLock)]
+	if !ok {
+		return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
+	}
+
+	var bucketLockConfig auth.BucketLockConfig
+	if err := json.Unmarshal([]byte(*contCfg), &bucketLockConfig); err != nil {
+		return fmt.Errorf("parse bucket lock config: %w", err)
+	}
+
+	if !bucketLockConfig.Enabled {
+		return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
+	}
+
+	blobClient, err := az.getBlobClient(bucket, object)
+	if err != nil {
+		return err
+	}
+
+	blobProps, err := blobClient.GetProperties(ctx, nil)
+	if err != nil {
+		return azureErrToS3Err(err)
+	}
+
+	meta := blobProps.Metadata
+	if meta == nil {
+		meta = map[string]*string{
+			string(keyObjRetention): backend.GetStringPtr(string(retention)),
+		}
+	} else {
+		meta[string(keyObjRetention)] = backend.GetStringPtr(string(retention))
+	}
+
+	_, err = blobClient.SetMetadata(ctx, meta, nil)
+	if err != nil {
+		return azureErrToS3Err(err)
+	}
+
+	return nil
+}
+
+func (az *Azure) GetObjectRetention(ctx context.Context, bucket, object, versionId string) ([]byte, error) {
+	client, err := az.getBlobClient(bucket, object)
+	if err != nil {
+		return nil, err
+	}
+	props, err := client.GetProperties(ctx, nil)
+	if err != nil {
+		return nil, azureErrToS3Err(err)
+	}
+
+	retentionPtr, ok := props.Metadata[string(keyObjRetention)]
+	if !ok {
+		return nil, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)
+	}
+
+	return []byte(*retentionPtr), nil
+}
+
+func (az *Azure) PutObjectLegalHold(ctx context.Context, bucket, object, versionId string, status bool) error {
+	contClient, err := az.getContainerClient(bucket)
+	if err != nil {
+		return err
+	}
+	contProps, err := contClient.GetProperties(ctx, nil)
+	if err != nil {
+		return azureErrToS3Err(err)
+	}
+
+	contCfg, ok := contProps.Metadata[string(keyBucketLock)]
+	if !ok {
+		return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
+	}
+
+	var bucketLockConfig auth.BucketLockConfig
+	if err := json.Unmarshal([]byte(*contCfg), &bucketLockConfig); err != nil {
+		return fmt.Errorf("parse bucket lock config: %w", err)
+	}
+
+	if !bucketLockConfig.Enabled {
+		return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
+	}
+
+	blobClient, err := az.getBlobClient(bucket, object)
+	if err != nil {
+		return err
+	}
+
+	blobProps, err := blobClient.GetProperties(ctx, nil)
+	if err != nil {
+		return azureErrToS3Err(err)
+	}
+
+	var statusData string
+	if status {
+		statusData = "1"
+	} else {
+		statusData = "0"
+	}
+
+	meta := blobProps.Metadata
+	if meta == nil {
+		meta = map[string]*string{
+			string(keyObjLegalHold): &statusData,
+		}
+	} else {
+		meta[string(keyObjLegalHold)] = &statusData
+	}
+
+	_, err = blobClient.SetMetadata(ctx, meta, nil)
+	if err != nil {
+		return azureErrToS3Err(err)
+	}
+
+	return nil
+}
+
+func (az *Azure) GetObjectLegalHold(ctx context.Context, bucket, object, versionId string) (*bool, error) {
+	client, err := az.getBlobClient(bucket, object)
+	if err != nil {
+		return nil, err
+	}
+	props, err := client.GetProperties(ctx, nil)
+	if err != nil {
+		return nil, azureErrToS3Err(err)
+	}
+
+	retentionPtr, ok := props.Metadata[string(keyObjLegalHold)]
+	if !ok {
+		return nil, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)
+	}
+
+	status := *retentionPtr == "1"
+
+	return &status, nil
+}
+
 func (az *Azure) ChangeBucketOwner(ctx context.Context, bucket, newOwner string) error {
 	client, err := az.getContainerClient(bucket)
 	if err != nil {
```
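Both `PutObjectRetention` and `PutObjectLegalHold` above refuse to touch a blob unless the container's `Bucket-Lock` metadata exists and decodes to an enabled config, mirroring S3's rule that per-object lock settings require a lock-enabled bucket; the legal hold itself is persisted as the string "1" or "0" in blob metadata. A hedged round-trip sketch against the `backend.Backend` interface (assuming the lock methods are part of that interface, as the mock later in this diff suggests; `be` is an initialized backend with lock enabled on the bucket):

```go
package main

import (
	"context"
	"fmt"

	"github.com/versity/versitygw/backend"
)

// verifyLegalHold sets the legal-hold flag and reads it back; on the Azure
// backend this round-trips through blob metadata encoded as "1"/"0".
func verifyLegalHold(ctx context.Context, be backend.Backend, bucket, object string) error {
	if err := be.PutObjectLegalHold(ctx, bucket, object, "", true); err != nil {
		return fmt.Errorf("put legal hold: %w", err)
	}
	status, err := be.GetObjectLegalHold(ctx, bucket, object, "")
	if err != nil {
		return fmt.Errorf("get legal hold: %w", err)
	}
	if status == nil || !*status {
		return fmt.Errorf("legal hold readback mismatch")
	}
	return nil
}
```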
```diff
@@ -58,7 +58,7 @@ type Backend interface {
 	HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
 	GetObject(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error)
 	GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
-	GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error)
+	GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error)
 	CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
 	ListObjects(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
 	ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)

@@ -173,8 +173,8 @@ func (BackendUnsupported) GetObject(context.Context, *s3.GetObjectInput, io.Writ
 func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
 	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
 }
-func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
-	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
+func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
+	return s3response.GetObjectAttributesResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
 }
 func (BackendUnsupported) CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
 	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
```
```diff
@@ -30,6 +30,7 @@ import (
 	"strconv"
 	"strings"
 	"syscall"
+	"time"

 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
```
```diff
@@ -229,6 +230,23 @@ func (p *Posix) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, a
 		return fmt.Errorf("set acl: %w", err)
 	}

+	if input.ObjectLockEnabledForBucket != nil && *input.ObjectLockEnabledForBucket {
+		now := time.Now()
+		defaultLock := auth.BucketLockConfig{
+			Enabled:   true,
+			CreatedAt: &now,
+		}
+
+		defaultLockParsed, err := json.Marshal(defaultLock)
+		if err != nil {
+			return fmt.Errorf("parse default bucket lock state: %w", err)
+		}
+
+		if err := p.meta.StoreAttribute(bucket, "", bucketLockKey, defaultLockParsed); err != nil {
+			return fmt.Errorf("set default bucket lock: %w", err)
+		}
+	}
+
 	return nil
 }
```
```diff
@@ -521,6 +539,18 @@ func (p *Posix) checkUploadIDExists(bucket, object, uploadID string) ([32]byte,
 	return sum, nil
 }

+func (p *Posix) retrieveUploadId(bucket, object string) (string, error) {
+	sum := sha256.Sum256([]byte(object))
+	objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
+
+	entries, err := os.ReadDir(objdir)
+	if err != nil || len(entries) == 0 {
+		return "", s3err.GetAPIError(s3err.ErrNoSuchKey)
+	}
+
+	return entries[0].Name(), nil
+}
+
 // fll out the user metadata map with the metadata for the object
 // and return the content type and encoding
 func (p *Posix) loadUserMetaData(bucket, object string, m map[string]string) (string, string) {
```
```diff
@@ -1214,6 +1244,7 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e
 		}
 	}

+	// Set object tagging
 	if tagsStr != "" {
 		err := p.PutObjectTagging(ctx, *po.Bucket, *po.Key, tags)
 		if err != nil {
```
```diff
@@ -1221,6 +1252,28 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e
 		}
 	}

+	// Set object legal hold
+	if po.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn {
+		if err := p.PutObjectLegalHold(ctx, *po.Bucket, *po.Key, "", true); err != nil {
+			return "", err
+		}
+	}
+
+	// Set object retention
+	if po.ObjectLockMode != "" {
+		retention := types.ObjectLockRetention{
+			Mode:            types.ObjectLockRetentionMode(po.ObjectLockMode),
+			RetainUntilDate: po.ObjectLockRetainUntilDate,
+		}
+		retParsed, err := json.Marshal(retention)
+		if err != nil {
+			return "", fmt.Errorf("parse object lock retention: %w", err)
+		}
+		if err := p.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", retParsed); err != nil {
+			return "", err
+		}
+	}
+
 	dataSum := hash.Sum(nil)
 	etag := hex.EncodeToString(dataSum[:])
 	err = p.meta.StoreAttribute(*po.Bucket, *po.Key, etagkey, []byte(etag))
```
```diff
@@ -1402,12 +1455,15 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
 		etag = ""
 	}

+	var tagCount *int32
 	tags, err := p.getAttrTags(bucket, object)
-	if err != nil {
-		return nil, fmt.Errorf("get object tags: %w", err)
+	if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)) {
+		return nil, err
 	}
+	if tags != nil {
+		tgCount := int32(len(tags))
+		tagCount = &tgCount
+	}

-	tagCount := int32(len(tags))
-
 	return &s3.GetObjectOutput{
 		AcceptRanges: &acceptRange,

@@ -1417,7 +1473,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
 		ETag:         &etag,
 		LastModified: backend.GetTimePtr(fi.ModTime()),
 		Metadata:     userMetaData,
-		TagCount:     &tagCount,
+		TagCount:     tagCount,
 		ContentRange: &contentRange,
 	}, nil
 }

@@ -1447,12 +1503,15 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
 		etag = ""
 	}

+	var tagCount *int32
 	tags, err := p.getAttrTags(bucket, object)
-	if err != nil {
-		return nil, fmt.Errorf("get object tags: %w", err)
+	if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)) {
+		return nil, err
 	}
+	if tags != nil {
+		tgCount := int32(len(tags))
+		tagCount = &tgCount
+	}

-	tagCount := int32(len(tags))
-
 	return &s3.GetObjectOutput{
 		AcceptRanges: &acceptRange,

@@ -1462,12 +1521,12 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
 		ETag:         &etag,
 		LastModified: backend.GetTimePtr(fi.ModTime()),
 		Metadata:     userMetaData,
-		TagCount:     &tagCount,
+		TagCount:     tagCount,
 		ContentRange: &contentRange,
 	}, nil
 }

-func (p *Posix) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
+func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
 	if input.Bucket == nil {
 		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
 	}
```
```diff
@@ -1505,13 +1564,98 @@ func (p *Posix) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.He

 	size := fi.Size()

+	var objectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+	status, err := p.GetObjectLegalHold(ctx, bucket, object, "")
+	if err == nil {
+		if *status {
+			objectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOn
+		} else {
+			objectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOff
+		}
+	}
+
+	var objectLockMode types.ObjectLockMode
+	var objectLockRetainUntilDate *time.Time
+	retention, err := p.GetObjectRetention(ctx, bucket, object, "")
+	if err == nil {
+		var config types.ObjectLockRetention
+		if err := json.Unmarshal(retention, &config); err == nil {
+			objectLockMode = types.ObjectLockMode(config.Mode)
+			objectLockRetainUntilDate = config.RetainUntilDate
+		}
+	}
+
+	//TODO: the method must handle multipart upload case
+
 	return &s3.HeadObjectOutput{
-		ContentLength:   &size,
-		ContentType:     &contentType,
-		ContentEncoding: &contentEncoding,
-		ETag:            &etag,
-		LastModified:    backend.GetTimePtr(fi.ModTime()),
-		Metadata:        userMetaData,
+		ContentLength:             &size,
+		ContentType:               &contentType,
+		ContentEncoding:           &contentEncoding,
+		ETag:                      &etag,
+		LastModified:              backend.GetTimePtr(fi.ModTime()),
+		Metadata:                  userMetaData,
+		ObjectLockLegalHoldStatus: objectLockLegalHoldStatus,
+		ObjectLockMode:            objectLockMode,
+		ObjectLockRetainUntilDate: objectLockRetainUntilDate,
 	}, nil
 }

+func (p *Posix) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
+	data, err := p.HeadObject(ctx, &s3.HeadObjectInput{
+		Bucket: input.Bucket,
+		Key:    input.Key,
+	})
+	if err == nil {
+		return s3response.GetObjectAttributesResult{
+			ETag:         data.ETag,
+			LastModified: data.LastModified,
+			ObjectSize:   data.ContentLength,
+			StorageClass: &data.StorageClass,
+			VersionId:    data.VersionId,
+		}, nil
+	}
+	if !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
+		return s3response.GetObjectAttributesResult{}, err
+	}
+
+	uploadId, err := p.retrieveUploadId(*input.Bucket, *input.Key)
+	if err != nil {
+		return s3response.GetObjectAttributesResult{}, err
+	}
+
+	resp, err := p.ListParts(ctx, &s3.ListPartsInput{
+		Bucket:           input.Bucket,
+		Key:              input.Key,
+		UploadId:         &uploadId,
+		PartNumberMarker: input.PartNumberMarker,
+		MaxParts:         input.MaxParts,
+	})
+	if err != nil {
+		return s3response.GetObjectAttributesResult{}, err
+	}
+
+	parts := []types.ObjectPart{}
+
+	for _, p := range resp.Parts {
+		partNumber := int32(p.PartNumber)
+		size := p.Size
+
+		parts = append(parts, types.ObjectPart{
+			Size:       &size,
+			PartNumber: &partNumber,
+		})
+	}
+
+	//TODO: handle PartsCount prop
+	//TODO: Maybe simply calling ListParts isn't a good option
+	return s3response.GetObjectAttributesResult{
+		ObjectParts: &s3response.ObjectParts{
+			IsTruncated:          resp.IsTruncated,
+			MaxParts:             resp.MaxParts,
+			PartNumberMarker:     resp.PartNumberMarker,
+			NextPartNumberMarker: resp.NextPartNumberMarker,
+			Parts:                parts,
+		},
+	}, nil
+}
```
```diff
@@ -1900,7 +2044,7 @@ func (p *Posix) getAttrTags(bucket, object string) (map[string]string, error) {
 		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
 	}
 	if errors.Is(err, meta.ErrNoSuchKey) {
-		return tags, nil
+		return nil, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)
 	}
 	if err != nil {
 		return nil, fmt.Errorf("get tags: %w", err)
```
```diff
@@ -1998,7 +2142,7 @@ func (p *Posix) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, err

 	policy, err := p.meta.RetrieveAttribute(bucket, "", policykey)
 	if errors.Is(err, meta.ErrNoSuchKey) {
-		return []byte{}, nil
+		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)
 	}
 	if errors.Is(err, fs.ErrNotExist) {
 		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
```
```diff
@@ -33,6 +33,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/aws/smithy-go"
+	"github.com/versity/versitygw/auth"
 	"github.com/versity/versitygw/backend"
 	"github.com/versity/versitygw/s3err"
 	"github.com/versity/versitygw/s3response"
```
```diff
@@ -295,9 +296,41 @@ func (s *S3Proxy) GetObject(ctx context.Context, input *s3.GetObjectInput, w io.
 	return output, nil
 }

-func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
+func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
 	out, err := s.client.GetObjectAttributes(ctx, input)
-	return out, handleError(err)
+
+	parts := s3response.ObjectParts{}
+	objParts := out.ObjectParts
+	if objParts != nil {
+		if objParts.PartNumberMarker != nil {
+			partNumberMarker, err := strconv.Atoi(*objParts.PartNumberMarker)
+			if err != nil {
+				parts.PartNumberMarker = partNumberMarker
+			}
+			if objParts.NextPartNumberMarker != nil {
+				nextPartNumberMarker, err := strconv.Atoi(*objParts.NextPartNumberMarker)
+				if err != nil {
+					parts.NextPartNumberMarker = nextPartNumberMarker
+				}
+			}
+			if objParts.IsTruncated != nil {
+				parts.IsTruncated = *objParts.IsTruncated
+			}
+			if objParts.MaxParts != nil {
+				parts.MaxParts = int(*objParts.MaxParts)
+			}
+			parts.Parts = objParts.Parts
+		}
+	}
+
+	return s3response.GetObjectAttributesResult{
+		ETag:         out.ETag,
+		LastModified: out.LastModified,
+		ObjectSize:   out.ObjectSize,
+		StorageClass: &out.StorageClass,
+		VersionId:    out.VersionId,
+		ObjectParts:  &parts,
+	}, handleError(err)
 }

 func (s *S3Proxy) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
```
```diff
@@ -436,6 +469,128 @@ func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object string
 	return handleError(err)
 }

+func (s *S3Proxy) PutBucketPolicy(ctx context.Context, bucket string, policy []byte) error {
+	_, err := s.client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
+		Bucket: &bucket,
+		Policy: backend.GetStringPtr(string(policy)),
+	})
+	return handleError(err)
+}
+
+func (s *S3Proxy) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, error) {
+	policy, err := s.client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
+		Bucket: &bucket,
+	})
+	if err != nil {
+		return nil, handleError(err)
+	}
+
+	result := []byte{}
+	if policy.Policy != nil {
+		result = []byte(*policy.Policy)
+	}
+
+	return result, nil
+}
+
+func (s *S3Proxy) DeleteBucketPolicy(ctx context.Context, bucket string) error {
+	_, err := s.client.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{
+		Bucket: &bucket,
+	})
+	return handleError(err)
+}
+
+func (s *S3Proxy) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
+	cfg, err := auth.ParseBucketLockConfigurationOutput(config)
+	if err != nil {
+		return err
+	}
+
+	_, err = s.client.PutObjectLockConfiguration(ctx, &s3.PutObjectLockConfigurationInput{
+		Bucket:                  &bucket,
+		ObjectLockConfiguration: cfg,
+	})
+
+	return handleError(err)
+}
+
+func (s *S3Proxy) GetObjectLockConfiguration(ctx context.Context, bucket string) ([]byte, error) {
+	resp, err := s.client.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
+		Bucket: &bucket,
+	})
+	if err != nil {
+		return nil, handleError(err)
+	}
+
+	config := auth.BucketLockConfig{
+		Enabled:          resp.ObjectLockConfiguration.ObjectLockEnabled == types.ObjectLockEnabledEnabled,
+		DefaultRetention: resp.ObjectLockConfiguration.Rule.DefaultRetention,
+	}
+
+	return json.Marshal(config)
+}
+
+func (s *S3Proxy) PutObjectRetention(ctx context.Context, bucket, object, versionId string, retention []byte) error {
+	ret, err := auth.ParseObjectLockRetentionOutput(retention)
+	if err != nil {
+		return err
+	}
+
+	_, err = s.client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{
+		Bucket:    &bucket,
+		Key:       &object,
+		VersionId: &versionId,
+		Retention: ret,
+	})
+	return handleError(err)
+}
+
+func (s *S3Proxy) GetObjectRetention(ctx context.Context, bucket, object, versionId string) ([]byte, error) {
+	resp, err := s.client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
+		Bucket:    &bucket,
+		Key:       &object,
+		VersionId: &versionId,
+	})
+	if err != nil {
+		return nil, handleError(err)
+	}
+
+	return json.Marshal(resp.Retention)
+}
+
+func (s *S3Proxy) PutObjectLegalHold(ctx context.Context, bucket, object, versionId string, status bool) error {
+	var st types.ObjectLockLegalHoldStatus
+	if status {
+		st = types.ObjectLockLegalHoldStatusOn
+	} else {
+		st = types.ObjectLockLegalHoldStatusOff
+	}
+
+	_, err := s.client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{
+		Bucket:    &bucket,
+		Key:       &object,
+		VersionId: &versionId,
+		LegalHold: &types.ObjectLockLegalHold{
+			Status: st,
+		},
+	})
+	return handleError(err)
+}
+
+func (s *S3Proxy) GetObjectLegalHold(ctx context.Context, bucket, object, versionId string) (*bool, error) {
+	resp, err := s.client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{
+		Bucket:    &bucket,
+		Key:       &object,
+		VersionId: &versionId,
+	})
+	if err != nil {
+		return nil, handleError(err)
+	}
+
+	status := resp.LegalHold.Status == types.ObjectLockLegalHoldStatusOn
+	return &status, nil
+}
+
 func (s *S3Proxy) ChangeBucketOwner(ctx context.Context, bucket, newOwner string) error {
 	req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/change-bucket-owner/?bucket=%v&owner=%v", s.endpoint, bucket, newOwner), nil)
 	if err != nil {
```
```diff
@@ -54,19 +54,21 @@ func generateEventFiltersConfig(ctx *cli.Context) error {
 	}

 	config := s3event.EventFilter{
-		s3event.EventObjectCreated:           true,
-		s3event.EventObjectCreatedPut:        true,
-		s3event.EventObjectCreatedPost:       true,
-		s3event.EventObjectCreatedCopy:       true,
-		s3event.EventCompleteMultipartUpload: true,
-		s3event.EventObjectDeleted:           true,
-		s3event.EventObjectTagging:           true,
-		s3event.EventObjectTaggingPut:        true,
-		s3event.EventObjectTaggingDelete:     true,
-		s3event.EventObjectAclPut:            true,
-		s3event.EventObjectRestore:           true,
-		s3event.EventObjectRestorePost:       true,
-		s3event.EventObjectRestoreCompleted:  true,
+		s3event.EventObjectCreated:              true,
+		s3event.EventObjectCreatedPut:           true,
+		s3event.EventObjectCreatedPost:          true,
+		s3event.EventObjectCreatedCopy:          true,
+		s3event.EventCompleteMultipartUpload:    true,
+		s3event.EventObjectRemoved:              true,
+		s3event.EventObjectRemovedDelete:        true,
+		s3event.EventObjectRemovedDeleteObjects: true,
+		s3event.EventObjectTagging:              true,
+		s3event.EventObjectTaggingPut:           true,
+		s3event.EventObjectTaggingDelete:        true,
+		s3event.EventObjectAclPut:               true,
+		s3event.EventObjectRestore:              true,
+		s3event.EventObjectRestorePost:          true,
+		s3event.EventObjectRestoreCompleted:     true,
 	}

 	configBytes, err := json.Marshal(config)
```
go.mod (2 changed lines)

```diff
@@ -16,7 +16,7 @@ require (
 	github.com/nats-io/nats.go v1.34.1
 	github.com/pkg/xattr v0.4.9
 	github.com/segmentio/kafka-go v0.4.47
-	github.com/urfave/cli/v2 v2.27.1
+	github.com/urfave/cli/v2 v2.27.2
 	github.com/valyala/fasthttp v1.52.0
 	github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
 	golang.org/x/sys v0.19.0
```

go.sum (4 changed lines)

```diff
@@ -138,8 +138,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho=
-github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
+github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
+github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.52.0 h1:wqBQpxH71XW0e2g+Og4dzQM8pk34aFYlA1Ga8db7gU0=
```
```diff
@@ -77,7 +77,7 @@ var _ backend.Backend = &BackendMock{}
 //			GetObjectAclFunc: func(contextMoqParam context.Context, getObjectAclInput *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
 //				panic("mock out the GetObjectAcl method")
 //			},
-//			GetObjectAttributesFunc: func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
+//			GetObjectAttributesFunc: func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
 //				panic("mock out the GetObjectAttributes method")
 //			},
 //			GetObjectLegalHoldFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) (*bool, error) {

@@ -229,7 +229,7 @@ type BackendMock struct {
 	GetObjectAclFunc func(contextMoqParam context.Context, getObjectAclInput *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)

 	// GetObjectAttributesFunc mocks the GetObjectAttributes method.
-	GetObjectAttributesFunc func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error)
+	GetObjectAttributesFunc func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error)

 	// GetObjectLegalHoldFunc mocks the GetObjectLegalHold method.
 	GetObjectLegalHoldFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) (*bool, error)

@@ -1406,7 +1406,7 @@ func (mock *BackendMock) GetObjectAclCalls() []struct {
 }

 // GetObjectAttributes calls GetObjectAttributesFunc.
-func (mock *BackendMock) GetObjectAttributes(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
+func (mock *BackendMock) GetObjectAttributes(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
 	if mock.GetObjectAttributesFunc == nil {
 		panic("BackendMock.GetObjectAttributesFunc: method is nil but Backend.GetObjectAttributes was just called")
 	}
```
```diff
@@ -291,7 +291,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
 		})
 	}

-	if attrs := ctx.Get("X-Amz-Object-Attributes"); attrs != "" {
+	if ctx.Request().URI().QueryArgs().Has("attributes") {
 		err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
 			Acl:           parsedAcl,
 			AclPermission: types.PermissionRead,

@@ -309,17 +309,36 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
 				BucketOwner: parsedAcl.Owner,
 			})
 		}
-		var oattrs []types.ObjectAttributes
-		for _, a := range strings.Split(attrs, ",") {
-			oattrs = append(oattrs, types.ObjectAttributes(a))
+		maxParts := ctx.Get("X-Amz-Max-Parts")
+		partNumberMarker := ctx.Get("X-Amz-Part-Number-Marker")
+		maxPartsParsed, err := utils.ParseUint(maxParts)
+		if err != nil {
+			return SendXMLResponse(ctx, nil, err,
+				&MetaOpts{
+					Logger:      c.logger,
+					Action:      "GetObjectAttributes",
+					BucketOwner: parsedAcl.Owner,
+				})
 		}
+		attrs := utils.ParseObjectAttributes(ctx)

 		res, err := c.be.GetObjectAttributes(ctx.Context(),
 			&s3.GetObjectAttributesInput{
 				Bucket:           &bucket,
 				Key:              &key,
-				ObjectAttributes: oattrs,
+				PartNumberMarker: &partNumberMarker,
+				MaxParts:         &maxPartsParsed,
 				VersionId:        &versionId,
 			})
-		return SendXMLResponse(ctx, res, err,
+		if err != nil {
+			return SendXMLResponse(ctx, nil, err,
+				&MetaOpts{
+					Logger:      c.logger,
+					Action:      "GetObjectAttributes",
+					BucketOwner: parsedAcl.Owner,
+				})
+		}
+		return SendXMLResponse(ctx, utils.FilterObjectAttributes(attrs, res), err,
 			&MetaOpts{
 				Logger: c.logger,
 				Action: "GetObjectAttributes",
```
```diff
@@ -1200,9 +1219,14 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
 		})
 	}

+	lockHeader := ctx.Get("X-Amz-Bucket-Object-Lock-Enabled")
+	// CLI provides "True", SDK - "true"
+	lockEnabled := lockHeader == "True" || lockHeader == "true"
+
 	err = c.be.CreateBucket(ctx.Context(), &s3.CreateBucketInput{
-		Bucket:          &bucket,
-		ObjectOwnership: types.ObjectOwnership(acct.Access),
+		Bucket:                     &bucket,
+		ObjectOwnership:            types.ObjectOwnership(acct.Access),
+		ObjectLockEnabledForBucket: &lockEnabled,
 	}, updAcl)
 	return SendResponse(ctx, err,
 		&MetaOpts{
```
```diff
@@ -1761,6 +1785,52 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
 		})
 	}

+	legalHoldHdr := ctx.Get("X-Amz-Object-Lock-Legal-Hold")
+	objLockModeHdr := ctx.Get("X-Amz-Object-Lock-Mode")
+	objLockDate := ctx.Get("X-Amz-Object-Lock-Retain-Until-Date")
+
+	if (objLockDate != "" && objLockModeHdr == "") || (objLockDate == "" && objLockModeHdr != "") {
+		return SendResponse(ctx, s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders),
+			&MetaOpts{
+				Logger:      c.logger,
+				Action:      "PutObject",
+				BucketOwner: parsedAcl.Owner,
+			})
+	}
+
+	var retainUntilDate *time.Time
+	if objLockDate != "" {
+		rDate, err := time.Parse(time.RFC3339, objLockDate)
+		if err != nil {
+			return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
+				&MetaOpts{
+					Logger:      c.logger,
+					Action:      "PutObject",
+					BucketOwner: parsedAcl.Owner,
+				})
+		}
+		if rDate.Before(time.Now()) {
+			return SendResponse(ctx, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate),
+				&MetaOpts{
+					Logger:      c.logger,
+					Action:      "PutObject",
+					BucketOwner: parsedAcl.Owner,
+				})
+		}
+		retainUntilDate = &rDate
+	}
+
+	if objLockModeHdr != "" &&
+		objLockModeHdr != string(types.ObjectLockModeCompliance) &&
+		objLockModeHdr != string(types.ObjectLockModeGovernance) {
+		return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
+			&MetaOpts{
+				Logger:      c.logger,
+				Action:      "PutObject",
+				BucketOwner: parsedAcl.Owner,
+			})
+	}
+
 	var body io.Reader
 	bodyi := ctx.Locals("body-reader")
 	if bodyi != nil {
```
```diff
@@ -1772,12 +1842,15 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
 	ctx.Locals("logReqBody", false)
 	etag, err := c.be.PutObject(ctx.Context(),
 		&s3.PutObjectInput{
-			Bucket:        &bucket,
-			Key:           &keyStart,
-			ContentLength: &contentLength,
-			Metadata:      metadata,
-			Body:          body,
-			Tagging:       &tagging,
+			Bucket:                    &bucket,
+			Key:                       &keyStart,
+			ContentLength:             &contentLength,
+			Metadata:                  metadata,
+			Body:                      body,
+			Tagging:                   &tagging,
+			ObjectLockRetainUntilDate: retainUntilDate,
+			ObjectLockMode:            types.ObjectLockMode(objLockModeHdr),
+			ObjectLockLegalHoldStatus: types.ObjectLockLegalHoldStatus(legalHoldHdr),
 		})
 	ctx.Response().Header.Set("ETag", etag)
 	return SendResponse(ctx, err,
```
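With the handler now forwarding the three lock headers into `PutObjectInput`, an SDK client pointed at the gateway can set retention and legal hold in a single upload. A sketch using aws-sdk-go-v2 (bucket and key names are placeholders; `client` is assumed to be configured with the gateway's endpoint):

```go
package main

import (
	"context"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putLockedObject sends the X-Amz-Object-Lock-* headers the handler above
// validates: mode and retain-until-date must be supplied together, and the
// date must be in the future, otherwise the gateway rejects the request.
func putLockedObject(ctx context.Context, client *s3.Client) error {
	until := time.Now().Add(24 * time.Hour)
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:                    aws.String("my-bucket"), // placeholder
		Key:                       aws.String("my-key"),    // placeholder
		Body:                      strings.NewReader("hello"),
		ObjectLockMode:            types.ObjectLockModeGovernance,
		ObjectLockRetainUntilDate: &until,
		ObjectLockLegalHoldStatus: types.ObjectLockLegalHoldStatusOn,
	})
	return err
}
```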
```diff
@@ -1947,6 +2020,8 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
 			Logger:      c.logger,
 			Action:      "DeleteObjects",
 			BucketOwner: parsedAcl.Owner,
+			EvSender:    c.evSender,
+			EventName:   s3event.EventObjectRemovedDeleteObjects,
 		})
 }

@@ -2077,7 +2152,7 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
 			EvSender:    c.evSender,
 			Action:      "DeleteObject",
 			BucketOwner: parsedAcl.Owner,
-			EventName:   s3event.EventObjectDeleted,
+			EventName:   s3event.EventObjectRemovedDelete,
 			Status:      http.StatusNoContent,
 		})
 }
```
```diff
@@ -2191,7 +2266,7 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
 	if res.LastModified != nil {
 		lastmod = res.LastModified.Format(timefmt)
 	}
-	utils.SetResponseHeaders(ctx, []utils.CustomHeader{
+	headers := []utils.CustomHeader{
 		{
 			Key:   "Content-Length",
 			Value: fmt.Sprint(getint64(res.ContentLength)),

@@ -2220,7 +2295,27 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
 			Key:   "x-amz-restore",
 			Value: getstring(res.Restore),
 		},
-	})
+	}
+	if res.ObjectLockMode != "" {
+		headers = append(headers, utils.CustomHeader{
+			Key:   "x-amz-object-lock-mode",
+			Value: string(res.ObjectLockMode),
+		})
+	}
+	if res.ObjectLockLegalHoldStatus != "" {
+		headers = append(headers, utils.CustomHeader{
+			Key:   "x-amz-object-lock-legal-hold",
+			Value: string(res.ObjectLockLegalHoldStatus),
+		})
+	}
+	if res.ObjectLockRetainUntilDate != nil {
+		retainUntilDate := res.ObjectLockRetainUntilDate.Format(time.RFC3339)
+		headers = append(headers, utils.CustomHeader{
+			Key:   "x-amz-object-lock-retain-until-date",
+			Value: retainUntilDate,
+		})
+	}
+	utils.SetResponseHeaders(ctx, headers)

 	return SendResponse(ctx, nil,
 		&MetaOpts{
```
```diff
@@ -188,8 +188,8 @@ func TestS3ApiController_GetActions(t *testing.T) {
 		GetObjectAclFunc: func(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
 			return &s3.GetObjectAclOutput{}, nil
 		},
-		GetObjectAttributesFunc: func(context.Context, *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
-			return &s3.GetObjectAttributesOutput{}, nil
+		GetObjectAttributesFunc: func(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
+			return s3response.GetObjectAttributesResult{}, nil
 		},
 		GetObjectFunc: func(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error) {
 			return &s3.GetObjectOutput{
```
```diff
@@ -23,6 +23,7 @@ import (
 	"fmt"
 	"hash"
 	"io"
+	"math"
 	"strconv"
 	"time"

@@ -192,6 +193,9 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
 		cr.chunkDataLeft = 0
 		cr.chunkHash.Write(p[:chunkSize])
 		n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
+		if (chunkSize + int64(n)) > math.MaxInt {
+			return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
+		}
 		return n + int(chunkSize), err
 	}
```
```diff
@@ -31,6 +31,7 @@ import (
 	"github.com/gofiber/fiber/v2"
 	"github.com/valyala/fasthttp"
 	"github.com/versity/versitygw/s3err"
+	"github.com/versity/versitygw/s3response"
 )

 var (

@@ -223,24 +224,17 @@ func IsBigDataAction(ctx *fiber.Ctx) bool {
 	return false
 }

+// expiration time window
+// https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationTimeStamp
+const timeExpirationSec = 15 * 60
+
 func ValidateDate(date time.Time) error {
 	now := time.Now().UTC()
 	diff := date.Unix() - now.Unix()

-	// Checks the dates difference to be less than a minute
-	if diff > 60 {
-		return s3err.APIError{
-			Code:           "SignatureDoesNotMatch",
-			Description:    fmt.Sprintf("Signature not yet current: %s is still later than %s", date.Format(iso8601Format), now.Format(iso8601Format)),
-			HTTPStatusCode: http.StatusForbidden,
-		}
-	}
-	if diff < -60 {
-		return s3err.APIError{
-			Code:           "SignatureDoesNotMatch",
-			Description:    fmt.Sprintf("Signature expired: %s is now earlier than %s", date.Format(iso8601Format), now.Format(iso8601Format)),
-			HTTPStatusCode: http.StatusForbidden,
-		}
-	}
+	// Checks the dates difference to be within allotted window
+	if diff > timeExpirationSec || diff < -timeExpirationSec {
+		return s3err.GetAPIError(s3err.ErrRequestTimeTooSkewed)
+	}

 	return nil
```
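The rewritten `ValidateDate` replaces the old ±60-second check (and its hand-built `SignatureDoesNotMatch` responses) with a single ±15-minute window that maps to the standard `RequestTimeTooSkewed` error. A self-contained sketch of the arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

// Mirrors the window constant from the hunk above.
const timeExpirationSec = 15 * 60

// withinWindow reproduces the new check: the signed request date may differ
// from server time by at most 15 minutes in either direction.
func withinWindow(date, now time.Time) bool {
	diff := date.Unix() - now.Unix()
	return diff <= timeExpirationSec && diff >= -timeExpirationSec
}

func main() {
	now := time.Now().UTC()
	fmt.Println(withinWindow(now.Add(10*time.Minute), now))  // true: accepted
	fmt.Println(withinWindow(now.Add(-20*time.Minute), now)) // false: RequestTimeTooSkewed
}
```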
```diff
@@ -253,3 +247,34 @@ func ParseDeleteObjects(objs []types.ObjectIdentifier) (result []string) {

 	return
 }
+
+func FilterObjectAttributes(attrs map[types.ObjectAttributes]struct{}, output s3response.GetObjectAttributesResult) s3response.GetObjectAttributesResult {
+	if _, ok := attrs[types.ObjectAttributesEtag]; !ok {
+		output.ETag = nil
+	}
+	if _, ok := attrs[types.ObjectAttributesObjectParts]; !ok {
+		output.ObjectParts = nil
+	}
+	if _, ok := attrs[types.ObjectAttributesObjectSize]; !ok {
+		output.ObjectSize = nil
+	}
+	if _, ok := attrs[types.ObjectAttributesStorageClass]; !ok {
+		output.StorageClass = nil
+	}
+
+	return output
+}
+
+func ParseObjectAttributes(ctx *fiber.Ctx) map[types.ObjectAttributes]struct{} {
+	attrs := map[types.ObjectAttributes]struct{}{}
+	ctx.Request().Header.VisitAll(func(key, value []byte) {
+		if string(key) == "X-Amz-Object-Attributes" {
+			oattrs := strings.Split(string(value), ",")
+			for _, a := range oattrs {
+				attrs[types.ObjectAttributes(a)] = struct{}{}
+			}
+		}
+	})
+
+	return attrs
+}
```
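Together these two helpers implement the GetObjectAttributes header contract: `ParseObjectAttributes` collects the names from the `X-Amz-Object-Attributes` header into a set, and `FilterObjectAttributes` blanks every filterable field the client did not request. A usage sketch, written as if inside the same utils package (values are placeholders; `fmt`, `types`, and `s3response` imports assumed):

```go
func exampleFilter() {
	etag := "abc123"
	size := int64(42)
	full := s3response.GetObjectAttributesResult{
		ETag:       &etag,
		ObjectSize: &size,
	}

	// Pretend the client only asked for the ETag attribute.
	requested := map[types.ObjectAttributes]struct{}{
		types.ObjectAttributesEtag: {},
	}

	filtered := FilterObjectAttributes(requested, full)
	fmt.Println(filtered.ETag != nil)       // true: requested, kept
	fmt.Println(filtered.ObjectSize == nil) // true: not requested, cleared
}
```

Note that `VersionId` and `LastModified` are never cleared by the filter; the tests later in this diff rely on that.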
```diff
@@ -6,8 +6,10 @@ import (
 	"reflect"
 	"testing"

+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/gofiber/fiber/v2"
 	"github.com/valyala/fasthttp"
+	"github.com/versity/versitygw/s3response"
 )

 func TestCreateHttpRequestFromCtx(t *testing.T) {

@@ -264,3 +266,58 @@ func TestParseUint(t *testing.T) {
 		})
 	}
 }
+
+func TestFilterObjectAttributes(t *testing.T) {
+	type args struct {
+		attrs  map[types.ObjectAttributes]struct{}
+		output s3response.GetObjectAttributesResult
+	}
+	etag, objSize := "etag", int64(3222)
+	tests := []struct {
+		name string
+		args args
+		want s3response.GetObjectAttributesResult
+	}{
+		{
+			name: "keep only ETag",
+			args: args{
+				attrs: map[types.ObjectAttributes]struct{}{
+					types.ObjectAttributesEtag: {},
+				},
+				output: s3response.GetObjectAttributesResult{
+					ObjectSize: &objSize,
+					ETag:       &etag,
+				},
+			},
+			want: s3response.GetObjectAttributesResult{ETag: &etag},
+		},
+		{
+			name: "keep multiple props",
+			args: args{
+				attrs: map[types.ObjectAttributes]struct{}{
+					types.ObjectAttributesEtag:         {},
+					types.ObjectAttributesObjectSize:   {},
+					types.ObjectAttributesStorageClass: {},
+				},
+				output: s3response.GetObjectAttributesResult{
+					ObjectSize:  &objSize,
+					ETag:        &etag,
+					ObjectParts: &s3response.ObjectParts{},
+					VersionId:   &etag,
+				},
+			},
+			want: s3response.GetObjectAttributesResult{
+				ETag:       &etag,
+				ObjectSize: &objSize,
+				VersionId:  &etag,
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := FilterObjectAttributes(tt.args.attrs, tt.args.output); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("FilterObjectAttributes() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
```
```diff
@@ -116,6 +116,10 @@ const (
 	ErrInvalidBucketObjectLockConfiguration
 	ErrObjectLocked
 	ErrPastObjectLockRetainDate
+	ErrNoSuchBucketPolicy
+	ErrBucketTaggingNotFound
+	ErrObjectLockInvalidHeaders
+	ErrRequestTimeTooSkewed

 	// Non-AWS errors
 	ErrExistingObjectIsDirectory

@@ -430,6 +434,28 @@ var errorCodeResponse = map[ErrorCode]APIError{
 		Description:    "the retain until date must be in the future",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrNoSuchBucketPolicy: {
+		Code:           "NoSuchBucketPolicy",
+		Description:    "The bucket policy does not exist",
+		HTTPStatusCode: http.StatusNotFound,
+	},
+	ErrBucketTaggingNotFound: {
+		Code:           "NoSuchTagSet",
+		Description:    "The TagSet does not exist",
+		HTTPStatusCode: http.StatusNotFound,
+	},
+	ErrObjectLockInvalidHeaders: {
+		Code:           "InvalidRequest",
+		Description:    "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
+	ErrRequestTimeTooSkewed: {
+		Code:           "RequestTimeTooSkewed",
+		Description:    "The difference between the request time and the server's time is too large.",
+		HTTPStatusCode: http.StatusForbidden,
+	},

 	// non aws errors
 	ErrExistingObjectIsDirectory: {
 		Code:           "ExistingObjectIsDirectory",
 		Description:    "Existing Object is a directory.",
```
```diff
@@ -37,11 +37,11 @@ type EventMeta struct {
 	VersionId *string
 }

-type EventFields struct {
-	Records []EventSchema
+type EventSchema struct {
+	Records []EventRecord
 }

-type EventSchema struct {
+type EventRecord struct {
 	EventVersion string `json:"eventVersion"`
 	EventSource  string `json:"eventSource"`
 	AwsRegion    string `json:"awsRegion"`

@@ -139,54 +139,54 @@ func InitEventSender(cfg *EventConfig) (S3EventSender, error) {
 	return evSender, err
 }

-func createEventSchema(ctx *fiber.Ctx, meta EventMeta, configId ConfigurationId) ([]byte, error) {
+func createEventSchema(ctx *fiber.Ctx, meta EventMeta, configId ConfigurationId) EventSchema {
 	path := strings.Split(ctx.Path(), "/")
 	bucket, object := path[1], strings.Join(path[2:], "/")
 	acc := ctx.Locals("account").(auth.Account)

-	event := []EventSchema{
-		{
-			EventVersion: "2.2",
-			EventSource:  "aws:s3",
-			AwsRegion:    ctx.Locals("region").(string),
-			EventTime:    time.Now().Format(time.RFC3339),
-			EventName:    meta.EventName,
-			UserIdentity: EventUserIdentity{
-				PrincipalId: acc.Access,
-			},
-			RequestParameters: EventRequestParams{
-				SourceIPAddress: ctx.IP(),
-			},
-			ResponseElements: EventResponseElements{
-				RequestId: ctx.Get("X-Amz-Request-Id"),
-				HostId:    ctx.Get("X-Amz-Id-2"),
-			},
-			S3: EventS3Data{
-				S3SchemaVersion: "1.0",
-				ConfigurationId: configId,
-				Bucket: EventS3BucketData{
-					Name: bucket,
-					OwnerIdentity: EventUserIdentity{
-						PrincipalId: meta.BucketOwner,
-					},
-					Arn: fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/")),
-				},
-				Object: EventObjectData{
-					Key:       object,
-					Size:      meta.ObjectSize,
-					ETag:      meta.ObjectETag,
-					VersionId: meta.VersionId,
-					Sequencer: genSequencer(),
-				},
-			},
-			GlacierEventData: EventGlacierData{
-				// Not supported
-				RestoreEventData: EventRestoreData{},
-			},
+	return EventSchema{
+		Records: []EventRecord{
+			{
+				EventVersion: "2.2",
+				EventSource:  "aws:s3",
+				AwsRegion:    ctx.Locals("region").(string),
+				EventTime:    time.Now().Format(time.RFC3339),
+				EventName:    meta.EventName,
+				UserIdentity: EventUserIdentity{
+					PrincipalId: acc.Access,
+				},
+				RequestParameters: EventRequestParams{
+					SourceIPAddress: ctx.IP(),
+				},
+				ResponseElements: EventResponseElements{
+					RequestId: ctx.Get("X-Amz-Request-Id"),
+					HostId:    ctx.Get("X-Amz-Id-2"),
+				},
+				S3: EventS3Data{
+					S3SchemaVersion: "1.0",
+					ConfigurationId: configId,
+					Bucket: EventS3BucketData{
+						Name: bucket,
+						OwnerIdentity: EventUserIdentity{
+							PrincipalId: meta.BucketOwner,
+						},
+						Arn: fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/")),
+					},
+					Object: EventObjectData{
+						Key:       object,
+						Size:      meta.ObjectSize,
+						ETag:      meta.ObjectETag,
+						VersionId: meta.VersionId,
+						Sequencer: genSequencer(),
+					},
+				},
+				GlacierEventData: EventGlacierData{
+					// Not supported
+					RestoreEventData: EventRestoreData{},
+				},
+			},
 		},
 	}
-
-	return json.Marshal(event)
 }

 func generateTestEvent() ([]byte, error) {
```
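The schema refactor means senders now receive a structured `EventSchema` (one record wrapped in a `Records` array, matching the AWS event envelope) and are responsible for marshaling it themselves. A sketch of the resulting JSON shape, written as if inside package s3event (field values are placeholders; `encoding/json` import assumed):

```go
func exampleEnvelope() ([]byte, error) {
	ev := EventSchema{
		Records: []EventRecord{
			{
				EventVersion: "2.2",
				EventSource:  "aws:s3",
				EventName:    EventObjectRemovedDelete,
			},
		},
	}
	// Yields {"Records":[{"eventVersion":"2.2","eventSource":"aws:s3",...}]}
	return json.Marshal(ev)
}
```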
@@ -25,19 +25,21 @@ import (
type EventType string

const (
	EventObjectCreated           EventType = "s3:ObjectCreated:*" // ObjectCreated
	EventObjectCreatedPut        EventType = "s3:ObjectCreated:Put"
	EventObjectCreatedPost       EventType = "s3:ObjectCreated:Post"
	EventObjectCreatedCopy       EventType = "s3:ObjectCreated:Copy"
	EventCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
-	EventObjectDeleted           EventType = "s3:ObjectRemoved:Delete" // ObjectRemoved
+	EventObjectRemoved              EventType = "s3:ObjectRemoved:*" // ObjectRemoved
+	EventObjectRemovedDelete        EventType = "s3:ObjectRemoved:Delete"
+	EventObjectRemovedDeleteObjects EventType = "s3:ObjectRemoved:DeleteObjects" // non AWS custom type for DeleteObjects
	EventObjectTagging           EventType = "s3:ObjectTagging:*" // ObjectTagging
	EventObjectTaggingPut        EventType = "s3:ObjectTagging:Put"
	EventObjectTaggingDelete     EventType = "s3:ObjectTagging:Delete"
	EventObjectAclPut            EventType = "s3:ObjectAcl:Put"
	EventObjectRestore           EventType = "s3:ObjectRestore:*" // ObjectRestore
	EventObjectRestorePost       EventType = "s3:ObjectRestore:Post"
	EventObjectRestoreCompleted  EventType = "s3:ObjectRestore:Completed"
	// EventObjectRestorePost EventType = "s3:ObjectRestore:Post"
	// EventObjectRestoreDelete EventType = "s3:ObjectRestore:Delete"
)
@@ -48,19 +50,21 @@ func (event EventType) IsValid() bool {
|
||||
}
|
||||
|
||||
var supportedEventFilters = map[EventType]struct{}{
|
||||
EventObjectCreated: {},
|
||||
EventObjectCreatedPut: {},
|
||||
EventObjectCreatedPost: {},
|
||||
EventObjectCreatedCopy: {},
|
||||
EventCompleteMultipartUpload: {},
|
||||
EventObjectDeleted: {},
|
||||
EventObjectTagging: {},
|
||||
EventObjectTaggingPut: {},
|
||||
EventObjectTaggingDelete: {},
|
||||
EventObjectAclPut: {},
|
||||
EventObjectRestore: {},
|
||||
EventObjectRestorePost: {},
|
||||
EventObjectRestoreCompleted: {},
|
||||
EventObjectCreated: {},
|
||||
EventObjectCreatedPut: {},
|
||||
EventObjectCreatedPost: {},
|
||||
EventObjectCreatedCopy: {},
|
||||
EventCompleteMultipartUpload: {},
|
||||
EventObjectRemoved: {},
|
||||
EventObjectRemovedDelete: {},
|
||||
EventObjectRemovedDeleteObjects: {},
|
||||
EventObjectTagging: {},
|
||||
EventObjectTaggingPut: {},
|
||||
EventObjectTaggingDelete: {},
|
||||
EventObjectAclPut: {},
|
||||
EventObjectRestore: {},
|
||||
EventObjectRestorePost: {},
|
||||
EventObjectRestoreCompleted: {},
|
||||
}
|
||||
|
||||
type EventFilter map[EventType]bool
|
||||
|
||||
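The wildcard filter types above ("s3:ObjectCreated:*", "s3:ObjectRemoved:*", and so on) imply prefix matching when a concrete event name is tested against a configured filter entry. The matching code itself is outside this diff; the following is only an illustrative sketch of the idea, and matchesFilter is a hypothetical helper name, not a function from the gateway.

package main

import (
	"fmt"
	"strings"
)

type EventType string

// matchesFilter reports whether a concrete event name such as
// "s3:ObjectRemoved:Delete" is selected by a filter entry, which may be
// an exact name or a trailing-wildcard form like "s3:ObjectRemoved:*".
func matchesFilter(event, filter EventType) bool {
	if event == filter {
		return true
	}
	if strings.HasSuffix(string(filter), ":*") {
		prefix := strings.TrimSuffix(string(filter), "*")
		return strings.HasPrefix(string(event), prefix)
	}
	return false
}

func main() {
	fmt.Println(matchesFilter("s3:ObjectRemoved:Delete", "s3:ObjectRemoved:*"))        // true
	fmt.Println(matchesFilter("s3:ObjectRemoved:DeleteObjects", "s3:ObjectRemoved:*")) // true
	fmt.Println(matchesFilter("s3:ObjectCreated:Put", "s3:ObjectRemoved:*"))           // false
}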
@@ -16,6 +16,8 @@ package s3event

import (
	"context"
+	"encoding/json"
+	"encoding/xml"
	"fmt"
	"os"
	"sync"
@@ -23,6 +25,7 @@ import (

	"github.com/gofiber/fiber/v2"
	"github.com/segmentio/kafka-go"
+	"github.com/versity/versitygw/s3response"
)

var sequencer = 0
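genSequencer, used by the event schema above, is backed by this package-level sequencer counter, but its body is not part of this diff. A minimal sketch of one plausible shape, assuming the S3 convention of a sortable hexadecimal string; the mutex here is an assumption added because the senders dispatch events from goroutines:

import (
	"fmt"
	"sync"
)

var (
	sequencer   = 0
	sequencerMu sync.Mutex // assumption: guard the counter across sender goroutines
)

// genSequencer returns a monotonically increasing hex string, loosely
// mirroring the sortable "sequencer" field in AWS S3 event records.
func genSequencer() string {
	sequencerMu.Lock()
	defer sequencerMu.Unlock()
	sequencer++
	return fmt.Sprintf("%016X", sequencer)
}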
@@ -78,12 +81,29 @@ func (ks *Kafka) SendEvent(ctx *fiber.Ctx, meta EventMeta) {
		return
	}

-	schema, err := createEventSchema(ctx, meta, ConfigurationIdKafka)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "failed to create kafka event: %v\n", err.Error())
+	if meta.EventName == EventObjectRemovedDeleteObjects {
+		var dObj s3response.DeleteObjects
+
+		if err := xml.Unmarshal(ctx.Body(), &dObj); err != nil {
+			fmt.Fprintf(os.Stderr, "failed to parse delete objects input payload: %v\n", err.Error())
+			return
+		}
+
+		// Events aren't sent in the correct order
+		for _, obj := range dObj.Objects {
+			key := *obj.Key
+			schema := createEventSchema(ctx, meta, ConfigurationIdKafka)
+			schema.Records[0].S3.Object.Key = key
+			schema.Records[0].S3.Object.VersionId = obj.VersionId
+
+			go ks.send(schema)
+		}
+
		return
	}

+	schema := createEventSchema(ctx, meta, ConfigurationIdKafka)
+
	go ks.send(schema)
}
@@ -91,14 +111,20 @@ func (ks *Kafka) Close() error {
	return ks.writer.Close()
}

-func (ks *Kafka) send(event []byte) {
+func (ks *Kafka) send(event EventSchema) {
+	eventBytes, err := json.Marshal(event)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to marshal event data: %v\n", err.Error())
+		return
+	}
+
	message := kafka.Message{
		Key:   []byte(ks.key),
-		Value: event,
+		Value: eventBytes,
	}

	ctx := context.Background()
-	err := ks.writer.WriteMessages(ctx, message)
+	err = ks.writer.WriteMessages(ctx, message)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to send kafka event: %v\n", err.Error())
	}
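For reference, a minimal, self-contained sketch of constructing a segmentio/kafka-go writer like the one ks.writer wraps; the broker address, topic, and key below are placeholders, not values from this repository:

package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	// A writer comparable to the one the Kafka event sender holds.
	w := &kafka.Writer{
		Addr:     kafka.TCP("localhost:9092"), // placeholder broker
		Topic:    "s3-events",                 // placeholder topic
		Balancer: &kafka.LeastBytes{},
	}
	defer w.Close()

	err := w.WriteMessages(context.Background(), kafka.Message{
		Key:   []byte("versitygw"),
		Value: []byte(`{"Records":[]}`),
	})
	if err != nil {
		log.Fatalf("write failed: %v", err)
	}
}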
@@ -15,12 +15,15 @@
package s3event

import (
+	"encoding/json"
+	"encoding/xml"
	"fmt"
	"os"
	"sync"

	"github.com/gofiber/fiber/v2"
	"github.com/nats-io/nats.go"
+	"github.com/versity/versitygw/s3response"
)

type NatsEventSender struct {
@@ -65,12 +68,29 @@ func (ns *NatsEventSender) SendEvent(ctx *fiber.Ctx, meta EventMeta) {
		return
	}

-	schema, err := createEventSchema(ctx, meta, ConfigurationIdNats)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "failed to create nats event: %v\n", err.Error())
+	if meta.EventName == EventObjectRemovedDeleteObjects {
+		var dObj s3response.DeleteObjects
+
+		if err := xml.Unmarshal(ctx.Body(), &dObj); err != nil {
+			fmt.Fprintf(os.Stderr, "failed to parse delete objects input payload: %v\n", err.Error())
+			return
+		}
+
+		// Events aren't sent in the correct order
+		for _, obj := range dObj.Objects {
+			key := *obj.Key
+			schema := createEventSchema(ctx, meta, ConfigurationIdNats)
+			schema.Records[0].S3.Object.Key = key
+			schema.Records[0].S3.Object.VersionId = obj.VersionId
+
+			go ns.send(schema)
+		}
+
		return
	}

+	schema := createEventSchema(ctx, meta, ConfigurationIdNats)
+
	go ns.send(schema)
}
@@ -79,8 +99,13 @@ func (ns *NatsEventSender) Close() error {
	return nil
}

-func (ns *NatsEventSender) send(event []byte) {
-	err := ns.client.Publish(ns.topic, event)
+func (ns *NatsEventSender) send(event EventSchema) {
+	eventBytes, err := json.Marshal(event)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to marshal event data: %v\n", err.Error())
+		return
+	}
+	err = ns.client.Publish(ns.topic, eventBytes)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to send nats event: %v\n", err.Error())
	}
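A quick way to observe these events during development is a bare nats.go subscriber; a minimal sketch, assuming a local NATS server and a placeholder subject name rather than whatever topic the gateway is actually configured with:

package main

import (
	"fmt"
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL) // nats://127.0.0.1:4222
	if err != nil {
		log.Fatalf("connect failed: %v", err)
	}
	defer nc.Drain()

	// "s3-events" is a placeholder subject.
	_, err = nc.Subscribe("s3-events", func(m *nats.Msg) {
		fmt.Printf("event payload: %s\n", string(m.Data))
	})
	if err != nil {
		log.Fatalf("subscribe failed: %v", err)
	}
	select {} // block forever
}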
@@ -16,6 +16,8 @@ package s3event

import (
	"bytes"
+	"encoding/json"
+	"encoding/xml"
	"fmt"
	"net"
	"net/http"
@@ -24,6 +26,7 @@ import (
	"time"

	"github.com/gofiber/fiber/v2"
+	"github.com/versity/versitygw/s3response"
)

type Webhook struct {
@@ -77,12 +80,29 @@ func (w *Webhook) SendEvent(ctx *fiber.Ctx, meta EventMeta) {
		return
	}

-	schema, err := createEventSchema(ctx, meta, ConfigurationIdWebhook)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "failed to create webhook event: %v\n", err.Error())
+	if meta.EventName == EventObjectRemovedDeleteObjects {
+		var dObj s3response.DeleteObjects
+
+		if err := xml.Unmarshal(ctx.Body(), &dObj); err != nil {
+			fmt.Fprintf(os.Stderr, "failed to parse delete objects input payload: %v\n", err.Error())
+			return
+		}
+
+		// Events aren't sent in the correct order
+		for _, obj := range dObj.Objects {
+			key := *obj.Key
+			schema := createEventSchema(ctx, meta, ConfigurationIdWebhook)
+			schema.Records[0].S3.Object.Key = key
+			schema.Records[0].S3.Object.VersionId = obj.VersionId
+
+			go w.send(schema)
+		}
+
		return
	}

+	schema := createEventSchema(ctx, meta, ConfigurationIdWebhook)
+
	go w.send(schema)
}
@@ -90,8 +110,14 @@ func (w *Webhook) Close() error {
	return nil
}

-func (w *Webhook) send(event []byte) {
-	req, err := http.NewRequest(http.MethodPost, w.url, bytes.NewReader(event))
+func (w *Webhook) send(event EventSchema) {
+	eventBytes, err := json.Marshal(event)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to marshal event data: %v\n", err.Error())
+		return
+	}
+
+	req, err := http.NewRequest(http.MethodPost, w.url, bytes.NewReader(eventBytes))
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create webhook event request: %v\n", err.Error())
		return
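On the receiving side, the webhook sender simply POSTs the JSON-encoded EventSchema to w.url. A minimal sketch of a receiver, assuming a local listener; the struct below is a trimmed stand-in and the JSON field names are assumptions, so adjust them to the gateway's actual JSON tags:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// minimalRecord captures only the fields this sketch prints; the
// gateway's full EventSchema carries many more.
type minimalRecord struct {
	EventName string `json:"eventName"`
	S3        struct {
		Object struct {
			Key string `json:"key"`
		} `json:"object"`
	} `json:"s3"`
}

type minimalEvent struct {
	Records []minimalRecord `json:"Records"`
}

func main() {
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		var ev minimalEvent
		if err := json.NewDecoder(r.Body).Decode(&ev); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		for _, rec := range ev.Records {
			fmt.Printf("%s %s\n", rec.EventName, rec.S3.Object.Key)
		}
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8081", nil))
}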
@@ -52,6 +52,23 @@ type ListPartsResult struct {
	Parts []Part `xml:"Part"`
}

+type GetObjectAttributesResult struct {
+	ETag         *string
+	LastModified *time.Time
+	ObjectSize   *int64
+	StorageClass *types.StorageClass
+	VersionId    *string
+	ObjectParts  *ObjectParts
+}
+
+type ObjectParts struct {
+	PartNumberMarker     int
+	NextPartNumberMarker int
+	MaxParts             int
+	IsTruncated          bool
+	Parts                []types.ObjectPart `xml:"Part"`
+}
+
// ListMultipartUploadsResponse - s3 api list multipart uploads response.
type ListMultipartUploadsResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"`
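Response structs in this package are rendered with encoding/xml, as the XMLName tag on ListMultipartUploadsResult suggests. A standalone sketch of how a parts listing along the lines of ObjectParts serializes; the trimmed struct here is illustrative, not the gateway's exact type:

package main

import (
	"encoding/xml"
	"fmt"
)

type objectParts struct {
	PartNumberMarker     int
	NextPartNumberMarker int
	MaxParts             int
	IsTruncated          bool
}

func main() {
	// MarshalIndent uses the struct type name as the root element.
	out, err := xml.MarshalIndent(objectParts{
		PartNumberMarker:     0,
		NextPartNumberMarker: 3,
		MaxParts:             3,
		IsTruncated:          true,
	}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}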
tests/commands/abort_multipart_upload.sh (new file, 15 lines)
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

abort_multipart_upload() {
  if [ $# -ne 3 ]; then
    echo "command to run abort requires bucket, key, upload ID"
    return 1
  fi

  local aborted=0
  error=$(aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" 2>&1) || aborted=$?
  if [[ $aborted -ne 0 ]]; then
    echo "Error aborting upload: $error"
    return 1
  fi
  return 0
}
tests/commands/copy_object.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/usr/bin/env bash

copy_object() {
  if [ $# -ne 4 ]; then
    echo "copy object command requires command type, source, bucket, key"
    return 1
  fi
  local exit_code=0
  local error
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 cp "$2" s3://"$3/$4" 2>&1) || exit_code=$?
  elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3api copy-object --copy-source "$2" --bucket "$3" --key "$4" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate cp "s3://$2" s3://"$3/$4" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure cp "$2" "$MC_ALIAS/$3/$4" 2>&1) || exit_code=$?
  else
    echo "'copy-object' not implemented for '$1'"
    return 1
  fi
  log 5 "copy object exit code: $exit_code"
  if [ $exit_code -ne 0 ]; then
    echo "error copying object to bucket: $error"
    return 1
  fi
  return 0
}
tests/commands/create_bucket.sh (new file, 31 lines)
@@ -0,0 +1,31 @@
#!/usr/bin/env bash

# create an AWS bucket
# params: command type, bucket name
# return 0 for success, 1 for failure
create_bucket() {
  if [ $# -ne 2 ]; then
    echo "create bucket missing command type, bucket name"
    return 1
  fi

  local exit_code=0
  local error
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then
    error=$(aws --no-verify-ssl s3api create-bucket --bucket "$2" 2>&1) || exit_code=$?
  elif [[ $1 == "s3cmd" ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == "mc" ]]; then
    error=$(mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    echo "error creating bucket: $error"
    return 1
  fi
  return 0
}
tests/commands/delete_bucket.sh (new file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env bash

# delete an AWS bucket
# params: command type, bucket name
# return 0 for success, 1 for failure
delete_bucket() {
  if [ $# -ne 2 ]; then
    echo "delete bucket missing command type, bucket name"
    return 1
  fi

  local exit_code=0
  local error
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 rb s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
    error=$(aws --no-verify-ssl s3api delete-bucket --bucket "$2" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rb s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure rb "$MC_ALIAS/$2" 2>&1) || exit_code=$?
  else
    echo "Invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    if [[ "$error" == *"The specified bucket does not exist"* ]]; then
      return 0
    else
      echo "error deleting bucket: $error"
      return 1
    fi
  fi
  return 0
}
tests/commands/delete_bucket_policy.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
#!/usr/bin/env bash

delete_bucket_policy() {
  if [[ $# -ne 2 ]]; then
    echo "delete bucket policy command requires command type, bucket"
    return 1
  fi
  local delete_result=0
  if [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3api delete-bucket-policy --bucket "$2") || delete_result=$?
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate delpolicy "s3://$2") || delete_result=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure anonymous set none "$MC_ALIAS/$2") || delete_result=$?
  else
    echo "command 'delete bucket policy' not implemented for '$1'"
    return 1
  fi
  if [[ $delete_result -ne 0 ]]; then
    echo "error deleting bucket policy: $error"
    return 1
  fi
  return 0
}
tests/commands/delete_object.sh (new file, 27 lines)
@@ -0,0 +1,27 @@
#!/usr/bin/env bash

delete_object() {
  if [ $# -ne 3 ]; then
    echo "delete object command requires command type, bucket, key"
    return 1
  fi
  local exit_code=0
  local error
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
  elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm "s3://$2/$3" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure rm "$MC_ALIAS/$2/$3" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    echo "error deleting object: $error"
    return 1
  fi
  return 0
}
tests/commands/delete_object_tagging.sh (new file, 21 lines)
@@ -0,0 +1,21 @@
#!/usr/bin/env bash

delete_object_tagging() {
  if [[ $# -ne 3 ]]; then
    echo "delete object tagging command missing command type, bucket, key"
    return 1
  fi
  if [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3api delete-object-tagging --bucket "$2" --key "$3" 2>&1) || delete_result=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure tag remove "$MC_ALIAS/$2/$3") || delete_result=$?
  else
    echo "delete-object-tagging command not implemented for '$1'"
    return 1
  fi
  if [[ $delete_result -ne 0 ]]; then
    echo "error deleting object tagging: $error"
    return 1
  fi
  return 0
}
tests/commands/get_bucket_location.sh (new file, 68 lines)
@@ -0,0 +1,68 @@
#!/usr/bin/env bash

get_bucket_location() {
  if [[ $# -ne 2 ]]; then
    echo "get bucket location command requires command type, bucket name"
    return 1
  fi
  local get_result=0
  if [[ $1 == 'aws' ]]; then
    get_bucket_location_aws "$2" || get_result=$?
  elif [[ $1 == 's3cmd' ]]; then
    get_bucket_location_s3cmd "$2" || get_result=$?
  elif [[ $1 == 'mc' ]]; then
    get_bucket_location_mc "$2" || get_result=$?
  else
    echo "command type '$1' not implemented for get_bucket_location"
    return 1
  fi
  if [[ $get_result -ne 0 ]]; then
    return 1
  fi
  return 0
}

get_bucket_location_aws() {
  if [[ $# -ne 1 ]]; then
    echo "get bucket location (aws) requires bucket name"
    return 1
  fi
  local location_result=0
  location_json=$(aws --no-verify-ssl s3api get-bucket-location --bucket "$1") || location_result=$?
  if [[ $location_result -ne 0 ]]; then
    echo "error getting bucket location: $location_json"
    return 1
  fi
  bucket_location=$(echo "$location_json" | jq -r '.LocationConstraint')
  export bucket_location
  return 0
}

get_bucket_location_s3cmd() {
  if [[ $# -ne 1 ]]; then
    echo "get bucket location (s3cmd) requires bucket name"
    return 1
  fi
  local results=0
  info=$(s3cmd --no-check-certificate info "s3://$1") || results=$?
  if [[ $results -ne 0 ]]; then
    echo "error getting s3cmd info: $info"
    return 1
  fi
  bucket_location=$(echo "$info" | grep -o 'Location:.*' | awk '{print $2}')
  export bucket_location
  return 0
}

get_bucket_location_mc() {
  if [[ $# -ne 1 ]]; then
    echo "get bucket location (mc) requires bucket name"
    return 1
  fi
  local results=0
  info=$(mc --insecure stat "$MC_ALIAS/$1") || results=$?
  if [[ $results -ne 0 ]]; then
    echo "error getting mc info: $info"
    return 1
  fi
  bucket_location=$(echo "$info" | grep -o 'Location:.*' | awk '{print $2}')
  export bucket_location
  return 0
}
tests/commands/get_bucket_policy.sh (new file, 97 lines)
@@ -0,0 +1,97 @@
#!/usr/bin/env bash

get_bucket_policy() {
  if [[ $# -ne 2 ]]; then
    echo "get bucket policy command requires command type, bucket"
    return 1
  fi
  local get_bucket_policy_result=0
  if [[ $1 == 'aws' ]]; then
    get_bucket_policy_aws "$2" || get_bucket_policy_result=$?
  elif [[ $1 == 's3cmd' ]]; then
    get_bucket_policy_s3cmd "$2" || get_bucket_policy_result=$?
  elif [[ $1 == 'mc' ]]; then
    get_bucket_policy_mc "$2" || get_bucket_policy_result=$?
  else
    echo "command 'get bucket policy' not implemented for '$1'"
    return 1
  fi
  if [[ $get_bucket_policy_result -ne 0 ]]; then
    echo "error getting policy: $bucket_policy"
    return 1
  fi
  export bucket_policy
  return 0
}

get_bucket_policy_aws() {
  if [[ $# -ne 1 ]]; then
    echo "aws 'get bucket policy' command requires bucket"
    return 1
  fi
  local get_result=0
  policy_json=$(aws --no-verify-ssl s3api get-bucket-policy --bucket "$1" 2>&1) || get_result=$?
  if [[ $policy_json == *"InsecureRequestWarning"* ]]; then
    policy_json=$(awk 'NR>2' <<< "$policy_json")
  fi
  if [[ $get_result -ne 0 ]]; then
    if [[ "$policy_json" == *"(NoSuchBucketPolicy)"* ]]; then
      bucket_policy=
    else
      echo "error getting policy: $policy_json"
      return 1
    fi
  else
    bucket_policy=$(echo "{$policy_json}" | jq -r '.Policy')
  fi
  export bucket_policy
  return 0
}

get_bucket_policy_s3cmd() {
  if [[ $# -ne 1 ]]; then
    echo "s3cmd 'get bucket policy' command requires bucket"
    return 1
  fi

  local get_result=0
  info=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$1") || get_result=$?
  if [[ $get_result -ne 0 ]]; then
    echo "error getting bucket policy: $info"
    return 1
  fi

  bucket_policy=""
  policy_brackets=false
  while IFS= read -r line; do
    if [[ $policy_brackets == false ]]; then
      policy_line=$(echo "$line" | grep 'Policy: ')
      if [[ $policy_line != "" ]]; then
        if [[ $policy_line != *'{' ]]; then
          break
        fi
        policy_brackets=true
        bucket_policy+="{"
      fi
    else
      bucket_policy+=$line
      if [[ $line == "" ]]; then
        break
      fi
    fi
  done <<< "$info"
  export bucket_policy
  return 0
}

get_bucket_policy_mc() {
  if [[ $# -ne 1 ]]; then
    echo "mc 'get bucket policy' command requires bucket"
    return 1
  fi
  local get_result=0
  bucket_policy=$(mc --insecure anonymous get-json "$MC_ALIAS/$1") || get_result=$?
  if [[ $get_result -ne 0 ]]; then
    echo "error getting policy: $bucket_policy"
    return 1
  fi
  export bucket_policy
  return 0
}
tests/commands/get_bucket_tagging.sh (new file, 31 lines)
@@ -0,0 +1,31 @@
#!/usr/bin/env bash

# get bucket tags
# params: command type, bucket name
# export 'tags' on success, return 1 for error
get_bucket_tagging() {
  if [ $# -ne 2 ]; then
    echo "get bucket tag command missing command type, bucket name"
    return 1
  fi
  local result=0
  if [[ $1 == 'aws' ]]; then
    tags=$(aws --no-verify-ssl s3api get-bucket-tagging --bucket "$2" 2>&1) || result=$?
  elif [[ $1 == 'mc' ]]; then
    tags=$(mc --insecure tag list "$MC_ALIAS"/"$2" 2>&1) || result=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  log 5 "Tags: $tags"
  tags=$(echo "$tags" | grep -v "InsecureRequestWarning")
  if [[ $result -ne 0 ]]; then
    if [[ $tags =~ "No tags found" ]] || [[ $tags =~ "The TagSet does not exist" ]]; then
      export tags=
      return 0
    fi
    echo "error getting bucket tags: $tags"
    return 1
  fi
  export tags
}
tests/commands/get_object.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/usr/bin/env bash

get_object() {
  if [ $# -ne 4 ]; then
    echo "get object command requires command type, bucket, key, destination"
    return 1
  fi
  local exit_code=0
  local error
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 mv "s3://$2/$3" "$4" 2>&1) || exit_code=$?
  elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3api get-object --bucket "$2" --key "$3" "$4" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate get "s3://$2/$3" "$4" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure get "$MC_ALIAS/$2/$3" "$4" 2>&1) || exit_code=$?
  else
    echo "'get object' command not implemented for '$1'"
    return 1
  fi
  log 5 "get object exit code: $exit_code"
  if [ $exit_code -ne 0 ]; then
    echo "error getting object: $error"
    return 1
  fi
  return 0
}
tests/commands/head_bucket.sh (new file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

head_bucket() {
  if [ $# -ne 2 ]; then
    echo "head bucket command missing command type, bucket name"
    return 1
  fi
  local exit_code=0
  if [[ $1 == "aws" ]] || [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
    bucket_info=$(aws --no-verify-ssl s3api head-bucket --bucket "$2" 2>&1) || exit_code=$?
  elif [[ $1 == "s3cmd" ]]; then
    bucket_info=$(s3cmd --no-check-certificate info "s3://$2" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    bucket_info=$(mc --insecure stat "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    echo "error getting bucket info: $bucket_info"
    return 1
  fi
  export bucket_info
  return 0
}
tests/commands/head_object.sh (new file, 29 lines)
@@ -0,0 +1,29 @@
#!/usr/bin/env bash

head_object() {
  if [ $# -ne 3 ]; then
    echo "head-object missing command, bucket name, object name"
    return 2
  fi
  local exit_code=0
  local error=""
  if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3api head-object --bucket "$2" --key "$3" 2>&1) || exit_code="$?"
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info s3://"$2/$3" 2>&1) || exit_code="$?"
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure stat "$MC_ALIAS/$2/$3" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 2
  fi
  if [ $exit_code -ne 0 ]; then
    if [[ "$error" == *"404"* ]] || [[ "$error" == *"does not exist"* ]]; then
      return 1
    else
      echo "error checking if object exists: $error"
      return 2
    fi
  fi
  return 0
}
tests/commands/list_buckets.sh (new file, 61 lines)
@@ -0,0 +1,61 @@
#!/usr/bin/env bash

list_buckets() {
  if [ $# -ne 1 ]; then
    echo "list buckets command missing command type"
    return 1
  fi

  local exit_code=0
  local error
  if [[ $1 == 's3' ]]; then
    buckets=$(aws --no-verify-ssl s3 ls s3:// 2>&1) || exit_code=$?
  elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
    list_buckets_s3api || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    buckets=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3:// 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    buckets=$(mc --insecure ls "$MC_ALIAS" 2>&1) || exit_code=$?
  else
    echo "list buckets command not implemented for '$1'"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    echo "error listing buckets: $buckets"
    return 1
  fi

  if [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
    return 0
  fi

  bucket_array=()
  while IFS= read -r line; do
    bucket_name=$(echo "$line" | awk '{print $NF}')
    bucket_array+=("${bucket_name%/}")
  done <<< "$buckets"
  export bucket_array
  return 0
}

list_buckets_s3api() {
  output=$(aws --no-verify-ssl s3api list-buckets 2>&1) || exit_code=$?
  if [[ $exit_code -ne 0 ]]; then
    echo "error listing buckets: $output"
    return 1
  fi

  modified_output=""
  while IFS= read -r line; do
    if [[ $line != *InsecureRequestWarning* ]]; then
      modified_output+="$line"
    fi
  done <<< "$output"

  bucket_array=()
  names=$(jq -r '.Buckets[].Name' <<<"$modified_output")
  IFS=$'\n' read -rd '' -a bucket_array <<<"$names"

  export bucket_array
  return 0
}
tests/commands/list_objects.sh (new file, 65 lines)
@@ -0,0 +1,65 @@
#!/usr/bin/env bash

list_objects() {
  if [ $# -ne 2 ]; then
    echo "list objects command requires command type, and bucket or folder"
    return 1
  fi
  local exit_code=0
  local output
  if [[ $1 == "aws" ]] || [[ $1 == 's3' ]]; then
    output=$(aws --no-verify-ssl s3 ls s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 's3api' ]]; then
    list_objects_s3api "$2" || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    output=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    output=$(mc --insecure ls "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    echo "error listing objects: $output"
    return 1
  fi

  if [[ $1 == 's3api' ]]; then
    return 0
  fi

  object_array=()
  while IFS= read -r line; do
    if [[ $line != *InsecureRequestWarning* ]]; then
      object_name=$(echo "$line" | awk '{print $NF}')
      object_array+=("$object_name")
    fi
  done <<< "$output"

  export object_array
}

list_objects_s3api() {
  if [[ $# -ne 1 ]]; then
    echo "list objects s3api command requires bucket name"
    return 1
  fi
  output=$(aws --no-verify-ssl s3api list-objects --bucket "$1" 2>&1) || local exit_code=$?
  if [[ $exit_code -ne 0 ]]; then
    echo "error listing objects: $output"
    return 1
  fi

  modified_output=""
  while IFS= read -r line; do
    if [[ $line != *InsecureRequestWarning* ]]; then
      modified_output+="$line"
    fi
  done <<< "$output"

  object_array=()
  keys=$(jq -r '.Contents[].Key' <<<"$modified_output")
  IFS=$'\n' read -rd '' -a object_array <<<"$keys"

  export object_array
}
tests/commands/put_bucket_policy.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
#!/usr/bin/env bash

put_bucket_policy() {
  if [[ $# -ne 3 ]]; then
    echo "put bucket policy command requires command type, bucket, policy file"
    return 1
  fi
  local put_result=0
  if [[ $1 == 'aws' ]]; then
    policy=$(aws --no-verify-ssl s3api put-bucket-policy --bucket "$2" --policy "file://$3") || put_result=$?
  elif [[ $1 == 's3cmd' ]]; then
    policy=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate setpolicy "$3" "s3://$2") || put_result=$?
  elif [[ $1 == 'mc' ]]; then
    policy=$(mc --insecure anonymous set-json "$3" "$MC_ALIAS/$2") || put_result=$?
  else
    echo "command 'put bucket policy' not implemented for '$1'"
    return 1
  fi
  if [[ $put_result -ne 0 ]]; then
    echo "error putting policy: $policy"
    return 1
  fi
  return 0
}
tests/commands/put_object.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/usr/bin/env bash

put_object() {
  if [ $# -ne 4 ]; then
    echo "put object command requires command type, source, destination bucket, destination key"
    return 1
  fi
  local exit_code=0
  local error
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 mv "$2" s3://"$3/$4" 2>&1) || exit_code=$?
  elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3api put-object --body "$2" --bucket "$3" --key "$4" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate put "$2" s3://"$3/$4" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure put "$2" "$MC_ALIAS/$3/$4" 2>&1) || exit_code=$?
  else
    echo "'put object' command not implemented for '$1'"
    return 1
  fi
  log 5 "put object exit code: $exit_code"
  if [ $exit_code -ne 0 ]; then
    echo "error putting object into bucket: $error"
    return 1
  fi
  return 0
}
@@ -54,6 +54,7 @@ func TestCreateBucket(s *S3Conf) {
	CreateBucket_default_acl(s)
	CreateBucket_non_default_acl(s)
	CreateDeleteBucket_success(s)
+	CreateBucket_default_object_lock(s)
}

func TestHeadBucket(s *S3Conf) {
@@ -81,6 +82,7 @@ func TestPutBucketTagging(s *S3Conf) {

func TestGetBucketTagging(s *S3Conf) {
	GetBucketTagging_non_existing_bucket(s)
+	GetBucketTagging_unset_tags(s)
	GetBucketTagging_success(s)
}

@@ -94,6 +96,8 @@ func TestPutObject(s *S3Conf) {
	PutObject_non_existing_bucket(s)
	PutObject_special_chars(s)
	PutObject_invalid_long_tags(s)
+	PutObject_missing_object_lock_retention_config(s)
+	PutObject_with_object_lock(s)
	PutObject_success(s)
	PutObject_invalid_credentials(s)
}
@@ -103,6 +107,14 @@ func TestHeadObject(s *S3Conf) {
	HeadObject_success(s)
}

+func TestGetObjectAttributes(s *S3Conf) {
+	GetObjectAttributes_non_existing_bucket(s)
+	GetObjectAttributes_non_existing_object(s)
+	GetObjectAttributes_existing_object(s)
+	GetObjectAttributes_multipart_upload(s)
+	GetObjectAttributes_multipart_upload_truncated(s)
+}
+
func TestGetObject(s *S3Conf) {
	GetObject_non_existing_key(s)
	GetObject_invalid_ranges(s)
@@ -157,6 +169,7 @@ func TestPutObjectTagging(s *S3Conf) {

func TestGetObjectTagging(s *S3Conf) {
	GetObjectTagging_non_existing_object(s)
+	GetObjectTagging_unset_tags(s)
	GetObjectTagging_success(s)
}

@@ -267,7 +280,7 @@ func TestPutBucketPolicy(s *S3Conf) {

func TestGetBucketPolicy(s *S3Conf) {
	GetBucketPolicy_non_existing_bucket(s)
-	GetBucketPolicy_default_empty_policy(s)
+	GetBucketPolicy_not_set(s)
	GetBucketPolicy_success(s)
}

@@ -344,6 +357,7 @@ func TestFullFlow(s *S3Conf) {
	TestDeleteBucketTagging(s)
	TestPutObject(s)
	TestHeadObject(s)
+	TestGetObjectAttributes(s)
	TestGetObject(s)
	TestListObjects(s)
	TestListObjectsV2(s)
@@ -440,6 +454,8 @@ func GetIntTests() IntTests {
	"PresignedAuth_expired_request":      PresignedAuth_expired_request,
	"PresignedAuth_incorrect_secret_key": PresignedAuth_incorrect_secret_key,
	"PresignedAuth_PutObject_success":    PresignedAuth_PutObject_success,
+	"PutObject_missing_object_lock_retention_config": PutObject_missing_object_lock_retention_config,
+	"PutObject_with_object_lock":                     PutObject_with_object_lock,
	"PresignedAuth_Put_GetObject_with_data":       PresignedAuth_Put_GetObject_with_data,
	"PresignedAuth_Put_GetObject_with_UTF8_chars": PresignedAuth_Put_GetObject_with_UTF8_chars,
	"PresignedAuth_UploadPart":                    PresignedAuth_UploadPart,
@@ -449,6 +465,7 @@ func GetIntTests() IntTests {
	"CreateDeleteBucket_success":   CreateDeleteBucket_success,
	"CreateBucket_default_acl":     CreateBucket_default_acl,
	"CreateBucket_non_default_acl": CreateBucket_non_default_acl,
+	"CreateBucket_default_object_lock": CreateBucket_default_object_lock,
	"HeadBucket_non_existing_bucket": HeadBucket_non_existing_bucket,
	"HeadBucket_success":             HeadBucket_success,
	"ListBuckets_as_user":            ListBuckets_as_user,
@@ -461,6 +478,7 @@ func GetIntTests() IntTests {
	"PutBucketTagging_long_tags":           PutBucketTagging_long_tags,
	"PutBucketTagging_success":             PutBucketTagging_success,
	"GetBucketTagging_non_existing_bucket": GetBucketTagging_non_existing_bucket,
+	"GetBucketTagging_unset_tags":          GetBucketTagging_unset_tags,
	"GetBucketTagging_success":             GetBucketTagging_success,
	"DeleteBucketTagging_non_existing_object": DeleteBucketTagging_non_existing_object,
	"DeleteBucketTagging_success_status":      DeleteBucketTagging_success_status,
@@ -471,6 +489,11 @@ func GetIntTests() IntTests {
	"PutObject_success":              PutObject_success,
	"HeadObject_non_existing_object": HeadObject_non_existing_object,
	"HeadObject_success":             HeadObject_success,
+	"GetObjectAttributes_non_existing_bucket":        GetObjectAttributes_non_existing_bucket,
+	"GetObjectAttributes_non_existing_object":        GetObjectAttributes_non_existing_object,
+	"GetObjectAttributes_existing_object":            GetObjectAttributes_existing_object,
+	"GetObjectAttributes_multipart_upload":           GetObjectAttributes_multipart_upload,
+	"GetObjectAttributes_multipart_upload_truncated": GetObjectAttributes_multipart_upload_truncated,
	"GetObject_non_existing_key": GetObject_non_existing_key,
	"GetObject_invalid_ranges":   GetObject_invalid_ranges,
	"GetObject_with_meta":        GetObject_with_meta,
@@ -503,6 +526,7 @@ func GetIntTests() IntTests {
	"PutObjectTagging_long_tags":           PutObjectTagging_long_tags,
	"PutObjectTagging_success":             PutObjectTagging_success,
	"GetObjectTagging_non_existing_object": GetObjectTagging_non_existing_object,
+	"GetObjectTagging_unset_tags":          GetObjectTagging_unset_tags,
	"GetObjectTagging_success":             GetObjectTagging_success,
	"DeleteObjectTagging_non_existing_object": DeleteObjectTagging_non_existing_object,
	"DeleteObjectTagging_success_status":      DeleteObjectTagging_success_status,
@@ -577,7 +601,7 @@ func GetIntTests() IntTests {
	"PutBucketPolicy_bucket_action_on_object_resource": PutBucketPolicy_bucket_action_on_object_resource,
	"PutBucketPolicy_success":             PutBucketPolicy_success,
	"GetBucketPolicy_non_existing_bucket": GetBucketPolicy_non_existing_bucket,
-	"GetBucketPolicy_default_empty_policy": GetBucketPolicy_default_empty_policy,
+	"GetBucketPolicy_not_set":             GetBucketPolicy_not_set,
	"GetBucketPolicy_success":             GetBucketPolicy_success,
	"DeleteBucketPolicy_non_existing_bucket":   DeleteBucketPolicy_non_existing_bucket,
	"DeleteBucketPolicy_remove_before_setting": DeleteBucketPolicy_remove_before_setting,
@@ -1,7 +1,9 @@
package integration

import (
+	"bytes"
	"context"
+	"crypto/rand"
	"crypto/sha256"
	"encoding/xml"
	"errors"
@@ -337,11 +339,8 @@ func Authentication_credentials_future_date(s *S3Conf) error {
	if resp.StatusCode != http.StatusForbidden {
		return fmt.Errorf("expected response status code to be %v, instead got %v", http.StatusForbidden, resp.StatusCode)
	}
-	if errResp.Code != "SignatureDoesNotMatch" {
-		return fmt.Errorf("expected error code to be %v, instead got %v", "SignatureDoesNotMatch", errResp.Code)
-	}
-	if !strings.Contains(errResp.Message, "Signature not yet current:") {
-		return fmt.Errorf("expected future date error message, instead got %v", errResp.Message)
+	if errResp.Code != "RequestTimeTooSkewed" {
+		return fmt.Errorf("expected error code to be %v, instead got %v", "RequestTimeTooSkewed", errResp.Code)
	}

	return nil
@@ -381,11 +380,8 @@ func Authentication_credentials_past_date(s *S3Conf) error {
	if resp.StatusCode != http.StatusForbidden {
		return fmt.Errorf("expected response status code to be %v, instead got %v", http.StatusForbidden, resp.StatusCode)
	}
-	if errResp.Code != "SignatureDoesNotMatch" {
-		return fmt.Errorf("expected error code to be %v, instead got %v", "SignatureDoesNotMatch", errResp.Code)
-	}
-	if !strings.Contains(errResp.Message, "Signature expired:") {
-		return fmt.Errorf("expected past date error message, instead got %v", errResp.Message)
+	if errResp.Code != "RequestTimeTooSkewed" {
+		return fmt.Errorf("expected error code to be %v, instead got %v", "RequestTimeTooSkewed", errResp.Code)
	}

	return nil
@@ -1788,6 +1784,51 @@ func CreateBucket_non_default_acl(s *S3Conf) error {
	return nil
}

func CreateBucket_default_object_lock(s *S3Conf) error {
	testName := "CreateBucket_default_object_lock"
	runF(testName)

	bucket := getBucketName()
	lockEnabled := true

	client := s3.NewFromConfig(s.Config())

	ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket:                     &bucket,
		ObjectLockEnabledForBucket: &lockEnabled,
	})
	cancel()
	if err != nil {
		failF("%v: %v", testName, err)
		return fmt.Errorf("%v: %w", testName, err)
	}

	ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
	resp, err := client.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
		Bucket: &bucket,
	})
	cancel()
	if err != nil {
		failF("%v: %v", testName, err)
		return fmt.Errorf("%v: %w", testName, err)
	}

	if resp.ObjectLockConfiguration.ObjectLockEnabled != types.ObjectLockEnabledEnabled {
		failF("%v: expected object lock to be enabled", testName)
		return fmt.Errorf("%v: expected object lock to be enabled", testName)
	}

	err = teardown(s, bucket)
	if err != nil {
		failF("%v: %v", testName, err)
		return fmt.Errorf("%v: %w", testName, err)
	}

	passF(testName)
	return nil
}

func HeadBucket_non_existing_bucket(s *S3Conf) error {
	testName := "HeadBucket_non_existing_bucket"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -2187,6 +2228,21 @@ func GetBucketTagging_non_existing_bucket(s *S3Conf) error {
	})
}

func GetBucketTagging_unset_tags(s *S3Conf) error {
	testName := "GetBucketTagging_unset_tags"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err := s3client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
			Bucket: &bucket,
		})
		cancel()
		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)); err != nil {
			return err
		}
		return nil
	})
}

func GetBucketTagging_success(s *S3Conf) error {
	testName := "GetBucketTagging_success"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -2375,6 +2431,107 @@ func PutObject_invalid_long_tags(s *S3Conf) error {
	})
}

func PutObject_missing_object_lock_retention_config(s *S3Conf) error {
	testName := "PutObject_missing_object_lock_retention_config"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		key := "my-obj"

		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err := s3client.PutObject(ctx, &s3.PutObjectInput{
			Bucket:         &bucket,
			Key:            &key,
			ObjectLockMode: types.ObjectLockModeCompliance,
		})
		cancel()
		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders)); err != nil {
			return err
		}

		retainDate := time.Now().Add(time.Hour * 48)

		ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
		_, err = s3client.PutObject(ctx, &s3.PutObjectInput{
			Bucket:                    &bucket,
			Key:                       &key,
			ObjectLockRetainUntilDate: &retainDate,
		})
		cancel()
		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders)); err != nil {
			return err
		}

		return nil
	})
}

func PutObject_with_object_lock(s *S3Conf) error {
	testName := "PutObject_with_object_lock"
	runF(testName)
	bucket, obj, lockStatus := getBucketName(), "my-obj", true

	client := s3.NewFromConfig(s.Config())
	ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket:                     &bucket,
		ObjectLockEnabledForBucket: &lockStatus,
	})
	cancel()
	if err != nil {
		failF("%v: %v", testName, err)
		return fmt.Errorf("%v: %w", testName, err)
	}

	retainDate := time.Now().Add(time.Hour * 48)

	ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:                    &bucket,
		Key:                       &obj,
		ObjectLockLegalHoldStatus: types.ObjectLockLegalHoldStatusOn,
		ObjectLockMode:            types.ObjectLockModeCompliance,
		ObjectLockRetainUntilDate: &retainDate,
	})
	cancel()
	if err != nil {
		failF("%v: %v", testName, err)
		return fmt.Errorf("%v: %w", testName, err)
	}

	ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: &bucket,
		Key:    &obj,
	})
	cancel()
	if err != nil {
		failF("%v: %v", testName, err)
		return fmt.Errorf("%v: %w", testName, err)
	}

	if out.ObjectLockMode != types.ObjectLockModeCompliance {
		failF("%v: expected object lock mode to be %v, instead got %v", testName, types.ObjectLockModeCompliance, out.ObjectLockMode)
		return fmt.Errorf("%v: expected object lock mode to be %v, instead got %v", testName, types.ObjectLockModeCompliance, out.ObjectLockMode)
	}
	if out.ObjectLockLegalHoldStatus != types.ObjectLockLegalHoldStatusOn {
		failF("%v: expected object lock legal hold status to be %v, instead got %v", testName, types.ObjectLockLegalHoldStatusOn, out.ObjectLockLegalHoldStatus)
		return fmt.Errorf("%v: expected object lock legal hold status to be %v, instead got %v", testName, types.ObjectLockLegalHoldStatusOn, out.ObjectLockLegalHoldStatus)
	}

	if err := changeBucketObjectLockStatus(client, bucket, false); err != nil {
		failF("%v: %v", testName, err)
		return fmt.Errorf("%v: %w", testName, err)
	}

	err = teardown(s, bucket)
	if err != nil {
		failF("%v: %v", testName, err)
		return fmt.Errorf("%v: %w", testName, err)
	}

	passF(testName)
	return nil
}

func PutObject_success(s *S3Conf) error {
	testName := "PutObject_success"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -2422,7 +2579,11 @@ func HeadObject_success(s *S3Conf) error {
		"key2": "val2",
	}

-	_, _, err := putObjectWithData(dataLen, &s3.PutObjectInput{Bucket: &bucket, Key: &obj, Metadata: meta}, s3client)
+	_, _, err := putObjectWithData(dataLen, &s3.PutObjectInput{
+		Bucket:   &bucket,
+		Key:      &obj,
+		Metadata: meta,
+	}, s3client)
	if err != nil {
		return err
	}
@@ -2452,6 +2613,247 @@
	})
}

func GetObjectAttributes_non_existing_bucket(s *S3Conf) error {
	testName := "GetObjectAttributes_non_existing_bucket"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err := s3client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
			Bucket:           getPtr(getBucketName()),
			Key:              getPtr("my-obj"),
			ObjectAttributes: []types.ObjectAttributes{},
		})
		cancel()
		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrNoSuchBucket)); err != nil {
			return err
		}

		return nil
	})
}

func GetObjectAttributes_non_existing_object(s *S3Conf) error {
	testName := "GetObjectAttributes_non_existing_object"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err := s3client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
			Bucket:           &bucket,
			Key:              getPtr("my-obj"),
			ObjectAttributes: []types.ObjectAttributes{},
		})
		cancel()
		if err := checkSdkApiErr(err, "NoSuchKey"); err != nil {
			return err
		}

		return nil
	})
}
func GetObjectAttributes_existing_object(s *S3Conf) error {
	testName := "GetObjectAttributes_existing_object"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		obj, data_len := "my-obj", int64(45679)
		data := make([]byte, data_len)

		_, err := rand.Read(data)
		if err != nil {
			return err
		}

		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		resp, err := s3client.PutObject(ctx, &s3.PutObjectInput{
			Bucket: &bucket,
			Key:    &obj,
			Body:   bytes.NewReader(data),
		})
		cancel()
		if err != nil {
			return err
		}

		ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
		out, err := s3client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
			Bucket: &bucket,
			Key:    &obj,
			ObjectAttributes: []types.ObjectAttributes{
				types.ObjectAttributesEtag,
				types.ObjectAttributesObjectSize,
			},
		})
		cancel()
		if err != nil {
			return err
		}

		if resp.ETag == nil || out.ETag == nil {
			return fmt.Errorf("nil ETag output")
		}
		if *resp.ETag != *out.ETag {
			return fmt.Errorf("expected ETag to be %v, instead got %v", *resp.ETag, *out.ETag)
		}
		if out.ObjectSize == nil {
			return fmt.Errorf("nil object size output")
		}
		if *out.ObjectSize != data_len {
			return fmt.Errorf("expected object size to be %v, instead got %v", data_len, *out.ObjectSize)
		}
		if out.Checksum != nil {
			return fmt.Errorf("expected checksum to be nil, instead got %v", *out.Checksum)
		}

		return nil
	})
}
func GetObjectAttributes_multipart_upload(s *S3Conf) error {
	testName := "GetObjectAttributes_multipart_upload"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		obj := "my-obj"
		out, err := createMp(s3client, bucket, obj)
		if err != nil {
			return err
		}

		parts, err := uploadParts(s3client, 5*1024*1024, 5, bucket, obj, *out.UploadId)
		if err != nil {
			return err
		}

		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		resp, err := s3client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
			Bucket: &bucket,
			Key:    &obj,
			ObjectAttributes: []types.ObjectAttributes{
				types.ObjectAttributesObjectParts,
			},
		})
		cancel()
		if err != nil {
			return err
		}

		if resp.ObjectParts == nil {
			return fmt.Errorf("expected non nil object parts")
		}

		for i, p := range resp.ObjectParts.Parts {
			if *p.PartNumber != *parts[i].PartNumber {
				return fmt.Errorf("expected part number to be %v, instead got %v", *parts[i].PartNumber, *p.PartNumber)
			}
			if *p.Size != *parts[i].Size {
				return fmt.Errorf("expected part size to be %v, instead got %v", *parts[i].Size, *p.Size)
			}
		}

		return nil
	})
}
func GetObjectAttributes_multipart_upload_truncated(s *S3Conf) error {
	testName := "GetObjectAttributes_multipart_upload_truncated"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		obj := "my-obj"
		out, err := createMp(s3client, bucket, obj)
		if err != nil {
			return err
		}

		parts, err := uploadParts(s3client, 5*1024*1024, 5, bucket, obj, *out.UploadId)
		if err != nil {
			return err
		}

		maxParts := int32(3)

		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		resp, err := s3client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
			Bucket: &bucket,
			Key:    &obj,
			ObjectAttributes: []types.ObjectAttributes{
				types.ObjectAttributesObjectParts,
			},
			MaxParts: &maxParts,
		})
		cancel()
		if err != nil {
			return err
		}

		if resp.ObjectParts == nil {
			return fmt.Errorf("expected non nil object parts")
		}
		if resp.ObjectParts.IsTruncated == nil {
			return fmt.Errorf("expected non nil isTruncated")
		}
		if !*resp.ObjectParts.IsTruncated {
			return fmt.Errorf("expected object parts to be truncated")
		}
		if resp.ObjectParts.MaxParts == nil {
			return fmt.Errorf("expected non nil max-parts")
		}
		if *resp.ObjectParts.MaxParts != maxParts {
			return fmt.Errorf("expected max-parts to be %v, instead got %v", maxParts, *resp.ObjectParts.MaxParts)
		}
		if resp.ObjectParts.NextPartNumberMarker == nil {
			return fmt.Errorf("expected non nil NextPartNumberMarker")
		}
		if *resp.ObjectParts.NextPartNumberMarker != fmt.Sprint(*parts[2].PartNumber) {
			return fmt.Errorf("expected NextPartNumberMarker to be %v, instead got %v", fmt.Sprint(*parts[2].PartNumber), *resp.ObjectParts.NextPartNumberMarker)
		}
		if len(resp.ObjectParts.Parts) != int(maxParts) {
			return fmt.Errorf("expected length of parts to be %v, instead got %v", maxParts, len(resp.ObjectParts.Parts))
		}

		for i, p := range resp.ObjectParts.Parts {
			if *p.PartNumber != *parts[i].PartNumber {
				return fmt.Errorf("expected part number to be %v, instead got %v", *parts[i].PartNumber, *p.PartNumber)
			}
			if *p.Size != *parts[i].Size {
				return fmt.Errorf("expected part size to be %v, instead got %v", *parts[i].Size, *p.Size)
			}
		}

		ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
		resp, err = s3client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
			Bucket: &bucket,
			Key:    &obj,
			ObjectAttributes: []types.ObjectAttributes{
				types.ObjectAttributesObjectParts,
			},
			PartNumberMarker: resp.ObjectParts.NextPartNumberMarker,
		})
		cancel()
		if err != nil {
			return err
		}

		if resp.ObjectParts == nil {
			return fmt.Errorf("expected non nil object parts")
		}
		if resp.ObjectParts.IsTruncated == nil {
			return fmt.Errorf("expected non nil isTruncated")
		}
		if *resp.ObjectParts.IsTruncated {
			return fmt.Errorf("expected object parts to not be truncated")
		}

		if len(resp.ObjectParts.Parts) != len(parts)-int(maxParts) {
			return fmt.Errorf("expected length of parts to be %v, instead got %v", len(parts)-int(maxParts), len(resp.ObjectParts.Parts))
		}

		for i, p := range resp.ObjectParts.Parts {
			if *p.PartNumber != *parts[i+int(maxParts)].PartNumber {
				return fmt.Errorf("expected part number to be %v, instead got %v", *parts[i+int(maxParts)].PartNumber, *p.PartNumber)
			}
			if *p.Size != *parts[i+int(maxParts)].Size {
				return fmt.Errorf("expected part size to be %v, instead got %v", *parts[i+int(maxParts)].Size, *p.Size)
			}
		}

		return nil
	})
}

func GetObject_non_existing_key(s *S3Conf) error {
	testName := "GetObject_non_existing_key"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -3520,6 +3922,26 @@ func GetObjectTagging_non_existing_object(s *S3Conf) error {
|
||||
})
|
||||
}
|
||||
|
||||
func GetObjectTagging_unset_tags(s *S3Conf) error {
	testName := "GetObjectTagging_unset_tags"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		obj := "my-obj"
		if err := putObjects(s3client, []string{obj}, bucket); err != nil {
			return err
		}
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		_, err := s3client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
			Bucket: &bucket,
			Key:    &obj,
		})
		cancel()
		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)); err != nil {
			return err
		}
		return nil
	})
}
func GetObjectTagging_success(s *S3Conf) error {
	testName := "GetObjectTagging_success"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {

@@ -5682,22 +6104,18 @@ func GetBucketPolicy_non_existing_bucket(s *S3Conf) error {
	})
}
func GetBucketPolicy_default_empty_policy(s *S3Conf) error {
	testName := "GetBucketPolicy_default_empty_policy"
func GetBucketPolicy_not_set(s *S3Conf) error {
	testName := "GetBucketPolicy_not_set"
	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		out, err := s3client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
		_, err := s3client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
			Bucket: &bucket,
		})
		cancel()
		if err != nil {
		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)); err != nil {
			return err
		}

		if out.Policy != nil {
			return fmt.Errorf("expected policy to be nil, instead got %s", *out.Policy)
		}

		return nil
	})
}
@@ -5789,19 +6207,15 @@ func DeleteBucketPolicy_success(s *S3Conf) error {
	}

	ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
	out, err := s3client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
	_, err = s3client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
		Bucket: &bucket,
	})
	cancel()
	if err != nil {
	if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)); err != nil {
		return err
	}

	if out.Policy != nil {
		return fmt.Errorf("expected policy to be nil, instead got %s", *out.Policy)
	}

	return err
	return nil
	})
}
@@ -522,6 +522,7 @@ func uploadParts(client *s3.Client, size, partCount int, bucket, key, uploadId s
	parts = append(parts, types.Part{
		ETag:       out.ETag,
		PartNumber: &pn,
		Size:       &partSize,
	})
	offset += partSize
}
@@ -11,4 +11,7 @@ log() {
    return 0
  fi
  echo "$2"
  if [[ -n "$TEST_LOG_FILE" ]]; then
    echo "$2" >> "$TEST_LOG_FILE"
  fi
}
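
The updated log helper mirrors every message to a file when TEST_LOG_FILE is set. A minimal sketch of driving it directly (the level, message, and log path here are illustrative, not from the suite):

  source ./tests/logger.sh
  export LOG_LEVEL=4
  export TEST_LOG_FILE=/tmp/versitygw-tests.log
  log 4 "starting bucket setup"   # echoed to stdout and appended to the log file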
16 tests/run.sh
@@ -6,7 +6,6 @@ show_help() {
  echo " -h, --help Display this help message and exit"
  echo " -s, --static Don't remove buckets between tests"
  echo " aws Run tests with aws cli"
  echo " aws-posix Run posix tests with aws cli"
  echo " s3cmd Run tests with s3cmd utility"
  echo " mc Run tests with mc utility"
}

@@ -20,7 +19,7 @@ handle_param() {
  -s|--static)
    export RECREATE_BUCKETS=false
    ;;
  aws|aws-posix|s3cmd|mc|user)
  s3|s3api|aws|s3cmd|mc|user)
    set_command_type "$1"
    ;;
  *) # Handle unrecognized options or positional arguments

@@ -65,27 +64,28 @@ if [[ $RECREATE_BUCKETS == false ]]; then
fi

case $command_type in
  aws)
  s3api|aws)
    echo "Running aws tests ..."
    "$HOME"/bin/bats ./tests/test_aws.sh || exit_code=$?
    if [[ $exit_code -eq 0 ]]; then
      "$HOME"/bin/bats ./tests/test_user_aws.sh || exit_code=$?
    fi
    ;;
  aws-posix)
    "$HOME"/bin/bats ./tests/test_aws_posix.sh || exit_code=$?
  s3)
    echo "Running s3 tests ..."
    "$HOME"/bin/bats ./tests/test_s3.sh || exit_code=$?
    ;;
  s3cmd)
    echo "Running s3cmd tests ..."
    "$HOME"/bin/bats ./tests/test_s3cmd.sh || exit_code=$?
    if [[ $exit_code -eq 0 ]]; then
      "$HOME"/bin/bats ./tests/test_user_s3cmd.sh || exit_code=$?
    fi
    ;;
  mc)
    echo "Running mc tests ..."
    "$HOME"/bin/bats ./tests/test_mc.sh || exit_code=$?
    ;;
  user)
    "$HOME"/bin/bats ./tests/test_user_aws.sh || exit_code=$?
    ;;
esac

exit $exit_code
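
With the renamed command types, each client suite can be selected from the top-level runner. Sketch invocations, assuming they are run from the repository root (the --static placement mirrors handle_param, which accepts options alongside the command type):

  ./tests/run.sh s3api            # aws s3api-based tests ("aws" selects the same suite)
  ./tests/run.sh s3               # high-level `aws s3` tests
  ./tests/run.sh s3cmd
  ./tests/run.sh mc
  ./tests/run.sh --static s3api   # keep buckets between tests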
@@ -12,7 +12,7 @@ export RECREATE_BUCKETS
if ! ./tests/run.sh aws; then
  exit 1
fi
if ! ./tests/run.sh aws-posix; then
if ! ./tests/run.sh s3; then
  exit 1
fi
if ! ./tests/run.sh s3cmd; then
@@ -17,6 +17,12 @@ setup() {
    return 1
  fi

  log 4 "Running test $BATS_TEST_NAME"
  if [[ $LOG_LEVEL -ge 5 ]]; then
    start_time=$(date +%s)
    export start_time
  fi

  if [[ $RUN_S3CMD == true ]]; then
    S3CMD_OPTS=()
    S3CMD_OPTS+=(-c "$S3CMD_CONFIG")

@@ -59,6 +65,9 @@ check_params() {
  else
    export LOG_LEVEL
  fi
  if [[ -n "$TEST_LOG_FILE" ]]; then
    export TEST_LOG_FILE
  fi
  return 0
}

@@ -72,4 +81,8 @@ fail() {
# bats teardown function
teardown() {
  stop_versity
  if [[ $LOG_LEVEL -ge 5 ]]; then
    end_time=$(date +%s)
    log 4 "Total test time: $((end_time - start_time))"
  fi
}
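
The per-test timing relies on nothing more than epoch-second arithmetic, so the pattern can be checked in isolation (variable names match the harness; resolution is whole seconds):

  start_time=$(date +%s)
  sleep 2   # stand-in for the test body
  end_time=$(date +%s)
  echo "Total test time: $((end_time - start_time))"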
@@ -6,6 +6,77 @@ source ./tests/util_aws.sh
source ./tests/util_bucket_create.sh
source ./tests/util_file.sh
source ./tests/test_common.sh
source ./tests/commands/copy_object.sh
source ./tests/commands/delete_bucket_policy.sh
source ./tests/commands/delete_object_tagging.sh
source ./tests/commands/get_bucket_policy.sh
source ./tests/commands/get_object.sh
source ./tests/commands/put_bucket_policy.sh
source ./tests/commands/put_object.sh
@test "test_abort_multipart_upload" {
|
||||
local bucket_file="bucket-file"
|
||||
bucket_file_data="test file\n"
|
||||
|
||||
create_test_files "$bucket_file" || local created=$?
|
||||
printf "%s" "$bucket_file_data" > "$test_file_folder"/$bucket_file
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || local result=$?
|
||||
[[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
|
||||
|
||||
run_then_abort_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || abort_result=$?
|
||||
[[ $abort_result -eq 0 ]] || fail "Abort failed"
|
||||
|
||||
object_exists "aws" "$BUCKET_ONE_NAME" "$bucket_file" || exists=$?
|
||||
[[ $exists -eq 1 ]] || fail "Upload file exists after abort"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files $bucket_file
|
||||
}
|
||||
|
||||
@test "test_complete_multipart_upload" {
|
||||
local bucket_file="bucket-file"
|
||||
bucket_file_data="test file\n"
|
||||
|
||||
create_test_files "$bucket_file" || local created=$?
|
||||
printf "%s" "$bucket_file_data" > "$test_file_folder"/$bucket_file
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || local result=$?
|
||||
[[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
|
||||
|
||||
multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || upload_result=$?
|
||||
[[ $upload_result -eq 0 ]] || fail "Error performing multipart upload"
|
||||
|
||||
copy_file "s3://$BUCKET_ONE_NAME/$bucket_file" "$test_file_folder/$bucket_file-copy"
|
||||
compare_files "$test_file_folder/$bucket_file-copy" "$test_file_folder"/$bucket_file || compare_result=$?
|
||||
[[ $compare_result -eq 0 ]] || fail "Files do not match"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files $bucket_file
|
||||
}
|
||||
|
||||
@test "test_put_object" {
|
||||
bucket_file="bucket_file"
|
||||
|
||||
create_test_files "$bucket_file" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || local setup_result=$?
|
||||
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"
|
||||
setup_bucket "s3api" "$BUCKET_TWO_NAME" || local setup_result_two=$?
|
||||
[[ $setup_result_two -eq 0 ]] || fail "Bucket two setup error"
|
||||
put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
error=$(aws --no-verify-ssl s3api copy-object --copy-source "$BUCKET_ONE_NAME/$bucket_file" --key "$bucket_file" --bucket "$BUCKET_TWO_NAME" 2>&1) || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Error copying file: $error"
|
||||
copy_file "s3://$BUCKET_TWO_NAME/$bucket_file" "$test_file_folder/${bucket_file}_copy" || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
compare_files "$test_file_folder/$bucket_file" "$test_file_folder/${bucket_file}_copy" || local compare_result=$?
|
||||
[[ $compare_result -eq 0 ]] || file "files don't match"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_bucket_or_contents "aws" "$BUCKET_TWO_NAME"
|
||||
delete_test_files "$bucket_file"
|
||||
}
|
||||
|
||||
# test creation and deletion of bucket on versitygw
@test "test_create_delete_bucket_aws" {

@@ -21,22 +92,20 @@ source ./tests/test_common.sh
  [[ $create_result -eq 0 ]] || fail "Invalid name test failed"

  [[ "$bucket_create_error" == *"Invalid bucket name "* ]] || fail "unexpected error: $bucket_create_error"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
}

# test adding and removing an object on versitygw
@test "test_put_object-with-data" {
@test "test_put_object_with_data" {
  test_common_put_object_with_data "aws"
}

@test "test_put_object-no-data" {
@test "test_put_object_no_data" {
  test_common_put_object_no_data "aws"
}

# test listing buckets on versitygw
@test "test_list_buckets" {
  test_common_list_buckets "aws"
  test_common_list_buckets "s3api"
}
# test listing a bucket's objects on versitygw

@@ -80,7 +149,6 @@ source ./tests/test_common.sh
#  delete_bucket_or_contents "$BUCKET_ONE_NAME"
#}

# test ability to delete multiple objects from bucket
@test "test_delete_objects" {
  local object_one="test-file-one"

@@ -91,9 +159,9 @@ source ./tests/test_common.sh
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local result_one=$?
  [[ $result_one -eq 0 ]] || fail "Error creating bucket"

  put_object "aws" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME"/"$object_one" || local result_two=$?
  put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || local result_two=$?
  [[ $result_two -eq 0 ]] || fail "Error adding object one"
  put_object "aws" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME"/"$object_two" || local result_three=$?
  put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local result_three=$?
  [[ $result_three -eq 0 ]] || fail "Error adding object two"

  error=$(aws --no-verify-ssl s3api delete-objects --bucket "$BUCKET_ONE_NAME" --delete '{

@@ -104,9 +172,9 @@ source ./tests/test_common.sh
  }') || local result=$?
  [[ $result -eq 0 ]] || fail "Error deleting objects: $error"

  object_exists "aws" "$BUCKET_ONE_NAME"/"$object_one" || local exists_one=$?
  object_exists "aws" "$BUCKET_ONE_NAME" "$object_one" || local exists_one=$?
  [[ $exists_one -eq 1 ]] || fail "Object one not deleted"
  object_exists "aws" "$BUCKET_ONE_NAME"/"$object_two" || local exists_two=$?
  object_exists "aws" "$BUCKET_ONE_NAME" "$object_two" || local exists_two=$?
  [[ $exists_two -eq 1 ]] || fail "Object two not deleted"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
@@ -129,20 +197,22 @@ source ./tests/test_common.sh
  printf "%s" "$object_two_data" > "$test_file_folder"/"$object_two"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
  put_object "aws" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME"/"$object_one" || local put_object_one=$?
  [[ $put_object_one -eq 0 ]] || fail "Failed to add object $object_one"
  put_object "aws" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME"/"$object_two" || local put_object_two=$?
  [[ $put_object_two -eq 0 ]] || fail "Failed to add object $object_two"
  put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || local copy_result_one=$?
  [[ $copy_result_one -eq 0 ]] || fail "Failed to add object $object_one"
  put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local copy_result_two=$?
  [[ $copy_result_two -eq 0 ]] || fail "Failed to add object $object_two"

  sleep 1

  list_objects_s3api_v1 "$BUCKET_ONE_NAME"
  key_one=$(echo "$objects" | jq '.Contents[0].Key')
  [[ $key_one == '"'$object_one'"' ]] || fail "Object one mismatch"
  size_one=$(echo "$objects" | jq '.Contents[0].Size')
  [[ $size_one -eq 0 ]] || fail "Object one size mismatch"
  key_two=$(echo "$objects" | jq '.Contents[1].Key')
  [[ $key_two == '"'$object_two'"' ]] || fail "Object two mismatch"
  key_one=$(echo "$objects" | jq -r '.Contents[0].Key')
  [[ $key_one == "$object_one" ]] || fail "Object one mismatch ($key_one, $object_one)"
  size_one=$(echo "$objects" | jq -r '.Contents[0].Size')
  [[ $size_one -eq 0 ]] || fail "Object one size mismatch ($size_one, 0)"
  key_two=$(echo "$objects" | jq -r '.Contents[1].Key')
  [[ $key_two == "$object_two" ]] || fail "Object two mismatch ($key_two, $object_two)"
  size_two=$(echo "$objects" | jq '.Contents[1].Size')
  [[ $size_two -eq ${#object_two_data} ]] || fail "Object two size mismatch"
  [[ $size_two -eq ${#object_two_data} ]] || fail "Object two size mismatch ($size_two, ${#object_two_data})"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files "$object_one" "$object_two"
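
The move to jq -r is what lets these comparisons drop the embedded quotes: -r emits the raw string rather than a JSON-encoded one. A quick illustration with a hypothetical listing payload:

  objects='{"Contents":[{"Key":"test-file-one","Size":0}]}'
  echo "$objects" | jq '.Contents[0].Key'     # "test-file-one" (quoted JSON string)
  echo "$objects" | jq -r '.Contents[0].Key'  # test-file-one (raw, compares cleanly to $object_one)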
@@ -159,20 +229,20 @@ source ./tests/test_common.sh
  printf "%s" "$object_two_data" > "$test_file_folder"/"$object_two"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
  put_object "aws" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME"/"$object_one" || local put_object_one=$?
  [[ $put_object_one -eq 0 ]] || fail "Failed to add object $object_one"
  put_object "aws" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME"/"$object_two" || local put_object_two=$?
  [[ $put_object_two -eq 0 ]] || fail "Failed to add object $object_two"
  put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || local copy_object_one=$?
  [[ $copy_object_one -eq 0 ]] || fail "Failed to add object $object_one"
  put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local copy_object_two=$?
  [[ $copy_object_two -eq 0 ]] || fail "Failed to add object $object_two"

  list_objects_s3api_v2 "$BUCKET_ONE_NAME"
  key_one=$(echo "$objects" | jq '.Contents[0].Key')
  [[ $key_one == '"'$object_one'"' ]] || fail "Object one mismatch"
  size_one=$(echo "$objects" | jq '.Contents[0].Size')
  [[ $size_one -eq 0 ]] || fail "Object one size mismatch"
  key_two=$(echo "$objects" | jq '.Contents[1].Key')
  [[ $key_two == '"'$object_two'"' ]] || fail "Object two mismatch"
  size_two=$(echo "$objects" | jq '.Contents[1].Size')
  [[ $size_two -eq ${#object_two_data} ]] || fail "Object two size mismatch"
  key_one=$(echo "$objects" | jq -r '.Contents[0].Key')
  [[ $key_one == "$object_one" ]] || fail "Object one mismatch ($key_one, $object_one)"
  size_one=$(echo "$objects" | jq -r '.Contents[0].Size')
  [[ $size_one -eq 0 ]] || fail "Object one size mismatch ($size_one, 0)"
  key_two=$(echo "$objects" | jq -r '.Contents[1].Key')
  [[ $key_two == "$object_two" ]] || fail "Object two mismatch ($key_two, $object_two)"
  size_two=$(echo "$objects" | jq -r '.Contents[1].Size')
  [[ $size_two -eq ${#object_two_data} ]] || fail "Object two size mismatch ($size_two, ${#object_two_data})"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files "$object_one" "$object_two"

@@ -183,49 +253,6 @@ source ./tests/test_common.sh
  test_common_set_get_object_tags "aws"
}
# test multi-part upload
@test "test-multi-part-upload" {
  local bucket_file="bucket-file"
  bucket_file_data="test file\n"

  create_test_files "$bucket_file" || local created=$?
  printf "%s" "$bucket_file_data" > "$test_file_folder"/$bucket_file
  [[ $created -eq 0 ]] || fail "Error creating test files"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || upload_result=$?
  [[ $upload_result -eq 0 ]] || fail "Error performing multipart upload"

  copy_file "s3://$BUCKET_ONE_NAME/$bucket_file" "$test_file_folder/$bucket_file-copy"
  compare_files "$test_file_folder/$bucket_file-copy" "$test_file_folder"/$bucket_file || compare_result=$?
  [[ $compare_result -eq 0 ]] || fail "Files do not match"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}

# test multi-part upload abort
@test "test-multi-part-upload-abort" {
  local bucket_file="bucket-file"
  bucket_file_data="test file\n"

  create_test_files "$bucket_file" || local created=$?
  printf "%s" "$bucket_file_data" > "$test_file_folder"/$bucket_file
  [[ $created -eq 0 ]] || fail "Error creating test files"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  abort_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || abort_result=$?
  [[ $abort_result -eq 0 ]] || fail "Abort failed"

  object_exists "aws" "$BUCKET_ONE_NAME/$bucket_file" || exists=$?
  [[ $exists -eq 1 ]] || fail "Upload file exists after abort"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}
# test multi-part upload list parts command
@test "test-multipart-upload-list-parts" {
  local bucket_file="bucket-file"

@@ -241,7 +268,7 @@ source ./tests/test_common.sh
  [[ list_result -eq 0 ]] || fail "Listing multipart upload parts failed"

  declare -a parts_map
  for ((i=0;i<$4;i++)) {
  for i in {0..3}; do
    local part_number
    local etag
    part_number=$(echo "$parts" | jq ".[$i].PartNumber")

@@ -255,9 +282,10 @@ source ./tests/test_common.sh
      return 1
    fi
    parts_map[$part_number]=$etag
  }
  done
  [[ ${#parts_map[@]} -ne 0 ]] || fail "error loading multipart upload parts to check"

  for ((i=0;i<$4;i++)) {
  for i in {0..3}; do
    local part_number
    local etag
    part_number=$(echo "$listed_parts" | jq ".Parts[$i].PartNumber")

@@ -266,9 +294,9 @@ source ./tests/test_common.sh
      echo "error: etags don't match (part number: $part_number, etags ${parts_map[$part_number]},$etag)"
      return 1
    fi
  }
  done

  run_abort_command "$BUCKET_ONE_NAME" "$bucket_file" $upload_id
  run_then_abort_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder/$bucket_file" 4
  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}
@@ -322,7 +350,7 @@ source ./tests/test_common.sh
  multipart_upload_from_bucket "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || upload_result=$?
  [[ $upload_result -eq 0 ]] || fail "Error performing multipart upload"

  copy_file "s3://$BUCKET_ONE_NAME/$bucket_file-copy" "$test_file_folder/$bucket_file-copy"
  get_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file-copy" "$test_file_folder/$bucket_file-copy"
  compare_files "$test_file_folder"/$bucket_file-copy "$test_file_folder"/$bucket_file || compare_result=$?
  [[ $compare_result -eq 0 ]] || fail "Data doesn't match"
@@ -345,8 +373,8 @@ source ./tests/test_common.sh
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"

  put_object "aws" "$test_file_folder"/"$folder_name"/"$object_name" "$BUCKET_ONE_NAME"/"$folder_name"/"$object_name" || local put_object=$?
  [[ $put_object -eq 0 ]] || fail "Failed to add object to bucket"
  put_object "aws" "$test_file_folder/$folder_name/$object_name" "$BUCKET_ONE_NAME" "$folder_name/$object_name" || local copy_result=$?
  [[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"

  list_objects_s3api_v1 "$BUCKET_ONE_NAME" "/"
  prefix=$(echo "${objects[@]}" | jq ".CommonPrefixes[0].Prefix")

@@ -361,9 +389,9 @@ source ./tests/test_common.sh
}

# ensure that lists of files greater than a size of 1000 (pagination) are returned properly
@test "test_list_objects_file_count" {
  test_common_list_objects_file_count "aws"
}
#@test "test_list_objects_file_count" {
#  test_common_list_objects_file_count "aws"
#}

#@test "test_filename_length" {
#  file_name=$(printf "%0.sa" $(seq 1 1025))

@@ -395,30 +423,6 @@ source ./tests/test_common.sh
  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
}
@test "test_copy_object_aws" {
|
||||
|
||||
bucket_file="bucket_file"
|
||||
|
||||
create_test_files "$bucket_file" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
|
||||
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"
|
||||
setup_bucket "aws" "$BUCKET_TWO_NAME" || local setup_result_two=$?
|
||||
[[ $setup_result_two -eq 0 ]] || fail "Bucket two setup error"
|
||||
put_object "aws" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME"/"$bucket_file" || local put_object=$?
|
||||
[[ $put_object -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
error=$(aws --no-verify-ssl s3api copy-object --copy-source "$BUCKET_ONE_NAME"/"$bucket_file" --key "$bucket_file" --bucket "$BUCKET_TWO_NAME" 2>&1) || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Error copying file: $error"
|
||||
copy_file "s3://$BUCKET_TWO_NAME"/"$bucket_file" "$test_file_folder/${bucket_file}_copy" || local put_object=$?
|
||||
[[ $put_object -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
compare_files "$test_file_folder/$bucket_file" "$test_file_folder/${bucket_file}_copy" || local compare_result=$?
|
||||
[[ $compare_result -eq 0 ]] || file "files don't match"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_bucket_or_contents "aws" "$BUCKET_TWO_NAME"
|
||||
delete_test_files "$bucket_file"
|
||||
}
|
||||
|
||||
@test "test_add_object_metadata" {
|
||||
|
||||
object_one="object-one"
|
||||
@@ -432,15 +436,27 @@ source ./tests/test_common.sh
|
||||
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"
|
||||
|
||||
object="$test_file_folder"/"$object_one"
|
||||
put_object_with_metadata "aws" "$object" "$BUCKET_ONE_NAME" "$test_key" "$test_value" || put_object=$?
|
||||
[[ $put_object -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
object_exists "aws" "$object" || local exists_result_one=$?
|
||||
put_object_with_metadata "aws" "$object" "$BUCKET_ONE_NAME" "$object_one" "$test_key" "$test_value" || copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
object_exists "aws" "$BUCKET_ONE_NAME" "$object_one" || local exists_result_one=$?
|
||||
[[ $exists_result_one -eq 0 ]] || fail "Object not added to bucket"
|
||||
|
||||
get_object_metadata "aws" "$BUCKET_ONE_NAME" "$object" || get_result=$?
|
||||
get_object_metadata "aws" "$BUCKET_ONE_NAME" "$object_one" || get_result=$?
|
||||
[[ $get_result -eq 0 ]] || fail "error getting object metadata"
|
||||
key=$(echo "$metadata" | jq 'keys[]')
|
||||
value=$(echo "$metadata" | jq '.[]')
|
||||
[[ $key == "\"$test_key\"" ]] || fail "keys doesn't match (expected $key, actual \"$test_key\")"
|
||||
[[ $value == "\"$test_value\"" ]] || fail "values doesn't match (expected $value, actual \"$test_value\")"
|
||||
}
|
||||
|
||||
@test "test_delete_object_tagging" {
|
||||
test_common_delete_object_tagging "aws"
|
||||
}
|
||||
|
||||
@test "test_get_bucket_location" {
|
||||
test_common_get_bucket_location "aws"
|
||||
}
|
||||
|
||||
@test "test_get_put_delete_bucket_policy" {
|
||||
test_common_get_put_delete_bucket_policy "aws"
|
||||
}
|
||||
@@ -1,94 +0,0 @@
#!/usr/bin/env bats

source ./tests/setup.sh
source ./tests/util.sh
source ./tests/util_bucket_create.sh
source ./tests/util_file.sh
source ./tests/util_posix.sh

# test that changes to local folders and files are reflected on S3
@test "test_local_creation_deletion" {

  if [[ $RECREATE_BUCKETS != "true" ]]; then
    return
  fi

  local object_name="test-object"

  if [[ -e "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME" ]]; then
    rm -rf "${LOCAL_FOLDER:?}"/"${BUCKET_ONE_NAME:?}"
  fi

  mkdir "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME"
  local object="$BUCKET_ONE_NAME"/"$object_name"
  touch "$LOCAL_FOLDER"/"$object"

  bucket_exists_remote_and_local "$BUCKET_ONE_NAME" || local bucket_exists_two=$?
  [[ $bucket_exists_two -eq 0 ]] || fail "Failed bucket existence check"
  object_exists_remote_and_local "$object" || local object_exists_two=$?
  [[ $object_exists_two -eq 0 ]] || fail "Failed object existence check"

  rm "$LOCAL_FOLDER"/"$object"
  sleep 1
  object_not_exists_remote_and_local "$object" || local object_deleted=$?
  [[ $object_deleted -eq 0 ]] || fail "Failed object deletion check"

  rmdir "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME"
  sleep 1
  bucket_not_exists_remote_and_local "$BUCKET_ONE_NAME" || local bucket_deleted=$?
  [[ $bucket_deleted -eq 0 ]] || fail "Failed bucket deletion check"
}

# test head-object command
@test "test_head_object" {

  local bucket_name=$BUCKET_ONE_NAME
  local object_name="object-one"

  create_test_files $object_name
  if [ -e "$LOCAL_FOLDER"/"$bucket_name"/$object_name ]; then
    chmod 755 "$LOCAL_FOLDER"/"$bucket_name"/$object_name
  fi
  setup_bucket "aws" "$bucket_name" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating bucket"
  put_object "aws" "$test_file_folder"/"$object_name" "$bucket_name"/"$object_name" || local result="$?"
  [[ result -eq 0 ]] || fail "Error adding object one"

  chmod 000 "$LOCAL_FOLDER"/"$bucket_name"/$object_name
  sleep 1
  object_is_accessible "$bucket_name" $object_name || local accessible=$?
  [[ $accessible -eq 1 ]] || fail "Object should be inaccessible"

  chmod 755 "$LOCAL_FOLDER"/"$bucket_name"/$object_name
  sleep 1
  object_is_accessible "$bucket_name" $object_name || local accessible_two=$?
  [[ $accessible_two -eq 0 ]] || fail "Object should be accessible"

  delete_object "aws" "$bucket_name"/$object_name
  delete_bucket_or_contents "aws" "$bucket_name"
  delete_test_files $object_name
}

# check info, accessibility of bucket
@test "test_get_bucket_info" {

  if [ -e "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME" ]; then
    chmod 755 "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME"
    sleep 1
  else
    setup_bucket "aws" "$BUCKET_ONE_NAME" || local created=$?
    [[ $created -eq 0 ]] || fail "Error creating bucket"
  fi

  chmod 000 "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME"
  sleep 1
  bucket_is_accessible "$BUCKET_ONE_NAME" || local accessible=$?
  [[ $accessible -eq 1 ]] || fail "Bucket should be inaccessible"

  chmod 755 "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME"
  sleep 1
  bucket_is_accessible "$BUCKET_ONE_NAME" || local accessible_two=$?
  [[ $accessible_two -eq 0 ]] || fail "Bucket should be accessible"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
}
@@ -1,7 +1,35 @@
#!/usr/bin/env bats

source ./tests/setup.sh
source ./tests/util.sh
source ./tests/util_file.sh
source ./tests/util_policy.sh
source ./tests/commands/copy_object.sh
source ./tests/commands/delete_object_tagging.sh
source ./tests/commands/get_bucket_location.sh
source ./tests/commands/get_bucket_tagging.sh
source ./tests/commands/list_buckets.sh
source ./tests/commands/put_object.sh

test_common_multipart_upload() {
  if [[ $# -ne 1 ]]; then
    echo "multipart upload command missing command type"
    return 1
  fi
  bucket_file="largefile"

  create_large_file "$bucket_file" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test file for multipart upload"

  setup_bucket "$1" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  put_object "$1" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local put_result=$?
  [[ $put_result -eq 0 ]] || fail "failed to copy file"

  delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}
# common test for creating, deleting buckets
# param: "aws" or "s3cmd"

@@ -56,15 +84,14 @@ test_common_put_object() {
  setup_bucket "$1" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"

  object="$BUCKET_ONE_NAME"/"$2"
  put_object "$1" "$test_file_folder"/"$2" "$object" || local put_object=$?
  [[ $put_object -eq 0 ]] || fail "Failed to add object to bucket"
  object_exists "$1" "$object" || local exists_result_one=$?
  put_object "$1" "$test_file_folder/$2" "$BUCKET_ONE_NAME" "$2" || local copy_result=$?
  [[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
  object_exists "$1" "$BUCKET_ONE_NAME" "$2" || local exists_result_one=$?
  [[ $exists_result_one -eq 0 ]] || fail "Object not added to bucket"

  delete_object "$1" "$object" || local delete_result=$?
  delete_object "$1" "$BUCKET_ONE_NAME" "$2" || local delete_result=$?
  [[ $delete_result -eq 0 ]] || fail "Failed to delete object"
  object_exists "$1" "$object" || local exists_result_two=$?
  object_exists "$1" "$BUCKET_ONE_NAME" "$2" || local exists_result_two=$?
  [[ $exists_result_two -eq 1 ]] || fail "Object not removed from bucket"

  delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
@@ -90,6 +117,7 @@ test_common_list_buckets() {
  if [ -z "$bucket_array" ]; then
    fail "bucket_array parameter not exported"
  fi
  log 5 "bucket array: ${bucket_array[*]}"
  for bucket in "${bucket_array[@]}"; do
    if [ "$bucket" == "$BUCKET_ONE_NAME" ] || [ "$bucket" == "s3://$BUCKET_ONE_NAME" ]; then
      bucket_one_found=true

@@ -123,9 +151,9 @@ test_common_list_objects() {
  echo "test data 2" > "$test_file_folder"/"$object_two"
  setup_bucket "$1" "$BUCKET_ONE_NAME" || local result_one=$?
  [[ result_one -eq 0 ]] || fail "Error creating bucket"
  put_object "$1" "$test_file_folder"/$object_one "$BUCKET_ONE_NAME"/"$object_one" || local result_two=$?
  put_object "$1" "$test_file_folder"/$object_one "$BUCKET_ONE_NAME" "$object_one" || local result_two=$?
  [[ result_two -eq 0 ]] || fail "Error adding object one"
  put_object "$1" "$test_file_folder"/$object_two "$BUCKET_ONE_NAME"/"$object_two" || local result_three=$?
  put_object "$1" "$test_file_folder"/$object_two "$BUCKET_ONE_NAME" "$object_two" || local result_three=$?
  [[ result_three -eq 0 ]] || fail "Error adding object two"

  list_objects "$1" "$BUCKET_ONE_NAME"

@@ -159,14 +187,14 @@ test_common_set_get_delete_bucket_tags() {
  setup_bucket "$1" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  get_bucket_tags "$1" "$BUCKET_ONE_NAME" || local get_result=$?
  get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || local get_result=$?
  [[ $get_result -eq 0 ]] || fail "Error getting bucket tags first time"

  check_bucket_tags_empty "$1" || local check_result=$?
  check_bucket_tags_empty "$1" "$BUCKET_ONE_NAME" || local check_result=$?
  [[ $check_result -eq 0 ]] || fail "error checking if bucket tags are empty"

  put_bucket_tag "$1" "$BUCKET_ONE_NAME" $key $value
  get_bucket_tags "$1" "$BUCKET_ONE_NAME" || local get_result_two=$?
  get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || local get_result_two=$?
  [[ $get_result_two -eq 0 ]] || fail "Error getting bucket tags second time"

  local tag_set_key

@@ -184,10 +212,10 @@ test_common_set_get_delete_bucket_tags() {
  fi
  delete_bucket_tags "$1" "$BUCKET_ONE_NAME"

  get_bucket_tags "$1" "$BUCKET_ONE_NAME" || local get_result=$?
  get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || local get_result=$?
  [[ $get_result -eq 0 ]] || fail "Error getting bucket tags third time"

  check_bucket_tags_empty "$1" || local check_result=$?
  check_bucket_tags_empty "$1" "$BUCKET_ONE_NAME" || local check_result=$?
  [[ $check_result -eq 0 ]] || fail "error checking if bucket tags are empty"
  delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
}
@@ -206,27 +234,26 @@ test_common_set_get_object_tags() {
  [[ $created -eq 0 ]] || fail "Error creating test files"
  setup_bucket "$1" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
  local object_path="$BUCKET_ONE_NAME"/"$bucket_file"
  put_object "$1" "$test_file_folder"/"$bucket_file" "$object_path" || local put_object=$?
  [[ $put_object -eq 0 ]] || fail "Failed to add object to bucket '$BUCKET_ONE_NAME'"
  put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local copy_result=$?
  [[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket '$BUCKET_ONE_NAME'"

  get_object_tags "$1" "$BUCKET_ONE_NAME" $bucket_file || local get_result=$?
  [[ $get_result -eq 0 ]] || fail "Error getting object tags"
  if [[ $1 == 'aws' ]]; then
    tag_set=$(echo "$tags" | jq '.TagSet')
    [[ $tag_set == "[]" ]] || fail "Error: tags not empty"
  elif [[ ! $tags == *"No tags found"* ]]; then
    [[ $tag_set == "[]" ]] || [[ $tag_set == "" ]] || fail "Error: tags not empty"
  elif [[ $tags != *"No tags found"* ]] && [[ $tags != "" ]]; then
    fail "no tags found (tags: $tags)"
  fi

  put_object_tag "$1" "$BUCKET_ONE_NAME" $bucket_file $key $value
  get_object_tags "$1" "$BUCKET_ONE_NAME" $bucket_file || local get_result_two=$?
  get_object_tags "$1" "$BUCKET_ONE_NAME" "$bucket_file" || local get_result_two=$?
  [[ $get_result_two -eq 0 ]] || fail "Error getting object tags"
  if [[ $1 == 'aws' ]]; then
    tag_set_key=$(echo "$tags" | jq '.TagSet[0].Key')
    tag_set_value=$(echo "$tags" | jq '.TagSet[0].Value')
    [[ $tag_set_key == '"'$key'"' ]] || fail "Key mismatch"
    [[ $tag_set_value == '"'$value'"' ]] || fail "Value mismatch"
    tag_set_key=$(echo "$tags" | jq -r '.TagSet[0].Key')
    tag_set_value=$(echo "$tags" | jq -r '.TagSet[0].Value')
    [[ $tag_set_key == "$key" ]] || fail "Key mismatch"
    [[ $tag_set_value == "$value" ]] || fail "Value mismatch"
  else
    read -r tag_set_key tag_set_value <<< "$(echo "$tags" | awk 'NR==2 {print $1, $3}')"
    [[ $tag_set_key == "$key" ]] || fail "Key mismatch"

@@ -237,28 +264,7 @@ test_common_set_get_object_tags() {
  delete_test_files $bucket_file
}
test_common_multipart_upload() {
  if [[ $# -ne 1 ]]; then
    echo "multipart upload command missing command type"
    return 1
  fi
  bucket_file="largefile"

  create_large_file "$bucket_file" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test file for multipart upload"

  setup_bucket "$1" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  put_object "$1" "$test_file_folder"/$bucket_file "$BUCKET_ONE_NAME/$bucket_file" || local put_result=$?
  [[ $put_result -eq 0 ]] || fail "failed to copy file"

  delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}
test_common_presigned_url_utf8_chars() {

  if [[ $# -ne 1 ]]; then
    echo "presigned url command missing command type"
    return 1

@@ -273,7 +279,7 @@ test_common_presigned_url_utf8_chars() {
  setup_bucket "$1" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME"/"$bucket_file" || put_result=$?
  put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || put_result=$?
  [[ $put_result -eq 0 ]] || fail "Failed to add object $bucket_file"

  create_presigned_url "$1" "$BUCKET_ONE_NAME" "$bucket_file" || presigned_result=$?

@@ -313,3 +319,101 @@ test_common_list_objects_file_count() {
  [[ $file_count == 1001 ]] || fail "file count should be 1001, is $file_count"
  delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
}
test_common_delete_object_tagging() {
  [[ $# -eq 1 ]] || fail "test common delete object tagging requires command type"

  bucket_file="bucket_file"
  tag_key="key"
  tag_value="value"

  create_test_files "$bucket_file" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test files"

  setup_bucket "$1" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"

  put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local copy_result=$?
  [[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"

  put_object_tag "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || put_result=$?
  [[ $put_result -eq 0 ]] || fail "failed to add tags to object"

  get_and_verify_object_tags "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || get_result=$?
  [[ $get_result -eq 0 ]] || fail "failed to get tags"

  delete_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" || delete_result=$?
  [[ $delete_result -eq 0 ]] || fail "error deleting object tagging"

  check_object_tags_empty "$1" "$BUCKET_ONE_NAME" "$bucket_file" || get_result=$?
  [[ $get_result -eq 0 ]] || fail "failed to get tags"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files "$bucket_file"
}
test_common_get_bucket_location() {
  [[ $# -eq 1 ]] || fail "test common get bucket location missing command type"
  setup_bucket "$1" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"
  get_bucket_location "$1" "$BUCKET_ONE_NAME"
  # shellcheck disable=SC2154
  [[ $bucket_location == "null" ]] || [[ $bucket_location == "us-east-1" ]] || fail "wrong location: '$bucket_location'"
}
test_common_get_put_delete_bucket_policy() {
  [[ $# -eq 1 ]] || fail "get/put/delete policy test requires command type"

  policy_file="policy_file"

  create_test_files "$policy_file" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating policy file"

  effect="Allow"
  principal="*"
  action="s3:GetObject"
  resource="arn:aws:s3:::$BUCKET_ONE_NAME/*"

  cat <<EOF > "$test_file_folder"/$policy_file
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "$effect",
      "Principal": "$principal",
      "Action": "$action",
      "Resource": "$resource"
    }
  ]
}
EOF

  setup_bucket "$1" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"

  check_for_empty_policy "$1" "$BUCKET_ONE_NAME" || check_result=$?
  [[ $check_result -eq 0 ]] || fail "policy not empty"

  put_bucket_policy "$1" "$BUCKET_ONE_NAME" "$test_file_folder"/"$policy_file" || put_result=$?
  [[ $put_result -eq 0 ]] || fail "error putting bucket policy"

  get_bucket_policy "$1" "$BUCKET_ONE_NAME" || local get_result=$?
  [[ $get_result -eq 0 ]] || fail "error getting bucket policy after setting"

  returned_effect=$(echo "$bucket_policy" | jq -r '.Statement[0].Effect')
  [[ $effect == "$returned_effect" ]] || fail "effect mismatch ($effect, $returned_effect)"
  returned_principal=$(echo "$bucket_policy" | jq -r '.Statement[0].Principal')
  [[ $principal == "$returned_principal" ]] || fail "principal mismatch ($principal, $returned_principal)"
  returned_action=$(echo "$bucket_policy" | jq -r '.Statement[0].Action')
  [[ $action == "$returned_action" ]] || fail "action mismatch ($action, $returned_action)"
  returned_resource=$(echo "$bucket_policy" | jq -r '.Statement[0].Resource')
  [[ $resource == "$returned_resource" ]] || fail "resource mismatch ($resource, $returned_resource)"

  delete_bucket_policy "$1" "$BUCKET_ONE_NAME" || delete_result=$?
  [[ $delete_result -eq 0 ]] || fail "error deleting policy"

  check_for_empty_policy "$1" "$BUCKET_ONE_NAME" || check_result=$?
  [[ $check_result -eq 0 ]] || fail "policy not empty after deletion"

  delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
}
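
As a manual cross-check of what this test asserts, the stored policy can be fetched and decoded outside the harness; a sketch assuming the aws CLI is already pointed at the gateway (GetBucketPolicy returns the policy as a JSON-encoded string, hence the --output text before jq):

  aws --no-verify-ssl s3api get-bucket-policy --bucket "$BUCKET_ONE_NAME" \
    --query Policy --output text | jq '.Statement[0]'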
@@ -3,9 +3,16 @@
source ./tests/test_common.sh
source ./tests/setup.sh
source ./tests/util_bucket_create.sh
source ./tests/commands/delete_bucket_policy.sh
source ./tests/commands/get_bucket_policy.sh
source ./tests/commands/put_bucket_policy.sh

export RUN_MC=true

@test "test_multipart_upload_mc" {
  test_common_multipart_upload "mc"
}

# test mc bucket creation/deletion
@test "test_create_delete_bucket_mc" {
  test_common_create_delete_bucket "mc"

@@ -35,10 +42,6 @@ export RUN_MC=true
  test_common_set_get_object_tags "mc"
}

@test "test_multipart_upload_mc" {
  test_common_multipart_upload "mc"
}

@test "test_presigned_url_utf8_chars_mc" {
  test_common_presigned_url_utf8_chars "mc"
}

@@ -76,3 +79,15 @@ export RUN_MC=true
  [[ $bucket_info == *"does not exist"* ]] || fail "404 not returned for non-existent bucket info"
  delete_bucket_or_contents "mc" "$BUCKET_ONE_NAME"
}

@test "test_delete_object_tagging" {
  test_common_delete_object_tagging "mc"
}

@test "test_get_bucket_location" {
  test_common_get_bucket_location "mc"
}

@test "test_get_put_delete_bucket_policy" {
  test_common_get_put_delete_bucket_policy "mc"
}
19 tests/test_s3.sh (executable file)
@@ -0,0 +1,19 @@
#!/usr/bin/env bats

source ./tests/test_common.sh

@test "test_multipart_upload" {
  test_common_multipart_upload "s3"
}

@test "test_put_object" {
  test_common_put_object_no_data "s3"
}

@test "test_list_buckets" {
  test_common_list_buckets "s3"
}

@test "test_list_objects_file_count" {
  test_common_list_objects_file_count "s3"
}
@@ -4,20 +4,27 @@ source ./tests/setup.sh
source ./tests/test_common.sh
source ./tests/util.sh
source ./tests/util_bucket_create.sh
source ./tests/commands/delete_bucket_policy.sh
source ./tests/commands/get_bucket_policy.sh
source ./tests/commands/put_bucket_policy.sh

export RUN_S3CMD=true

@test "test_multipart_upload_s3cmd" {
  test_common_multipart_upload "s3cmd"
}

# test s3cmd bucket creation/deletion
@test "test_create_delete_bucket_s3cmd" {
  test_common_create_delete_bucket "s3cmd"
}

# test s3cmd put object
@test "test_put_object_with_data_s3cmd" {
@test "test_copy_object_with_data" {
  test_common_put_object_with_data "s3cmd"
}

@test "test_put_object_no_data_s3cmd" {
@test "test_copy_object_no_data" {
  test_common_put_object_no_data "s3cmd"
}

@@ -30,10 +37,6 @@ export RUN_S3CMD=true
  test_common_list_objects "s3cmd"
}

@test "test_multipart_upload_s3cmd" {
  test_common_multipart_upload "s3cmd"
}

#@test "test_presigned_url_utf8_chars_s3cmd" {
#  test_common_presigned_url_utf8_chars "s3cmd"
#}

@@ -71,3 +74,11 @@ export RUN_S3CMD=true
  [[ $bucket_info == *"404"* ]] || fail "404 not returned for non-existent bucket info"
  delete_bucket_or_contents "s3cmd" "$BUCKET_ONE_NAME"
}

@test "test_get_bucket_location" {
  test_common_get_bucket_location "s3cmd"
}

@test "test_get_put_delete_bucket_policy" {
  test_common_get_put_delete_bucket_policy "s3cmd"
}
424 tests/util.sh
@@ -1,37 +1,16 @@
#!/usr/bin/env bash

source ./tests/util_bucket_create.sh
source ./tests/util_mc.sh
source ./tests/logger.sh

# delete an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
delete_bucket() {
  if [ $# -ne 2 ]; then
    echo "delete bucket missing command type, bucket name"
    return 1
  fi

  local exit_code=0
  local error
  if [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3 rb s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure rb "$MC_ALIAS/$2" 2>&1) || exit_code=$?
  else
    echo "Invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    if [[ "$error" == *"The specified bucket does not exist"* ]]; then
      return 0
    else
      echo "error deleting bucket: $error"
      return 1
    fi
  fi
  return 0
}
source ./tests/commands/abort_multipart_upload.sh
source ./tests/commands/create_bucket.sh
source ./tests/commands/delete_bucket.sh
source ./tests/commands/delete_object.sh
source ./tests/commands/get_bucket_tagging.sh
source ./tests/commands/head_bucket.sh
source ./tests/commands/head_object.sh
source ./tests/commands/list_objects.sh
# recursively delete an AWS bucket
# param: bucket name

@@ -44,8 +23,10 @@ delete_bucket_recursive() {

  local exit_code=0
  local error
  if [[ $1 == "aws" ]]; then
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 rb s3://"$2" --force 2>&1) || exit_code="$?"
  elif [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then
    delete_bucket_recursive_s3api "$2" 2>&1 || exit_code="$?"
  elif [[ $1 == "s3cmd" ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rb s3://"$2" --recursive 2>&1) || exit_code="$?"
  elif [[ $1 == "mc" ]]; then

@@ -66,6 +47,33 @@ delete_bucket_recursive() {
  return 0
}

delete_bucket_recursive_s3api() {
  if [[ $# -ne 1 ]]; then
    echo "delete bucket recursive command for s3api requires bucket name"
    return 1
  fi
  list_objects 's3api' "$1" || list_result=$?
  if [[ $list_result -ne 0 ]]; then
    echo "error listing objects"
    return 1
  fi
  # shellcheck disable=SC2154
  for object in "${object_array[@]}"; do
    delete_object 's3api' "$1" "$object" || delete_result=$?
    if [[ $delete_result -ne 0 ]]; then
      echo "error deleting object $object"
      return 1
    fi
  done

  delete_bucket 's3api' "$1" || delete_result=$?
  if [[ $delete_result -ne 0 ]]; then
    echo "error deleting bucket"
    return 1
  fi
  return 0
}
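
delete_bucket_recursive_s3api drains a bucket object-by-object (via the object_array exported by list_objects) before deleting the bucket itself. A sketch of calling it directly in a cleanup path (bucket name assumed to exist):

  delete_bucket_recursive_s3api "$BUCKET_ONE_NAME" || echo "bucket cleanup failed"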
# delete contents of a bucket
# param: command type, bucket name
# return 0 for success, 1 for failure

@@ -103,28 +111,14 @@ bucket_exists() {
    return 2
  fi

  local exit_code=0
  local error
  if [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3 ls s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    # NOTE: s3cmd sometimes takes longer with direct connection
    sleep 1
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure ls "$MC_ALIAS/$2" 2>&1) || exit_code=$?
  else
    echo "invalid command type: $1"
    return 2
  fi

  if [ $exit_code -ne 0 ]; then
    if [[ "$error" == *"does not exist"* ]] || [[ "$error" == *"Access Denied"* ]]; then
  head_bucket "$1" "$2" || local check_result=$?
  if [[ $check_result -ne 0 ]]; then
    # shellcheck disable=SC2154
    if [[ "$bucket_info" == *"404"* ]] || [[ "$bucket_info" == *"does not exist"* ]]; then
      return 1
    else
      echo "error checking if bucket exists: $error"
      return 2
    fi
    echo "error checking if bucket exists"
    return 2
  fi
  return 0
}
@@ -197,18 +191,28 @@ setup_bucket() {
# param: command, object path
# return 0 for true, 1 for false, 2 for error
object_exists() {
  if [ $# -ne 2 ]; then
    echo "object exists check missing command, object name"
  if [ $# -ne 3 ]; then
    echo "object exists check missing command, bucket name, object name"
    return 2
  fi
  head_object "$1" "$2" "$3" || head_result=$?
  if [[ $head_result -eq 2 ]]; then
    echo "error checking if object exists"
    return 2
  fi
  return $head_result

  return 0
  local exit_code=0
  local error=""
  if [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3 ls s3://"$2" 2>&1) || exit_code="$?"
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 ls "s3://$2/$3" 2>&1) || exit_code="$?"
  elif [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
    error=$(aws --no-verify-ssl s3api head-object --bucket "$2" --key "$3" 2>&1) || exit_code="$?"
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3://"$2" 2>&1) || exit_code="$?"
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3://"$2/$3" 2>&1) || exit_code="$?"
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure ls "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
    error=$(mc --insecure ls "$MC_ALIAS/$2/$3" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 2

@@ -227,44 +231,16 @@ object_exists() {
  return 0
}
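
object_exists now takes the command type, bucket, and key as separate arguments and keeps the 0/1/2 convention (exists / missing / error). Typical use in a test, with a hypothetical key:

  exists=0
  object_exists "s3api" "$BUCKET_ONE_NAME" "my-obj" || exists=$?
  [[ $exists -ne 2 ]] || fail "error checking whether object exists"
  [[ $exists -eq 1 ]] || fail "object still present"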
# add object to versitygw
# params: source file, destination copy location
# return 0 for success, 1 for failure
put_object() {
  if [ $# -ne 3 ]; then
    echo "put object command requires command type, source, destination"
    return 1
  fi
  local exit_code=0
  local error
  if [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3 cp "$2" s3://"$3" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate put "$2" s3://"$(dirname "$3")" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure cp "$2" "$MC_ALIAS"/"$(dirname "$3")" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  log 5 "put object exit code: $exit_code"
  if [ $exit_code -ne 0 ]; then
    echo "error copying object to bucket: $error"
    return 1
  fi
  return 0
}

put_object_with_metadata() {
  if [ $# -ne 5 ]; then
    echo "put object command requires command type, source, destination, key, value"
  if [ $# -ne 6 ]; then
    echo "put object command requires command type, source, destination, key, metadata key, metadata value"
    return 1
  fi

  local exit_code=0
  local error
  if [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3api put-object --bucket "$3" --key "$2" --body "$2" --metadata "{\"$4\":\"$5\"}") || exit_code=$?
    error=$(aws --no-verify-ssl s3api put-object --body "$2" --bucket "$3" --key "$4" --metadata "{\"$5\":\"$6\"}") || exit_code=$?
  else
    echo "invalid command type $1"
    return 1
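
The widened put_object_with_metadata signature separates the object key from the source path. An illustrative call matching the aws branch above (the metadata key/value pair is arbitrary):

  put_object_with_metadata "aws" "$test_file_folder/$object_one" "$BUCKET_ONE_NAME" "$object_one" "owner" "tester" \
    || fail "failed to put object with metadata"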
@@ -308,7 +284,7 @@ put_object_multiple() {
  fi
  local exit_code=0
  local error
  if [[ $1 == 'aws' ]]; then
  if [[ $1 == 'aws' ]] || [[ $1 == 's3' ]]; then
    # shellcheck disable=SC2086
    error=$(aws --no-verify-ssl s3 cp "$(dirname "$2")" s3://"$3" --recursive --exclude="*" --include="$2" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then

@@ -334,18 +310,18 @@ put_object_multiple() {
# params: source file, destination copy location
# return 0 for success or already exists, 1 for failure
check_and_put_object() {
  if [ $# -ne 2 ]; then
    echo "check and put object function requires source, destination"
  if [ $# -ne 3 ]; then
    echo "check and put object function requires source, bucket, destination"
    return 1
  fi
  object_exists "aws" "$2" || local exists_result=$?
  object_exists "aws" "$2" "$3" || local exists_result=$?
  if [ "$exists_result" -eq 2 ]; then
    echo "error checking if object exists"
    return 1
  fi
  if [ "$exists_result" -eq 1 ]; then
    put_object "$1" "$2" || local put_result=$?
    if [ "$put_result" -ne 0 ]; then
    copy_object "$1" "$2" || local copy_result=$?
    if [ "$copy_result" -ne 0 ]; then
      echo "error adding object"
      return 1
    fi

@@ -353,69 +329,6 @@ check_and_put_object() {
  return 0
}
# delete object from versitygw
# params: command type, object path (including bucket name)
# return 0 for success, 1 for failure
delete_object() {
  if [ $# -ne 2 ]; then
    echo "delete object command requires command type, object parameter"
    return 1
  fi
  local exit_code=0
  local error
  if [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3 rm s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure rm "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    echo "error deleting object: $error"
    return 1
  fi
  return 0
}

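Editor's usage sketch, not part of this commit: removing an object put earlier in a test; the path is a hypothetical example.

  # hypothetical bucket/key path
  delete_object "aws" "my-bucket/my-key" || fail "error deleting object"
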
# list buckets on versitygw
# params: format (aws, s3cmd, mc)
# export bucket_array (bucket names) on success, return 1 for failure
list_buckets() {
  if [[ $# -ne 1 ]]; then
    echo "List buckets command missing format"
    return 1
  fi

  local exit_code=0
  local output
  if [[ $1 == "aws" ]]; then
    output=$(aws --no-verify-ssl s3 ls s3:// 2>&1) || exit_code=$?
  elif [[ $1 == "s3cmd" ]]; then
    output=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3:// 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    output=$(mc --insecure ls "$MC_ALIAS" 2>&1) || exit_code=$?
  else
    echo "invalid format: $1"
    return 1
  fi

  if [ $exit_code -ne 0 ]; then
    echo "error listing buckets: $output"
    return 1
  fi

  bucket_array=()
  while IFS= read -r line; do
    bucket_name=$(echo "$line" | awk '{print $NF}')
    bucket_array+=("${bucket_name%/}")
  done <<< "$output"

  export bucket_array
}

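Editor's usage sketch, not part of this commit: scanning the exported bucket_array for a hypothetical bucket name.

  # "my-bucket" is a hypothetical name
  list_buckets "aws" || fail "error listing buckets"
  found=false
  for bucket in "${bucket_array[@]}"; do
    [[ $bucket != "my-bucket" ]] || found=true
  done
  [[ $found == true ]] || fail "bucket not found in listing"
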
list_buckets_with_user() {
  if [[ $# -ne 3 ]]; then
    echo "List buckets command missing format, user id, key"
@@ -445,42 +358,6 @@ list_buckets_with_user() {
  export bucket_array
}

# list objects on versitygw, in bucket or folder
# params: command type, path of bucket or folder
# export object_array (object names) on success, return 1 for failure
list_objects() {
  if [ $# -ne 2 ]; then
    echo "list objects command requires command type, and bucket or folder"
    return 1
  fi
  local exit_code=0
  local output
  if [[ $1 == "aws" ]]; then
    output=$(aws --no-verify-ssl s3 ls s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    output=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    output=$(mc --insecure ls "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    echo "error listing objects: $output"
    return 1
  fi

  object_array=()
  while IFS= read -r line; do
    if [[ $line != *InsecureRequestWarning* ]]; then
      object_name=$(echo "$line" | awk '{print $NF}')
      object_array+=("$object_name")
    fi
  done <<< "$output"

  export object_array
}

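Editor's usage sketch, not part of this commit: checking the exported object_array after a listing; the bucket name and expected count are hypothetical.

  # hypothetical bucket and count
  list_objects "aws" "my-bucket" || fail "error listing objects"
  [[ ${#object_array[@]} -eq 2 ]] || fail "expected 2 objects, found ${#object_array[@]}"
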
remove_insecure_request_warning() {
  if [[ $# -ne 1 ]]; then
    echo "remove insecure request warning requires input lines"
@@ -596,40 +473,12 @@ put_bucket_tag() {
  return 0
}

# get bucket tags
# params: command type, bucket
# export 'tags' on success, return 1 for error
get_bucket_tags() {
  if [ $# -ne 2 ]; then
    echo "get bucket tag command missing command type, bucket name"
    return 1
  fi
  local result
  if [[ $1 == 'aws' ]]; then
    tags=$(aws --no-verify-ssl s3api get-bucket-tagging --bucket "$2" 2>&1) || result=$?
  elif [[ $1 == 'mc' ]]; then
    tags=$(mc --insecure tag list "$MC_ALIAS"/"$2" 2>&1) || result=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  log 5 "Tags: $tags"
  tags=$(echo "$tags" | grep -v "InsecureRequestWarning")
  if [[ $result -ne 0 ]]; then
    if [[ $tags =~ "No tags found" ]] || [[ $tags =~ "The TagSet does not exist" ]]; then
      export tags=
      return 0
    fi
    echo "error getting bucket tags: $tags"
    return 1
  fi
  export tags
}

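Editor's usage sketch, not part of this commit: after a successful call, $tags holds the raw client output (empty when no tags are set), which aws-format tests can parse with jq as get_and_verify_object_tags does below.

  # hypothetical bucket name
  get_bucket_tags "aws" "my-bucket" || fail "error getting bucket tags"
  tag_value=$(echo "$tags" | jq -r '.TagSet[0].Value')
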
-check_bucket_tags_empty() {
+check_tags_empty() {
  if [[ $# -ne 1 ]]; then
-    echo "bucket tags empty check requires command type"
-    return 2
+    echo "check tags empty requires command type"
+    return 1
  fi
  if [[ $1 == 'aws' ]]; then
    if [[ $tags != "" ]]; then
@@ -648,6 +497,34 @@ check_bucket_tags_empty() {
  return 0
}

check_object_tags_empty() {
  if [[ $# -ne 3 ]]; then
    echo "object tags empty check requires command type, bucket, and key"
    return 2
  fi
  get_object_tags "$1" "$2" "$3" || get_result=$?
  if [[ $get_result -ne 0 ]]; then
    echo "failed to get tags"
    return 2
  fi
  check_tags_empty "$1" || local check_result=$?
  return $check_result
}

check_bucket_tags_empty() {
  if [[ $# -ne 2 ]]; then
    echo "bucket tags empty check requires command type, bucket"
    return 2
  fi
  get_bucket_tagging "$1" "$2" || get_result=$?
  if [[ $get_result -ne 0 ]]; then
    echo "failed to get tags"
    return 2
  fi
  check_tags_empty "$1" || local check_result=$?
  return $check_result
}

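Editor's usage sketch, not part of this commit: the emptiness checks return 0 when tags are empty and 2 for a lookup error, so a test can distinguish failure from a non-empty tag set; the bucket name is hypothetical.

  # hypothetical bucket name
  check_bucket_tags_empty "aws" "my-bucket" || tags_result=$?
  [[ ${tags_result:-0} -ne 2 ]] || fail "error checking bucket tags"
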
delete_bucket_tags() {
  if [ $# -ne 2 ]; then
    echo "delete bucket tag command missing command type, bucket name"
@@ -690,6 +567,35 @@ put_object_tag() {
  return 0
}

get_and_verify_object_tags() {
  if [[ $# -ne 5 ]]; then
    echo "get and verify object tags missing command type, bucket, key, tag key, tag value"
    return 1
  fi
  get_object_tags "$1" "$2" "$3" || get_result=$?
  if [[ $get_result -ne 0 ]]; then
    echo "failed to get tags"
    return 1
  fi
  if [[ $1 == 'aws' ]]; then
    tag_set_key=$(echo "$tags" | jq '.TagSet[0].Key')
    tag_set_value=$(echo "$tags" | jq '.TagSet[0].Value')
    if [[ $tag_set_key != '"'$4'"' ]]; then
      echo "Key mismatch ($tag_set_key, \"$4\")"
      return 1
    fi
    if [[ $tag_set_value != '"'$5'"' ]]; then
      echo "Value mismatch ($tag_set_value, \"$5\")"
      return 1
    fi
  else
    read -r tag_set_key tag_set_value <<< "$(echo "$tags" | awk 'NR==2 {print $1, $3}')"
    [[ $tag_set_key == "$4" ]] || fail "Key mismatch"
    [[ $tag_set_value == "$5" ]] || fail "Value mismatch"
  fi
  return 0
}

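Editor's usage sketch, not part of this commit: round-tripping a tag through put_object_tag (touched in the hunk above) and the new verifier; the argument order for put_object_tag is an assumption inferred from its siblings, and all names are hypothetical.

  # assumed signature: command type, bucket, key, tag key, tag value
  put_object_tag "aws" "my-bucket" "my-key" "environment" "test" || fail "error adding tag"
  get_and_verify_object_tags "aws" "my-bucket" "my-key" "environment" "test" || fail "tag mismatch"
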
# get object tags
# params: command type, bucket, key
# export 'tags' on success, return 1 for error
@@ -708,11 +614,16 @@ get_object_tags() {
    return 1
  fi
  if [[ $result -ne 0 ]]; then
-    echo "error getting object tags: $tags"
-    return 1
+    if [[ "$tags" == *"NoSuchTagSet"* ]] || [[ "$tags" == *"No tags found"* ]]; then
+      tags=
+    else
+      echo "error getting object tags: $tags"
+      return 1
+    fi
+  else
+    log 5 "$tags"
+    tags=$(echo "$tags" | grep -v "InsecureRequestWarning")
  fi
-  log 5 "$tags"
-  tags=$(echo "$tags" | grep -v "InsecureRequestWarning")
  export tags
}

@@ -852,29 +763,12 @@ multipart_upload() {
  return 0
}

# run the abort multipart command
# params: bucket, key, upload ID
# return 0 for success, 1 for failure
run_abort_command() {
  if [ $# -ne 3 ]; then
    echo "command to run abort requires bucket, key, upload ID"
    return 1
  fi

  error=$(aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3") || local aborted=$?
  if [[ $aborted -ne 0 ]]; then
    echo "Error aborting upload: $error"
    return 1
  fi
  return 0
}

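Editor's usage sketch, not part of this commit: aborting an in-progress upload; $upload_id would come from an earlier create-multipart-upload step, and the names are hypothetical.

  # hypothetical bucket/key; upload_id set by an earlier step
  run_abort_command "my-bucket" "my-key" "$upload_id" || fail "error aborting upload"
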
# run upload, then abort it
# params: bucket, key, local file location, number of parts to split into before uploading
# return 0 for success, 1 for failure
-abort_multipart_upload() {
+run_then_abort_multipart_upload() {
  if [ $# -ne 4 ]; then
-    echo "abort multipart upload command missing bucket, key, file, and/or part count"
+    echo "run then abort multipart upload command missing bucket, key, file, and/or part count"
    return 1
  fi

@@ -884,7 +778,7 @@ abort_multipart_upload() {
    return 1
  fi

-  run_abort_command "$1" "$2" "$upload_id"
+  abort_multipart_upload "$1" "$2" "$upload_id"
  return $?
}

@@ -974,9 +868,10 @@ multipart_upload_from_bucket() {
  fi

  for ((i=0;i<$4;i++)) {
-    put_object "aws" "$3"-"$i" "$1" || put_result=$?
-    if [[ $put_result -ne 0 ]]; then
-      echo "error putting object"
+    log 5 "key: $3"
+    put_object "s3api" "$3-$i" "$1" "$2-$i" || copy_result=$?
+    if [[ $copy_result -ne 0 ]]; then
+      echo "error copying object"
      return 1
    fi
  }
@@ -1017,6 +912,7 @@ upload_part_copy() {
    return 1
  fi
  local etag_json
+  log 5 "$1 $2 $3 $4 $5"
  etag_json=$(aws --no-verify-ssl s3api upload-part-copy --bucket "$1" --key "$2" --upload-id "$3" --part-number "$5" --copy-source "$1/$4-$(($5-1))") || local uploaded=$?
  if [[ $uploaded -ne 0 ]]; then
    echo "Error uploading part $5: $etag_json"
@@ -1050,27 +946,3 @@ create_presigned_url() {
  fi
  export presigned_url
}

head_bucket() {
  if [ $# -ne 2 ]; then
    echo "head bucket command missing command type, bucket name"
    return 1
  fi
  local exit_code=0
  local error
  if [[ $1 == "aws" ]]; then
    bucket_info=$(aws --no-verify-ssl s3api head-bucket --bucket "$2" 2>&1) || exit_code=$?
  elif [[ $1 == "s3cmd" ]]; then
    bucket_info=$(s3cmd --no-check-certificate info "s3://$2" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    bucket_info=$(mc --insecure stat "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    echo "error getting bucket info: $bucket_info"
    return 1
  fi
  export bucket_info
}

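Editor's usage sketch, not part of this commit: head_bucket exports $bucket_info for the caller to inspect; the bucket name is hypothetical.

  # hypothetical bucket name
  head_bucket "aws" "my-bucket" || fail "error getting bucket info"
  log 5 "bucket info: $bucket_info"
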
@@ -3,34 +3,6 @@
source ./tests/util_mc.sh
source ./tests/logger.sh

# create an AWS bucket
# params: command type, bucket name
# return 0 for success, 1 for failure
create_bucket() {
  if [ $# -ne 2 ]; then
    echo "create bucket missing command type, bucket name"
    return 1
  fi

  local exit_code=0
  local error
  if [[ $1 == "aws" ]]; then
    error=$(aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == "s3cmd" ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == "mc" ]]; then
    error=$(mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    echo "error creating bucket: $error"
    return 1
  fi
  return 0
}

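Editor's usage sketch, not part of this commit: creating a bucket with each client under test; the names are hypothetical.

  # hypothetical bucket names
  create_bucket "aws" "versity-gwtest-bucket" || fail "error creating bucket"
  create_bucket "s3cmd" "versity-gwtest-bucket-two" || fail "error creating bucket"
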
create_bucket_with_user() {
  if [ $# -ne 4 ]; then
    echo "create bucket missing command type, bucket name, access, secret"
@@ -61,8 +33,10 @@ create_bucket_invalid_name() {
    return 1
  fi
  local exit_code=0
-  if [[ $1 == "aws" ]]; then
+  if [[ $1 == "aws" ]] || [[ $1 == 's3' ]]; then
    bucket_create_error=$(aws --no-verify-ssl s3 mb "s3://" 2>&1) || exit_code=$?
+  elif [[ $1 == 's3api' ]]; then
+    bucket_create_error=$(aws --no-verify-ssl s3api create-bucket --bucket "s3://" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    bucket_create_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb "s3://" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then

tests/util_policy.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/usr/bin/env bash

check_for_empty_policy() {
  if [[ $# -ne 2 ]]; then
    echo "check for empty policy command requires command type, bucket name"
    return 1
  fi

  local get_result=0
  get_bucket_policy "$1" "$2" || get_result=$?
  if [[ $get_result -ne 0 ]]; then
    echo "error getting bucket policy"
    return 1
  fi

  # shellcheck disable=SC2154
  if [[ $bucket_policy == "" ]]; then
    return 0
  fi

  policy=$(echo "$bucket_policy" | jq -r '.Policy')
  statement=$(echo "$policy" | jq -r '.Statement[0]')
  if [[ "" != "$statement" ]] && [[ "null" != "$statement" ]]; then
    echo "policy should be empty (actual value: '$statement')"
    return 1
  fi
  return 0
}
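Editor's usage sketch, not part of this commit: a freshly created bucket should report an empty policy; the bucket name is hypothetical and create_bucket is the helper above.

  # hypothetical bucket name
  create_bucket "aws" "my-bucket" || fail "error creating bucket"
  check_for_empty_policy "aws" "my-bucket" || fail "policy not empty"
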
@@ -1,97 +0,0 @@
#!/usr/bin/env bats

# check if object exists both on S3 and locally
# param: object path
# return 0 for yes, 1 for no, 2 for error
object_exists_remote_and_local() {
  if [ $# -ne 1 ]; then
    echo "object existence check requires single name parameter"
    return 2
  fi
  object_exists "aws" "$1" || local exist_result=$?
  if [[ $exist_result -eq 2 ]]; then
    echo "Error checking if object exists"
    return 2
  fi
  if [[ $exist_result -eq 1 ]]; then
    echo "Error: object doesn't exist remotely"
    return 1
  fi
  if [[ ! -e "$LOCAL_FOLDER"/"$1" ]]; then
    echo "Error: object doesn't exist locally"
    return 1
  fi
  return 0
}

# check if object doesn't exist both on S3 and locally
# param: object path
# return 0 for doesn't exist, 1 for still exists, 2 for error
object_not_exists_remote_and_local() {
  if [ $# -ne 1 ]; then
    echo "object non-existence check requires single name parameter"
    return 2
  fi
  object_exists "aws" "$1" || local exist_result=$?
  if [[ $exist_result -eq 2 ]]; then
    echo "Error checking if object doesn't exist"
    return 2
  fi
  if [[ $exist_result -eq 0 ]]; then
    echo "Error: object exists remotely"
    return 1
  fi
  if [[ -e "$LOCAL_FOLDER"/"$1" ]]; then
    echo "Error: object exists locally"
    return 1
  fi
  return 0
}

# check if a bucket doesn't exist both on S3 and on gateway
# param: bucket name
# return: 0 for doesn't exist, 1 for does, 2 for error
bucket_not_exists_remote_and_local() {
  if [ $# -ne 1 ]; then
    echo "bucket existence check requires single name parameter"
    return 2
  fi
  bucket_exists "aws" "$1" || local exist_result=$?
  if [[ $exist_result -eq 2 ]]; then
    echo "Error checking if bucket exists"
    return 2
  fi
  if [[ $exist_result -eq 0 ]]; then
    echo "Error: bucket exists remotely"
    return 1
  fi
  if [[ -e "$LOCAL_FOLDER"/"$1" ]]; then
    echo "Error: bucket exists locally"
    return 1
  fi
  return 0
}

# check if a bucket exists both on S3 and on gateway
# param: bucket name
# return: 0 for yes, 1 for no, 2 for error
bucket_exists_remote_and_local() {
  if [ $# -ne 1 ]; then
    echo "bucket existence check requires single name parameter"
    return 2
  fi
  bucket_exists "aws" "$1" || local exist_result=$?
  if [[ $exist_result -eq 2 ]]; then
    echo "Error checking if bucket exists"
    return 2
  fi
  if [[ $exist_result -eq 1 ]]; then
    echo "Error: bucket doesn't exist remotely"
    return 1
  fi
  if [[ ! -e "$LOCAL_FOLDER"/"$1" ]]; then
    echo "Error: bucket doesn't exist locally"
    return 1
  fi
  return 0
}
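Editor's usage sketch, not part of this commit: how the removed helpers were called; each took a single path and compared against $LOCAL_FOLDER (presumably the gateway's local backing store).

  # hypothetical bucket/key path
  object_exists_remote_and_local "my-bucket/my-key" || exist_result=$?
  [[ ${exist_result:-0} -eq 0 ]] || fail "object missing remotely or locally"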