Mirror of https://github.com/versity/versitygw.git (synced 2026-01-28 14:02:03 +00:00)

Compare commits
54 commits
| SHA1 |
|---|
| adbc8140ed |
| ce9d3aa01a |
| 2e6bef6760 |
| 797376a235 |
| cb992a4794 |
| 61a97e94db |
| 18a8813ce7 |
| 8872e2a428 |
| b421598647 |
| cacd1d28ea |
| ad30c251bc |
| 55c7109c94 |
| 331996d3dd |
| 18a9a23f2f |
| 60c8eb795d |
| 1173ea920b |
| 370b51d327 |
| 1a3937de90 |
| ca79182c95 |
| d2b004af9a |
| 93b4926aeb |
| 12da1e2099 |
| 5e484f2355 |
| d521c66171 |
| c580947b98 |
| 733b6e7b2f |
| 23a40d86a2 |
| ed9a10a337 |
| 828eb93bee |
| 3361391506 |
| 55cf7674b8 |
| cf5d164b9f |
| 5ddc5418a6 |
| f949e2d5ea |
| a8adb471fe |
| ddd048495a |
| f6dd2f947c |
| 5d33c7bde5 |
| 2843cdbd45 |
| 85dc8e71b5 |
| 059205c174 |
| 4749c80698 |
| b87ed2ae63 |
| 2529028e22 |
| e773872c48 |
| 157f22b08b |
| e81fac9558 |
| 36738022ed |
| e2d69cfb66 |
| 68db536587 |
| f66deb9b9a |
| 7545e6236c |
| 2db2481f04 |
| 812efe6d43 |
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 18 changed lines)

@@ -1,27 +1,23 @@
---
name: Bug report
name: Bug Report
about: Create a report to help us improve
title: ''
title: '[Bug] - <Short Description>'
labels: bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.
<!-- A clear and concise description of what the bug is. -->

**To Reproduce**
Steps to reproduce the behavior.
<!-- Steps to reproduce the behavior. -->

**Expected behavior**
A clear and concise description of what you expected to happen.
<!-- A clear and concise description of what you expected to happen. -->

**Server Version**
output of
```
./versitygw -version
uname -a
```
<!-- output of: './versitygw -version && uname -a' -->

**Additional context**
Describe s3 client and version if applicable.
<!-- Describe s3 client and version if applicable.
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 8 changed lines)

@@ -1,14 +1,14 @@
---
name: Feature request
name: Feature Request
about: Suggest an idea for this project
title: ''
title: '[Feature] - <Short Description>'
labels: enhancement
assignees: ''

---

**Describe the solution you'd like**
A clear and concise description of what you want to happen.
<!-- A clear and concise description of what you want to happen. -->

**Additional context**
Add any other context or screenshots about the feature request here.
<!-- Add any other context or screenshots about the feature request here. -->
.github/ISSUE_TEMPLATE/test_case.md (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
---
name: Test Case Request
about: Request new test cases or additional test coverage
title: '[Test Case] - <Short Description>'
labels: 'testcase'
assignees: ''

---

## Description
<!-- Please provide a detailed description of the test case or test coverage request. -->

## Purpose
<!-- Explain why this test case is important and what it aims to achieve. -->

## Scope
<!-- Describe the scope of the test case, including any specific functionalities, features, or modules that should be tested. -->

## Acceptance Criteria
<!-- List the criteria that must be met for the test case to be considered complete. -->

1.
2.
3.

## Additional Context
<!-- Add any other context or screenshots about the feature request here. -->

## Resources
<!-- Provide any resources, documentation, or links that could help in writing the test case. -->

**Thank you for contributing to our project!**
.github/workflows/docker.yaml (vendored, 8 changed lines)

@@ -15,6 +15,13 @@ jobs:
- name: Checkout
uses: actions/checkout@v4

- name: Set up QEMU
uses: docker/setup-qemu-action@v3

- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3

- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
@@ -43,6 +50,7 @@ jobs:
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64
build-args: |
VERSION=${{ github.event.release.tag_name }}
TIME=${{ github.event.release.published_at }}
auth/acl.go (201 changed lines)

@@ -28,7 +28,6 @@ import (
)

type ACL struct {
ACL types.BucketCannedACL
Owner string
Grantees []Grantee
}
@@ -36,6 +35,7 @@ type ACL struct {
type Grantee struct {
Permission types.Permission
Access string
Type types.Type
}

type GetBucketAclOutput struct {
@@ -43,14 +43,38 @@ type GetBucketAclOutput struct {
AccessControlList AccessControlList
}

type AccessControlList struct {
Grants []types.Grant `xml:"Grant"`
type PutBucketAclInput struct {
Bucket *string
ACL types.BucketCannedACL
AccessControlPolicy *AccessControlPolicy
GrantFullControl *string
GrantRead *string
GrantReadACP *string
GrantWrite *string
GrantWriteACP *string
}

type AccessControlPolicy struct {
AccessControlList AccessControlList `xml:"AccessControlList"`
Owner types.Owner
}

type AccessControlList struct {
Grants []Grant `xml:"Grant"`
}

type Grant struct {
Grantee *Grt
Permission types.Permission
}

type Grt struct {
XMLNS string `xml:"xmlns:xsi,attr"`
XMLXSI types.Type `xml:"xsi:type,attr"`
Type types.Type `xml:"Type"`
ID string `xml:"ID"`
}

func ParseACL(data []byte) (ACL, error) {
if len(data) == 0 {
return ACL{}, nil
@@ -69,11 +93,19 @@ func ParseACLOutput(data []byte) (GetBucketAclOutput, error) {
return GetBucketAclOutput{}, fmt.Errorf("parse acl: %w", err)
}

grants := []types.Grant{}
grants := []Grant{}

for _, elem := range acl.Grantees {
acs := elem.Access
grants = append(grants, types.Grant{Grantee: &types.Grantee{ID: &acs}, Permission: elem.Permission})
grants = append(grants, Grant{
Grantee: &Grt{
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
XMLXSI: elem.Type,
ID: acs,
Type: elem.Type,
},
Permission: elem.Permission,
})
}

return GetBucketAclOutput{
@@ -86,20 +118,46 @@ func ParseACLOutput(data []byte) (GetBucketAclOutput, error) {
}, nil
}

func UpdateACL(input *s3.PutBucketAclInput, acl ACL, iam IAMService) ([]byte, error) {
func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool) ([]byte, error) {
if input == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if acl.Owner != *input.AccessControlPolicy.Owner.ID {
if !isAdmin && acl.Owner != *input.AccessControlPolicy.Owner.ID {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}

defaultGrantees := []Grantee{
{
Permission: types.PermissionFullControl,
Access: acl.Owner,
Type: types.TypeCanonicalUser,
},
}

// if the ACL is specified, set the ACL, else replace the grantees
if input.ACL != "" {
acl.ACL = input.ACL
acl.Grantees = []Grantee{}
switch input.ACL {
case types.BucketCannedACLPublicRead:
defaultGrantees = append(defaultGrantees, Grantee{
Permission: types.PermissionRead,
Access: "all-users",
Type: types.TypeGroup,
})
case types.BucketCannedACLPublicReadWrite:
defaultGrantees = append(defaultGrantees, []Grantee{
{
Permission: types.PermissionRead,
Access: "all-users",
Type: types.TypeGroup,
},
{
Permission: types.PermissionWrite,
Access: "all-users",
Type: types.TypeGroup,
},
}...)
}
} else {
grantees := []Grantee{}
accs := []string{}

if input.GrantRead != nil || input.GrantReadACP != nil || input.GrantFullControl != nil || input.GrantWrite != nil || input.GrantWriteACP != nil {
@@ -108,45 +166,71 @@ func UpdateACL(input *s3.PutBucketAclInput, acl ACL, iam IAMService) ([]byte, er
if input.GrantFullControl != nil && *input.GrantFullControl != "" {
fullControlList = splitUnique(*input.GrantFullControl, ",")
for _, str := range fullControlList {
grantees = append(grantees, Grantee{Access: str, Permission: "FULL_CONTROL"})
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: types.PermissionFullControl,
Type: types.TypeCanonicalUser,
})
}
}
if input.GrantRead != nil && *input.GrantRead != "" {
readList = splitUnique(*input.GrantRead, ",")
for _, str := range readList {
grantees = append(grantees, Grantee{Access: str, Permission: "READ"})
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: types.PermissionRead,
Type: types.TypeCanonicalUser,
})
}
}
if input.GrantReadACP != nil && *input.GrantReadACP != "" {
readACPList = splitUnique(*input.GrantReadACP, ",")
for _, str := range readACPList {
grantees = append(grantees, Grantee{Access: str, Permission: "READ_ACP"})
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: types.PermissionReadAcp,
Type: types.TypeCanonicalUser,
})
}
}
if input.GrantWrite != nil && *input.GrantWrite != "" {
writeList = splitUnique(*input.GrantWrite, ",")
for _, str := range writeList {
grantees = append(grantees, Grantee{Access: str, Permission: "WRITE"})
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: types.PermissionWrite,
Type: types.TypeCanonicalUser,
})
}
}
if input.GrantWriteACP != nil && *input.GrantWriteACP != "" {
writeACPList = splitUnique(*input.GrantWriteACP, ",")
for _, str := range writeACPList {
grantees = append(grantees, Grantee{Access: str, Permission: "WRITE_ACP"})
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: types.PermissionWriteAcp,
Type: types.TypeCanonicalUser,
})
}
}

accs = append(append(append(append(fullControlList, readList...), writeACPList...), readACPList...), writeList...)
} else {
cache := make(map[string]bool)
for _, grt := range input.AccessControlPolicy.Grants {
if grt.Grantee == nil || grt.Grantee.ID == nil || grt.Permission == "" {
for _, grt := range input.AccessControlPolicy.AccessControlList.Grants {
if grt.Grantee == nil || grt.Grantee.ID == "" || grt.Permission == "" {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
grantees = append(grantees, Grantee{Access: *grt.Grantee.ID, Permission: grt.Permission})
if _, ok := cache[*grt.Grantee.ID]; !ok {
cache[*grt.Grantee.ID] = true
accs = append(accs, *grt.Grantee.ID)

access := grt.Grantee.ID
defaultGrantees = append(defaultGrantees, Grantee{
Access: access,
Permission: grt.Permission,
Type: types.TypeCanonicalUser,
})
if _, ok := cache[access]; !ok {
cache[access] = true
accs = append(accs, access)
}
}
}
@@ -159,11 +243,10 @@ func UpdateACL(input *s3.PutBucketAclInput, acl ACL, iam IAMService) ([]byte, er
if len(accList) > 0 {
return nil, fmt.Errorf("accounts does not exist: %s", strings.Join(accList, ", "))
}

acl.Grantees = grantees
acl.ACL = ""
}

acl.Grantees = defaultGrantees

result, err := json.Marshal(acl)
if err != nil {
return nil, err
@@ -207,34 +290,33 @@ func splitUnique(s, divider string) []string {
}

func verifyACL(acl ACL, access string, permission types.Permission) error {
if acl.ACL != "" {
if (permission == "READ" || permission == "READ_ACP") && (acl.ACL != "public-read" && acl.ACL != "public-read-write") {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
if (permission == "WRITE" || permission == "WRITE_ACP") && acl.ACL != "public-read-write" {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
grantee := Grantee{
Access: access,
Permission: permission,
Type: types.TypeCanonicalUser,
}
granteeFullCtrl := Grantee{
Access: access,
Permission: types.PermissionFullControl,
Type: types.TypeCanonicalUser,
}
granteeAllUsers := Grantee{
Access: "all-users",
Permission: permission,
Type: types.TypeGroup,
}

isFound := false

for _, grt := range acl.Grantees {
if grt == grantee || grt == granteeFullCtrl || grt == granteeAllUsers {
isFound = true
break
}
}

if isFound {
return nil
} else {
if len(acl.Grantees) == 0 {
return nil
}
grantee := Grantee{Access: access, Permission: permission}
granteeFullCtrl := Grantee{Access: access, Permission: "FULL_CONTROL"}

isFound := false

for _, grt := range acl.Grantees {
if grt == grantee || grt == granteeFullCtrl {
isFound = true
break
}
}

if isFound {
return nil
}
}

return s3err.GetAPIError(s3err.ErrAccessDenied)
@@ -295,23 +377,16 @@ func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) e
if opts.Acc.Role == RoleAdmin {
return nil
}
if opts.Acc.Access == opts.Acl.Owner {
return nil
}

policy, policyErr := be.GetBucketPolicy(ctx, opts.Bucket)
if policyErr != nil && !errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
return policyErr
if policyErr != nil {
if !errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
return policyErr
}
} else {
return VerifyBucketPolicy(policy, opts.Acc.Access, opts.Bucket, opts.Object, opts.Action)
}

// If bucket policy is not set and the ACL is default, only the owner has access
if errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) && opts.Acl.ACL == "" && len(opts.Acl.Grantees) == 0 {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}

if err := VerifyBucketPolicy(policy, opts.Acc.Access, opts.Bucket, opts.Object, opts.Action); err != nil {
return err
}
if err := verifyACL(opts.Acl, opts.Acc.Access, opts.AclPermission); err != nil {
return err
}
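For orientation, here is a minimal caller-side sketch of the reworked UpdateACL based only on the signatures shown above; the bucket name, stored ACL bytes, and IAM service value are placeholders, not values taken from this change:

```
// Sketch only: apply a canned public-read ACL to a bucket's stored ACL.
// storedACL and iamSvc are hypothetical inputs; the owner check in UpdateACL
// requires the policy owner to match the current ACL owner unless isAdmin is true.
func applyPublicRead(storedACL []byte, iamSvc auth.IAMService) ([]byte, error) {
	acl, err := auth.ParseACL(storedACL)
	if err != nil {
		return nil, err
	}
	bucket := "example-bucket"
	owner := acl.Owner
	return auth.UpdateACL(&auth.PutBucketAclInput{
		Bucket:              &bucket,
		ACL:                 types.BucketCannedACLPublicRead,
		AccessControlPolicy: &auth.AccessControlPolicy{Owner: types.Owner{ID: &owner}},
	}, acl, iamSvc, false)
}
```

With a canned ACL the returned JSON carries the owner's FULL_CONTROL grantee plus the "all-users" READ grantee that the switch statement above appends, which is what verifyACL later matches against.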
@@ -134,11 +134,6 @@ func ValidatePolicyDocument(policyBin []byte, bucket string, iam IAMService) err
}

func VerifyBucketPolicy(policy []byte, access, bucket, object string, action Action) error {
// If bucket policy is not set
if policy == nil {
return nil
}

var bucketPolicy BucketPolicy
if err := json.Unmarshal(policy, &bucketPolicy); err != nil {
return err
@@ -55,6 +55,8 @@ const (
GetObjectRetentionAction Action = "s3:GetObjectRetention"
PutObjectRetentionAction Action = "s3:PutObjectRetention"
BypassGovernanceRetentionAction Action = "s3:BypassGovernanceRetention"
PutBucketOwnershipControlsAction Action = "s3:PutBucketOwnershipControls"
GetBucketOwnershipControlsAction Action = "s3:GetBucketOwnershipControls"
AllActions Action = "s3:*"
)

@@ -91,6 +93,8 @@ var supportedActionList = map[Action]struct{}{
GetObjectRetentionAction: {},
PutObjectRetentionAction: {},
BypassGovernanceRetentionAction: {},
PutBucketOwnershipControlsAction: {},
GetBucketOwnershipControlsAction: {},
AllActions: {},
}
@@ -53,6 +53,7 @@ type key string
const (
keyAclCapital key = "Acl"
keyAclLower key = "acl"
keyOwnership key = "Ownership"
keyTags key = "Tags"
keyPolicy key = "Policy"
keyBucketLock key = "Bucket-Lock"
@@ -127,6 +128,7 @@ func (az *Azure) String() string {
func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, acl []byte) error {
meta := map[string]*string{
string(keyAclCapital): backend.GetStringPtr(string(acl)),
string(keyOwnership): backend.GetStringPtr(string(input.ObjectOwnership)),
}

acct, ok := ctx.Value("account").(auth.Account)
@@ -251,6 +253,67 @@ func (az *Azure) DeleteBucket(ctx context.Context, input *s3.DeleteBucketInput)
return azureErrToS3Err(err)
}

func (az *Azure) PutBucketOwnershipControls(ctx context.Context, bucket string, ownership types.ObjectOwnership) error {
client, err := az.getContainerClient(bucket)
if err != nil {
return err
}

resp, err := client.GetProperties(ctx, &container.GetPropertiesOptions{})
if err != nil {
return azureErrToS3Err(err)
}
resp.Metadata[string(keyOwnership)] = backend.GetStringPtr(string(ownership))

_, err = client.SetMetadata(ctx, &container.SetMetadataOptions{Metadata: resp.Metadata})
if err != nil {
return azureErrToS3Err(err)
}

return nil
}

func (az *Azure) GetBucketOwnershipControls(ctx context.Context, bucket string) (types.ObjectOwnership, error) {
var ownship types.ObjectOwnership
client, err := az.getContainerClient(bucket)
if err != nil {
return ownship, err
}

resp, err := client.GetProperties(ctx, &container.GetPropertiesOptions{})
if err != nil {
return ownship, azureErrToS3Err(err)
}

ownership, ok := resp.Metadata[string(keyOwnership)]
if !ok {
return ownship, s3err.GetAPIError(s3err.ErrOwnershipControlsNotFound)
}

return types.ObjectOwnership(*ownership), nil
}

func (az *Azure) DeleteBucketOwnershipControls(ctx context.Context, bucket string) error {
client, err := az.getContainerClient(bucket)
if err != nil {
return err
}

resp, err := client.GetProperties(ctx, &container.GetPropertiesOptions{})
if err != nil {
return azureErrToS3Err(err)
}

delete(resp.Metadata, string(keyOwnership))

_, err = client.SetMetadata(ctx, &container.SetMetadataOptions{Metadata: resp.Metadata})
if err != nil {
return azureErrToS3Err(err)
}

return nil
}

func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, error) {
tags, err := parseTags(po.Tagging)
if err != nil {
@@ -348,7 +411,7 @@ func (az *Azure) DeleteBucketTagging(ctx context.Context, bucket string) error {
return az.PutBucketTagging(ctx, bucket, nil)
}

func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
var opts *azblob.DownloadStreamOptions
if *input.Range != "" {
offset, count, err := backend.ParseRange(0, *input.Range)
@@ -366,12 +429,6 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput, writer
if err != nil {
return nil, azureErrToS3Err(err)
}
defer blobDownloadResponse.Body.Close()

_, err = io.Copy(writer, blobDownloadResponse.Body)
if err != nil {
return nil, fmt.Errorf("copy data: %w", err)
}

var tagcount int32
if blobDownloadResponse.TagCount != nil {
@@ -388,6 +445,7 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput, writer
Metadata: parseAzMetadata(blobDownloadResponse.Metadata),
TagCount: &tagcount,
ContentRange: blobDownloadResponse.ContentRange,
Body: blobDownloadResponse.Body,
}, nil
}

@@ -686,7 +744,12 @@ func (az *Azure) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3
return nil, azureErrToS3Err(err)
}

if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource && isMetaSame(res.Metadata, input.Metadata) {
cpSrc := *input.CopySource
if cpSrc[0] == '/' {
cpSrc = cpSrc[1:]
}

if strings.Join([]string{*input.Bucket, *input.Key}, "/") == cpSrc && isMetaSame(res.Metadata, input.Metadata) {
return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
}

@@ -700,7 +763,7 @@ func (az *Azure) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3
return nil, err
}

resp, err := client.CopyFromURL(ctx, az.serviceURL+"/"+*input.CopySource, &blob.CopyFromURLOptions{
resp, err := client.CopyFromURL(ctx, az.serviceURL+"/"+cpSrc, &blob.CopyFromURLOptions{
BlobTags: tags,
Metadata: parseMetadata(input.Metadata),
})
@@ -1310,34 +1373,8 @@ func (az *Azure) GetObjectLegalHold(ctx context.Context, bucket, object, version
return &status, nil
}

func (az *Azure) ChangeBucketOwner(ctx context.Context, bucket, newOwner string) error {
client, err := az.getContainerClient(bucket)
if err != nil {
return err
}
props, err := client.GetProperties(ctx, nil)
if err != nil {
return azureErrToS3Err(err)
}

acl, err := getAclFromMetadata(props.Metadata, keyAclCapital)
if err != nil {
return err
}

acl.Owner = newOwner

newAcl, err := json.Marshal(acl)
if err != nil {
return fmt.Errorf("marshal acl: %w", err)
}

err = az.PutBucketAcl(ctx, bucket, newAcl)
if err != nil {
return err
}

return nil
func (az *Azure) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
return az.PutBucketAcl(ctx, bucket, acl)
}

// The action actually returns the containers owned by the user, who initialized the gateway
@@ -18,9 +18,9 @@ import (
"bufio"
"context"
"fmt"
"io"

"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
"github.com/versity/versitygw/s3select"
@@ -43,6 +43,9 @@ type Backend interface {
PutBucketPolicy(_ context.Context, bucket string, policy []byte) error
GetBucketPolicy(_ context.Context, bucket string) ([]byte, error)
DeleteBucketPolicy(_ context.Context, bucket string) error
PutBucketOwnershipControls(_ context.Context, bucket string, ownership types.ObjectOwnership) error
GetBucketOwnershipControls(_ context.Context, bucket string) (types.ObjectOwnership, error)
DeleteBucketOwnershipControls(_ context.Context, bucket string) error

// multipart operations
CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
@@ -56,7 +59,7 @@ type Backend interface {
// standard object operations
PutObject(context.Context, *s3.PutObjectInput) (string, error)
HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
GetObject(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error)
GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error)
GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error)
CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
@@ -90,7 +93,7 @@ type Backend interface {
GetObjectLegalHold(_ context.Context, bucket, object, versionId string) (*bool, error)

// non AWS actions
ChangeBucketOwner(_ context.Context, bucket, newOwner string) error
ChangeBucketOwner(_ context.Context, bucket string, acl []byte) error
ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error)
}

@@ -138,6 +141,15 @@ func (BackendUnsupported) GetBucketPolicy(_ context.Context, bucket string) ([]b
func (BackendUnsupported) DeleteBucketPolicy(_ context.Context, bucket string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketOwnershipControls(_ context.Context, bucket string, ownership types.ObjectOwnership) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketOwnershipControls(_ context.Context, bucket string) (types.ObjectOwnership, error) {
return types.ObjectOwnershipBucketOwnerEnforced, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucketOwnershipControls(_ context.Context, bucket string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -167,7 +179,7 @@ func (BackendUnsupported) PutObject(context.Context, *s3.PutObjectInput) (string
func (BackendUnsupported) HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObject(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error) {
func (BackendUnsupported) GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
@@ -256,7 +268,7 @@ func (BackendUnsupported) GetObjectLegalHold(_ context.Context, bucket, object,
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket, newOwner string) error {
func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket string, acl []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error) {
@@ -18,7 +18,9 @@ import (
"crypto/md5"
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"time"
@@ -128,3 +130,16 @@ func md5String(data []byte) string {
sum := md5.Sum(data)
return hex.EncodeToString(sum[:])
}

type FileSectionReadCloser struct {
R io.Reader
F *os.File
}

func (f *FileSectionReadCloser) Read(p []byte) (int, error) {
return f.R.Read(p)
}

func (f *FileSectionReadCloser) Close() error {
return f.F.Close()
}
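A short sketch of how the new FileSectionReadCloser is meant to be consumed, mirroring what the posix backend does further down; the path and range values here are placeholders:

```
// Sketch: expose a byte range of an on-disk object as the GetObject Body.
// Closing the returned Body closes the underlying *os.File as well.
func openRange(path string, startOffset, length int64) (io.ReadCloser, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	rdr := io.NewSectionReader(f, startOffset, length)
	return &backend.FileSectionReadCloser{R: rdr, F: f}, nil
}
```

The caller that writes the S3 response is now responsible for draining and closing the Body, which is what lets the handlers stream data instead of each backend copying into an io.Writer.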
@@ -61,6 +61,10 @@ type Posix struct {
// used to determine if chowning is needed
euid int
egid int

// bucketlinks is a flag to enable symlinks to directories at the top
// level gateway directory to be treated as buckets the same as directories
bucketlinks bool
}

var _ backend.Backend = &Posix{}
@@ -75,6 +79,7 @@ const (
contentEncHdr = "content-encoding"
emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e"
aclkey = "acl"
ownershipkey = "ownership"
etagkey = "etag"
policykey = "policy"
bucketLockKey = "bucket-lock"
@@ -86,8 +91,9 @@ const (
)

type PosixOpts struct {
ChownUID bool
ChownGID bool
ChownUID bool
ChownGID bool
BucketLinks bool
}

func New(rootdir string, meta meta.MetadataStorer, opts PosixOpts) (*Posix, error) {
@@ -102,13 +108,14 @@ func New(rootdir string, meta meta.MetadataStorer, opts PosixOpts) (*Posix, erro
}

return &Posix{
meta: meta,
rootfd: f,
rootdir: rootdir,
euid: os.Geteuid(),
egid: os.Getegid(),
chownuid: opts.ChownUID,
chowngid: opts.ChownGID,
meta: meta,
rootfd: f,
rootdir: rootdir,
euid: os.Geteuid(),
egid: os.Getegid(),
chownuid: opts.ChownUID,
chowngid: opts.ChownGID,
bucketlinks: opts.BucketLinks,
}, nil
}

@@ -129,17 +136,25 @@ func (p *Posix) ListBuckets(_ context.Context, owner string, isAdmin bool) (s3re

var buckets []s3response.ListAllMyBucketsEntry
for _, entry := range entries {
if !entry.IsDir() {
// buckets must be a directory
continue
}

fi, err := entry.Info()
if err != nil {
// skip entries returning errors
continue
}

if p.bucketlinks && entry.Type() == fs.ModeSymlink {
fi, err = os.Stat(entry.Name())
if err != nil {
// skip entries returning errors
continue
}
}

if !fi.IsDir() {
// buckets must be a directory
continue
}

// return all the buckets for admin users
if isAdmin {
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
@@ -245,6 +260,9 @@ func (p *Posix) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, a
if err := p.meta.StoreAttribute(bucket, "", aclkey, acl); err != nil {
return fmt.Errorf("set acl: %w", err)
}
if err := p.meta.StoreAttribute(bucket, "", ownershipkey, []byte(input.ObjectOwnership)); err != nil {
return fmt.Errorf("set ownership: %w", err)
}

if input.ObjectLockEnabledForBucket != nil && *input.ObjectLockEnabledForBucket {
now := time.Now()
@@ -304,6 +322,61 @@ func (p *Posix) DeleteBucket(_ context.Context, input *s3.DeleteBucketInput) err
return nil
}

func (p *Posix) PutBucketOwnershipControls(_ context.Context, bucket string, ownership types.ObjectOwnership) error {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return fmt.Errorf("stat bucket: %w", err)
}

if err := p.meta.StoreAttribute(bucket, "", ownershipkey, []byte(ownership)); err != nil {
return fmt.Errorf("set ownership: %w", err)
}

return nil
}
func (p *Posix) GetBucketOwnershipControls(_ context.Context, bucket string) (types.ObjectOwnership, error) {
var ownship types.ObjectOwnership
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return ownship, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return ownship, fmt.Errorf("stat bucket: %w", err)
}

ownership, err := p.meta.RetrieveAttribute(bucket, "", ownershipkey)
if errors.Is(err, meta.ErrNoSuchKey) {
return ownship, s3err.GetAPIError(s3err.ErrOwnershipControlsNotFound)
}
if err != nil {
return ownship, fmt.Errorf("get bucket ownership status: %w", err)
}

return types.ObjectOwnership(ownership), nil
}
func (p *Posix) DeleteBucketOwnershipControls(_ context.Context, bucket string) error {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return fmt.Errorf("stat bucket: %w", err)
}

if err := p.meta.DeleteAttribute(bucket, "", ownershipkey); err != nil {
if errors.Is(err, meta.ErrNoSuchKey) {
return nil
}

return fmt.Errorf("delete ownership: %w", err)
}

return nil
}

func (p *Posix) CreateMultipartUpload(ctx context.Context, mpu *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
if mpu.Bucket == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
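For reference, a client-side sketch of exercising the new ownership-controls support through the standard AWS SDK for Go v2 against the gateway; the client and bucket name are placeholders, and nothing beyond the two API calls is specific to this change:

```
// Sketch: set bucket ownership controls, then read them back.
func setAndGetOwnership(ctx context.Context, client *s3.Client, bucket string) (types.ObjectOwnership, error) {
	_, err := client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
		Bucket: &bucket,
		OwnershipControls: &types.OwnershipControls{
			Rules: []types.OwnershipControlsRule{
				{ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced},
			},
		},
	})
	if err != nil {
		return "", err
	}
	out, err := client.GetBucketOwnershipControls(ctx, &s3.GetBucketOwnershipControlsInput{Bucket: &bucket})
	if err != nil {
		return "", err
	}
	return out.OwnershipControls.Rules[0].ObjectOwnership, nil
}
```

The posix backend persists the setting as the "ownership" metadata attribute on the bucket directory, and a missing attribute maps to ErrOwnershipControlsNotFound, matching the delete-then-get behavior of S3.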
@@ -1450,7 +1523,20 @@ func (p *Posix) DeleteObject(_ context.Context, input *s3.DeleteObjectInput) err
return fmt.Errorf("stat bucket: %w", err)
}

err = os.Remove(filepath.Join(bucket, object))
objpath := filepath.Join(bucket, object)

fi, err := os.Stat(objpath)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
return fmt.Errorf("stat object: %w", err)
}
if strings.HasSuffix(object, "/") && !fi.IsDir() {
return s3err.GetAPIError(s3err.ErrNoSuchKey)
}

err = os.Remove(objpath)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchKey)
}
@@ -1534,7 +1620,7 @@ func (p *Posix) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput)
}, nil
}

func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
if input.Bucket == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
@@ -1556,6 +1642,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io

object := *input.Key
objPath := filepath.Join(bucket, object)

fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
@@ -1564,6 +1651,10 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
return nil, fmt.Errorf("stat object: %w", err)
}

if strings.HasSuffix(object, "/") && !fi.IsDir() {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}

acceptRange := *input.Range
startOffset, length, err := backend.ParseRange(fi.Size(), acceptRange)
if err != nil {
@@ -1578,11 +1669,11 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
}

if length == -1 {
length = objSize - startOffset + 1
length = objSize - startOffset
}

if startOffset+length > objSize+1 {
length = objSize - startOffset + 1
if startOffset+length > objSize {
length = objSize - startOffset
}

var contentRange string
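A quick worked example of the corrected range arithmetic above, using made-up numbers: for a 10-byte object and a request such as Range: bytes=2- (assuming ParseRange signals an open-ended range with length = -1, as the check above implies), the new code computes length = 10 - 2 = 8, that is bytes 2 through 9. The previous objSize - startOffset + 1 would have produced 9 and read one byte past the end of the object; the same off-by-one applied to the clamping of over-long explicit ranges.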
@@ -1625,21 +1716,6 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
}, nil
}

f, err := os.Open(objPath)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
return nil, fmt.Errorf("open object: %w", err)
}
defer f.Close()

rdr := io.NewSectionReader(f, startOffset, length)
_, err = io.Copy(writer, rdr)
if err != nil {
return nil, fmt.Errorf("copy data: %w", err)
}

userMetaData := make(map[string]string)

contentType, contentEncoding := p.loadUserMetaData(bucket, object, userMetaData)
@@ -1660,6 +1736,16 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
tagCount = &tgCount
}

f, err := os.Open(objPath)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
return nil, fmt.Errorf("open object: %w", err)
}

rdr := io.NewSectionReader(f, startOffset, length)

return &s3.GetObjectOutput{
AcceptRanges: &acceptRange,
ContentLength: &length,
@@ -1670,6 +1756,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
Metadata: userMetaData,
TagCount: tagCount,
ContentRange: &contentRange,
Body: &backend.FileSectionReadCloser{R: rdr, F: f},
}, nil
}

@@ -1732,6 +1819,7 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
}

objPath := filepath.Join(bucket, object)

fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
@@ -1739,6 +1827,9 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
if err != nil {
return nil, fmt.Errorf("stat object: %w", err)
}
if strings.HasSuffix(object, "/") && !fi.IsDir() {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}

userMetaData := make(map[string]string)
contentType, contentEncoding := p.loadUserMetaData(bucket, object, userMetaData)
@@ -1869,7 +1960,13 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
if input.ExpectedBucketOwner == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
srcBucket, srcObject, ok := strings.Cut(*input.CopySource, "/")

cpSrc := *input.CopySource
if cpSrc[0] == '/' {
cpSrc = cpSrc[1:]
}

srcBucket, srcObject, ok := strings.Cut(cpSrc, "/")
if !ok {
return nil, s3err.GetAPIError(s3err.ErrInvalidCopySource)
}
@@ -1902,10 +1999,13 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
}
defer f.Close()

fInfo, err := f.Stat()
fi, err := f.Stat()
if err != nil {
return nil, fmt.Errorf("stat object: %w", err)
}
if strings.HasSuffix(srcObject, "/") && !fi.IsDir() {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}

meta := make(map[string]string)
p.loadUserMetaData(srcBucket, srcObject, meta)
@@ -1931,7 +2031,7 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
}
}

contentLength := fInfo.Size()
contentLength := fi.Size()

etag, err := p.PutObject(ctx,
&s3.PutObjectInput{
@@ -1945,7 +2045,7 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
return nil, err
}

fi, err := os.Stat(dstObjdPath)
fi, err = os.Stat(dstObjdPath)
if err != nil {
return nil, fmt.Errorf("stat dst object: %w", err)
}
@@ -1958,7 +2058,7 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
}, nil
}

func (p *Posix) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
func (p *Posix) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
if input.Bucket == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
@@ -1989,7 +2089,7 @@ func (p *Posix) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.
}

fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
results, err := backend.Walk(ctx, fileSystem, prefix, delim, marker, maxkeys,
p.fileToObj(bucket), []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
@@ -2071,7 +2171,7 @@ func (p *Posix) fileToObj(bucket string) backend.GetObjFunc {
}
}

func (p *Posix) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
func (p *Posix) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
if input.Bucket == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
@@ -2110,7 +2210,7 @@ func (p *Posix) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (
}

fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
results, err := backend.Walk(ctx, fileSystem, prefix, delim, marker, maxkeys,
p.fileToObj(bucket), []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
@@ -2564,39 +2664,8 @@ func (p *Posix) GetObjectRetention(_ context.Context, bucket, object, versionId
return data, nil
}

func (p *Posix) ChangeBucketOwner(ctx context.Context, bucket, newOwner string) error {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return fmt.Errorf("stat bucket: %w", err)
}

aclTag, err := p.meta.RetrieveAttribute(bucket, "", aclkey)
if err != nil {
return fmt.Errorf("get acl: %w", err)
}

var acl auth.ACL
err = json.Unmarshal(aclTag, &acl)
if err != nil {
return fmt.Errorf("unmarshal acl: %w", err)
}

acl.Owner = newOwner

newAcl, err := json.Marshal(acl)
if err != nil {
return fmt.Errorf("marshal acl: %w", err)
}

err = p.meta.StoreAttribute(bucket, "", aclkey, newAcl)
if err != nil {
return fmt.Errorf("set acl: %w", err)
}

return nil
func (p *Posix) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
return p.PutBucketAcl(ctx, bucket, acl)
}

func (p *Posix) ListBucketsAndOwners(ctx context.Context) (buckets []s3response.Bucket, err error) {
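With the owner-rewrite logic moved out of the backends, the caller of ChangeBucketOwner now supplies the full serialized ACL. A sketch of what that caller might look like, based only on the types shown in this diff; whether the admin handler also grants the new owner FULL_CONTROL, as done here, is an assumption:

```
// Sketch: build the replacement ACL for a bucket and hand the marshaled
// bytes to the backend, which simply stores them via PutBucketAcl.
func changeOwner(ctx context.Context, be backend.Backend, bucket, newOwner string) error {
	acl := auth.ACL{
		Owner: newOwner,
		Grantees: []auth.Grantee{{
			Permission: types.PermissionFullControl, // assumed grant for the new owner
			Access:     newOwner,
			Type:       types.TypeCanonicalUser,
		}},
	}
	data, err := json.Marshal(acl)
	if err != nil {
		return fmt.Errorf("marshal acl: %w", err)
	}
	return be.ChangeBucketOwner(ctx, bucket, data)
}
```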
@@ -55,6 +55,8 @@ type S3Proxy struct {
debug bool
}

var _ backend.Backend = &S3Proxy{}

func New(access, secret, endpoint, region string, disableChecksum, sslSkipVerify, debug bool) (*S3Proxy, error) {
s := &S3Proxy{
access: access,
@@ -128,6 +130,37 @@ func (s *S3Proxy) DeleteBucket(ctx context.Context, input *s3.DeleteBucketInput)
return handleError(err)
}

func (s *S3Proxy) PutBucketOwnershipControls(ctx context.Context, bucket string, ownership types.ObjectOwnership) error {
_, err := s.client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
Bucket: &bucket,
OwnershipControls: &types.OwnershipControls{
Rules: []types.OwnershipControlsRule{
{
ObjectOwnership: ownership,
},
},
},
})
return handleError(err)
}

func (s *S3Proxy) GetBucketOwnershipControls(ctx context.Context, bucket string) (types.ObjectOwnership, error) {
var ownship types.ObjectOwnership
resp, err := s.client.GetBucketOwnershipControls(ctx, &s3.GetBucketOwnershipControlsInput{
Bucket: &bucket,
})
if err != nil {
return ownship, handleError(err)
}
return resp.OwnershipControls.Rules[0].ObjectOwnership, nil
}
func (s *S3Proxy) DeleteBucketOwnershipControls(ctx context.Context, bucket string) error {
_, err := s.client.DeleteBucketOwnershipControls(ctx, &s3.DeleteBucketOwnershipControlsInput{
Bucket: &bucket,
})
return handleError(err)
}

func (s *S3Proxy) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
out, err := s.client.CreateMultipartUpload(ctx, input)
return out, handleError(err)
@@ -281,17 +314,11 @@ func (s *S3Proxy) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
return out, handleError(err)
}

func (s *S3Proxy) GetObject(ctx context.Context, input *s3.GetObjectInput, w io.Writer) (*s3.GetObjectOutput, error) {
func (s *S3Proxy) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
output, err := s.client.GetObject(ctx, input)
if err != nil {
return nil, handleError(err)
}
defer output.Body.Close()

_, err = io.Copy(w, output.Body)
if err != nil {
return nil, err
}

return output, nil
}
@@ -592,8 +619,12 @@ func (s *S3Proxy) GetObjectLegalHold(ctx context.Context, bucket, object, versio
return &status, nil
}

func (s *S3Proxy) ChangeBucketOwner(ctx context.Context, bucket, newOwner string) error {
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/change-bucket-owner/?bucket=%v&owner=%v", s.endpoint, bucket, newOwner), nil)
func (s *S3Proxy) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
var acll auth.ACL
if err := json.Unmarshal(acl, &acll); err != nil {
return fmt.Errorf("unmarshal acl: %w", err)
}
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/change-bucket-owner/?bucket=%v&owner=%v", s.endpoint, bucket, acll.Owner), nil)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
@@ -42,6 +42,7 @@ type ScoutfsOpts struct {
ChownUID bool
ChownGID bool
GlacierMode bool
BucketLinks bool
}

type ScoutFS struct {
@@ -486,6 +487,7 @@ func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
}

objPath := filepath.Join(bucket, object)

fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
@@ -493,6 +495,9 @@ func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
if err != nil {
return nil, fmt.Errorf("stat object: %w", err)
}
if strings.HasSuffix(object, "/") && !fi.IsDir() {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}

userMetaData := make(map[string]string)
contentType, contentEncoding := s.loadUserMetaData(bucket, object, userMetaData)
@@ -589,7 +594,7 @@ func (s *ScoutFS) retrieveUploadId(bucket, object string) (string, [32]byte, err
return entries[0].Name(), sum, nil
}

func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
bucket := *input.Bucket
object := *input.Key
acceptRange := *input.Range
@@ -603,6 +608,7 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer
}

objPath := filepath.Join(bucket, object)

fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
@@ -611,6 +617,10 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer
return nil, fmt.Errorf("stat object: %w", err)
}

if strings.HasSuffix(object, "/") && !fi.IsDir() {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}

startOffset, length, err := backend.ParseRange(fi.Size(), acceptRange)
if err != nil {
return nil, err
@@ -658,13 +668,8 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer
if err != nil {
return nil, fmt.Errorf("open object: %w", err)
}
defer f.Close()

rdr := io.NewSectionReader(f, startOffset, length)
_, err = io.Copy(writer, rdr)
if err != nil {
return nil, fmt.Errorf("copy data: %w", err)
}

userMetaData := make(map[string]string)

@@ -694,6 +699,7 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer
TagCount: &tagCount,
StorageClass: types.StorageClassStandard,
ContentRange: &contentRange,
Body: &backend.FileSectionReadCloser{R: rdr, F: f},
}, nil
}

@@ -718,7 +724,7 @@ func (s *ScoutFS) getXattrTags(bucket, object string) (map[string]string, error)
return tags, nil
}

func (s *ScoutFS) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
if input.Bucket == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
@@ -749,7 +755,7 @@ func (s *ScoutFS) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s
}

fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
results, err := backend.Walk(ctx, fileSystem, prefix, delim, marker, maxkeys,
s.fileToObj(bucket), []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
@@ -768,7 +774,7 @@ func (s *ScoutFS) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s
}, nil
}

func (s *ScoutFS) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
if input.Bucket == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
@@ -799,7 +805,7 @@ func (s *ScoutFS) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input)
}

fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, int32(maxkeys),
results, err := backend.Walk(ctx, fileSystem, prefix, delim, marker, int32(maxkeys),
s.fileToObj(bucket), []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
@@ -824,11 +830,14 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
if d.IsDir() {
// directory object only happens if directory empty
// check to see if this is a directory object by checking etag
b, err := s.meta.RetrieveAttribute(bucket, path, etagkey)
etagBytes, err := s.meta.RetrieveAttribute(bucket, path, etagkey)
if errors.Is(err, meta.ErrNoSuchKey) || errors.Is(err, fs.ErrNotExist) {
return types.Object{}, backend.ErrSkipObj
}
if err != nil {
return types.Object{}, fmt.Errorf("get etag: %w", err)
}
etag := string(b)
etag := string(etagBytes)

fi, err := d.Info()
if errors.Is(err, fs.ErrNotExist) {
@@ -844,6 +853,7 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
ETag: &etag,
Key: &key,
LastModified: backend.GetTimePtr(fi.ModTime()),
StorageClass: types.ObjectStorageClassStandard,
}, nil
}

@@ -852,9 +862,12 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
if errors.Is(err, fs.ErrNotExist) {
return types.Object{}, backend.ErrSkipObj
}
if err != nil {
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return types.Object{}, fmt.Errorf("get etag: %w", err)
}
// note: meta.ErrNoSuchKey will return etagBytes = []byte{}
// so this will just set etag to "" if its not already set

etag := string(b)

fi, err := d.Info()

@@ -37,8 +37,9 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
metastore := meta.XattrMeta{}

p, err := posix.New(rootdir, metastore, posix.PosixOpts{
ChownUID: opts.ChownUID,
ChownGID: opts.ChownGID,
ChownUID: opts.ChownUID,
ChownGID: opts.ChownGID,
BucketLinks: opts.BucketLinks,
})
if err != nil {
return nil, err
@@ -15,6 +15,7 @@
package backend

import (
"context"
"errors"
"fmt"
"io/fs"
@@ -38,7 +39,7 @@ var ErrSkipObj = errors.New("skip this object")

// Walk walks the supplied fs.FS and returns results compatible with list
// objects responses
func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
cpmap := make(map[string]struct{})
var objects []types.Object

@@ -55,6 +56,9 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj
if err != nil {
return err
}
if ctx.Err() != nil {
return ctx.Err()
}
// Ignore the root directory
if path == "." {
return nil
@@ -15,12 +15,15 @@
package backend_test

import (
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"io/fs"
"sync"
"testing"
"testing/fstest"
"time"

"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/backend"

@@ -108,7 +111,7 @@ func TestWalk(t *testing.T) {
}

for _, tt := range tests {
res, err := backend.Walk(tt.fsys, "", "/", "", 1000, tt.getobj, []string{})
res, err := backend.Walk(context.Background(), tt.fsys, "", "/", "", 1000, tt.getobj, []string{})
if err != nil {
t.Fatalf("walk: %v", err)
}

@@ -204,3 +207,50 @@ func printObjects(list []types.Object) string {
}
return res + "]"
}

type slowFS struct {
fstest.MapFS
}

const (
readDirPause = 100 * time.Millisecond

// walkTimeOut should be less than the tree traversal time
// which is the readdirPause time * the number of directories
walkTimeOut = 500 * time.Millisecond
)

func (s *slowFS) ReadDir(name string) ([]fs.DirEntry, error) {
time.Sleep(readDirPause)
return s.MapFS.ReadDir(name)
}

func TestWalkStop(t *testing.T) {
s := &slowFS{MapFS: fstest.MapFS{
"/a/b/c/d/e/f/g/h/i/g/k/l/m/n": &fstest.MapFile{},
}}

ctx, cancel := context.WithTimeout(context.Background(), walkTimeOut)
defer cancel()

var err error
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
_, err = backend.Walk(ctx, s, "", "/", "", 1000,
func(path string, d fs.DirEntry) (types.Object, error) {
return types.Object{}, nil
}, []string{})
}()

select {
case <-time.After(1 * time.Second):
t.Fatalf("walk is not terminated in time")
case <-ctx.Done():
}
wg.Wait()
if err != ctx.Err() {
t.Fatalf("unexpected error: %v", err)
}
}
```
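The test above bounds the walk with context.WithTimeout and a deliberately slow fs.FS. In the gateway the more common cancellation source is the incoming request itself. A hedged sketch of the caller side, using net/http for brevity and invented names (`listObjects`, `handler`) rather than the gateway's fiber handlers:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// listObjects stands in for a backend call such as Walk that honors ctx.
func listObjects(ctx context.Context, bucket string) ([]string, error) {
	select {
	case <-time.After(2 * time.Second): // pretend this is the tree traversal
		return []string{"a", "b"}, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func handler(w http.ResponseWriter, r *http.Request) {
	// r.Context() is cancelled when the client disconnects, so an abandoned
	// listing request no longer keeps walking the filesystem.
	objs, err := listObjects(r.Context(), "mybucket")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, objs)
}

func main() {
	http.HandleFunc("/list", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```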
@@ -45,8 +45,8 @@ var (
natsURL, natsTopic string
eventWebhookURL string
eventConfigFilePath string
logWebhookURL string
accessLog string
logWebhookURL, accessLog string
adminLogFile string
healthPath string
debug bool
pprof string

@@ -223,6 +223,12 @@ func initFlags() []cli.Flag {
EnvVars: []string{"LOGFILE", "VGW_ACCESS_LOG"},
Destination: &accessLog,
},
&cli.StringFlag{
Name: "admin-access-log",
Usage: "enable admin server access logging to specified file",
EnvVars: []string{"LOGFILE", "VGW_ADMIN_ACCESS_LOG"},
Destination: &adminLogFile,
},
&cli.StringFlag{
Name: "log-webhook-url",
Usage: "webhook url to send the audit logs",

@@ -608,9 +614,10 @@ func runGateway(ctx context.Context, be backend.Backend) error {
return fmt.Errorf("setup iam: %w", err)
}

logger, err := s3log.InitLogger(&s3log.LogConfig{
LogFile: accessLog,
WebhookURL: logWebhookURL,
loggers, err := s3log.InitLogger(&s3log.LogConfig{
LogFile:      accessLog,
WebhookURL:   logWebhookURL,
AdminLogFile: adminLogFile,
})
if err != nil {
return fmt.Errorf("setup logger: %w", err)

@@ -641,12 +648,12 @@ func runGateway(ctx context.Context, be backend.Backend) error {
srv, err := s3api.New(app, be, middlewares.RootUserConfig{
Access: rootUserAccess,
Secret: rootUserSecret,
}, port, region, iam, logger, evSender, metricsManager, opts...)
}, port, region, iam, loggers.S3Logger, loggers.AdminLogger, evSender, metricsManager, opts...)
if err != nil {
return fmt.Errorf("init gateway: %v", err)
}

admSrv := s3api.NewAdminServer(admApp, be, middlewares.RootUserConfig{Access: rootUserAccess, Secret: rootUserSecret}, admPort, region, iam, admOpts...)
admSrv := s3api.NewAdminServer(admApp, be, middlewares.RootUserConfig{Access: rootUserAccess, Secret: rootUserSecret}, admPort, region, iam, loggers.AdminLogger, admOpts...)

c := make(chan error, 2)
go func() { c <- srv.Serve() }()
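InitLogger now returns a value carrying separate S3 and admin loggers instead of a single logger, so each server gets its own destination and each can be nil when unconfigured. A minimal sketch of that shape; the `Loggers` struct name and the use of the standard library's *log.Logger are assumptions for illustration, not the gateway's actual s3log types:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// Loggers groups the per-audience loggers behind one constructor result.
type Loggers struct {
	S3Logger    *log.Logger
	AdminLogger *log.Logger
}

// LogConfig carries one file per audience; empty means "disabled".
type LogConfig struct {
	LogFile      string
	AdminLogFile string
}

func InitLogger(cfg *LogConfig) (*Loggers, error) {
	l := &Loggers{}
	if cfg.LogFile != "" {
		f, err := os.OpenFile(cfg.LogFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
		if err != nil {
			return nil, err
		}
		l.S3Logger = log.New(f, "s3 ", log.LstdFlags)
	}
	if cfg.AdminLogFile != "" {
		f, err := os.OpenFile(cfg.AdminLogFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
		if err != nil {
			return nil, err
		}
		l.AdminLogger = log.New(f, "admin ", log.LstdFlags)
	}
	return l, nil
}

func main() {
	loggers, err := InitLogger(&LogConfig{LogFile: "access.log", AdminLogFile: "admin.log"})
	fmt.Println(loggers, err)
}
```

The nil checks in the signal-handling hunk below follow directly from this shape: each logger is hung up or shut down only if it was configured.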
@@ -663,10 +670,17 @@ Loop:
case err = <-c:
break Loop
case <-sigHup:
if logger != nil {
err = logger.HangUp()
if loggers.S3Logger != nil {
err = loggers.S3Logger.HangUp()
if err != nil {
err = fmt.Errorf("HUP logger: %w", err)
err = fmt.Errorf("HUP s3 logger: %w", err)
break Loop
}
}
if loggers.AdminLogger != nil {
err = loggers.AdminLogger.HangUp()
if err != nil {
err = fmt.Errorf("HUP admin logger: %w", err)
break Loop
}
}

@@ -684,13 +698,22 @@ Loop:
fmt.Fprintf(os.Stderr, "shutdown iam: %v\n", err)
}

if logger != nil {
err := logger.Shutdown()
if loggers.S3Logger != nil {
err := loggers.S3Logger.Shutdown()
if err != nil {
if saveErr == nil {
saveErr = err
}
fmt.Fprintf(os.Stderr, "shutdown logger: %v\n", err)
fmt.Fprintf(os.Stderr, "shutdown s3 logger: %v\n", err)
}
}
if loggers.AdminLogger != nil {
err := loggers.AdminLogger.Shutdown()
if err != nil {
if saveErr == nil {
saveErr = err
}
fmt.Fprintf(os.Stderr, "shutdown admin logger: %v\n", err)
}
}
@@ -24,6 +24,7 @@ import (

var (
chownuid, chowngid bool
bucketlinks bool
)

func posixCommand() *cli.Command {

@@ -54,6 +55,12 @@ will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
EnvVars: []string{"VGW_CHOWN_GID"},
Destination: &chowngid,
},
&cli.BoolFlag{
Name: "bucketlinks",
Usage: "allow symlinked directories at bucket level to be treated as buckets",
EnvVars: []string{"VGW_BUCKET_LINKS"},
Destination: &bucketlinks,
},
},
}
}

@@ -70,8 +77,9 @@ func runPosix(ctx *cli.Context) error {
}

be, err := posix.New(gwroot, meta.XattrMeta{}, posix.PosixOpts{
ChownUID: chownuid,
ChownGID: chowngid,
ChownUID:    chownuid,
ChownGID:    chowngid,
BucketLinks: bucketlinks,
})
if err != nil {
return fmt.Errorf("init posix: %v", err)
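With the bucketlinks option enabled, a symbolic link at the top of the gateway root that points to a directory is treated as a bucket. A rough, illustrative sketch of the kind of check involved; this is not the posix backend's actual code, and the function and parameter names are invented:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// isBucketDir reports whether name under root should be listed as a bucket.
// With followLinks (the bucketlinks option), a symlink that resolves to a
// directory counts; otherwise only real directories do.
func isBucketDir(root, name string, followLinks bool) (bool, error) {
	p := filepath.Join(root, name)
	fi, err := os.Lstat(p) // does not follow the link
	if err != nil {
		return false, err
	}
	if fi.IsDir() {
		return true, nil
	}
	if followLinks && fi.Mode()&os.ModeSymlink != 0 {
		target, err := os.Stat(p) // follows the link
		if err != nil {
			return false, err
		}
		return target.IsDir(), nil
	}
	return false, nil
}

func main() {
	ok, err := isBucketDir("/mnt/fs/gwroot", "mybucket", true)
	fmt.Println(ok, err)
}
```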
@@ -63,6 +63,12 @@ move interfaces as well as support for tiered filesystems.`,
EnvVars: []string{"VGW_CHOWN_GID"},
Destination: &chowngid,
},
&cli.BoolFlag{
Name: "bucketlinks",
Usage: "allow symlinked directories at bucket level to be treated as buckets",
EnvVars: []string{"VGW_BUCKET_LINKS"},
Destination: &bucketlinks,
},
},
}
}

@@ -76,6 +82,7 @@ func runScoutfs(ctx *cli.Context) error {
opts.GlacierMode = glacier
opts.ChownUID = chownuid
opts.ChownGID = chowngid
opts.BucketLinks = bucketlinks

be, err := scoutfs.New(ctx.Args().Get(0), opts)
if err != nil {

@@ -305,6 +305,10 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_CHOWN_UID=false
#VGW_CHOWN_GID=false

# The VGW_BUCKET_LINKS option will enable the gateway to treat symbolic links
# to directories at the top level gateway directory as buckets.
#VGW_BUCKET_LINKS=false

###########
# scoutfs #
###########

@@ -336,6 +340,10 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_CHOWN_UID=false
#VGW_CHOWN_GID=false

# The VGW_BUCKET_LINKS option will enable the gateway to treat symbolic links
# to directories at the top level gateway directory as buckets.
#VGW_BUCKET_LINKS=false

######
# s3 #
######
go.mod (61 lines changed)

@@ -3,45 +3,44 @@ module github.com/versity/versitygw
Dependency version bumps. Direct requirements updated:
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 -> v1.13.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 -> v1.4.0
github.com/aws/aws-sdk-go-v2 v1.30.0 -> v1.30.3
github.com/aws/aws-sdk-go-v2/service/s3 v1.56.1 -> v1.58.3
github.com/aws/smithy-go v1.20.2 -> v1.20.3
github.com/aws/aws-sdk-go-v2/config v1.27.21 -> v1.27.27
github.com/aws/aws-sdk-go-v2/credentials v1.17.21 -> v1.17.27
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.1 -> v1.17.10
github.com/gofiber/fiber/v2 v2.52.4 -> v2.52.5
github.com/pkg/xattr v0.4.9 -> v0.4.10
github.com/urfave/cli/v2 v2.27.2 -> v2.27.3
golang.org/x/sys v0.21.0 -> v0.23.0
Indirect requirements (Azure SDK internal, AWS SDK v2 internal and service packages, go-runewidth, xrash/smetrics, golang.org/x/crypto, golang.org/x/net, golang.org/x/time) bumped to the matching releases.
go.sum (131 lines changed)

Checksum entries updated to match the dependency versions bumped in go.mod above.
@@ -24,51 +24,54 @@ var (
)

var (
(the s3 action constant list is unchanged apart from whitespace re-alignment; three constants are added at the end:)
ActionPutBucketOwnershipControls    = "s3_PutBucketOwnershipControls"
ActionGetBucketOwnershipControls    = "s3_GetBucketOwnershipControls"
ActionDeleteBucketOwnershipControls = "s3_DeleteBucketOwnershipControls"
)

func init() {
@@ -19,12 +19,13 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3log"
)

type S3AdminRouter struct{}

func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService) {
controller := controllers.NewAdminController(iam, be)
func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger) {
controller := controllers.NewAdminController(iam, be, logger)

// CreateUser admin api
app.Patch("/create-user", controller.CreateUser)

@@ -22,6 +22,7 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/middlewares"
"github.com/versity/versitygw/s3log"
)

type S3AdminServer struct {

@@ -32,7 +33,7 @@ type S3AdminServer struct {
cert *tls.Certificate
}

func NewAdminServer(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, opts ...AdminOpt) *S3AdminServer {
func NewAdminServer(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, l s3log.AuditLogger, opts ...AdminOpt) *S3AdminServer {
server := &S3AdminServer{
app: app,
backend: be,

@@ -46,13 +47,13 @@ func NewAdminServer(app *fiber.App, be backend.Backend, root middlewares.RootUse

// Logging middlewares
app.Use(logger.New())
app.Use(middlewares.DecodeURL(nil, nil))
app.Use(middlewares.DecodeURL(l, nil))

// Authentication middlewares
app.Use(middlewares.VerifyV4Signature(root, iam, nil, nil, region, false))
app.Use(middlewares.VerifyMD5Body(nil))
app.Use(middlewares.VerifyV4Signature(root, iam, l, nil, region, false))
app.Use(middlewares.VerifyMD5Body(l))

server.router.Init(app, be, iam)
server.router.Init(app, be, iam, l)

return server
}
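The admin server and router now accept an s3log.AuditLogger and thread it through the middlewares and the controller. As a rough idea of what such a logger has to provide, inferred only from the calls visible in these hunks (Log, HangUp, Shutdown), a trivial stub could look like the following; the real s3log interface may carry more methods or different types:

```go
package main

import (
	"fmt"

	"github.com/gofiber/fiber/v2"
)

// LogMeta mirrors the fields used by the controller code in this diff.
type LogMeta struct {
	Action     string
	HttpStatus int
}

// AuditLogger is a sketch of the interface implied by the calls above.
type AuditLogger interface {
	Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta)
	HangUp() error
	Shutdown() error
}

// stdoutAudit is a minimal implementation, handy for tests.
type stdoutAudit struct{}

func (stdoutAudit) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
	fmt.Printf("action=%s status=%d err=%v body=%s\n", meta.Action, meta.HttpStatus, err, body)
}
func (stdoutAudit) HangUp() error   { return nil }
func (stdoutAudit) Shutdown() error { return nil }

func main() {
	var l AuditLogger = stdoutAudit{}
	l.Log(nil, nil, []byte("ok"), LogMeta{Action: "admin:ListUsers", HttpStatus: 200})
}
```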
@@ -16,146 +16,310 @@ package controllers

import (
"encoding/json"
"errors"
"fmt"
"strings"

"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3log"
)

type AdminController struct {
iam auth.IAMService
be backend.Backend
l s3log.AuditLogger
}

func NewAdminController(iam auth.IAMService, be backend.Backend) AdminController {
return AdminController{iam: iam, be: be}
func NewAdminController(iam auth.IAMService, be backend.Backend, l s3log.AuditLogger) AdminController {
return AdminController{iam: iam, be: be, l: l}
}

func (c AdminController) CreateUser(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return ctx.Status(fiber.StatusForbidden).SendString("access denied: only admin users have access to this resource")
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:CreateUser",
})
}
var usr auth.Account
err := json.Unmarshal(ctx.Body(), &usr)
if err != nil {
return ctx.Status(fiber.StatusBadRequest).SendString(fmt.Errorf("failed to parse request body: %w", err).Error())
return sendResponse(ctx, fmt.Errorf("failed to parse request body: %w", err), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusBadRequest,
action: "admin:CreateUser",
})
}

if usr.Role != auth.RoleAdmin && usr.Role != auth.RoleUser && usr.Role != auth.RoleUserPlus {
return ctx.Status(fiber.StatusBadRequest).SendString("invalid parameters: user role have to be one of the following: 'user', 'admin', 'userplus'")
return sendResponse(ctx, errors.New("invalid parameters: user role have to be one of the following: 'user', 'admin', 'userplus'"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusBadRequest,
action: "admin:CreateUser",
})
}

err = c.iam.CreateAccount(usr)
if err != nil {
status := fiber.StatusInternalServerError
msg := fmt.Errorf("failed to create user: %w", err).Error()
err = fmt.Errorf("failed to create user: %w", err)

if strings.Contains(msg, "user already exists") {
if strings.Contains(err.Error(), "user already exists") {
status = fiber.StatusConflict
}

return ctx.Status(status).SendString(msg)
return sendResponse(ctx, err, nil,
&metaOptions{
status: status,
logger: c.l,
action: "admin:CreateUser",
})
}

return ctx.Status(fiber.StatusCreated).SendString("The user has been created successfully")
return sendResponse(ctx, nil, "The user has been created successfully", &metaOptions{
status: fiber.StatusCreated,
logger: c.l,
action: "admin:CreateUser",
})
}

func (c AdminController) UpdateUser(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return ctx.Status(fiber.StatusForbidden).SendString("access denied: only admin users have access to this resource")
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:UpdateUser",
})
}

access := ctx.Query("access")
if access == "" {
return ctx.Status(fiber.StatusBadRequest).SendString("missing user access parameter")
return sendResponse(ctx, errors.New("missing user access parameter"), nil,
&metaOptions{
status: fiber.StatusBadRequest,
logger: c.l,
action: "admin:UpdateUser",
})
}

var props auth.MutableProps
if err := json.Unmarshal(ctx.Body(), &props); err != nil {
return ctx.Status(fiber.StatusBadRequest).SendString(fmt.Errorf("invalid request body %w", err).Error())
return sendResponse(ctx, fmt.Errorf("invalid request body %w", err), nil,
&metaOptions{
status: fiber.StatusBadRequest,
logger: c.l,
action: "admin:UpdateUser",
})
}

err := c.iam.UpdateUserAccount(access, props)
if err != nil {
status := fiber.StatusInternalServerError
msg := fmt.Errorf("failed to update user account: %w", err).Error()
err = fmt.Errorf("failed to update user account: %w", err)

if strings.Contains(msg, "user not found") {
if strings.Contains(err.Error(), "user not found") {
status = fiber.StatusNotFound
}

return ctx.Status(status).SendString(msg)
return sendResponse(ctx, err, nil,
&metaOptions{
status: status,
logger: c.l,
action: "admin:UpdateUser",
})
}

return ctx.SendString("the user has been updated successfully")
return sendResponse(ctx, nil, "the user has been updated successfully",
&metaOptions{
logger: c.l,
action: "admin:UpdateUser",
})
}

func (c AdminController) DeleteUser(ctx *fiber.Ctx) error {
access := ctx.Query("access")
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return ctx.Status(fiber.StatusForbidden).SendString("access denied: only admin users have access to this resource")
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:DeleteUser",
})
}

err := c.iam.DeleteUserAccount(access)
if err != nil {
return err
return sendResponse(ctx, err, nil,
&metaOptions{
logger: c.l,
action: "admin:DeleteUser",
})
}

return ctx.SendString("The user has been deleted successfully")
return sendResponse(ctx, nil, "The user has been deleted successfully",
&metaOptions{
logger: c.l,
action: "admin:DeleteUser",
})
}

func (c AdminController) ListUsers(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return ctx.Status(fiber.StatusForbidden).SendString("access denied: only admin users have access to this resource")
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:ListUsers",
})
}
accs, err := c.iam.ListUserAccounts()
if err != nil {
return err
}

return ctx.JSON(accs)
return sendResponse(ctx, err, accs,
&metaOptions{
logger: c.l,
action: "admin:ListUsers",
})
}

func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return ctx.Status(fiber.StatusForbidden).SendString("access denied: only admin users have access to this resource")
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:ChangeBucketOwner",
})
}
owner := ctx.Query("owner")
bucket := ctx.Query("bucket")

accs, err := auth.CheckIfAccountsExist([]string{owner}, c.iam)
if err != nil {
return err
return sendResponse(ctx, err, nil,
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
})
}
if len(accs) > 0 {
return ctx.Status(fiber.StatusNotFound).SendString("user specified as the new bucket owner does not exist")
return sendResponse(ctx, errors.New("user specified as the new bucket owner does not exist"), nil,
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
status: fiber.StatusNotFound,
})
}

err = c.be.ChangeBucketOwner(ctx.Context(), bucket, owner)
acl := auth.ACL{
Owner: owner,
Grantees: []auth.Grantee{
{
Permission: types.PermissionFullControl,
Access: owner,
Type: types.TypeCanonicalUser,
},
},
}

aclParsed, err := json.Marshal(acl)
if err != nil {
return err
return sendResponse(ctx, fmt.Errorf("failed to marshal the bucket acl: %w", err), nil,
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
})
}

return ctx.SendString("Bucket owner has been updated successfully")
err = c.be.ChangeBucketOwner(ctx.Context(), bucket, aclParsed)
return sendResponse(ctx, err, "Bucket owner has been updated successfully",
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
})
}

func (c AdminController) ListBuckets(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return ctx.Status(fiber.StatusForbidden).SendString("access denied: only admin users have access to this resource")
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:ListBuckets",
})
}

buckets, err := c.be.ListBucketsAndOwners(ctx.Context())
return sendResponse(ctx, err, buckets,
&metaOptions{
logger: c.l,
action: "admin:ListBuckets",
})
}

type metaOptions struct {
action string
status int
logger s3log.AuditLogger
}

func sendResponse(ctx *fiber.Ctx, err error, data any, m *metaOptions) error {
status := m.status
if err != nil {
if status == 0 {
status = fiber.StatusInternalServerError
}
if m.logger != nil {
m.logger.Log(ctx, err, []byte(err.Error()), s3log.LogMeta{
Action: m.action,
HttpStatus: status,
})
}

return ctx.Status(status).SendString(err.Error())
}

if status == 0 {
status = fiber.StatusOK
}

msg, ok := data.(string)
if ok {
if m.logger != nil {
m.logger.Log(ctx, nil, []byte(msg), s3log.LogMeta{
Action: m.action,
HttpStatus: status,
})
}

return ctx.Status(status).SendString(msg)
}

dataJSON, err := json.Marshal(data)
if err != nil {
return err
}

return ctx.JSON(buckets)
if m.logger != nil {
m.logger.Log(ctx, nil, dataJSON, s3log.LogMeta{
HttpStatus: status,
Action: m.action,
})
}

ctx.Set(fiber.HeaderContentType, fiber.MIMEApplicationJSON)

return ctx.Status(status).Send(dataJSON)
}
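sendResponse centralizes status selection, audit logging, and the string-vs-JSON body split, so each handler reduces to building an error or a payload. A sketch of how a hypothetical new handler in this package would use it; `GetUser` and the `GetUserAccount` IAM call are invented for illustration, and the snippet assumes the surrounding controllers package (sendResponse, metaOptions, and the imports shown in the hunk above):

```go
// GetUser is a hypothetical admin handler showing the sendResponse pattern.
func (c AdminController) GetUser(ctx *fiber.Ctx) error {
	acct := ctx.Locals("account").(auth.Account)
	if acct.Role != "admin" {
		return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
			&metaOptions{
				logger: c.l,
				status: fiber.StatusForbidden,
				action: "admin:GetUser",
			})
	}

	// Assumed IAM lookup; the gateway's IAMService may not expose this method.
	acc, err := c.iam.GetUserAccount(ctx.Query("access"))
	if err != nil {
		// Error path: status defaults to 500 and the error text is logged and returned.
		return sendResponse(ctx, err, nil,
			&metaOptions{
				logger: c.l,
				action: "admin:GetUser",
			})
	}

	// Success path: non-string data is JSON-encoded and logged with a 200 status by default.
	return sendResponse(ctx, nil, acc,
		&metaOptions{
			logger: c.l,
			action: "admin:GetUser",
		})
}
```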
@@ -407,7 +407,7 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
|
||||
}
|
||||
adminController := AdminController{
|
||||
be: &BackendMock{
|
||||
ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket, newOwner string) error {
|
||||
ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, acl []byte) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
|
||||
@@ -7,9 +7,9 @@ import (
"bufio"
"context"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3response"
"io"
"sync"
)

@@ -26,7 +26,7 @@ var _ backend.Backend = &BackendMock{}
// AbortMultipartUploadFunc: func(contextMoqParam context.Context, abortMultipartUploadInput *s3.AbortMultipartUploadInput) error {
// panic("mock out the AbortMultipartUpload method")
// },
// ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, newOwner string) error {
// ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, acl []byte) error {
// panic("mock out the ChangeBucketOwner method")
// },
// CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
@@ -44,6 +44,9 @@ var _ backend.Backend = &BackendMock{}
// DeleteBucketFunc: func(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error {
// panic("mock out the DeleteBucket method")
// },
// DeleteBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) error {
// panic("mock out the DeleteBucketOwnershipControls method")
// },
// DeleteBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) error {
// panic("mock out the DeleteBucketPolicy method")
// },
@@ -62,6 +65,9 @@ var _ backend.Backend = &BackendMock{}
// GetBucketAclFunc: func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error) {
// panic("mock out the GetBucketAcl method")
// },
// GetBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error) {
// panic("mock out the GetBucketOwnershipControls method")
// },
// GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
// panic("mock out the GetBucketPolicy method")
// },
@@ -71,7 +77,7 @@ var _ backend.Backend = &BackendMock{}
// GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
// panic("mock out the GetBucketVersioning method")
// },
// GetObjectFunc: func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
// GetObjectFunc: func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
// panic("mock out the GetObject method")
// },
// GetObjectAclFunc: func(contextMoqParam context.Context, getObjectAclInput *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
@@ -122,6 +128,9 @@ var _ backend.Backend = &BackendMock{}
// PutBucketAclFunc: func(contextMoqParam context.Context, bucket string, data []byte) error {
// panic("mock out the PutBucketAcl method")
// },
// PutBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error {
// panic("mock out the PutBucketOwnershipControls method")
// },
// PutBucketPolicyFunc: func(contextMoqParam context.Context, bucket string, policy []byte) error {
// panic("mock out the PutBucketPolicy method")
// },
@@ -178,7 +187,7 @@ type BackendMock struct {
AbortMultipartUploadFunc func(contextMoqParam context.Context, abortMultipartUploadInput *s3.AbortMultipartUploadInput) error

// ChangeBucketOwnerFunc mocks the ChangeBucketOwner method.
ChangeBucketOwnerFunc func(contextMoqParam context.Context, bucket string, newOwner string) error
ChangeBucketOwnerFunc func(contextMoqParam context.Context, bucket string, acl []byte) error

// CompleteMultipartUploadFunc mocks the CompleteMultipartUpload method.
CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
@@ -195,6 +204,9 @@ type BackendMock struct {
// DeleteBucketFunc mocks the DeleteBucket method.
DeleteBucketFunc func(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error

// DeleteBucketOwnershipControlsFunc mocks the DeleteBucketOwnershipControls method.
DeleteBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string) error

// DeleteBucketPolicyFunc mocks the DeleteBucketPolicy method.
DeleteBucketPolicyFunc func(contextMoqParam context.Context, bucket string) error

@@ -213,6 +225,9 @@ type BackendMock struct {
// GetBucketAclFunc mocks the GetBucketAcl method.
GetBucketAclFunc func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error)

// GetBucketOwnershipControlsFunc mocks the GetBucketOwnershipControls method.
GetBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error)

// GetBucketPolicyFunc mocks the GetBucketPolicy method.
GetBucketPolicyFunc func(contextMoqParam context.Context, bucket string) ([]byte, error)

@@ -223,7 +238,7 @@ type BackendMock struct {
GetBucketVersioningFunc func(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error)

// GetObjectFunc mocks the GetObject method.
GetObjectFunc func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error)
GetObjectFunc func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput) (*s3.GetObjectOutput, error)

// GetObjectAclFunc mocks the GetObjectAcl method.
GetObjectAclFunc func(contextMoqParam context.Context, getObjectAclInput *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
@@ -273,6 +288,9 @@ type BackendMock struct {
// PutBucketAclFunc mocks the PutBucketAcl method.
PutBucketAclFunc func(contextMoqParam context.Context, bucket string, data []byte) error

// PutBucketOwnershipControlsFunc mocks the PutBucketOwnershipControls method.
PutBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error

// PutBucketPolicyFunc mocks the PutBucketPolicy method.
PutBucketPolicyFunc func(contextMoqParam context.Context, bucket string, policy []byte) error

@@ -333,8 +351,8 @@ type BackendMock struct {
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
// NewOwner is the newOwner argument value.
NewOwner string
// ACL is the acl argument value.
ACL []byte
}
// CompleteMultipartUpload holds details about calls to the CompleteMultipartUpload method.
CompleteMultipartUpload []struct {
@@ -373,6 +391,13 @@ type BackendMock struct {
// DeleteBucketInput is the deleteBucketInput argument value.
DeleteBucketInput *s3.DeleteBucketInput
}
// DeleteBucketOwnershipControls holds details about calls to the DeleteBucketOwnershipControls method.
DeleteBucketOwnershipControls []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
}
// DeleteBucketPolicy holds details about calls to the DeleteBucketPolicy method.
DeleteBucketPolicy []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -417,6 +442,13 @@ type BackendMock struct {
// GetBucketAclInput is the getBucketAclInput argument value.
GetBucketAclInput *s3.GetBucketAclInput
}
// GetBucketOwnershipControls holds details about calls to the GetBucketOwnershipControls method.
GetBucketOwnershipControls []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
}
// GetBucketPolicy holds details about calls to the GetBucketPolicy method.
GetBucketPolicy []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -444,8 +476,6 @@ type BackendMock struct {
ContextMoqParam context.Context
// GetObjectInput is the getObjectInput argument value.
GetObjectInput *s3.GetObjectInput
// Writer is the writer argument value.
Writer io.Writer
}
// GetObjectAcl holds details about calls to the GetObjectAcl method.
GetObjectAcl []struct {
@@ -571,6 +601,15 @@ type BackendMock struct {
// Data is the data argument value.
Data []byte
}
// PutBucketOwnershipControls holds details about calls to the PutBucketOwnershipControls method.
PutBucketOwnershipControls []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
// Ownership is the ownership argument value.
Ownership types.ObjectOwnership
}
// PutBucketPolicy holds details about calls to the PutBucketPolicy method.
PutBucketPolicy []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -693,54 +732,57 @@ type BackendMock struct {
UploadPartCopyInput *s3.UploadPartCopyInput
}
}
lockAbortMultipartUpload sync.RWMutex
lockChangeBucketOwner sync.RWMutex
lockCompleteMultipartUpload sync.RWMutex
lockCopyObject sync.RWMutex
lockCreateBucket sync.RWMutex
lockCreateMultipartUpload sync.RWMutex
lockDeleteBucket sync.RWMutex
lockDeleteBucketPolicy sync.RWMutex
lockDeleteBucketTagging sync.RWMutex
lockDeleteObject sync.RWMutex
lockDeleteObjectTagging sync.RWMutex
lockDeleteObjects sync.RWMutex
lockGetBucketAcl sync.RWMutex
lockGetBucketPolicy sync.RWMutex
lockGetBucketTagging sync.RWMutex
lockGetBucketVersioning sync.RWMutex
lockGetObject sync.RWMutex
lockGetObjectAcl sync.RWMutex
lockGetObjectAttributes sync.RWMutex
lockGetObjectLegalHold sync.RWMutex
lockGetObjectLockConfiguration sync.RWMutex
lockGetObjectRetention sync.RWMutex
lockGetObjectTagging sync.RWMutex
lockHeadBucket sync.RWMutex
lockHeadObject sync.RWMutex
lockListBuckets sync.RWMutex
lockListBucketsAndOwners sync.RWMutex
lockListMultipartUploads sync.RWMutex
lockListObjectVersions sync.RWMutex
lockListObjects sync.RWMutex
lockListObjectsV2 sync.RWMutex
lockListParts sync.RWMutex
lockPutBucketAcl sync.RWMutex
lockPutBucketPolicy sync.RWMutex
lockPutBucketTagging sync.RWMutex
lockPutBucketVersioning sync.RWMutex
lockPutObject sync.RWMutex
lockPutObjectAcl sync.RWMutex
lockPutObjectLegalHold sync.RWMutex
lockPutObjectLockConfiguration sync.RWMutex
lockPutObjectRetention sync.RWMutex
lockPutObjectTagging sync.RWMutex
lockRestoreObject sync.RWMutex
lockSelectObjectContent sync.RWMutex
lockShutdown sync.RWMutex
lockString sync.RWMutex
lockUploadPart sync.RWMutex
lockUploadPartCopy sync.RWMutex
lockAbortMultipartUpload sync.RWMutex
lockChangeBucketOwner sync.RWMutex
lockCompleteMultipartUpload sync.RWMutex
lockCopyObject sync.RWMutex
lockCreateBucket sync.RWMutex
lockCreateMultipartUpload sync.RWMutex
lockDeleteBucket sync.RWMutex
lockDeleteBucketOwnershipControls sync.RWMutex
lockDeleteBucketPolicy sync.RWMutex
lockDeleteBucketTagging sync.RWMutex
lockDeleteObject sync.RWMutex
lockDeleteObjectTagging sync.RWMutex
lockDeleteObjects sync.RWMutex
lockGetBucketAcl sync.RWMutex
lockGetBucketOwnershipControls sync.RWMutex
lockGetBucketPolicy sync.RWMutex
lockGetBucketTagging sync.RWMutex
lockGetBucketVersioning sync.RWMutex
lockGetObject sync.RWMutex
lockGetObjectAcl sync.RWMutex
lockGetObjectAttributes sync.RWMutex
lockGetObjectLegalHold sync.RWMutex
lockGetObjectLockConfiguration sync.RWMutex
lockGetObjectRetention sync.RWMutex
lockGetObjectTagging sync.RWMutex
lockHeadBucket sync.RWMutex
lockHeadObject sync.RWMutex
lockListBuckets sync.RWMutex
lockListBucketsAndOwners sync.RWMutex
lockListMultipartUploads sync.RWMutex
lockListObjectVersions sync.RWMutex
lockListObjects sync.RWMutex
lockListObjectsV2 sync.RWMutex
lockListParts sync.RWMutex
lockPutBucketAcl sync.RWMutex
lockPutBucketOwnershipControls sync.RWMutex
lockPutBucketPolicy sync.RWMutex
lockPutBucketTagging sync.RWMutex
lockPutBucketVersioning sync.RWMutex
lockPutObject sync.RWMutex
lockPutObjectAcl sync.RWMutex
lockPutObjectLegalHold sync.RWMutex
lockPutObjectLockConfiguration sync.RWMutex
lockPutObjectRetention sync.RWMutex
lockPutObjectTagging sync.RWMutex
lockRestoreObject sync.RWMutex
lockSelectObjectContent sync.RWMutex
lockShutdown sync.RWMutex
lockString sync.RWMutex
lockUploadPart sync.RWMutex
lockUploadPartCopy sync.RWMutex
}

// AbortMultipartUpload calls AbortMultipartUploadFunc.
@@ -780,23 +822,23 @@ func (mock *BackendMock) AbortMultipartUploadCalls() []struct {
}

// ChangeBucketOwner calls ChangeBucketOwnerFunc.
func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, bucket string, newOwner string) error {
func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, bucket string, acl []byte) error {
if mock.ChangeBucketOwnerFunc == nil {
panic("BackendMock.ChangeBucketOwnerFunc: method is nil but Backend.ChangeBucketOwner was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
NewOwner string
ACL []byte
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
NewOwner: newOwner,
ACL: acl,
}
mock.lockChangeBucketOwner.Lock()
mock.calls.ChangeBucketOwner = append(mock.calls.ChangeBucketOwner, callInfo)
mock.lockChangeBucketOwner.Unlock()
return mock.ChangeBucketOwnerFunc(contextMoqParam, bucket, newOwner)
return mock.ChangeBucketOwnerFunc(contextMoqParam, bucket, acl)
}

// ChangeBucketOwnerCalls gets all the calls that were made to ChangeBucketOwner.
@@ -806,12 +848,12 @@ func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, buck
func (mock *BackendMock) ChangeBucketOwnerCalls() []struct {
ContextMoqParam context.Context
Bucket string
NewOwner string
ACL []byte
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
NewOwner string
ACL []byte
}
mock.lockChangeBucketOwner.RLock()
calls = mock.calls.ChangeBucketOwner
@@ -1003,6 +1045,42 @@ func (mock *BackendMock) DeleteBucketCalls() []struct {
return calls
}

// DeleteBucketOwnershipControls calls DeleteBucketOwnershipControlsFunc.
func (mock *BackendMock) DeleteBucketOwnershipControls(contextMoqParam context.Context, bucket string) error {
if mock.DeleteBucketOwnershipControlsFunc == nil {
panic("BackendMock.DeleteBucketOwnershipControlsFunc: method is nil but Backend.DeleteBucketOwnershipControls was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
}
mock.lockDeleteBucketOwnershipControls.Lock()
mock.calls.DeleteBucketOwnershipControls = append(mock.calls.DeleteBucketOwnershipControls, callInfo)
mock.lockDeleteBucketOwnershipControls.Unlock()
return mock.DeleteBucketOwnershipControlsFunc(contextMoqParam, bucket)
}

// DeleteBucketOwnershipControlsCalls gets all the calls that were made to DeleteBucketOwnershipControls.
// Check the length with:
//
// len(mockedBackend.DeleteBucketOwnershipControlsCalls())
func (mock *BackendMock) DeleteBucketOwnershipControlsCalls() []struct {
ContextMoqParam context.Context
Bucket string
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
}
mock.lockDeleteBucketOwnershipControls.RLock()
calls = mock.calls.DeleteBucketOwnershipControls
mock.lockDeleteBucketOwnershipControls.RUnlock()
return calls
}

// DeleteBucketPolicy calls DeleteBucketPolicyFunc.
func (mock *BackendMock) DeleteBucketPolicy(contextMoqParam context.Context, bucket string) error {
if mock.DeleteBucketPolicyFunc == nil {
@@ -1223,6 +1301,42 @@ func (mock *BackendMock) GetBucketAclCalls() []struct {
return calls
}

// GetBucketOwnershipControls calls GetBucketOwnershipControlsFunc.
func (mock *BackendMock) GetBucketOwnershipControls(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error) {
if mock.GetBucketOwnershipControlsFunc == nil {
panic("BackendMock.GetBucketOwnershipControlsFunc: method is nil but Backend.GetBucketOwnershipControls was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
}
mock.lockGetBucketOwnershipControls.Lock()
mock.calls.GetBucketOwnershipControls = append(mock.calls.GetBucketOwnershipControls, callInfo)
mock.lockGetBucketOwnershipControls.Unlock()
return mock.GetBucketOwnershipControlsFunc(contextMoqParam, bucket)
}

// GetBucketOwnershipControlsCalls gets all the calls that were made to GetBucketOwnershipControls.
// Check the length with:
//
// len(mockedBackend.GetBucketOwnershipControlsCalls())
func (mock *BackendMock) GetBucketOwnershipControlsCalls() []struct {
ContextMoqParam context.Context
Bucket string
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
}
mock.lockGetBucketOwnershipControls.RLock()
calls = mock.calls.GetBucketOwnershipControls
mock.lockGetBucketOwnershipControls.RUnlock()
return calls
}

// GetBucketPolicy calls GetBucketPolicyFunc.
func (mock *BackendMock) GetBucketPolicy(contextMoqParam context.Context, bucket string) ([]byte, error) {
if mock.GetBucketPolicyFunc == nil {
@@ -1332,23 +1446,21 @@ func (mock *BackendMock) GetBucketVersioningCalls() []struct {
}

// GetObject calls GetObjectFunc.
func (mock *BackendMock) GetObject(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
func (mock *BackendMock) GetObject(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
if mock.GetObjectFunc == nil {
panic("BackendMock.GetObjectFunc: method is nil but Backend.GetObject was just called")
}
callInfo := struct {
ContextMoqParam context.Context
GetObjectInput *s3.GetObjectInput
Writer io.Writer
}{
ContextMoqParam: contextMoqParam,
GetObjectInput: getObjectInput,
Writer: writer,
}
mock.lockGetObject.Lock()
mock.calls.GetObject = append(mock.calls.GetObject, callInfo)
mock.lockGetObject.Unlock()
return mock.GetObjectFunc(contextMoqParam, getObjectInput, writer)
return mock.GetObjectFunc(contextMoqParam, getObjectInput)
}

// GetObjectCalls gets all the calls that were made to GetObject.
@@ -1358,12 +1470,10 @@ func (mock *BackendMock) GetObject(contextMoqParam context.Context, getObjectInp
func (mock *BackendMock) GetObjectCalls() []struct {
ContextMoqParam context.Context
GetObjectInput *s3.GetObjectInput
Writer io.Writer
} {
var calls []struct {
ContextMoqParam context.Context
GetObjectInput *s3.GetObjectInput
Writer io.Writer
}
mock.lockGetObject.RLock()
calls = mock.calls.GetObject
@@ -1971,6 +2081,46 @@ func (mock *BackendMock) PutBucketAclCalls() []struct {
return calls
}

// PutBucketOwnershipControls calls PutBucketOwnershipControlsFunc.
func (mock *BackendMock) PutBucketOwnershipControls(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error {
if mock.PutBucketOwnershipControlsFunc == nil {
panic("BackendMock.PutBucketOwnershipControlsFunc: method is nil but Backend.PutBucketOwnershipControls was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
Ownership types.ObjectOwnership
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
Ownership: ownership,
}
mock.lockPutBucketOwnershipControls.Lock()
mock.calls.PutBucketOwnershipControls = append(mock.calls.PutBucketOwnershipControls, callInfo)
mock.lockPutBucketOwnershipControls.Unlock()
return mock.PutBucketOwnershipControlsFunc(contextMoqParam, bucket, ownership)
}

// PutBucketOwnershipControlsCalls gets all the calls that were made to PutBucketOwnershipControls.
// Check the length with:
//
// len(mockedBackend.PutBucketOwnershipControlsCalls())
func (mock *BackendMock) PutBucketOwnershipControlsCalls() []struct {
ContextMoqParam context.Context
Bucket string
Ownership types.ObjectOwnership
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
Ownership types.ObjectOwnership
}
mock.lockPutBucketOwnershipControls.RLock()
calls = mock.calls.PutBucketOwnershipControls
mock.lockPutBucketOwnershipControls.RUnlock()
return calls
}

// PutBucketPolicy calls PutBucketPolicyFunc.
func (mock *BackendMock) PutBucketPolicy(contextMoqParam context.Context, bucket string, policy []byte) error {
if mock.PutBucketPolicyFunc == nil {

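Note: the moq-generated call records above can be asserted directly in tests; a minimal hedged sketch (test name and expectations are illustrative, imports such as "testing" and "context" come from the surrounding test file):

func TestChangeBucketOwnerRecordsCalls(t *testing.T) {
	mock := &BackendMock{
		ChangeBucketOwnerFunc: func(ctx context.Context, bucket string, acl []byte) error { return nil },
	}
	// exercise the mock as a controller would
	_ = mock.ChangeBucketOwner(context.Background(), "my-bucket", []byte("{}"))
	if calls := mock.ChangeBucketOwnerCalls(); len(calls) != 1 || calls[0].Bucket != "my-bucket" {
		t.Errorf("unexpected ChangeBucketOwner calls: %+v", calls)
	}
}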
@@ -50,7 +50,8 @@ type S3ApiController struct {
}

const (
iso8601Format = "20060102T150405Z"
iso8601Format = "20060102T150405Z"
defaultContentType = "binary/octet-stream"
)

func New(be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender, mm *metrics.Manager, debug bool, readonly bool) S3ApiController {
@@ -91,6 +92,10 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
if keyEnd != "" {
key = strings.Join([]string{key, keyEnd}, "/")
}
path := ctx.Path()
if path[len(path)-1:] == "/" && key[len(key)-1:] != "/" {
key = key + "/"
}

if ctx.Request().URI().QueryArgs().Has("tagging") {
err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
@@ -402,7 +407,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
Key: &key,
Range: &acceptRange,
VersionId: &versionId,
}, ctx.Response().BodyWriter())
})
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{
@@ -412,15 +417,6 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
BucketOwner: parsedAcl.Owner,
})
}
if res == nil {
return SendResponse(ctx, fmt.Errorf("get object nil response"),
&MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
Action: metrics.ActionGetObject,
BucketOwner: parsedAcl.Owner,
})
}

utils.SetMetaHeaders(ctx, res.Metadata)
var lastmod string
@@ -428,14 +424,15 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
lastmod = res.LastModified.Format(timefmt)
}

contentType := getstring(res.ContentType)
if contentType == "" {
contentType = defaultContentType
}

utils.SetResponseHeaders(ctx, []utils.CustomHeader{
{
Key: "Content-Length",
Value: fmt.Sprint(getint64(res.ContentLength)),
},
{
Key: "Content-Type",
Value: getstring(res.ContentType),
Value: contentType,
},
{
Key: "Content-Encoding",
@@ -477,6 +474,10 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
status = http.StatusPartialContent
}

if res.Body != nil {
ctx.Response().SetBodyStream(res.Body, int(getint64(res.ContentLength)))
}

return SendResponse(ctx, nil,
&MetaOpts{
Logger: c.logger,
@@ -566,6 +567,43 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
})
}

if ctx.Request().URI().QueryArgs().Has("ownershipControls") {
err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
Readonly: c.readonly,
Acl: parsedAcl,
AclPermission: types.PermissionRead,
IsRoot: isRoot,
Acc: acct,
Bucket: bucket,
Action: auth.GetBucketOwnershipControlsAction,
})
if err != nil {
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
Action: metrics.ActionGetBucketOwnershipControls,
BucketOwner: parsedAcl.Owner,
})
}

data, err := c.be.GetBucketOwnershipControls(ctx.Context(), bucket)
return SendXMLResponse(ctx,
s3response.OwnershipControls{
Rules: []types.OwnershipControlsRule{
{
ObjectOwnership: data,
},
},
}, err,
&MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
Action: metrics.ActionGetBucketOwnershipControls,
BucketOwner: parsedAcl.Owner,
})
}

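Note: when the backend reports BucketOwnerEnforced, the XML marshaled from s3response.OwnershipControls here has the same shape as the ownershipBody fixture used in the controller tests further down; exact element and namespace naming depends on the struct tags of s3response.OwnershipControls, which are not part of this diff, so treat this as an illustrative response body:

<OwnershipControls xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Rule>
    <ObjectOwnership>BucketOwnerEnforced</ObjectOwnership>
  </Rule>
</OwnershipControls>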
if ctx.Request().URI().QueryArgs().Has("versioning") {
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
@@ -933,6 +971,9 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
grantReadACP := ctx.Get("X-Amz-Grant-Read-Acp")
|
||||
granWrite := ctx.Get("X-Amz-Grant-Write")
|
||||
grantWriteACP := ctx.Get("X-Amz-Grant-Write-Acp")
|
||||
objectOwnership := types.ObjectOwnership(
|
||||
ctx.Get("X-Amz-Object-Ownership", string(types.ObjectOwnershipBucketOwnerEnforced)),
|
||||
)
|
||||
mfa := ctx.Get("X-Amz-Mfa")
|
||||
contentMD5 := ctx.Get("Content-MD5")
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
@@ -1000,6 +1041,57 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
})
|
||||
}
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("ownershipControls") {
|
||||
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
|
||||
var ownershipControls s3response.OwnershipControls
|
||||
if err := xml.Unmarshal(ctx.Body(), &ownershipControls); err != nil {
|
||||
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionPutBucketOwnershipControls,
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
})
|
||||
}
|
||||
|
||||
if len(ownershipControls.Rules) != 1 || !utils.IsValidOwnership(ownershipControls.Rules[0].ObjectOwnership) {
|
||||
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionPutBucketOwnershipControls,
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
})
|
||||
}
|
||||
|
||||
if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
AclPermission: types.PermissionWrite,
|
||||
IsRoot: isRoot,
|
||||
Acc: acct,
|
||||
Bucket: bucket,
|
||||
Action: auth.PutBucketOwnershipControlsAction,
|
||||
}); err != nil {
|
||||
return SendResponse(ctx, err,
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionPutBucketOwnershipControls,
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
})
|
||||
}
|
||||
|
||||
err := c.be.PutBucketOwnershipControls(ctx.Context(), bucket, ownershipControls.Rules[0].ObjectOwnership)
|
||||
return SendResponse(ctx, err,
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionPutBucketOwnershipControls,
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
})
|
||||
}
|
||||
|
||||
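Note: a hedged sketch of how a client could exercise this handler through the gateway with the AWS SDK for Go v2; client construction (endpoint override, credentials) is omitted and assumed, and the bucket name is illustrative:

// client is an assumed, preconfigured *s3.Client pointed at the gateway.
_, err := client.PutBucketOwnershipControls(context.TODO(), &s3.PutBucketOwnershipControlsInput{
	Bucket: aws.String("my-bucket"),
	OwnershipControls: &types.OwnershipControls{
		Rules: []types.OwnershipControlsRule{
			{ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced},
		},
	},
})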
if ctx.Request().URI().QueryArgs().Has("versioning") {
|
||||
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
|
||||
@@ -1141,10 +1233,33 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
grants := grantFullControl + grantRead + grantReadACP + granWrite + grantWriteACP
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("acl") {
|
||||
var input *s3.PutBucketAclInput
|
||||
|
||||
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be,
|
||||
var input *auth.PutBucketAclInput
|
||||
|
||||
ownership, err := c.be.GetBucketOwnershipControls(ctx.Context(), bucket)
|
||||
if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrOwnershipControlsNotFound)) {
|
||||
return SendResponse(ctx, err,
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionPutBucketAcl,
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
})
|
||||
}
|
||||
if ownership == types.ObjectOwnershipBucketOwnerEnforced {
|
||||
if c.debug {
|
||||
log.Println("bucket acls are disabled")
|
||||
}
|
||||
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAclNotSupported),
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionPutBucketAcl,
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
})
|
||||
}
|
||||
|
||||
err = auth.VerifyAccess(ctx.Context(), c.be,
|
||||
auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
@@ -1194,13 +1309,10 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
})
|
||||
}
|
||||
|
||||
input = &s3.PutBucketAclInput{
|
||||
Bucket: &bucket,
|
||||
ACL: "",
|
||||
AccessControlPolicy: &types.AccessControlPolicy{
|
||||
Owner: &accessControlPolicy.Owner,
|
||||
Grants: accessControlPolicy.AccessControlList.Grants,
|
||||
},
|
||||
input = &auth.PutBucketAclInput{
|
||||
Bucket: &bucket,
|
||||
ACL: "",
|
||||
AccessControlPolicy: &accessControlPolicy,
|
||||
}
|
||||
}
|
||||
if acl != "" {
|
||||
@@ -1232,26 +1344,26 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
})
|
||||
}
|
||||
|
||||
input = &s3.PutBucketAclInput{
|
||||
input = &auth.PutBucketAclInput{
|
||||
Bucket: &bucket,
|
||||
ACL: types.BucketCannedACL(acl),
|
||||
AccessControlPolicy: &types.AccessControlPolicy{
|
||||
Owner: &types.Owner{
|
||||
AccessControlPolicy: &auth.AccessControlPolicy{
|
||||
Owner: types.Owner{
|
||||
ID: &acct.Access,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
if grants != "" {
|
||||
input = &s3.PutBucketAclInput{
|
||||
input = &auth.PutBucketAclInput{
|
||||
Bucket: &bucket,
|
||||
GrantFullControl: &grantFullControl,
|
||||
GrantRead: &grantRead,
|
||||
GrantReadACP: &grantReadACP,
|
||||
GrantWrite: &granWrite,
|
||||
GrantWriteACP: &grantWriteACP,
|
||||
AccessControlPolicy: &types.AccessControlPolicy{
|
||||
Owner: &types.Owner{
|
||||
AccessControlPolicy: &auth.AccessControlPolicy{
|
||||
Owner: types.Owner{
|
||||
ID: &acct.Access,
|
||||
},
|
||||
},
|
||||
@@ -1259,8 +1371,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println(*input, parsedAcl)
|
||||
updAcl, err := auth.UpdateACL(input, parsedAcl, c.iam)
|
||||
updAcl, err := auth.UpdateACL(input, parsedAcl, c.iam, acct.Role == auth.RoleAdmin)
|
||||
if err != nil {
|
||||
return SendResponse(ctx, err,
|
||||
&MetaOpts{
|
||||
@@ -1289,16 +1400,45 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
Action: metrics.ActionCreateBucket,
|
||||
})
|
||||
}
|
||||
if ok := utils.IsValidOwnership(objectOwnership); !ok {
|
||||
if c.debug {
|
||||
log.Printf("invalid bucket object ownership: %v", objectOwnership)
|
||||
}
|
||||
return SendResponse(ctx, s3err.APIError{
|
||||
Code: "InvalidArgument",
|
||||
Description: fmt.Sprintf("Invalid x-amz-object-ownership header: %v", objectOwnership),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionCreateBucket,
|
||||
BucketOwner: acct.Access,
|
||||
})
|
||||
}
|
||||
|
||||
if acl+grants != "" && objectOwnership == types.ObjectOwnershipBucketOwnerEnforced {
|
||||
if c.debug {
|
||||
log.Printf("bucket acls are disabled for %v object ownership", objectOwnership)
|
||||
}
|
||||
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidBucketAclWithObjectOwnership),
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionCreateBucket,
|
||||
BucketOwner: acct.Access,
|
||||
})
|
||||
}
|
||||
|
||||
if acl != "" && grants != "" {
|
||||
if c.debug {
|
||||
log.Printf("invalid request: %q (grants) %q (acl)", grants, acl)
|
||||
}
|
||||
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
|
||||
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrBothCannedAndHeaderGrants),
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionPutBucketAcl,
|
||||
Action: metrics.ActionCreateBucket,
|
||||
BucketOwner: acct.Access,
|
||||
})
|
||||
}
|
||||
@@ -1307,17 +1447,18 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
Owner: acct.Access,
|
||||
}
|
||||
|
||||
updAcl, err := auth.UpdateACL(&s3.PutBucketAclInput{
|
||||
updAcl, err := auth.UpdateACL(&auth.PutBucketAclInput{
|
||||
GrantFullControl: &grantFullControl,
|
||||
GrantRead: &grantRead,
|
||||
GrantReadACP: &grantReadACP,
|
||||
GrantWrite: &granWrite,
|
||||
GrantWriteACP: &grantWriteACP,
|
||||
AccessControlPolicy: &types.AccessControlPolicy{Owner: &types.Owner{
|
||||
ID: &acct.Access,
|
||||
}},
|
||||
AccessControlPolicy: &auth.AccessControlPolicy{
|
||||
Owner: types.Owner{
|
||||
ID: &acct.Access,
|
||||
}},
|
||||
ACL: types.BucketCannedACL(acl),
|
||||
}, defACL, c.iam)
|
||||
}, defACL, c.iam, acct.Role == auth.RoleAdmin)
|
||||
if err != nil {
|
||||
return SendResponse(ctx, err,
|
||||
&MetaOpts{
|
||||
@@ -1334,7 +1475,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
|
||||
err = c.be.CreateBucket(ctx.Context(), &s3.CreateBucketInput{
|
||||
Bucket: &bucket,
|
||||
ObjectOwnership: types.ObjectOwnership(acct.Access),
|
||||
ObjectOwnership: objectOwnership,
|
||||
ObjectLockEnabledForBucket: &lockEnabled,
|
||||
}, updAcl)
|
||||
return SendResponse(ctx, err,
|
||||
@@ -1479,7 +1620,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
bypassHdr := ctx.Get("X-Amz-Bypass-Governance-Retention")
|
||||
bypass := bypassHdr == "true"
|
||||
bypass := strings.EqualFold(bypassHdr, "true")
|
||||
if bypass {
|
||||
policy, err := c.be.GetBucketPolicy(ctx.Context(), bucket)
|
||||
if err != nil {
|
||||
@@ -1734,13 +1875,26 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
|
||||
})
|
||||
}
|
||||
|
||||
//TODO: This part will be changed when object acls are implemented
|
||||
|
||||
grants := []types.Grant{}
|
||||
for _, grt := range accessControlPolicy.AccessControlList.Grants {
|
||||
grants = append(grants, types.Grant{
|
||||
Grantee: &types.Grantee{
|
||||
ID: &grt.Grantee.ID,
|
||||
Type: grt.Grantee.Type,
|
||||
},
|
||||
Permission: grt.Permission,
|
||||
})
|
||||
}
|
||||
|
||||
input = &s3.PutObjectAclInput{
|
||||
Bucket: &bucket,
|
||||
Key: &keyStart,
|
||||
ACL: "",
|
||||
AccessControlPolicy: &types.AccessControlPolicy{
|
||||
Owner: &accessControlPolicy.Owner,
|
||||
Grants: accessControlPolicy.AccessControlList.Grants,
|
||||
Grants: grants,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -2042,6 +2196,38 @@ func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
|
||||
})
|
||||
}
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("ownershipControls") {
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be,
|
||||
auth.AccessOptions{
|
||||
Readonly: c.readonly,
|
||||
Acl: parsedAcl,
|
||||
AclPermission: types.PermissionWrite,
|
||||
IsRoot: isRoot,
|
||||
Acc: acct,
|
||||
Bucket: bucket,
|
||||
Action: auth.PutBucketOwnershipControlsAction,
|
||||
})
|
||||
if err != nil {
|
||||
return SendResponse(ctx, err,
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionDeleteBucketOwnershipControls,
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
})
|
||||
}
|
||||
|
||||
err = c.be.DeleteBucketOwnershipControls(ctx.Context(), bucket)
|
||||
return SendResponse(ctx, err,
|
||||
&MetaOpts{
|
||||
Logger: c.logger,
|
||||
MetricsMng: c.mm,
|
||||
Action: metrics.ActionDeleteBucketOwnershipControls,
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
Status: http.StatusNoContent,
|
||||
})
|
||||
}
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("policy") {
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be,
|
||||
auth.AccessOptions{
|
||||
@@ -2113,7 +2299,7 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
isRoot := ctx.Locals("isRoot").(bool)
|
||||
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
|
||||
bypass := ctx.Get("X-Amz-Bypass-Governance-Retention")
|
||||
bypassHdr := ctx.Get("X-Amz-Bypass-Governance-Retention")
|
||||
var dObj s3response.DeleteObjects
|
||||
|
||||
err := xml.Unmarshal(ctx.Body(), &dObj)
|
||||
@@ -2150,7 +2336,10 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
|
||||
})
|
||||
}
|
||||
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, utils.ParseDeleteObjects(dObj.Objects), bypass == "true", c.be)
|
||||
// The AWS CLI sends 'True', while Go SDK sends 'true'
|
||||
bypass := strings.EqualFold(bypassHdr, "true")
|
||||
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, utils.ParseDeleteObjects(dObj.Objects), bypass, c.be)
|
||||
if err != nil {
|
||||
return SendResponse(ctx, err,
|
||||
&MetaOpts{
|
||||
@@ -2189,11 +2378,15 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
isRoot := ctx.Locals("isRoot").(bool)
|
||||
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
|
||||
bypass := ctx.Get("X-Amz-Bypass-Governance-Retention")
|
||||
bypassHdr := ctx.Get("X-Amz-Bypass-Governance-Retention")
|
||||
|
||||
if keyEnd != "" {
|
||||
key = strings.Join([]string{key, keyEnd}, "/")
|
||||
}
|
||||
path := ctx.Path()
|
||||
if path[len(path)-1:] == "/" && key[len(key)-1:] != "/" {
|
||||
key = key + "/"
|
||||
}
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("tagging") {
|
||||
err := auth.VerifyAccess(ctx.Context(), c.be,
|
||||
@@ -2294,7 +2487,10 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
|
||||
})
|
||||
}
|
||||
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []string{key}, bypass == "true", c.be)
|
||||
// The AWS CLI sends 'True', while Go SDK sends 'true'
|
||||
bypass := strings.EqualFold(bypassHdr, "true")
|
||||
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []string{key}, bypass, c.be)
|
||||
if err != nil {
|
||||
return SendResponse(ctx, err,
|
||||
&MetaOpts{
|
||||
@@ -2389,6 +2585,10 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
|
||||
if keyEnd != "" {
|
||||
key = strings.Join([]string{key, keyEnd}, "/")
|
||||
}
|
||||
path := ctx.Path()
|
||||
if path[len(path)-1:] == "/" && key[len(key)-1:] != "/" {
|
||||
key = key + "/"
|
||||
}
|
||||
|
||||
var partNumber *int32
|
||||
if ctx.Request().URI().QueryArgs().Has("partNumber") {
|
||||
@@ -2511,12 +2711,16 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
|
||||
Value: getstring(res.ContentEncoding),
|
||||
})
|
||||
}
|
||||
if res.ContentType != nil {
|
||||
headers = append(headers, utils.CustomHeader{
|
||||
Key: "Content-Type",
|
||||
Value: getstring(res.ContentType),
|
||||
})
|
||||
|
||||
contentType := getstring(res.ContentType)
|
||||
if contentType == "" {
|
||||
contentType = defaultContentType
|
||||
}
|
||||
headers = append(headers, utils.CustomHeader{
|
||||
Key: "Content-Type",
|
||||
Value: contentType,
|
||||
})
|
||||
|
||||
utils.SetResponseHeaders(ctx, headers)
|
||||
|
||||
return SendResponse(ctx, nil,
|
||||
|
||||
@@ -19,7 +19,6 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"reflect"
@@ -191,7 +190,7 @@ func TestS3ApiController_GetActions(t *testing.T) {
GetObjectAttributesFunc: func(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
return s3response.GetObjectAttributesResult{}, nil
},
GetObjectFunc: func(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error) {
GetObjectFunc: func(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
return &s3.GetObjectOutput{
Metadata: map[string]string{"hello": "world"},
ContentType: getPtr("application/xml"),
@@ -395,6 +394,9 @@ func TestS3ApiController_ListActions(t *testing.T) {
GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
return objectLockResult, nil
},
GetBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error) {
return types.ObjectOwnershipBucketOwnerEnforced, nil
},
},
}

@@ -448,6 +450,15 @@ func TestS3ApiController_ListActions(t *testing.T) {
wantErr: false,
statusCode: 404,
},
{
name: "Get-bucket-ownership-control-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodGet, "/my-bucket?ownershipControls", nil),
},
wantErr: false,
statusCode: 200,
},
{
name: "Get-bucket-tagging-success",
app: app,
@@ -562,7 +573,7 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
app := fiber.New()

// Mock valid acl
acl := auth.ACL{Owner: "valid access", ACL: "public-read-write"}
acl := auth.ACL{Owner: "valid access"}
acldata, err := json.Marshal(acl)
if err != nil {
t.Errorf("Failed to parse the params: %v", err.Error())
@@ -636,6 +647,22 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
</ObjectLockConfiguration>
`

ownershipBody := `
<OwnershipControls xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Rule>
<ObjectOwnership>BucketOwnerEnforced</ObjectOwnership>
</Rule>
</OwnershipControls>
`

invalidOwnershipBody := `
<OwnershipControls xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Rule>
<ObjectOwnership>invalid_value</ObjectOwnership>
</Rule>
</OwnershipControls>
`

s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
@@ -659,6 +686,12 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
PutObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string, config []byte) error {
return nil
},
PutBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error {
return nil
},
GetBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error) {
return types.ObjectOwnershipBucketOwnerPreferred, nil
},
},
}
// Mock ctx.Locals
@@ -691,6 +724,9 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
errAclBodyReq := httptest.NewRequest(http.MethodPut, "/my-bucket?acl", strings.NewReader(body))
errAclBodyReq.Header.Set("X-Amz-Grant-Read", "hello")

invAclOwnershipReq := httptest.NewRequest(http.MethodPut, "/my-bucket", nil)
invAclOwnershipReq.Header.Set("X-Amz-Grant-Read", "hello")

tests := []struct {
name string
app *fiber.App
@@ -716,6 +752,24 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Put-bucket-ownership-controls-invalid-ownership",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/my-bucket?ownershipControls", strings.NewReader(invalidOwnershipBody)),
},
wantErr: false,
statusCode: 400,
},
{
name: "Put-bucket-ownership-controls-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/my-bucket?ownershipControls", strings.NewReader(ownershipBody)),
},
wantErr: false,
statusCode: 200,
},
{
name: "Put-object-lock-configuration-invalid-body",
app: app,
@@ -816,7 +870,16 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
statusCode: 200,
},
{
name: "Put-bucket-invalid-bucket-name",
name: "Create-bucket-invalid-acl-ownership-combination",
app: app,
args: args{
req: invAclOwnershipReq,
},
wantErr: false,
statusCode: 400,
},
{
name: "Create-bucket-invalid-bucket-name",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/aa", nil),
@@ -825,7 +888,7 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
statusCode: 400,
},
{
name: "Put-bucket-success",
name: "Create-bucket-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/my-bucket", nil),
@@ -1160,6 +1223,12 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
DeleteBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) error {
return nil
},
DeleteBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) error {
return nil
},
DeleteBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) error {
return nil
},
},
}

@@ -1198,6 +1267,23 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
wantErr: false,
statusCode: 204,
},
{
name: "Delete-bucket-ownership-controls-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodDelete, "/my-bucket?ownershipControls", nil),
},
wantErr: false,
statusCode: 204,
}, {
name: "Delete-bucket-policy-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodDelete, "/my-bucket?policy", nil),
},
wantErr: false,
statusCode: 204,
},
}
for _, tt := range tests {
resp, err := tt.app.Test(tt.args.req)

@@ -50,7 +50,8 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fibe
!ctx.Request().URI().QueryArgs().Has("tagging") &&
!ctx.Request().URI().QueryArgs().Has("versioning") &&
!ctx.Request().URI().QueryArgs().Has("policy") &&
!ctx.Request().URI().QueryArgs().Has("object-lock") {
!ctx.Request().URI().QueryArgs().Has("object-lock") &&
!ctx.Request().URI().QueryArgs().Has("ownershipControls") {
if err := auth.MayCreateBucket(acct, isRoot); err != nil {
return controllers.SendXMLResponse(ctx, nil, err, &controllers.MetaOpts{Logger: logger, Action: "CreateBucket"})
}

@@ -28,11 +28,11 @@ type S3ApiRouter struct {
WithAdmSrv bool
}

func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender, mm *metrics.Manager, debug bool, readonly bool) {
func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, aLogger s3log.AuditLogger, evs s3event.S3EventSender, mm *metrics.Manager, debug bool, readonly bool) {
s3ApiController := controllers.New(be, iam, logger, evs, mm, debug, readonly)

if sa.WithAdmSrv {
adminController := controllers.NewAdminController(iam, be)
adminController := controllers.NewAdminController(iam, be, aLogger)

// CreateUser admin api
app.Patch("/create-user", adminController.CreateUser)

@@ -45,7 +45,7 @@ func TestS3ApiRouter_Init(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam, nil, nil, nil, false, false)
tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam, nil, nil, nil, nil, false, false)
})
}
}

@@ -47,6 +47,7 @@ func New(
port, region string,
iam auth.IAMService,
l s3log.AuditLogger,
adminLogger s3log.AuditLogger,
evs s3event.S3EventSender,
mm *metrics.Manager,
opts ...Option,
@@ -64,7 +65,9 @@ func New(

// Logging middlewares
if !server.quiet {
app.Use(logger.New())
app.Use(logger.New(logger.Config{
Format: "${time} | ${status} | ${latency} | ${ip} | ${method} | ${path} | ${error} | ${queryParams}\n",
}))
}
// Set up health endpoint if specified
if server.health != "" {
@@ -82,7 +85,7 @@ func New(
app.Use(middlewares.VerifyMD5Body(l))
app.Use(middlewares.AclParser(be, l, server.readonly))

server.router.Init(app, be, iam, l, evs, mm, server.debug, server.readonly)
server.router.Init(app, be, iam, l, adminLogger, evs, mm, server.debug, server.readonly)

return server, nil
}

@@ -64,7 +64,7 @@ func TestNew(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotS3ApiServer, err := New(tt.args.app, tt.args.be, tt.args.root,
tt.args.port, "us-east-1", &auth.IAMServiceInternal{}, nil, nil, nil)
tt.args.port, "us-east-1", &auth.IAMServiceInternal{}, nil, nil, nil, nil)
if (err != nil) != tt.wantErr {
t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
return

@@ -326,3 +326,16 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
LegalHoldStatus: legalHold,
}, nil
}

func IsValidOwnership(val types.ObjectOwnership) bool {
switch val {
case types.ObjectOwnershipBucketOwnerEnforced:
return true
case types.ObjectOwnershipBucketOwnerPreferred:
return true
case types.ObjectOwnershipObjectWriter:
return true
default:
return false
}
}

@@ -335,3 +335,50 @@ func TestFilterObjectAttributes(t *testing.T) {
})
}
}

func TestIsValidOwnership(t *testing.T) {
type args struct {
val types.ObjectOwnership
}
tests := []struct {
name string
args args
want bool
}{
{
name: "valid-BucketOwnerEnforced",
args: args{
val: types.ObjectOwnershipBucketOwnerEnforced,
},
want: true,
},
{
name: "valid-BucketOwnerPreferred",
args: args{
val: types.ObjectOwnershipBucketOwnerPreferred,
},
want: true,
},
{
name: "valid-ObjectWriter",
args: args{
val: types.ObjectOwnershipObjectWriter,
},
want: true,
},
{
name: "invalid_value",
args: args{
val: types.ObjectOwnership("invalid_value"),
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := IsValidOwnership(tt.args.val); got != tt.want {
t.Errorf("IsValidOwnership() = %v, want %v", got, tt.want)
}
})
}
}

@@ -123,6 +123,10 @@ const (
ErrBucketTaggingNotFound
ErrObjectLockInvalidHeaders
ErrRequestTimeTooSkewed
ErrInvalidBucketAclWithObjectOwnership
ErrBothCannedAndHeaderGrants
ErrOwnershipControlsNotFound
ErrAclNotSupported

// Non-AWS errors
ErrExistingObjectIsDirectory
@@ -472,6 +476,26 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The difference between the request time and the server's time is too large.",
HTTPStatusCode: http.StatusForbidden,
},
ErrInvalidBucketAclWithObjectOwnership: {
Code: "ErrInvalidBucketAclWithObjectOwnership",
Description: "Bucket cannot have ACLs set with ObjectOwnership's BucketOwnerEnforced setting",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBothCannedAndHeaderGrants: {
Code: "InvalidRequest",
Description: "Specifying both Canned ACLs and Header Grants is not allowed",
HTTPStatusCode: http.StatusBadRequest,
},
ErrOwnershipControlsNotFound: {
Code: "OwnershipControlsNotFoundError",
Description: "The bucket ownership controls were not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrAclNotSupported: {
Code: "AccessControlListNotSupported",
Description: "The bucket does not allow ACLs",
HTTPStatusCode: http.StatusBadRequest,
},

// non aws errors
ErrExistingObjectIsDirectory: {

@@ -35,11 +35,13 @@ type LogMeta struct {
BucketOwner string
ObjectSize int64
Action string
HttpStatus int
}

type LogConfig struct {
LogFile string
WebhookURL string
LogFile string
WebhookURL string
AdminLogFile string
}

type LogFields struct {
@@ -71,18 +73,66 @@ type LogFields struct {
AclRequired string
}

func InitLogger(cfg *LogConfig) (AuditLogger, error) {
type AdminLogFields struct {
Time time.Time
RemoteIP string
Requester string
RequestID string
Operation string
RequestURI string
HttpStatus int
ErrorCode string
BytesSent int
TotalTime int64
TurnAroundTime int64
Referer string
UserAgent string
SignatureVersion string
CipherSuite string
AuthenticationType string
TLSVersion string
}

type Loggers struct {
S3Logger AuditLogger
AdminLogger AuditLogger
}

func InitLogger(cfg *LogConfig) (*Loggers, error) {
if cfg.WebhookURL != "" && cfg.LogFile != "" {
return nil, fmt.Errorf("there should be specified one of the following: file, webhook")
}
if cfg.WebhookURL != "" {
return InitWebhookLogger(cfg.WebhookURL)
}
if cfg.LogFile != "" {
return InitFileLogger(cfg.LogFile)
loggers := new(Loggers)

switch {
case cfg.WebhookURL != "":
fmt.Printf("initializing S3 access logs with '%v' webhook url\n", cfg.WebhookURL)
l, err := InitWebhookLogger(cfg.WebhookURL)
if err != nil {
return nil, err
}
loggers.S3Logger = l
case cfg.LogFile != "":
fmt.Printf("initializing S3 access logs with '%v' file\n", cfg.LogFile)
l, err := InitFileLogger(cfg.LogFile)
if err != nil {
return nil, err
}

loggers.S3Logger = l
}

return nil, nil
if cfg.AdminLogFile != "" {
fmt.Printf("initializing admin access logs with '%v' file\n", cfg.AdminLogFile)
l, err := InitAdminFileLogger(cfg.AdminLogFile)
if err != nil {
return nil, err
}

loggers.AdminLogger = l
}

return loggers, nil
}

func genID() string {

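Note: a hedged usage sketch of the reworked InitLogger; the file paths are illustrative, and the surrounding setup (imports, error handling policy) is assumed rather than taken from this diff:

loggers, err := s3log.InitLogger(&s3log.LogConfig{
	LogFile:      "/var/log/versitygw/access.log", // assumed path
	AdminLogFile: "/var/log/versitygw/admin.log",  // assumed path
})
if err != nil {
	log.Fatal(err)
}
// loggers.S3Logger feeds the S3 API logging middleware,
// loggers.AdminLogger is passed to the admin router as aLogger/adminLogger.
_ = loggers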
151 s3log/file_admin.go (Normal file)
@@ -0,0 +1,151 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3log
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
)
|
||||
|
||||
// AdminFileLogger is a local file audit log for admin API actions
|
||||
type AdminFileLogger struct {
|
||||
FileLogger
|
||||
}
|
||||
|
||||
var _ AuditLogger = &AdminFileLogger{}
|
||||
|
||||
// InitAdminFileLogger initializes admin audit logs to a local file
|
||||
func InitAdminFileLogger(logname string) (AuditLogger, error) {
|
||||
f, err := os.OpenFile(logname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open log: %w", err)
|
||||
}
|
||||
|
||||
f.WriteString(fmt.Sprintf("log starts %v\n", time.Now()))
|
||||
|
||||
return &AdminFileLogger{FileLogger: FileLogger{logfile: logname, f: f}}, nil
|
||||
}
|
||||
|
||||
// Log sends log message to file logger
|
||||
func (f *AdminFileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
if f.gotErr {
|
||||
return
|
||||
}
|
||||
|
||||
lf := AdminLogFields{}
|
||||
|
||||
access := "-"
|
||||
reqURI := ctx.OriginalURL()
|
||||
errorCode := ""
|
||||
startTime := ctx.Locals("startTime").(time.Time)
|
||||
tlsConnState := ctx.Context().TLSConnectionState()
|
||||
if tlsConnState != nil {
|
||||
lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)
|
||||
lf.TLSVersion = getTLSVersionName(tlsConnState.Version)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
errorCode = err.Error()
|
||||
}
|
||||
|
||||
switch ctx.Locals("account").(type) {
|
||||
case auth.Account:
|
||||
access = ctx.Locals("account").(auth.Account).Access
|
||||
}
|
||||
|
||||
lf.Time = time.Now()
|
||||
lf.RemoteIP = ctx.IP()
|
||||
lf.Requester = access
|
||||
lf.RequestID = genID()
|
||||
lf.Operation = meta.Action
|
||||
lf.RequestURI = reqURI
|
||||
lf.HttpStatus = meta.HttpStatus
|
||||
lf.ErrorCode = errorCode
|
||||
lf.BytesSent = len(body)
|
||||
lf.TotalTime = time.Since(startTime).Milliseconds()
|
||||
lf.TurnAroundTime = time.Since(startTime).Milliseconds()
|
||||
lf.Referer = ctx.Get("Referer")
|
||||
lf.UserAgent = ctx.Get("User-Agent")
|
||||
lf.SignatureVersion = "SigV4"
|
||||
lf.AuthenticationType = "AuthHeader"
|
||||
|
||||
f.writeLog(lf)
|
||||
}
|
||||
|
||||
func (f *AdminFileLogger) writeLog(lf AdminLogFields) {
|
||||
if lf.RemoteIP == "" {
|
||||
lf.RemoteIP = "-"
|
||||
}
|
||||
if lf.Requester == "" {
|
||||
lf.Requester = "-"
|
||||
}
|
||||
if lf.Operation == "" {
|
||||
lf.Operation = "-"
|
||||
}
|
||||
if lf.RequestURI == "" {
|
||||
lf.RequestURI = "-"
|
||||
}
|
||||
if lf.ErrorCode == "" {
|
||||
lf.ErrorCode = "-"
|
||||
}
|
||||
if lf.Referer == "" {
|
||||
lf.Referer = "-"
|
||||
}
|
||||
if lf.UserAgent == "" {
|
||||
lf.UserAgent = "-"
|
||||
}
|
||||
if lf.CipherSuite == "" {
|
||||
lf.CipherSuite = "-"
|
||||
}
|
||||
if lf.TLSVersion == "" {
|
||||
lf.TLSVersion = "-"
|
||||
}
|
||||
|
||||
log := fmt.Sprintf("%v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v\n",
|
||||
fmt.Sprintf("[%v]", lf.Time.Format(timeFormat)),
|
||||
lf.RemoteIP,
|
||||
lf.Requester,
|
||||
lf.RequestID,
|
||||
lf.Operation,
|
||||
lf.RequestURI,
|
||||
lf.HttpStatus,
|
||||
lf.ErrorCode,
|
||||
lf.BytesSent,
|
||||
lf.TotalTime,
|
||||
lf.TurnAroundTime,
|
||||
lf.Referer,
|
||||
lf.UserAgent,
|
||||
lf.SignatureVersion,
|
||||
lf.CipherSuite,
|
||||
lf.AuthenticationType,
|
||||
lf.TLSVersion,
|
||||
)
|
||||
|
||||
_, err := f.f.WriteString(log)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error writing to log file: %v\n", err)
|
||||
// TODO: do we need to terminate on log error?
|
||||
// set err for now so that we don't spew errors
|
||||
f.gotErr = true
|
||||
}
|
||||
}
|
||||
@@ -217,3 +217,7 @@ type Grantee struct {
|
||||
ID string
|
||||
DisplayName string
|
||||
}
|
||||
|
||||
type OwnershipControls struct {
|
||||
Rules []types.OwnershipControlsRule `xml:"Rule"`
|
||||
}
|
||||
|
||||
@@ -1,14 +1,27 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
abort_multipart_upload() {
|
||||
record_command "abort-multipart-upload" "client:s3api"
|
||||
if [ $# -ne 3 ]; then
|
||||
echo "command to run abort requires bucket, key, upload ID"
|
||||
log 2 "'abort multipart upload' command requires bucket, key, upload ID"
|
||||
return 1
|
||||
fi
|
||||
if ! error=$(aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" 2>&1); then
|
||||
log 2 "Error aborting upload: $error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
error=$(aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3") || local aborted=$?
|
||||
if [[ $aborted -ne 0 ]]; then
|
||||
echo "Error aborting upload: $error"
|
||||
abort_multipart_upload_with_user() {
|
||||
if [ $# -ne 5 ]; then
|
||||
log 2 "'abort multipart upload' command requires bucket, key, upload ID, username, password"
|
||||
return 1
|
||||
fi
|
||||
record_command "abort-multipart-upload" "client:s3api"
|
||||
if ! abort_multipart_upload_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" 2>&1); then
|
||||
log 2 "Error aborting upload: $abort_multipart_upload_error"
|
||||
export abort_multipart_upload_error
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
|
||||
@@ -6,6 +6,7 @@ complete_multipart_upload() {
|
||||
return 1
|
||||
fi
|
||||
log 5 "complete multipart upload id: $3, parts: $4"
|
||||
record_command "complete-multipart-upload" "client:s3api"
|
||||
error=$(aws --no-verify-ssl s3api complete-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" --multipart-upload '{"Parts": '"$4"'}' 2>&1) || local completed=$?
|
||||
if [[ $completed -ne 0 ]]; then
|
||||
log 2 "error completing multipart upload: $error"
|
||||
|
||||
@@ -7,6 +7,7 @@ copy_object() {
|
||||
fi
|
||||
local exit_code=0
|
||||
local error
|
||||
record_command "copy-object" "client:$1"
|
||||
if [[ $1 == 's3' ]]; then
|
||||
error=$(aws --no-verify-ssl s3 cp "$2" s3://"$3/$4" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
|
||||
@@ -29,6 +30,7 @@ copy_object() {
|
||||
}
|
||||
|
||||
copy_object_empty() {
|
||||
record-command "copy-object" "client:s3api"
|
||||
error=$(aws --no-verify-ssl s3api copy-object 2>&1) || local result=$?
|
||||
if [[ $result -eq 0 ]]; then
|
||||
log 2 "copy object with empty parameters returned no error"
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
source ./tests/report.sh
|
||||
|
||||
# create an AWS bucket
|
||||
# param: bucket name
|
||||
# return 0 for success, 1 for failure
|
||||
@@ -9,6 +11,7 @@ create_bucket() {
|
||||
return 1
|
||||
fi
|
||||
|
||||
record_command "create-bucket" "client:$1"
|
||||
local exit_code=0
|
||||
local error
|
||||
log 6 "create bucket"
|
||||
@@ -32,7 +35,32 @@ create_bucket() {
|
||||
return 0
|
||||
}
|
||||
|
||||
create_bucket_with_user() {
|
||||
if [ $# -ne 4 ]; then
|
||||
log 2 "create bucket missing command type, bucket name, access, secret"
|
||||
return 1
|
||||
fi
|
||||
local exit_code=0
|
||||
if [[ $1 == "aws" ]] || [[ $1 == "s3api" ]]; then
|
||||
error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == "s3cmd" ]]; then
|
||||
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb --access_key="$3" --secret_key="$4" s3://"$2" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == "mc" ]]; then
|
||||
error=$(mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
|
||||
else
|
||||
log 2 "invalid command type $1"
|
||||
return 1
|
||||
fi
|
||||
if [ $exit_code -ne 0 ]; then
|
||||
log 2 "error creating bucket: $error"
|
||||
export error
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
create_bucket_object_lock_enabled() {
|
||||
record_command "create-bucket" "client:s3api"
|
||||
if [ $# -ne 1 ]; then
|
||||
log 2 "create bucket missing bucket name"
|
||||
return 1
|
||||
|
||||
@@ -4,24 +4,49 @@
|
||||
# params: bucket, key
|
||||
# return 0 for success, 1 for failure
|
||||
create_multipart_upload() {
|
||||
record_command "create-multipart-upload" "client:s3api"
|
||||
if [ $# -ne 2 ]; then
|
||||
log 2 "create multipart upload function must have bucket, key"
|
||||
return 1
|
||||
fi
|
||||
|
||||
local multipart_data
|
||||
multipart_data=$(aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2") || local created=$?
|
||||
if [[ $created -ne 0 ]]; then
|
||||
log 2 "Error creating multipart upload: $upload_id"
|
||||
if ! multipart_data=$(aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1); then
|
||||
log 2 "Error creating multipart upload: $multipart_data"
|
||||
return 1
|
||||
fi
|
||||
|
||||
upload_id=$(echo "$multipart_data" | jq '.UploadId')
|
||||
if ! upload_id=$(echo "$multipart_data" | grep -v "InsecureRequestWarning" | jq -r '.UploadId' 2>&1); then
|
||||
log 2 "error parsing upload ID: $upload_id"
|
||||
return 1
|
||||
fi
|
||||
upload_id="${upload_id//\"/}"
|
||||
export upload_id
|
||||
return 0
|
||||
}
|
||||
|
||||
create_multipart_upload_with_user() {
|
||||
record_command "create-multipart-upload" "client:s3api"
|
||||
if [ $# -ne 4 ]; then
|
||||
log 2 "create multipart upload function must have bucket, key, username, password"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! multipart_data=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1); then
|
||||
log 2 "Error creating multipart upload: $multipart_data"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! upload_id=$(echo "$multipart_data" | grep -v "InsecureRequestWarning" | jq -r '.UploadId' 2>&1); then
|
||||
log 2 "error parsing upload ID: $upload_id"
|
||||
return 1
|
||||
fi
|
||||
upload_id="${upload_id//\"/}"
|
||||
export upload_id
|
||||
return 0
|
||||
}
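
A minimal usage sketch (not part of this diff): create a multipart upload as a secondary test user, then abort it with the exported upload_id. BUCKET_ONE_NAME comes from the test environment; USERNAME_ONE and PASSWORD_ONE are hypothetical credential variables.

# hypothetical usage sketch; USERNAME_ONE/PASSWORD_ONE are placeholder credentials
if create_multipart_upload_with_user "$BUCKET_ONE_NAME" "demo-key" "$USERNAME_ONE" "$PASSWORD_ONE"; then
  # upload_id is exported by create_multipart_upload_with_user
  abort_multipart_upload "$BUCKET_ONE_NAME" "demo-key" "$upload_id"
fi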
|
||||
|
||||
create_multipart_upload_params() {
|
||||
record_command "create-multipart-upload" "client:s3api"
|
||||
if [ $# -ne 8 ]; then
|
||||
log 2 "create multipart upload function with params must have bucket, key, content type, metadata, object lock legal hold status, " \
|
||||
"object lock mode, object lock retain until date, and tagging"
|
||||
@@ -49,6 +74,7 @@ create_multipart_upload_params() {
|
||||
}
|
||||
|
||||
create_multipart_upload_custom() {
|
||||
record_command "create-multipart-upload" "client:s3api"
|
||||
if [ $# -lt 2 ]; then
|
||||
log 2 "create multipart upload custom function must have at least bucket and key"
|
||||
return 1
|
||||
|
||||
@@ -4,11 +4,17 @@
|
||||
# param: bucket name
|
||||
# return 0 for success, 1 for failure
|
||||
delete_bucket() {
|
||||
record_command "delete-bucket" "client:$1"
|
||||
if [ $# -ne 2 ]; then
|
||||
log 2 "delete bucket missing command type, bucket name"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [[ ( $RECREATE_BUCKETS == "false" ) && (( "$2" == "$BUCKET_ONE_NAME" ) || ( "$2" == "$BUCKET_TWO_NAME" )) ]]; then
|
||||
log 2 "attempt to delete main buckets in static mode"
|
||||
return 1
|
||||
fi
|
||||
|
||||
local exit_code=0
|
||||
local error
|
||||
if [[ $1 == 's3' ]]; then
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
delete_bucket_policy() {
|
||||
record_command "delete-bucket-policy" "client:$1"
|
||||
if [[ $# -ne 2 ]]; then
|
||||
log 2 "delete bucket policy command requires command type, bucket"
|
||||
return 1
|
||||
fi
|
||||
local delete_result=0
|
||||
if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
|
||||
error=$(aws --no-verify-ssl s3api delete-bucket-policy --bucket "$2" 2>&1) || delete_result=$?
|
||||
elif [[ $1 == 's3cmd' ]]; then
|
||||
@@ -20,4 +22,18 @@ delete_bucket_policy() {
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
delete_bucket_policy_with_user() {
|
||||
record_command "delete-bucket-policy" "client:s3api"
|
||||
if [[ $# -ne 3 ]]; then
|
||||
log 2 "'delete bucket policy with user' command requires bucket, username, password"
|
||||
return 1
|
||||
fi
|
||||
if ! delete_bucket_policy_error=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" aws --no-verify-ssl s3api delete-bucket-policy --bucket "$1" 2>&1); then
|
||||
log 2 "error deleting bucket policy: $delete_bucket_policy_error"
|
||||
export delete_bucket_policy_error
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
23 tests/commands/delete_bucket_tagging.sh Normal file
@@ -0,0 +1,23 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
delete_bucket_tagging() {
|
||||
record_command "delete-bucket-tagging" "client:$1"
|
||||
if [ $# -ne 2 ]; then
|
||||
log 2 "delete bucket tagging command missing command type, bucket name"
|
||||
return 1
|
||||
fi
|
||||
local result
|
||||
if [[ $1 == 'aws' ]]; then
|
||||
tags=$(aws --no-verify-ssl s3api delete-bucket-tagging --bucket "$2" 2>&1) || result=$?
|
||||
elif [[ $1 == 'mc' ]]; then
|
||||
tags=$(mc --insecure tag remove "$MC_ALIAS"/"$2" 2>&1) || result=$?
|
||||
else
|
||||
log 2 "invalid command type $1"
|
||||
return 1
|
||||
fi
|
||||
if [[ $result -ne 0 ]]; then
|
||||
log 2 "error deleting bucket tagging: $tags"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
delete_object() {
|
||||
record_command "delete-object" "client:$1"
|
||||
if [ $# -ne 3 ]; then
|
||||
log 2 "delete object command requires command type, bucket, key"
|
||||
return 1
|
||||
@@ -27,7 +28,20 @@ delete_object() {
|
||||
return 0
|
||||
}
|
||||
|
||||
delete_object_bypass_retention() {
|
||||
if [[ $# -ne 4 ]]; then
|
||||
log 2 "'delete-object with bypass retention' requires bucket, key, user, password"
|
||||
return 1
|
||||
fi
|
||||
if ! delete_object_error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --bypass-governance-retention 2>&1); then
|
||||
log 2 "error deleting object with bypass retention: $delete_object_error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
delete_object_with_user() {
|
||||
record_command "delete-object" "client:$1"
|
||||
if [ $# -ne 5 ]; then
|
||||
log 2 "delete object with user command requires command type, bucket, key, access ID, secret key"
|
||||
return 1
|
||||
@@ -36,7 +50,7 @@ delete_object_with_user() {
|
||||
if [[ $1 == 's3' ]]; then
|
||||
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
|
||||
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
|
||||
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" --bypass-governance-retention 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 's3cmd' ]]; then
|
||||
delete_object_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm --access_key="$4" --secret_key="$5" "s3://$2/$3" 2>&1) || exit_code=$?
|
||||
else
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
delete_object_tagging() {
|
||||
record_command "delete-object-tagging" "client:$1"
|
||||
if [[ $# -ne 3 ]]; then
|
||||
echo "delete object tagging command missing command type, bucket, key"
|
||||
return 1
|
||||
|
||||
19 tests/commands/delete_objects.sh Normal file
@@ -0,0 +1,19 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
delete_objects() {
|
||||
record_command "delete-objects" "client:s3api"
|
||||
if [[ $# -ne 3 ]]; then
|
||||
log 2 "'delete-objects' command requires bucket name, two object keys"
|
||||
return 1
|
||||
fi
|
||||
if ! error=$(aws --no-verify-ssl s3api delete-objects --bucket "$1" --delete "{
|
||||
\"Objects\": [
|
||||
{\"Key\": \"$2\"},
|
||||
{\"Key\": \"$3\"}
|
||||
]
|
||||
}" 2>&1); then
|
||||
log 2 "error deleting objects: $error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
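
A short usage sketch (not part of this diff): put two objects, then remove both in one call. $test_file is a placeholder for a local file prepared by the test.

# hypothetical usage sketch; $test_file is a placeholder
put_object "s3api" "$test_file" "$BUCKET_ONE_NAME" "key-one"
put_object "s3api" "$test_file" "$BUCKET_ONE_NAME" "key-two"
delete_objects "$BUCKET_ONE_NAME" "key-one" "key-two"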
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
get_bucket_acl() {
|
||||
record_command "get-bucket-acl" "client:$1"
|
||||
if [ $# -ne 2 ]; then
|
||||
log 2 "bucket ACL command missing command type, bucket name"
|
||||
return 1
|
||||
@@ -18,5 +19,20 @@ get_bucket_acl() {
|
||||
log 2 "Error getting bucket ACLs: $acl"
|
||||
return 1
|
||||
fi
|
||||
acl=$(echo "$acl" | grep -v "InsecureRequestWarning")
|
||||
export acl
|
||||
}
|
||||
|
||||
get_bucket_acl_with_user() {
|
||||
record_command "get-bucket-acl" "client:s3api"
|
||||
if [ $# -ne 3 ]; then
|
||||
log 2 "'get bucket ACL with user' command requires bucket name, username, password"
|
||||
return 1
|
||||
fi
|
||||
if ! bucket_acl=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" aws --no-verify-ssl s3api get-bucket-acl --bucket "$1" 2>&1); then
|
||||
log 2 "error getting bucket ACLs: $bucket_acl"
|
||||
return 1
|
||||
fi
|
||||
export bucket_acl
|
||||
return 0
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
get_bucket_location() {
|
||||
record_command "get-bucket-location" "client:$1"
|
||||
if [[ $# -ne 2 ]]; then
|
||||
echo "get bucket location command requires command type, bucket name"
|
||||
return 1
|
||||
@@ -23,6 +24,7 @@ get_bucket_location() {
|
||||
}
|
||||
|
||||
get_bucket_location_aws() {
|
||||
record_command "get-bucket-location" "client:s3api"
|
||||
if [[ $# -ne 1 ]]; then
|
||||
echo "get bucket location (aws) requires bucket name"
|
||||
return 1
|
||||
@@ -38,6 +40,7 @@ get_bucket_location_aws() {
|
||||
}
|
||||
|
||||
get_bucket_location_s3cmd() {
|
||||
record_command "get-bucket-location" "client:s3cmd"
|
||||
if [[ $# -ne 1 ]]; then
|
||||
echo "get bucket location (s3cmd) requires bucket name"
|
||||
return 1
|
||||
@@ -53,6 +56,7 @@ get_bucket_location_s3cmd() {
|
||||
}
|
||||
|
||||
get_bucket_location_mc() {
|
||||
record_command "get-bucket-location" "client:mc"
|
||||
if [[ $# -ne 1 ]]; then
|
||||
echo "get bucket location (mc) requires bucket name"
|
||||
return 1
|
||||
|
||||
37 tests/commands/get_bucket_ownership_controls.sh Normal file
@@ -0,0 +1,37 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
get_bucket_ownership_controls() {
|
||||
record_command "get-bucket-ownership-controls" "client:s3api"
|
||||
if [[ $# -ne 1 ]]; then
|
||||
log 2 "'get bucket ownership controls' command requires bucket name"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! raw_bucket_ownership_controls=$(aws --no-verify-ssl s3api get-bucket-ownership-controls --bucket "$1" 2>&1); then
|
||||
log 2 "error getting bucket ownership controls: $raw_bucket_ownership_controls"
|
||||
return 1
|
||||
fi
|
||||
|
||||
log 5 "Raw bucket Ownership Controls: $raw_bucket_ownership_controls"
|
||||
bucket_ownership_controls=$(echo "$raw_bucket_ownership_controls" | grep -v "InsecureRequestWarning")
|
||||
export bucket_ownership_controls
|
||||
return 0
|
||||
}
|
||||
|
||||
get_object_ownership_rule() {
|
||||
if [[ $# -ne 1 ]]; then
|
||||
log 2 "'get object ownership rule' command requires bucket name"
|
||||
return 1
|
||||
fi
|
||||
if ! get_bucket_ownership_controls "$1"; then
|
||||
log 2 "error getting bucket ownership controls"
|
||||
return 1
|
||||
fi
|
||||
if ! object_ownership_rule=$(echo "$bucket_ownership_controls" | jq -r ".OwnershipControls.Rules[0].ObjectOwnership" 2>&1); then
|
||||
log 2 "error getting object ownership rule: $object_ownership_rule"
|
||||
return 1
|
||||
fi
|
||||
log 5 "object ownership rule: $object_ownership_rule"
|
||||
export object_ownership_rule
|
||||
return 0
|
||||
}
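
A usage sketch (not part of this diff) pairing this helper with put_bucket_ownership_controls, added further below; "BucketOwnerPreferred" is one of the standard S3 ObjectOwnership values.

# hypothetical usage sketch
put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred"
get_object_ownership_rule "$BUCKET_ONE_NAME"
if [ "$object_ownership_rule" != "BucketOwnerPreferred" ]; then
  log 2 "unexpected ownership rule: $object_ownership_rule"
fi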
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
get_bucket_policy() {
|
||||
record_command "get-bucket-policy" "client:$1"
|
||||
if [[ $# -ne 2 ]]; then
|
||||
log 2 "get bucket policy command requires command type, bucket"
|
||||
return 1
|
||||
@@ -25,6 +26,7 @@ get_bucket_policy() {
|
||||
}
|
||||
|
||||
get_bucket_policy_aws() {
|
||||
record_command "get-bucket-policy" "client:s3api"
|
||||
if [[ $# -ne 1 ]]; then
|
||||
log 2 "aws 'get bucket policy' command requires bucket"
|
||||
return 1
|
||||
@@ -46,29 +48,58 @@ get_bucket_policy_aws() {
|
||||
return 0
|
||||
}
|
||||
|
||||
get_bucket_policy_with_user() {
|
||||
record_command "get-bucket-policy" "client:s3api"
|
||||
if [[ $# -ne 3 ]]; then
|
||||
log 2 "'get bucket policy with user' command requires bucket, username, password"
|
||||
return 1
|
||||
fi
|
||||
if policy_json=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" aws --no-verify-ssl s3api get-bucket-policy --bucket "$1" 2>&1); then
|
||||
policy_json=$(echo "$policy_json" | grep -v "InsecureRequestWarning")
|
||||
bucket_policy=$(echo "$policy_json" | jq -r '.Policy')
|
||||
else
|
||||
if [[ "$policy_json" == *"(NoSuchBucketPolicy)"* ]]; then
|
||||
bucket_policy=
|
||||
else
|
||||
log 2 "error getting policy for user $2: $policy_json"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
export bucket_policy
|
||||
return 0
|
||||
}
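
A usage sketch (not part of this diff): a named user writes a policy and reads it back. $policy_file, USERNAME_ONE, and PASSWORD_ONE are placeholders; put_bucket_policy_with_user is added later in this change set.

# hypothetical usage sketch; $policy_file and the credentials are placeholders
put_bucket_policy_with_user "$BUCKET_ONE_NAME" "$policy_file" "$USERNAME_ONE" "$PASSWORD_ONE"
get_bucket_policy_with_user "$BUCKET_ONE_NAME" "$USERNAME_ONE" "$PASSWORD_ONE"
log 5 "policy as seen by user: $bucket_policy"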
|
||||
|
||||
get_bucket_policy_s3cmd() {
|
||||
record_command "get-bucket-policy" "client:s3cmd"
|
||||
if [[ $# -ne 1 ]]; then
|
||||
echo "s3cmd 'get bucket policy' command requires bucket"
|
||||
log 2 "s3cmd 'get bucket policy' command requires bucket"
|
||||
return 1
|
||||
fi
|
||||
|
||||
info=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$1") || get_result=$?
|
||||
if [[ $get_result -ne 0 ]]; then
|
||||
echo "error getting bucket policy: $info"
|
||||
if ! info=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$1" 2>&1); then
|
||||
log 2 "error getting bucket policy: $info"
|
||||
return 1
|
||||
fi
|
||||
|
||||
log 5 "policy info: $info"
|
||||
bucket_policy=""
|
||||
policy_brackets=false
|
||||
# NOTE: versitygw sends policies back in multiple lines here, direct in single line
|
||||
while IFS= read -r line; do
|
||||
if [[ $policy_brackets == false ]]; then
|
||||
policy_line=$(echo "$line" | grep 'Policy: ')
|
||||
if [[ $policy_line != "" ]]; then
|
||||
if [[ $policy_line != *'{' ]]; then
|
||||
if [[ $policy_line != *'{'* ]]; then
|
||||
break
|
||||
fi
|
||||
policy_brackets=true
|
||||
bucket_policy+="{"
|
||||
if [[ $policy_line == *'}'* ]]; then
|
||||
log 5 "policy on single line"
|
||||
bucket_policy=${policy_line//Policy:/}
|
||||
break
|
||||
else
|
||||
policy_brackets=true
|
||||
bucket_policy+="{"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
bucket_policy+=$line
|
||||
@@ -77,11 +108,13 @@ get_bucket_policy_s3cmd() {
|
||||
fi
|
||||
fi
|
||||
done <<< "$info"
|
||||
log 5 "bucket policy: $bucket_policy"
|
||||
export bucket_policy
|
||||
return 0
|
||||
}
|
||||
|
||||
get_bucket_policy_mc() {
|
||||
record_command "get-bucket-policy" "client:mc"
|
||||
if [[ $# -ne 1 ]]; then
|
||||
echo "aws 'get bucket policy' command requires bucket"
|
||||
return 1
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
# params: bucket
|
||||
# export 'tags' on success, return 1 for error
|
||||
get_bucket_tagging() {
|
||||
record_command "get-bucket-tagging" "client:$1"
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "get bucket tag command missing command type, bucket name"
|
||||
return 1
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
get_bucket_versioning() {
|
||||
record_command "get-bucket-versioning" "client:s3api"
|
||||
if [[ $# -ne 2 ]]; then
|
||||
log 2 "put bucket versioning command requires command type, bucket name"
|
||||
return 1
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
get_object() {
|
||||
record_command "get-object" "client:$1"
|
||||
if [ $# -ne 4 ]; then
|
||||
log 2 "get object command requires command type, bucket, key, destination"
|
||||
return 1
|
||||
@@ -8,26 +9,28 @@ get_object() {
|
||||
local exit_code=0
|
||||
local error
|
||||
if [[ $1 == 's3' ]]; then
|
||||
error=$(aws --no-verify-ssl s3 mv "s3://$2/$3" "$4" 2>&1) || exit_code=$?
|
||||
get_object_error=$(aws --no-verify-ssl s3 mv "s3://$2/$3" "$4" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
|
||||
error=$(aws --no-verify-ssl s3api get-object --bucket "$2" --key "$3" "$4" 2>&1) || exit_code=$?
|
||||
get_object_error=$(aws --no-verify-ssl s3api get-object --bucket "$2" --key "$3" "$4" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 's3cmd' ]]; then
|
||||
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate get "s3://$2/$3" "$4" 2>&1) || exit_code=$?
|
||||
get_object_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate get "s3://$2/$3" "$4" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 'mc' ]]; then
|
||||
error=$(mc --insecure get "$MC_ALIAS/$2/$3" "$4" 2>&1) || exit_code=$?
|
||||
get_object_error=$(mc --insecure get "$MC_ALIAS/$2/$3" "$4" 2>&1) || exit_code=$?
|
||||
else
|
||||
log 2 "'get object' command not implemented for '$1'"
|
||||
return 1
|
||||
fi
|
||||
log 5 "get object exit code: $exit_code"
|
||||
if [ $exit_code -ne 0 ]; then
|
||||
log 2 "error getting object: $error"
|
||||
log 2 "error getting object: $get_object_error"
|
||||
export get_object_error
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
get_object_with_range() {
|
||||
record_command "get-object" "client:s3api"
|
||||
if [[ $# -ne 4 ]]; then
|
||||
log 2 "'get object with range' requires bucket, key, range, outfile"
|
||||
return 1
|
||||
@@ -41,6 +44,7 @@ get_object_with_range() {
|
||||
}
|
||||
|
||||
get_object_with_user() {
|
||||
record_command "get-object" "client:$1"
|
||||
if [ $# -ne 6 ]; then
|
||||
log 2 "'get object with user' command requires command type, bucket, key, save location, aws ID, aws secret key"
|
||||
return 1
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
get_object_attributes() {
|
||||
record_command "get-object-attributes" "client:s3api"
|
||||
if [[ $# -ne 2 ]]; then
|
||||
log 2 "'get object attributes' command requires bucket, key"
|
||||
return 1
|
||||
|
||||
@@ -5,6 +5,7 @@ get_object_legal_hold() {
|
||||
log 2 "'get object legal hold' command requires bucket, key"
|
||||
return 1
|
||||
fi
|
||||
record_command "get-object-legal-hold" "client:s3api"
|
||||
legal_hold=$(aws --no-verify-ssl s3api get-object-legal-hold --bucket "$1" --key "$2" 2>&1) || local get_result=$?
|
||||
if [[ $get_result -ne 0 ]]; then
|
||||
log 2 "error getting object legal hold: $legal_hold"
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
get_object_lock_configuration() {
|
||||
record_command "get-object-lock-configuration" "client:s3api"
|
||||
if [[ $# -ne 1 ]]; then
|
||||
log 2 "'get object lock configuration' command missing bucket name"
|
||||
return 1
|
||||
|
||||
@@ -1,13 +1,15 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
get_object_retention() {
|
||||
record_command "get-object-retention" "client:s3api"
|
||||
if [[ $# -ne 2 ]]; then
|
||||
log 2 "'get object retention' command requires bucket, key"
|
||||
return 1
|
||||
fi
|
||||
retention=$(aws --no-verify-ssl s3api get-object-retention --bucket "$1" --key "$2" 2>&1) || local get_result=$?
|
||||
if [[ $get_result -ne 0 ]]; then
|
||||
if ! retention=$(aws --no-verify-ssl s3api get-object-retention --bucket "$1" --key "$2" 2>&1); then
|
||||
log 2 "error getting object retention: $retention"
|
||||
get_object_retention_error=$retention
|
||||
export get_object_retention_error
|
||||
return 1
|
||||
fi
|
||||
export retention
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
get_object_tagging() {
|
||||
record_command "get-object-tagging" "client:$1"
|
||||
if [ $# -ne 3 ]; then
|
||||
log 2 "get object tag command missing command type, bucket, and/or key"
|
||||
return 1
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
source ./tests/report.sh
|
||||
|
||||
head_bucket() {
|
||||
record_command "head-bucket" "client:$1"
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "head bucket command missing command type, bucket name"
|
||||
return 1
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
head_object() {
|
||||
record_command "head-object" "client:$1"
|
||||
if [ $# -ne 3 ]; then
|
||||
log 2 "head-object missing command, bucket name, object name"
|
||||
return 2
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
list_buckets() {
|
||||
record_command "list-buckets" "client:$1"
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "list buckets command missing command type"
|
||||
return 1
|
||||
@@ -10,7 +11,7 @@ list_buckets() {
|
||||
if [[ $1 == 's3' ]]; then
|
||||
buckets=$(aws --no-verify-ssl s3 ls 2>&1 s3://) || exit_code=$?
|
||||
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
|
||||
list_buckets_s3api || exit_code=$?
|
||||
list_buckets_s3api "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" || exit_code=$?
|
||||
elif [[ $1 == 's3cmd' ]]; then
|
||||
buckets=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3:// 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 'mc' ]]; then
|
||||
@@ -37,12 +38,54 @@ list_buckets() {
|
||||
return 0
|
||||
}
|
||||
|
||||
list_buckets_with_user() {
|
||||
record_command "list-buckets" "client:$1"
|
||||
if [ $# -ne 3 ]; then
|
||||
echo "'list buckets as user' command missing command type, username, password"
|
||||
return 1
|
||||
fi
|
||||
|
||||
local exit_code=0
|
||||
if [[ $1 == 's3' ]]; then
|
||||
buckets=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" aws --no-verify-ssl s3 ls 2>&1 s3://) || exit_code=$?
|
||||
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
|
||||
list_buckets_s3api "$2" "$3" || exit_code=$?
|
||||
elif [[ $1 == 's3cmd' ]]; then
|
||||
buckets=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate --access_key="$2" --secret_key="$3" ls s3:// 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 'mc' ]]; then
|
||||
buckets=$(mc --insecure ls "$MC_ALIAS" 2>&1) || exit_code=$?
|
||||
else
|
||||
echo "list buckets command not implemented for '$1'"
|
||||
return 1
|
||||
fi
|
||||
if [ $exit_code -ne 0 ]; then
|
||||
echo "error listing buckets: $buckets"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
bucket_array=()
|
||||
while IFS= read -r line; do
|
||||
bucket_name=$(echo "$line" | awk '{print $NF}')
|
||||
bucket_array+=("${bucket_name%/}")
|
||||
done <<< "$buckets"
|
||||
export bucket_array
|
||||
return 0
|
||||
}
|
||||
|
||||
list_buckets_s3api() {
|
||||
output=$(aws --no-verify-ssl s3api list-buckets 2>&1) || exit_code=$?
|
||||
if [[ $exit_code -ne 0 ]]; then
|
||||
if [[ $# -ne 2 ]]; then
|
||||
log 2 "'list_buckets_s3api' requires username, password"
|
||||
return 1
|
||||
fi
|
||||
if ! output=$(AWS_ACCESS_KEY_ID="$1" AWS_SECRET_ACCESS_KEY="$2" aws --no-verify-ssl s3api list-buckets 2>&1); then
|
||||
echo "error listing buckets: $output"
|
||||
return 1
|
||||
fi
|
||||
log 5 "bucket data: $output"
|
||||
|
||||
modified_output=""
|
||||
while IFS= read -r line; do
|
||||
|
||||
29 tests/commands/list_multipart_uploads.sh Normal file
@@ -0,0 +1,29 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
list_multipart_uploads() {
|
||||
record_command "list-multipart-uploads" "client:s3api"
|
||||
if [[ $# -ne 1 ]]; then
|
||||
log 2 "'list multipart uploads' command requires bucket name"
|
||||
return 1
|
||||
fi
|
||||
if ! uploads=$(aws --no-verify-ssl s3api list-multipart-uploads --bucket "$1" 2>&1); then
|
||||
log 2 "error listing uploads: $uploads"
|
||||
return 1
|
||||
fi
|
||||
export uploads
|
||||
}
|
||||
|
||||
list_multipart_uploads_with_user() {
|
||||
record_command "list-multipart-uploads" "client:s3api"
|
||||
if [[ $# -ne 3 ]]; then
|
||||
log 2 "'list multipart uploads' command requires bucket name, username, password"
|
||||
return 1
|
||||
fi
|
||||
if ! uploads=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" aws --no-verify-ssl s3api list-multipart-uploads --bucket "$1" 2>&1); then
|
||||
log 2 "error listing uploads: $uploads"
|
||||
list_multipart_uploads_error=$uploads
|
||||
export list_multipart_uploads_error
|
||||
return 1
|
||||
fi
|
||||
export uploads
|
||||
}
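
A usage sketch (not part of this diff): list the in-progress uploads for a bucket and count them with jq.

# hypothetical usage sketch
if list_multipart_uploads "$BUCKET_ONE_NAME"; then
  upload_count=$(echo "$uploads" | grep -v "InsecureRequestWarning" | jq -r '.Uploads | length')
  log 5 "in-progress multipart uploads: $upload_count"
fi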
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
list_object_versions() {
|
||||
record_command "list-object-versions" "client:s3api"
|
||||
if [[ $# -ne 1 ]]; then
|
||||
log 2 "'list object versions' command requires bucket name"
|
||||
return 1
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
list_objects() {
|
||||
record_command "list-objects" "client:$1"
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "list objects command requires command type, and bucket or folder"
|
||||
return 1
|
||||
|
||||
18 tests/commands/list_objects_v2.sh Normal file
@@ -0,0 +1,18 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# list objects in bucket, v2
|
||||
# param: bucket
|
||||
# export objects on success, return 1 for failure
|
||||
list_objects_v2() {
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "list objects command missing bucket and/or path"
|
||||
return 1
|
||||
fi
|
||||
record_command "list-objects-v2 client:s3api"
|
||||
objects=$(aws --no-verify-ssl s3api list-objects-v2 --bucket "$1") || local result=$?
|
||||
if [[ $result -ne 0 ]]; then
|
||||
echo "error listing objects: $objects"
|
||||
return 1
|
||||
fi
|
||||
export objects
|
||||
}
|
||||
14 tests/commands/list_parts.sh Normal file
@@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
list_parts() {
|
||||
if [[ $# -ne 3 ]]; then
|
||||
log 2 "'list-parts' command requires bucket, key, upload ID"
|
||||
return 1
|
||||
fi
|
||||
record_command "list-parts" "client:s3api"
|
||||
if ! listed_parts=$(aws --no-verify-ssl s3api list-parts --bucket "$1" --key "$2" --upload-id "$3" 2>&1); then
|
||||
log 2 "Error listing multipart upload parts: $listed_parts"
|
||||
return 1
|
||||
fi
|
||||
export listed_parts
|
||||
}
|
||||
@@ -1,23 +1,27 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
put_bucket_acl() {
|
||||
put_bucket_acl_s3api() {
|
||||
record_command "put-bucket-acl" "client:$1"
|
||||
if [[ $# -ne 3 ]]; then
|
||||
log 2 "put bucket acl command requires command type, bucket name, acls or username"
|
||||
return 1
|
||||
fi
|
||||
local error=""
|
||||
local put_result=0
|
||||
if [[ $1 == 's3api' ]]; then
|
||||
log 5 "bucket name: $2, acls: $3"
|
||||
error=$(aws --no-verify-ssl s3api put-bucket-acl --bucket "$2" --access-control-policy "file://$3" 2>&1) || put_result=$?
|
||||
elif [[ $1 == 's3cmd' ]]; then
|
||||
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate setacl "s3://$2" --acl-grant=read:"$3" 2>&1) || put_result=$?
|
||||
else
|
||||
log 2 "put_bucket_acl not implemented for '$1'"
|
||||
log 5 "bucket name: $2, acls: $3"
|
||||
if ! error=$(aws --no-verify-ssl s3api put-bucket-acl --bucket "$2" --access-control-policy "file://$3" 2>&1); then
|
||||
log 2 "error putting bucket acl: $error"
|
||||
return 1
|
||||
fi
|
||||
if [[ $put_result -ne 0 ]]; then
|
||||
log 2 "error putting bucket acl: $error"
|
||||
return 0
|
||||
}
|
||||
|
||||
put_bucket_canned_acl_s3cmd() {
|
||||
record_command "put-bucket-acl" "client:s3cmd"
|
||||
if [[ $# -ne 2 ]]; then
|
||||
log 2 "put bucket acl command requires bucket name, permission"
|
||||
return 1
|
||||
fi
|
||||
if ! error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate setacl "s3://$1" "$2" 2>&1); then
|
||||
log 2 "error putting s3cmd canned ACL: $error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
@@ -28,9 +32,23 @@ put_bucket_canned_acl() {
|
||||
log 2 "'put bucket canned acl' command requires bucket name, canned ACL"
|
||||
return 1
|
||||
fi
|
||||
if ! error=$(aws --no-verify-ssl s3api put-bucket-acl --bucket "$1" --acl "$2"); then
|
||||
log 2 "error resetting bucket acls: $error"
|
||||
record_command "put-bucket-acl" "client:s3api"
|
||||
if ! error=$(aws --no-verify-ssl s3api put-bucket-acl --bucket "$1" --acl "$2" 2>&1); then
|
||||
log 2 "error re-setting bucket acls: $error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
put_bucket_canned_acl_with_user() {
|
||||
if [[ $# -ne 4 ]]; then
|
||||
log 2 "'put bucket canned acl with user' command requires bucket name, canned ACL, username, password"
|
||||
return 1
|
||||
fi
|
||||
record_command "put-bucket-acl" "client:s3api"
|
||||
if ! error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" aws --no-verify-ssl s3api put-bucket-acl --bucket "$1" --acl "$2" 2>&1); then
|
||||
log 2 "error re-setting bucket acls: $error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
15 tests/commands/put_bucket_ownership_controls.sh Normal file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
put_bucket_ownership_controls() {
|
||||
record_command "put-bucket-ownership-controls" "client:s3api"
|
||||
if [[ $# -ne 2 ]]; then
|
||||
log 2 "'put bucket ownership controls' command requires bucket name, control"
|
||||
return 1
|
||||
fi
|
||||
if ! controls_error=$(aws --no-verify-ssl s3api put-bucket-ownership-controls --bucket "$1" \
|
||||
--ownership-controls="Rules=[{ObjectOwnership=$2}]" 2>&1); then
|
||||
log 2 "error putting bucket ownership controls: $controls_error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
@@ -1,8 +1,9 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
put_bucket_policy() {
|
||||
record_command "put-bucket-policy" "client:$1"
|
||||
if [[ $# -ne 3 ]]; then
|
||||
log 2 "get bucket policy command requires command type, bucket, policy file"
|
||||
log 2 "'put bucket policy' command requires command type, bucket, policy file"
|
||||
return 1
|
||||
fi
|
||||
local put_policy_result=0
|
||||
@@ -23,4 +24,19 @@ put_bucket_policy() {
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
put_bucket_policy_with_user() {
|
||||
record_command "put-bucket-policy" "client:s3api"
|
||||
if [[ $# -ne 4 ]]; then
|
||||
log 2 "'put bucket policy with user' command requires bucket, policy file, username, password"
|
||||
return 1
|
||||
fi
|
||||
if ! policy=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" aws --no-verify-ssl s3api put-bucket-policy --bucket "$1" --policy "file://$2" 2>&1); then
|
||||
log 2 "error putting bucket policy with user $3: $policy"
|
||||
put_bucket_policy_error=$policy
|
||||
export put_bucket_policy_error
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
24 tests/commands/put_bucket_tagging.sh Normal file
@@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
put_bucket_tagging() {
|
||||
if [ $# -ne 4 ]; then
|
||||
echo "bucket tag command missing command type, bucket name, key, value"
|
||||
return 1
|
||||
fi
|
||||
local error
|
||||
local result
|
||||
record_command "put-bucket-tagging" "client:$1"
|
||||
if [[ $1 == 'aws' ]]; then
|
||||
error=$(aws --no-verify-ssl s3api put-bucket-tagging --bucket "$2" --tagging "TagSet=[{Key=$3,Value=$4}]" 2>&1) || result=$?
|
||||
elif [[ $1 == 'mc' ]]; then
|
||||
error=$(mc --insecure tag set "$MC_ALIAS"/"$2" "$3=$4" 2>&1) || result=$?
|
||||
else
|
||||
log 2 "invalid command type $1"
|
||||
return 1
|
||||
fi
|
||||
if [[ $result -ne 0 ]]; then
|
||||
echo "Error adding bucket tag: $error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
put_bucket_versioning() {
|
||||
record_command "put-bucket-versioning" "client:s3api"
|
||||
if [[ $# -ne 3 ]]; then
|
||||
log 2 "put bucket versioning command requires command type, bucket name, 'Enabled' or 'Suspended'"
|
||||
return 1
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
source ./tests/report.sh
|
||||
|
||||
put_object() {
|
||||
record_command "put-object" "client:$1"
|
||||
if [ $# -ne 4 ]; then
|
||||
log 2 "put object command requires command type, source, destination bucket, destination key"
|
||||
return 1
|
||||
@@ -28,6 +31,7 @@ put_object() {
|
||||
}
|
||||
|
||||
put_object_with_user() {
|
||||
record_command "put-object" "client:$1"
|
||||
if [ $# -ne 6 ]; then
|
||||
log 2 "put object command requires command type, source, destination bucket, destination key, aws ID, aws secret key"
|
||||
return 1
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
put_object_legal_hold() {
|
||||
record_command "put-object-legal-hold" "client:s3api"
|
||||
if [[ $# -ne 3 ]]; then
|
||||
log 2 "'put object legal hold' command requires bucket, key, hold status ('ON' or 'OFF')"
|
||||
return 1
|
||||
|
||||
27 tests/commands/put_object_lock_configuration.sh Normal file
@@ -0,0 +1,27 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
put_object_lock_configuration() {
|
||||
if [[ $# -ne 4 ]]; then
|
||||
log 2 "'put-object-lock-configuration' command requires bucket name, enabled, mode, period"
|
||||
return 1
|
||||
fi
|
||||
local config="{\"ObjectLockEnabled\": \"$2\", \"Rule\": {\"DefaultRetention\": {\"Mode\": \"$3\", \"Days\": $4}}}"
|
||||
if ! error=$(aws --no-verify-ssl s3api put-object-lock-configuration --bucket "$1" --object-lock-configuration "$config" 2>&1); then
|
||||
log 2 "error putting object lock configuration: $error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
put_object_lock_configuration_disabled() {
|
||||
if [[ $# -ne 1 ]]; then
|
||||
log 2 "'put-object-lock-configuration' disable command requires bucket name"
|
||||
return 1
|
||||
fi
|
||||
local config="{\"ObjectLockEnabled\": \"Enabled\"}"
|
||||
if ! error=$(aws --no-verify-ssl s3api put-object-lock-configuration --bucket "$1" --object-lock-configuration "$config" 2>&1); then
|
||||
log 2 "error putting object lock configuration: $error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
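
A usage sketch (not part of this diff): apply a one-day default GOVERNANCE retention, then read the configuration back. This assumes the bucket was created with object lock enabled (see create_bucket_object_lock_enabled); GOVERNANCE is one of the two standard retention modes.

# hypothetical usage sketch; assumes an object-lock-enabled bucket
put_object_lock_configuration "$BUCKET_ONE_NAME" "Enabled" "GOVERNANCE" 1
get_object_lock_configuration "$BUCKET_ONE_NAME"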
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
put_object_retention() {
|
||||
record_command "put-object-retention" "client:s3api"
|
||||
if [[ $# -ne 4 ]]; then
|
||||
log 2 "'put object retention' command requires bucket, key, retention mode, retention date"
|
||||
return 1
|
||||
|
||||
24 tests/commands/put_object_tagging.sh Normal file
@@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
put_object_tagging() {
|
||||
if [ $# -ne 5 ]; then
|
||||
log 2 "'put-object-tagging' command missing command type, object name, file, key, and/or value"
|
||||
return 1
|
||||
fi
|
||||
local error
|
||||
local result
|
||||
record_command "put-object-tagging" "client:$1"
|
||||
if [[ $1 == 'aws' ]]; then
|
||||
error=$(aws --no-verify-ssl s3api put-object-tagging --bucket "$2" --key "$3" --tagging "TagSet=[{Key=$4,Value=$5}]" 2>&1) || result=$?
|
||||
elif [[ $1 == 'mc' ]]; then
|
||||
error=$(mc --insecure tag set "$MC_ALIAS"/"$2"/"$3" "$4=$5" 2>&1) || result=$?
|
||||
else
|
||||
log 2 "invalid command type $1"
|
||||
return 1
|
||||
fi
|
||||
if [[ $result -ne 0 ]]; then
|
||||
log 2 "Error adding object tag: $error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
37 tests/commands/put_public_access_block.sh Normal file
@@ -0,0 +1,37 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
put_public_access_block() {
|
||||
if [[ $# -ne 2 ]]; then
|
||||
log 2 "'put_public_access_block' command requires bucket, access block list"
|
||||
return 1
|
||||
fi
|
||||
if ! error=$(aws --no-verify-ssl s3api put-public-access-block --bucket "$1" --public-access-block-configuration "$2" 2>&1); then
|
||||
log 2 "error updating public access block: $error"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
put_public_access_block_enable_public_acls() {
|
||||
if [[ $# -ne 1 ]]; then
|
||||
log 2 "command requires bucket"
|
||||
return 1
|
||||
fi
|
||||
if ! put_public_access_block "$1" "BlockPublicAcls=false,IgnorePublicAcls=false,BlockPublicPolicy=true,RestrictPublicBuckets=true"; then
|
||||
log 2 "error putting public acccess block"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
put_public_access_block_disable_public_acls() {
|
||||
if [[ $# -ne 1 ]]; then
|
||||
log 2 "command requires bucket"
|
||||
return 1
|
||||
fi
|
||||
if ! put_public_access_block "$1" "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true"; then
|
||||
log 2 "error putting public access block"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
select_object_content() {
|
||||
record_command "select-object-content" "client:s3api"
|
||||
if [[ $# -ne 7 ]]; then
|
||||
log 2 "'select object content' command requires bucket, key, expression, expression type, input serialization, output serialization, outfile"
|
||||
return 1
|
||||
|
||||
19 tests/commands/upload_part.sh Normal file
@@ -0,0 +1,19 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
upload_part() {
|
||||
if [ $# -ne 5 ]; then
|
||||
log 2 "upload multipart part function must have bucket, key, upload ID, file name, part number"
|
||||
return 1
|
||||
fi
|
||||
local etag_json
|
||||
record_command "upload-part" "client:s3api"
|
||||
if ! etag_json=$(aws --no-verify-ssl s3api upload-part --bucket "$1" --key "$2" --upload-id "$3" --part-number "$5" --body "$4-$(($5-1))" 2>&1); then
|
||||
log 2 "Error uploading part $5: $etag_json"
|
||||
return 1
|
||||
fi
|
||||
if ! etag=$(echo "$etag_json" | grep -v "InsecureRequestWarning" | jq '.ETag' 2>&1); then
|
||||
log 2 "error obtaining etag: $etag"
|
||||
return 1
|
||||
fi
|
||||
export etag
|
||||
}
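
A usage sketch (not part of this diff): upload part 1 of a prepared file. Note the helper reads the part body from "<file>-<part number minus one>", so "$test_file-0" must exist; $test_file is a placeholder.

# hypothetical usage sketch; expects the part file "$test_file-0" to exist
create_multipart_upload "$BUCKET_ONE_NAME" "demo-key"
upload_part "$BUCKET_ONE_NAME" "demo-key" "$upload_id" "$test_file" 1
log 5 "uploaded part 1, etag: $etag"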
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
upload_part_copy() {
|
||||
record_command "upload-part-copy" "client:s3api"
|
||||
if [ $# -ne 5 ]; then
|
||||
echo "upload multipart part copy function must have bucket, key, upload ID, file name, part number"
|
||||
return 1
|
||||
@@ -17,6 +18,7 @@ upload_part_copy() {
|
||||
}
|
||||
|
||||
upload_part_copy_with_range() {
|
||||
record_command "upload-part-copy" "client:s3api"
|
||||
if [ $# -ne 6 ]; then
|
||||
log 2 "upload multipart part copy function must have bucket, key, upload ID, file name, part number, range"
|
||||
return 1
|
||||
|
||||
16 tests/env.sh
@@ -56,10 +56,13 @@ check_universal_vars() {
|
||||
elif [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
|
||||
log 2 "No AWS secret access key set"
|
||||
return 1
|
||||
elif [ -z "$AWS_REGION" ]; then
|
||||
log 2 "No AWS region set"
|
||||
return 1
|
||||
elif [ -z "$AWS_PROFILE" ]; then
|
||||
log 2 "No AWS profile set"
|
||||
return 1
|
||||
elif [ -z "$AWS_ENDPOINT_URL" ]; then
|
||||
elif [ "$DIRECT" != "true" ] && [ -z "$AWS_ENDPOINT_URL" ]; then
|
||||
log 2 "No AWS endpoint URL set"
|
||||
return 1
|
||||
elif [[ $RUN_VERSITYGW != "true" ]] && [[ $RUN_VERSITYGW != "false" ]]; then
|
||||
@@ -78,7 +81,7 @@ check_universal_vars() {
|
||||
log 2 "RECREATE_BUCKETS must be 'true' or 'false'"
|
||||
return 1
|
||||
fi
|
||||
export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_PROFILE AWS_ENDPOINT_URL RUN_VERSITYGW \
|
||||
export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_REGION AWS_PROFILE AWS_ENDPOINT_URL RUN_VERSITYGW \
|
||||
BUCKET_ONE_NAME BUCKET_TWO_NAME RECREATE_BUCKETS
|
||||
if [[ -n "$TEST_LOG_FILE" ]]; then
|
||||
export TEST_LOG_FILE
|
||||
@@ -86,6 +89,15 @@ check_universal_vars() {
|
||||
if [[ -n "$VERSITY_LOG_FILE" ]]; then
|
||||
export VERSITY_LOG_FILE
|
||||
fi
|
||||
if [[ -n "$DIRECT" ]]; then
|
||||
export DIRECT
|
||||
fi
|
||||
if [[ -n "$DIRECT_DISPLAY_NAME" ]]; then
|
||||
export DIRECT_DISPLAY_NAME
|
||||
fi
|
||||
if [[ -n "$COVERAGE_DB" ]]; then
|
||||
export COVERAGE_DB
|
||||
fi
|
||||
}
|
||||
|
||||
check_versity_vars() {
|
||||
|
||||
@@ -65,6 +65,8 @@ func TestCreateBucket(s *S3Conf) {
|
||||
CreateBucket_invalid_bucket_name(s)
|
||||
CreateBucket_existing_bucket(s)
|
||||
CreateBucket_owned_by_you(s)
|
||||
CreateBucket_invalid_ownership(s)
|
||||
CreateBucket_ownership_with_acl(s)
|
||||
CreateBucket_as_user(s)
|
||||
CreateBucket_default_acl(s)
|
||||
CreateBucket_non_default_acl(s)
|
||||
@@ -89,6 +91,24 @@ func TestDeleteBucket(s *S3Conf) {
|
||||
DeleteBucket_success_status_code(s)
|
||||
}
|
||||
|
||||
func TestPutBucketOwnershipControls(s *S3Conf) {
|
||||
PutBucketOwnershipControls_non_existing_bucket(s)
|
||||
PutBucketOwnershipControls_multiple_rules(s)
|
||||
PutBucketOwnershipControls_invalid_ownership(s)
|
||||
PutBucketOwnershipControls_success(s)
|
||||
}
|
||||
|
||||
func TestGetBucketOwnershipControls(s *S3Conf) {
|
||||
GetBucketOwnershipControls_non_existing_bucket(s)
|
||||
GetBucketOwnershipControls_default_ownership(s)
|
||||
GetBucketOwnershipControls_success(s)
|
||||
}
|
||||
|
||||
func TestDeleteBucketOwnershipControls(s *S3Conf) {
|
||||
DeleteBucketOwnershipControls_non_existing_bucket(s)
|
||||
DeleteBucketOwnershipControls_success(s)
|
||||
}
|
||||
|
||||
func TestPutBucketTagging(s *S3Conf) {
|
||||
PutBucketTagging_non_existing_bucket(s)
|
||||
PutBucketTagging_long_tags(s)
|
||||
@@ -122,6 +142,7 @@ func TestHeadObject(s *S3Conf) {
|
||||
HeadObject_invalid_part_number(s)
|
||||
HeadObject_non_existing_mp(s)
|
||||
HeadObject_mp_success(s)
|
||||
HeadObject_non_existing_dir_object(s)
|
||||
HeadObject_success(s)
|
||||
}
|
||||
|
||||
@@ -140,6 +161,7 @@ func TestGetObject(s *S3Conf) {
|
||||
GetObject_success(s)
|
||||
GetObject_by_range_success(s)
|
||||
GetObject_by_range_resp_status(s)
|
||||
GetObject_non_existing_dir_object(s)
|
||||
}
|
||||
|
||||
func TestListObjects(s *S3Conf) {
|
||||
@@ -162,6 +184,7 @@ func TestListObjectsV2(s *S3Conf) {
|
||||
|
||||
func TestDeleteObject(s *S3Conf) {
|
||||
DeleteObject_non_existing_object(s)
|
||||
DeleteObject_non_existing_dir_object(s)
|
||||
DeleteObject_success(s)
|
||||
DeleteObject_success_status_code(s)
|
||||
}
|
||||
@@ -177,6 +200,8 @@ func TestCopyObject(s *S3Conf) {
|
||||
CopyObject_not_owned_source_bucket(s)
|
||||
CopyObject_copy_to_itself(s)
|
||||
CopyObject_to_itself_with_new_metadata(s)
|
||||
CopyObject_CopySource_starting_with_slash(s)
|
||||
CopyObject_non_existing_dir_object(s)
|
||||
CopyObject_success(s)
|
||||
}
|
||||
|
||||
@@ -267,6 +292,7 @@ func TestCompleteMultipartUpload(s *S3Conf) {
|
||||
|
||||
func TestPutBucketAcl(s *S3Conf) {
|
||||
PutBucketAcl_non_existing_bucket(s)
|
||||
PutBucketAcl_disabled(s)
|
||||
PutBucketAcl_invalid_acl_canned_and_acp(s)
|
||||
PutBucketAcl_invalid_acl_canned_and_grants(s)
|
||||
PutBucketAcl_invalid_acl_acp_and_grants(s)
|
||||
@@ -280,6 +306,9 @@ func TestPutBucketAcl(s *S3Conf) {
|
||||
|
||||
func TestGetBucketAcl(s *S3Conf) {
|
||||
GetBucketAcl_non_existing_bucket(s)
|
||||
GetBucketAcl_translation_canned_public_read(s)
|
||||
GetBucketAcl_translation_canned_public_read_write(s)
|
||||
GetBucketAcl_translation_canned_private(s)
|
||||
GetBucketAcl_access_denied(s)
|
||||
GetBucketAcl_success(s)
|
||||
}
|
||||
@@ -395,6 +424,9 @@ func TestFullFlow(s *S3Conf) {
|
||||
TestHeadBucket(s)
|
||||
TestListBuckets(s)
|
||||
TestDeleteBucket(s)
|
||||
TestPutBucketOwnershipControls(s)
|
||||
TestGetBucketOwnershipControls(s)
|
||||
TestDeleteBucketOwnershipControls(s)
|
||||
TestPutBucketTagging(s)
|
||||
TestGetBucketTagging(s)
|
||||
TestDeleteBucketTagging(s)
|
||||
@@ -453,6 +485,8 @@ func TestAccessControl(s *S3Conf) {
|
||||
AccessControl_bucket_resource_all_action(s)
|
||||
AccessControl_single_object_resource_actions(s)
|
||||
AccessControl_multi_statement_policy(s)
|
||||
AccessControl_bucket_ownership_to_user(s)
|
||||
AccessControl_root_PutBucketAcl(s)
|
||||
}
|
||||
|
||||
type IntTests map[string]func(s *S3Conf) error
|
||||
@@ -505,6 +539,8 @@ func GetIntTests() IntTests {
|
||||
"CreateBucket_invalid_bucket_name": CreateBucket_invalid_bucket_name,
|
||||
"CreateBucket_existing_bucket": CreateBucket_existing_bucket,
|
||||
"CreateBucket_owned_by_you": CreateBucket_owned_by_you,
|
||||
"CreateBucket_invalid_ownership": CreateBucket_invalid_ownership,
|
||||
"CreateBucket_ownership_with_acl": CreateBucket_ownership_with_acl,
|
||||
"CreateBucket_as_user": CreateBucket_as_user,
|
||||
"CreateDeleteBucket_success": CreateDeleteBucket_success,
|
||||
"CreateBucket_default_acl": CreateBucket_default_acl,
|
||||
@@ -518,6 +554,15 @@ func GetIntTests() IntTests {
|
||||
"DeleteBucket_non_existing_bucket": DeleteBucket_non_existing_bucket,
|
||||
"DeleteBucket_non_empty_bucket": DeleteBucket_non_empty_bucket,
|
||||
"DeleteBucket_success_status_code": DeleteBucket_success_status_code,
|
||||
"PutBucketOwnershipControls_non_existing_bucket": PutBucketOwnershipControls_non_existing_bucket,
|
||||
"PutBucketOwnershipControls_multiple_rules": PutBucketOwnershipControls_multiple_rules,
|
||||
"PutBucketOwnershipControls_invalid_ownership": PutBucketOwnershipControls_invalid_ownership,
|
||||
"PutBucketOwnershipControls_success": PutBucketOwnershipControls_success,
|
||||
"GetBucketOwnershipControls_non_existing_bucket": GetBucketOwnershipControls_non_existing_bucket,
|
||||
"GetBucketOwnershipControls_default_ownership": GetBucketOwnershipControls_default_ownership,
|
||||
"GetBucketOwnershipControls_success": GetBucketOwnershipControls_success,
|
||||
"DeleteBucketOwnershipControls_non_existing_bucket": DeleteBucketOwnershipControls_non_existing_bucket,
|
||||
"DeleteBucketOwnershipControls_success": DeleteBucketOwnershipControls_success,
|
||||
"PutBucketTagging_non_existing_bucket": PutBucketTagging_non_existing_bucket,
|
||||
"PutBucketTagging_long_tags": PutBucketTagging_long_tags,
|
||||
"PutBucketTagging_success": PutBucketTagging_success,
|
||||
@@ -535,6 +580,7 @@ func GetIntTests() IntTests {
|
||||
"HeadObject_invalid_part_number": HeadObject_invalid_part_number,
|
||||
"HeadObject_non_existing_mp": HeadObject_non_existing_mp,
|
||||
"HeadObject_mp_success": HeadObject_mp_success,
|
||||
"HeadObject_non_existing_dir_object": HeadObject_non_existing_dir_object,
|
||||
"HeadObject_success": HeadObject_success,
|
||||
"GetObjectAttributes_non_existing_bucket": GetObjectAttributes_non_existing_bucket,
|
||||
"GetObjectAttributes_non_existing_object": GetObjectAttributes_non_existing_object,
|
||||
@@ -547,6 +593,7 @@ func GetIntTests() IntTests {
|
||||
"GetObject_success": GetObject_success,
|
||||
"GetObject_by_range_success": GetObject_by_range_success,
|
||||
"GetObject_by_range_resp_status": GetObject_by_range_resp_status,
|
||||
"GetObject_non_existing_dir_object": GetObject_non_existing_dir_object,
|
||||
"ListObjects_non_existing_bucket": ListObjects_non_existing_bucket,
|
||||
"ListObjects_with_prefix": ListObjects_with_prefix,
|
||||
"ListObject_truncated": ListObject_truncated,
|
||||
@@ -560,6 +607,7 @@ func GetIntTests() IntTests {
|
||||
"ListObjectsV2_start_after_not_in_list": ListObjectsV2_start_after_not_in_list,
|
||||
"ListObjectsV2_start_after_empty_result": ListObjectsV2_start_after_empty_result,
|
||||
"DeleteObject_non_existing_object": DeleteObject_non_existing_object,
|
||||
"DeleteObject_non_existing_dir_object": DeleteObject_non_existing_dir_object,
|
||||
"DeleteObject_success": DeleteObject_success,
|
||||
"DeleteObject_success_status_code": DeleteObject_success_status_code,
|
||||
"DeleteObjects_empty_input": DeleteObjects_empty_input,
|
||||
@@ -569,6 +617,8 @@ func GetIntTests() IntTests {
|
||||
"CopyObject_not_owned_source_bucket": CopyObject_not_owned_source_bucket,
|
||||
"CopyObject_copy_to_itself": CopyObject_copy_to_itself,
|
||||
"CopyObject_to_itself_with_new_metadata": CopyObject_to_itself_with_new_metadata,
|
||||
"CopyObject_CopySource_starting_with_slash": CopyObject_CopySource_starting_with_slash,
|
||||
"CopyObject_non_existing_dir_object": CopyObject_non_existing_dir_object,
|
||||
"CopyObject_success": CopyObject_success,
|
||||
"PutObjectTagging_non_existing_object": PutObjectTagging_non_existing_object,
|
||||
"PutObjectTagging_long_tags": PutObjectTagging_long_tags,
|
||||
@@ -636,6 +686,9 @@ func GetIntTests() IntTests {
|
||||
"PutBucketAcl_success_canned_acl": PutBucketAcl_success_canned_acl,
|
||||
"PutBucketAcl_success_acp": PutBucketAcl_success_acp,
|
||||
"GetBucketAcl_non_existing_bucket": GetBucketAcl_non_existing_bucket,
|
||||
"GetBucketAcl_translation_canned_public_read": GetBucketAcl_translation_canned_public_read,
|
||||
"GetBucketAcl_translation_canned_public_read_write": GetBucketAcl_translation_canned_public_read_write,
|
||||
"GetBucketAcl_translation_canned_private": GetBucketAcl_translation_canned_private,
|
||||
"GetBucketAcl_access_denied": GetBucketAcl_access_denied,
|
||||
"GetBucketAcl_success": GetBucketAcl_success,
|
||||
"PutBucketPolicy_non_existing_bucket": PutBucketPolicy_non_existing_bucket,
|
||||
@@ -721,5 +774,14 @@ func GetIntTests() IntTests {
|
||||
"IAM_userplus_access_denied": IAM_userplus_access_denied,
|
||||
"IAM_userplus_CreateBucket": IAM_userplus_CreateBucket,
|
||||
"IAM_admin_ChangeBucketOwner": IAM_admin_ChangeBucketOwner,
|
||||
"AccessControl_default_ACL_user_access_denied": AccessControl_default_ACL_user_access_denied,
|
||||
"AccessControl_default_ACL_userplus_access_denied": AccessControl_default_ACL_userplus_access_denied,
|
||||
"AccessControl_default_ACL_admin_successful_access": AccessControl_default_ACL_admin_successful_access,
|
||||
"AccessControl_bucket_resource_single_action": AccessControl_bucket_resource_single_action,
|
||||
"AccessControl_bucket_resource_all_action": AccessControl_bucket_resource_all_action,
|
||||
"AccessControl_single_object_resource_actions": AccessControl_single_object_resource_actions,
|
||||
"AccessControl_multi_statement_policy": AccessControl_multi_statement_policy,
|
||||
"AccessControl_bucket_ownership_to_user": AccessControl_bucket_ownership_to_user,
|
||||
"AccessControl_root_PutBucketAcl": AccessControl_root_PutBucketAcl,
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -66,6 +66,7 @@ func setup(s *S3Conf, bucket string, opts ...setupOpt) error {
    _, err := s3client.CreateBucket(ctx, &s3.CreateBucketInput{
        Bucket: &bucket,
        ObjectLockEnabledForBucket: &cfg.LockEnabled,
        ObjectOwnership: cfg.Ownership,
    })
    cancel()
    return err
@@ -121,6 +122,7 @@ func teardown(s *S3Conf, bucket string) error {

type setupCfg struct {
    LockEnabled bool
    Ownership types.ObjectOwnership
}

type setupOpt func(*setupCfg)
@@ -128,6 +130,9 @@ type setupOpt func(*setupCfg)
func withLock() setupOpt {
    return func(s *setupCfg) { s.LockEnabled = true }
}
func withOwnership(o types.ObjectOwnership) setupOpt {
    return func(s *setupCfg) { s.Ownership = o }
}

func actionHandler(s *S3Conf, testName string, handler func(s3client *s3.Client, bucket string) error, opts ...setupOpt) error {
    runF(testName)
@@ -393,6 +398,9 @@ func compareGrants(grts1, grts2 []types.Grant) bool {
        if *grt.Grantee.ID != *grts2[i].Grantee.ID {
            return false
        }
        if grt.Grantee.Type != grts2[i].Grantee.Type {
            return false
        }
    }
    return true
}

91  tests/report.sh (new file)
@@ -0,0 +1,91 @@
#!/usr/bin/env bash

check_and_create_database() {
  # Define SQL commands to create a table
  SQL_CREATE_TABLE="CREATE TABLE IF NOT EXISTS entries (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    command TEXT NOT NULL,
    client TEXT NOT NULL,
    count INTEGER DEFAULT 1,
    UNIQUE(command, client)
  );"

  # Execute the SQL commands to create the database and table
  sqlite3 "$COVERAGE_DB" <<EOF
$SQL_CREATE_TABLE
.exit
EOF
  log 5 "Database '$COVERAGE_DB' and table 'entries' created successfully."
}

record_command() {
  if [ -z "$COVERAGE_DB" ]; then
    log 5 "no coverage db set, not recording"
    return 0
  fi
  if [[ $# -lt 1 ]]; then
    log 2 "'record command' requires at least command name"
    return 1
  fi
  check_and_create_database
  log 5 "command to record: $1"
  client=""
  #role="root"
  for arg in "${@:2}"; do
    log 5 "Argument: $arg"
    if [[ $arg != *":"* ]]; then
      log 3 "'$arg' must contain colon to record client"
      continue
    fi
    header=$(echo "$arg" | awk -F: '{print $1}')
    case $header in
      "client")
        client=$(echo "$arg" | awk -F: '{print $2}')
        ;;
      #"role")
      #  role=$(echo "$arg" | awk -F: '{print $2}')
      #  ;;
    esac
  done
  if ! error=$(sqlite3 "$COVERAGE_DB" "INSERT INTO entries (command, client, count) VALUES(\"$1\", \"$client\", 1) ON CONFLICT(command, client) DO UPDATE SET count = count + 1" 2>&1); then
    log 2 "error in sqlite statement: $error"
  fi
}

record_result() {
  if [ -z "$COVERAGE_DB" ]; then
    log 5 "no coverage db set, not recording"
    return 0
  fi
  # Define SQL commands to create a table
  SQL_CREATE_TABLE="CREATE TABLE IF NOT EXISTS results (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    command TEXT NOT NULL,
    client TEXT,
    count INTEGER,
    pass INTEGER DEFAULT 1,
    UNIQUE(command, client)
  );"
  # Execute the SQL commands to create the database and table
  sqlite3 "$COVERAGE_DB" <<EOF
$SQL_CREATE_TABLE
.exit
EOF

  # Iterate over each command in the entries table
  while IFS="|" read -r command client count; do
    if [[ $BATS_TEST_STATUS -eq 0 ]]; then
      # Test passed
      sqlite3 "$COVERAGE_DB" "INSERT INTO results (command, client, count) VALUES ('$command', '$client', '$count')
        ON CONFLICT(command, client) DO UPDATE SET count = count + $count;"
    else
      # Test failed
      sqlite3 "$COVERAGE_DB" "INSERT INTO results (command, client, count, pass) VALUES ('$command', '$client', '$count', 0)
        ON CONFLICT(command, client) DO UPDATE SET count = count + $count;"
    fi
  done < <(sqlite3 "$COVERAGE_DB" "SELECT command, client, count FROM entries;")

  sqlite3 "$COVERAGE_DB" "DROP TABLE entries;"

  log 5 "results recorded in '$COVERAGE_DB' table 'results'; 'entries' table cleared."
}
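
A brief usage sketch of the coverage helpers above; the command name, client label, and database path are illustrative assumptions, not values fixed by the suite.

# assumes the suite's logging helper (log) is already sourced, as in tests/setup.sh
export COVERAGE_DB="coverage.db"
source ./tests/report.sh

# inside a command wrapper, record one invocation and tag the client used
record_command "put-object" "client:s3api"

# after the run, summarize what record_result stored per command and client
sqlite3 "$COVERAGE_DB" \
  "SELECT command, client, SUM(count) AS calls, MIN(pass) AS all_passed
     FROM results GROUP BY command, client ORDER BY command;"
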
@@ -57,4 +57,7 @@ teardown() {
    end_time=$(date +%s)
    log 4 "Total test time: $((end_time - start_time))"
  fi
  if [[ -n "$COVERAGE_DB" ]]; then
    record_result
  fi
}

1024  tests/test_aws.sh (file diff suppressed because it is too large)
494   tests/test_aws_root_inner.sh (new executable file)
@@ -0,0 +1,494 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
source ./tests/commands/delete_objects.sh
|
||||
source ./tests/commands/list_objects_v2.sh
|
||||
source ./tests/commands/list_parts.sh
|
||||
|
||||
test_abort_multipart_upload_aws_root() {
|
||||
local bucket_file="bucket-file"
|
||||
|
||||
create_test_files "$bucket_file" || fail "error creating test files"
|
||||
# shellcheck disable=SC2154
|
||||
dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1 || fail "error creating test file"
|
||||
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
|
||||
|
||||
run_then_abort_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || fail "abort failed"
|
||||
|
||||
if object_exists "aws" "$BUCKET_ONE_NAME" "$bucket_file"; then
|
||||
fail "Upload file exists after abort"
|
||||
fi
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files $bucket_file
|
||||
}
|
||||
|
||||
test_complete_multipart_upload_aws_root() {
|
||||
local bucket_file="bucket-file"
|
||||
|
||||
create_test_files "$bucket_file" || fail "error creating test files"
|
||||
dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1 || fail "error creating test file"
|
||||
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "failed to create bucket '$BUCKET_ONE_NAME'"
|
||||
|
||||
multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || fail "error performing multipart upload"
|
||||
|
||||
download_and_compare_file "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder/$bucket_file-copy" || fail "error downloading and comparing file"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files $bucket_file
|
||||
}
|
||||
|
||||
test_create_multipart_upload_properties_aws_root() {
|
||||
local bucket_file="bucket-file"
|
||||
|
||||
local expected_content_type="application/zip"
|
||||
local expected_meta_key="testKey"
|
||||
local expected_meta_val="testValue"
|
||||
local expected_hold_status="ON"
|
||||
local expected_retention_mode="GOVERNANCE"
|
||||
local expected_tag_key="TestTag"
|
||||
local expected_tag_val="TestTagVal"
|
||||
local now later
|
||||
|
||||
os_name="$(uname)"
|
||||
if [[ "$os_name" == "Darwin" ]]; then
|
||||
now=$(date -u +"%Y-%m-%dT%H:%M:%S")
|
||||
later=$(date -j -v +15S -f "%Y-%m-%dT%H:%M:%S" "$now" +"%Y-%m-%dT%H:%M:%S")
|
||||
else
|
||||
now=$(date +"%Y-%m-%dT%H:%M:%S")
|
||||
later=$(date -d "$now 15 seconds" +"%Y-%m-%dT%H:%M:%S")
|
||||
fi
|
||||
|
||||
create_test_files "$bucket_file" || fail "error creating test file"
|
||||
dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1 || fail "error creating test file"
|
||||
|
||||
delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME" || fail "error deleting bucket, or checking for existence"
|
||||
# in static bucket config, bucket will still exist
|
||||
bucket_exists "s3api" "$BUCKET_ONE_NAME" || local exists_result=$?
|
||||
[[ $exists_result -ne 2 ]] || fail "error checking for bucket existence"
|
||||
if [[ $exists_result -eq 1 ]]; then
|
||||
create_bucket_object_lock_enabled "$BUCKET_ONE_NAME" || fail "error creating bucket"
|
||||
fi
|
||||
get_object_lock_configuration "$BUCKET_ONE_NAME" || fail "error getting log config"
|
||||
# shellcheck disable=SC2154
|
||||
log 5 "LOG CONFIG: $log_config"
|
||||
|
||||
log 5 "LATER: $later"
|
||||
multipart_upload_with_params "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 \
|
||||
"$expected_content_type" \
|
||||
"{\"$expected_meta_key\": \"$expected_meta_val\"}" \
|
||||
"$expected_hold_status" \
|
||||
"$expected_retention_mode" \
|
||||
"$later" \
|
||||
"$expected_tag_key=$expected_tag_val" || fail "error performing multipart upload"
|
||||
|
||||
head_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error getting metadata"
|
||||
# shellcheck disable=SC2154
|
||||
raw_metadata=$(echo "$metadata" | grep -v "InsecureRequestWarning")
|
||||
log 5 "raw metadata: $raw_metadata"
|
||||
|
||||
content_type=$(echo "$raw_metadata" | jq -r ".ContentType")
|
||||
[[ $content_type == "$expected_content_type" ]] || fail "content type mismatch ($content_type, $expected_content_type)"
|
||||
meta_val=$(echo "$raw_metadata" | jq -r ".Metadata.$expected_meta_key")
|
||||
[[ $meta_val == "$expected_meta_val" ]] || fail "metadata val mismatch ($meta_val, $expected_meta_val)"
|
||||
hold_status=$(echo "$raw_metadata" | jq -r ".ObjectLockLegalHoldStatus")
|
||||
[[ $hold_status == "$expected_hold_status" ]] || fail "hold status mismatch ($hold_status, $expected_hold_status)"
|
||||
retention_mode=$(echo "$raw_metadata" | jq -r ".ObjectLockMode")
|
||||
[[ $retention_mode == "$expected_retention_mode" ]] || fail "retention mode mismatch ($retention_mode, $expected_retention_mode)"
|
||||
retain_until_date=$(echo "$raw_metadata" | jq -r ".ObjectLockRetainUntilDate")
|
||||
[[ $retain_until_date == "$later"* ]] || fail "retention date mismatch ($retain_until_date, $five_seconds_later)"
|
||||
|
||||
get_object_tagging "aws" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error getting tagging"
|
||||
# shellcheck disable=SC2154
|
||||
log 5 "tags: $tags"
|
||||
tag_key=$(echo "$tags" | jq -r ".TagSet[0].Key")
|
||||
[[ $tag_key == "$expected_tag_key" ]] || fail "tag mismatch ($tag_key, $expected_tag_key)"
|
||||
tag_val=$(echo "$tags" | jq -r ".TagSet[0].Value")
|
||||
[[ $tag_val == "$expected_tag_val" ]] || fail "tag mismatch ($tag_val, $expected_tag_val)"
|
||||
|
||||
put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "OFF" || fail "error disabling legal hold"
|
||||
head_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error getting metadata"
|
||||
|
||||
get_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder/$bucket_file-copy" || fail "error getting object"
|
||||
compare_files "$test_file_folder/$bucket_file" "$test_file_folder/$bucket_file-copy" || fail "files not equal"
|
||||
|
||||
sleep 15
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files $bucket_file
|
||||
}
|
||||
|
||||
test_delete_objects_aws_root() {
|
||||
local object_one="test-file-one"
|
||||
local object_two="test-file-two"
|
||||
|
||||
create_test_files "$object_one" "$object_two" || fail "error creating test files"
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || fail "error creating bucket"
|
||||
|
||||
put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || fail "error adding object one"
|
||||
put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || fail "error adding object two"
|
||||
|
||||
delete_objects "$BUCKET_ONE_NAME" "$object_one" "$object_two" || fail "error deleting objects"
|
||||
|
||||
object_exists "s3api" "$BUCKET_ONE_NAME" "$object_one" || local object_one_exists_result=$?
|
||||
[[ $object_one_exists_result -eq 1 ]] || fail "object $object_one not deleted"
|
||||
object_exists "s3api" "$BUCKET_ONE_NAME" "$object_two" || local object_two_exists_result=$?
|
||||
[[ $object_two_exists_result -eq 1 ]] || fail "object $object_two not deleted"
|
||||
|
||||
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$object_one" "$object_two"
|
||||
}
|
||||
|
||||
test_get_bucket_acl_aws_root() {
|
||||
# TODO remove when able to assign bucket ownership back to root
|
||||
if [[ $RECREATE_BUCKETS == "false" ]]; then
|
||||
skip
|
||||
fi
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "error creating bucket"
|
||||
|
||||
get_bucket_acl "s3api" "$BUCKET_ONE_NAME" || fail "error retreving ACL"
|
||||
|
||||
# shellcheck disable=SC2154
|
||||
log 5 "ACL: $acl"
|
||||
id=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq -r '.Owner.ID')
|
||||
[[ $id == "$AWS_ACCESS_KEY_ID" ]] || fail "Acl mismatch"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
}
|
||||
|
||||
test_get_object_full_range_aws_root() {
|
||||
bucket_file="bucket_file"
|
||||
|
||||
create_test_files "$bucket_file" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
echo -n "0123456789" > "$test_file_folder/$bucket_file"
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || local setup_result=$?
|
||||
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"
|
||||
put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error putting object"
|
||||
get_object_with_range "$BUCKET_ONE_NAME" "$bucket_file" "bytes=9-15" "$test_file_folder/$bucket_file-range" || fail "error getting range"
|
||||
[[ "$(cat "$test_file_folder/$bucket_file-range")" == "9" ]] || fail "byte range not copied properly"
|
||||
}
|
||||
|
||||
test_get_object_invalid_range_aws_root() {
|
||||
bucket_file="bucket_file"
|
||||
|
||||
create_test_files "$bucket_file" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || local setup_result=$?
|
||||
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"
|
||||
put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error putting object"
|
||||
get_object_with_range "$BUCKET_ONE_NAME" "$bucket_file" "bytes=0-0" "$test_file_folder/$bucket_file-range" || local get_result=$?
|
||||
[[ $get_result -ne 0 ]] || fail "Get object with zero range returned no error"
|
||||
}
|
||||
|
||||
test_put_object_aws_root() {
|
||||
bucket_file="bucket_file"
|
||||
|
||||
create_test_files "$bucket_file" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || local setup_result=$?
|
||||
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"
|
||||
setup_bucket "s3api" "$BUCKET_TWO_NAME" || local setup_result_two=$?
|
||||
[[ $setup_result_two -eq 0 ]] || fail "Bucket two setup error"
|
||||
put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
copy_error=$(aws --no-verify-ssl s3api copy-object --copy-source "$BUCKET_ONE_NAME/$bucket_file" --key "$bucket_file" --bucket "$BUCKET_TWO_NAME" 2>&1) || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Error copying file: $copy_error"
|
||||
copy_file "s3://$BUCKET_TWO_NAME/$bucket_file" "$test_file_folder/${bucket_file}_copy" || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
compare_files "$test_file_folder/$bucket_file" "$test_file_folder/${bucket_file}_copy" || local compare_result=$?
|
||||
[[ $compare_result -eq 0 ]] || fail "files don't match"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_bucket_or_contents "aws" "$BUCKET_TWO_NAME"
|
||||
delete_test_files "$bucket_file"
|
||||
}
|
||||
|
||||
test_create_bucket_invalid_name_aws_root() {
|
||||
if [[ $RECREATE_BUCKETS != "true" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
create_bucket_invalid_name "aws" || local create_result=$?
|
||||
[[ $create_result -eq 0 ]] || fail "Invalid name test failed"
|
||||
|
||||
# shellcheck disable=SC2154
|
||||
[[ "$bucket_create_error" == *"Invalid bucket name "* ]] || fail "unexpected error: $bucket_create_error"
|
||||
}
|
||||
|
||||
test_get_object_attributes_aws_root() {
|
||||
bucket_file="bucket_file"
|
||||
|
||||
create_test_files "$bucket_file" || fail "error creating test files"
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
|
||||
put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "failed to add object to bucket"
|
||||
get_object_attributes "$BUCKET_ONE_NAME" "$bucket_file" || failed "failed to get object attributes"
|
||||
# shellcheck disable=SC2154
|
||||
has_object_size=$(echo "$attributes" | jq -e '.ObjectSize' 2>&1) || fail "error checking for ObjectSize parameters: $has_object_size"
|
||||
if [[ $has_object_size -eq 0 ]]; then
|
||||
object_size=$(echo "$attributes" | jq -r ".ObjectSize")
|
||||
[[ $object_size == 0 ]] || fail "Incorrect object size: $object_size"
|
||||
else
|
||||
fail "ObjectSize parameter missing: $attributes"
|
||||
fi
|
||||
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
|
||||
}
|
||||
|
||||
test_get_put_object_legal_hold_aws_root() {
|
||||
# bucket must be created with lock for legal hold
|
||||
if [[ $RECREATE_BUCKETS == false ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
bucket_file="bucket_file"
|
||||
username="ABCDEFG"
|
||||
password="HIJKLMN"
|
||||
|
||||
legal_hold_retention_setup "$username" "$password" "$bucket_file"
|
||||
|
||||
get_object_lock_configuration "$BUCKET_ONE_NAME" || fail "error getting lock configuration"
|
||||
# shellcheck disable=SC2154
|
||||
log 5 "$lock_config"
|
||||
enabled=$(echo "$lock_config" | jq -r ".ObjectLockConfiguration.ObjectLockEnabled")
|
||||
[[ $enabled == "Enabled" ]] || fail "ObjectLockEnabled should be 'Enabled', is '$enabled'"
|
||||
|
||||
put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "ON" || fail "error putting legal hold on object"
|
||||
get_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" || fail "error getting object legal hold status"
|
||||
# shellcheck disable=SC2154
|
||||
log 5 "$legal_hold"
|
||||
hold_status=$(echo "$legal_hold" | grep -v "InsecureRequestWarning" | jq -r ".LegalHold.Status" 2>&1) || fail "error obtaining hold status: $hold_status"
|
||||
[[ $hold_status == "ON" ]] || fail "Status should be 'ON', is '$hold_status'"
|
||||
|
||||
echo "fdkljafajkfs" > "$test_file_folder/$bucket_file"
|
||||
if put_object_with_user "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$password"; then
|
||||
fail "able to overwrite object with hold"
|
||||
fi
|
||||
# shellcheck disable=SC2154
|
||||
#[[ $put_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $put_object_error"
|
||||
|
||||
if delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$password"; then
|
||||
fail "able to delete object with hold"
|
||||
fi
|
||||
# shellcheck disable=SC2154
|
||||
[[ $delete_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $delete_object_error"
|
||||
put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "OFF" || fail "error removing legal hold on object"
|
||||
delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$password" || fail "error deleting object after removing legal hold"
|
||||
|
||||
delete_bucket_recursive "s3api" "$BUCKET_ONE_NAME"
|
||||
}
|
||||
|
||||
test_get_put_object_retention_aws_root() {
|
||||
bucket_file="bucket_file"
|
||||
username="ABCDEFG"
|
||||
secret_key="HIJKLMN"
|
||||
|
||||
# TODO remove after able to change bucket owner back to root user
|
||||
if [[ $RECREATE_BUCKETS == "false" ]]; then
|
||||
skip
|
||||
fi
|
||||
|
||||
legal_hold_retention_setup "$username" "$secret_key" "$bucket_file"
|
||||
|
||||
get_object_lock_configuration "$BUCKET_ONE_NAME" || fail "error getting lock configuration"
|
||||
log 5 "$lock_config"
|
||||
enabled=$(echo "$lock_config" | jq -r ".ObjectLockConfiguration.ObjectLockEnabled")
|
||||
[[ $enabled == "Enabled" ]] || fail "ObjectLockEnabled should be 'Enabled', is '$enabled'"
|
||||
|
||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
retention_date=$(TZ="UTC" date -v+5S +"%Y-%m-%dT%H:%M:%S")
|
||||
else
|
||||
retention_date=$(TZ="UTC" date -d "+5 seconds" +"%Y-%m-%dT%H:%M:%S")
|
||||
fi
|
||||
log 5 "retention date: $retention_date"
|
||||
put_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "GOVERNANCE" "$retention_date" || fail "failed to add object retention"
|
||||
get_object_retention "$BUCKET_ONE_NAME" "$bucket_file" || fail "failed to get object retention"
|
||||
log 5 "$retention"
|
||||
retention=$(echo "$retention" | grep -v "InsecureRequestWarning")
|
||||
mode=$(echo "$retention" | jq -r ".Retention.Mode")
|
||||
retain_until_date=$(echo "$retention" | jq -r ".Retention.RetainUntilDate")
|
||||
[[ $mode == "GOVERNANCE" ]] || fail "retention mode should be governance, is $mode"
|
||||
[[ $retain_until_date == "$retention_date"* ]] || fail "retain until date should be $retention_date, is $retain_until_date"
|
||||
|
||||
echo "fdkljafajkfs" > "$test_file_folder/$bucket_file"
|
||||
put_object_with_user "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || local put_result=$?
|
||||
[[ $put_result -ne 0 ]] || fail "able to overwrite object with hold"
|
||||
# shellcheck disable=SC2154
|
||||
[[ $put_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $error"
|
||||
|
||||
delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || local delete_result=$?
|
||||
[[ $delete_result -ne 0 ]] || fail "able to delete object with hold"
|
||||
[[ $delete_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $error"
|
||||
|
||||
sleep 5
|
||||
|
||||
delete_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error deleting object"
|
||||
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$bucket_file"
|
||||
}
|
||||
|
||||
test_retention_bypass_aws_root() {
|
||||
bucket_file="bucket_file"
|
||||
username="ABCDEFG"
|
||||
secret_key="HIJKLMN"
|
||||
policy_file="policy_file"
|
||||
|
||||
legal_hold_retention_setup "$username" "$secret_key" "$bucket_file"
|
||||
|
||||
get_object_lock_configuration "$BUCKET_ONE_NAME" || fail "error getting lock configuration"
|
||||
log 5 "$lock_config"
|
||||
enabled=$(echo "$lock_config" | jq -r ".ObjectLockConfiguration.ObjectLockEnabled")
|
||||
[[ $enabled == "Enabled" ]] || fail "ObjectLockEnabled should be 'Enabled', is '$enabled'"
|
||||
|
||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
retention_date=$(TZ="UTC" date -v+30S +"%Y-%m-%dT%H:%M:%S")
|
||||
else
|
||||
retention_date=$(TZ="UTC" date -d "+30 seconds" +"%Y-%m-%dT%H:%M:%S")
|
||||
fi
|
||||
log 5 "retention date: $retention_date"
|
||||
put_object_retention "$BUCKET_ONE_NAME" "$bucket_file" "GOVERNANCE" "$retention_date" || fail "failed to add object retention"
|
||||
if delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file"; then
|
||||
log 2 "able to delete object despite retention"
|
||||
return 1
|
||||
fi
|
||||
cat <<EOF > "$test_file_folder/$policy_file"
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": "$username",
|
||||
"Action": ["s3:BypassGovernanceRetention","s3:DeleteObject"],
|
||||
"Resource": "arn:aws:s3:::$BUCKET_ONE_NAME/*"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$test_file_folder/$policy_file" || fail "error putting bucket policy"
|
||||
delete_object_bypass_retention "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || fail "error deleting object and bypassing retention"
|
||||
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$bucket_file" "$policy_file"
|
||||
}
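
A minimal sketch of what a helper like delete_object_bypass_retention presumably wraps, assuming it forwards the s3api bypass flag with the named user's credentials; the function name suffix and argument order here are assumptions, the real helper lives with the other command wrappers.

# hypothetical sketch of a bypass-retention delete helper
delete_object_bypass_retention_sketch() {
  local bucket=$1 key=$2 access_key=$3 secret_key=$4
  # requires the s3:BypassGovernanceRetention permission granted by the bucket
  # policy written in test_retention_bypass_aws_root above
  AWS_ACCESS_KEY_ID="$access_key" AWS_SECRET_ACCESS_KEY="$secret_key" \
    aws --no-verify-ssl s3api delete-object \
      --bucket "$bucket" --key "$key" --bypass-governance-retention
}
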
|
||||
|
||||
legal_hold_retention_setup() {
|
||||
[[ $# -eq 3 ]] || fail "legal hold or retention setup requires username, secret key, bucket file"
|
||||
|
||||
delete_bucket_or_contents_if_exists "s3api" "$BUCKET_ONE_NAME" || fail "error deleting bucket, or checking for existence"
|
||||
setup_user "$1" "$2" "user" || fail "error creating user if nonexistent"
|
||||
create_test_files "$3" || fail "error creating test files"
|
||||
|
||||
#create_bucket "s3api" "$BUCKET_ONE_NAME" || fail "error creating bucket"
|
||||
if [[ $RECREATE_BUCKETS == "true" ]]; then
|
||||
create_bucket_object_lock_enabled "$BUCKET_ONE_NAME" || fail "error creating bucket"
|
||||
fi
|
||||
change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$1" || fail "error changing bucket ownership"
|
||||
get_bucket_policy "s3api" "$BUCKET_ONE_NAME" || fail "error getting bucket policy"
|
||||
# shellcheck disable=SC2154
|
||||
log 5 "POLICY: $bucket_policy"
|
||||
get_bucket_owner "$BUCKET_ONE_NAME"
|
||||
# shellcheck disable=SC2154
|
||||
log 5 "owner: $bucket_owner"
|
||||
#put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred" || fail "error putting bucket ownership controls"
|
||||
put_object_with_user "s3api" "$test_file_folder/$3" "$BUCKET_ONE_NAME" "$3" "$1" "$2" || fail "failed to add object to bucket"
|
||||
}
|
||||
|
||||
test_s3api_list_objects_v1_aws_root() {
|
||||
local object_one="test-file-one"
|
||||
local object_two="test-file-two"
|
||||
local object_two_data="test data\n"
|
||||
|
||||
create_test_files "$object_one" "$object_two" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
printf "%s" "$object_two_data" > "$test_file_folder"/"$object_two"
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || local result=$?
|
||||
[[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
|
||||
put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || local copy_result_one=$?
|
||||
[[ $copy_result_one -eq 0 ]] || fail "Failed to add object $object_one"
|
||||
put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local copy_result_two=$?
|
||||
[[ $copy_result_two -eq 0 ]] || fail "Failed to add object $object_two"
|
||||
|
||||
list_objects_s3api_v1 "$BUCKET_ONE_NAME"
|
||||
# shellcheck disable=SC2154
|
||||
key_one=$(echo "$objects" | jq -r '.Contents[0].Key')
|
||||
[[ $key_one == "$object_one" ]] || fail "Object one mismatch ($key_one, $object_one)"
|
||||
size_one=$(echo "$objects" | jq -r '.Contents[0].Size')
|
||||
[[ $size_one -eq 0 ]] || fail "Object one size mismatch ($size_one, 0)"
|
||||
key_two=$(echo "$objects" | jq -r '.Contents[1].Key')
|
||||
[[ $key_two == "$object_two" ]] || fail "Object two mismatch ($key_two, $object_two)"
|
||||
size_two=$(echo "$objects" | jq '.Contents[1].Size')
|
||||
[[ $size_two -eq ${#object_two_data} ]] || fail "Object two size mismatch ($size_two, ${#object_two_data})"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$object_one" "$object_two"
|
||||
}
|
||||
|
||||
test_s3api_list_objects_v2_aws_root() {
|
||||
local object_one="test-file-one"
|
||||
local object_two="test-file-two"
|
||||
local object_two_data="test data\n"
|
||||
|
||||
create_test_files "$object_one" "$object_two" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
printf "%s" "$object_two_data" > "$test_file_folder"/"$object_two"
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || local result=$?
|
||||
[[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
|
||||
put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || local copy_object_one=$?
|
||||
[[ $copy_object_one -eq 0 ]] || fail "Failed to add object $object_one"
|
||||
put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local copy_object_two=$?
|
||||
[[ $copy_object_two -eq 0 ]] || fail "Failed to add object $object_two"
|
||||
|
||||
list_objects_v2 "$BUCKET_ONE_NAME" || fail "error listing objects (v2)"
|
||||
key_one=$(echo "$objects" | jq -r '.Contents[0].Key')
|
||||
[[ $key_one == "$object_one" ]] || fail "Object one mismatch ($key_one, $object_one)"
|
||||
size_one=$(echo "$objects" | jq -r '.Contents[0].Size')
|
||||
[[ $size_one -eq 0 ]] || fail "Object one size mismatch ($size_one, 0)"
|
||||
key_two=$(echo "$objects" | jq -r '.Contents[1].Key')
|
||||
[[ $key_two == "$object_two" ]] || fail "Object two mismatch ($key_two, $object_two)"
|
||||
size_two=$(echo "$objects" | jq -r '.Contents[1].Size')
|
||||
[[ $size_two -eq ${#object_two_data} ]] || fail "Object two size mismatch ($size_two, ${#object_two_data})"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$object_one" "$object_two"
|
||||
}
|
||||
|
||||
test_multipart_upload_list_parts_aws_root() {
|
||||
local bucket_file="bucket-file"
|
||||
|
||||
create_test_files "$bucket_file" || fail "error creating test file"
|
||||
dd if=/dev/urandom of="$test_file_folder/$bucket_file" bs=5M count=1 || fail "error creating test file"
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "failed to create bucket '$BUCKET_ONE_NAME'"
|
||||
|
||||
start_multipart_upload_and_list_parts "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || fail "listing multipart upload parts failed"
|
||||
|
||||
declare -a parts_map
|
||||
# shellcheck disable=SC2154
|
||||
log 5 "parts: $parts"
|
||||
for i in {0..3}; do
|
||||
local part_number
|
||||
local etag
|
||||
# shellcheck disable=SC2154
|
||||
part=$(echo "$parts" | grep -v "InsecureRequestWarning" | jq -r ".[$i]" 2>&1) || fail "error getting part: $part"
|
||||
part_number=$(echo "$part" | jq ".PartNumber" 2>&1) || fail "error parsing part number: $part_number"
|
||||
[[ $part_number != "" ]] || fail "error: blank part number"
|
||||
|
||||
etag=$(echo "$part" | jq ".ETag" 2>&1) || fail "error parsing etag: $etag"
|
||||
[[ $etag != "" ]] || fail "error: blank etag"
|
||||
# shellcheck disable=SC2004
|
||||
parts_map[$part_number]=$etag
|
||||
done
|
||||
[[ ${#parts_map[@]} -ne 0 ]] || fail "error loading multipart upload parts to check"
|
||||
|
||||
for i in {0..3}; do
|
||||
local part_number
|
||||
local etag
|
||||
# shellcheck disable=SC2154
|
||||
listed_part=$(echo "$listed_parts" | grep -v "InsecureRequestWarning" | jq -r ".Parts[$i]" 2>&1) || fail "error parsing listed part: $listed_part"
|
||||
part_number=$(echo "$listed_part" | jq ".PartNumber" 2>&1) || fail "error parsing listed part number: $part_number"
|
||||
etag=$(echo "$listed_part" | jq ".ETag" 2>&1) || fail "error getting listed etag: $etag"
|
||||
[[ ${parts_map[$part_number]} == "$etag" ]] || fail "error: etags don't match (part number: $part_number, etags ${parts_map[$part_number]},$etag)"
|
||||
done
|
||||
|
||||
run_then_abort_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder/$bucket_file" 4
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files $bucket_file
|
||||
}
|
||||
@@ -5,6 +5,7 @@ source ./tests/util.sh
|
||||
source ./tests/util_file.sh
|
||||
source ./tests/util_policy.sh
|
||||
source ./tests/commands/copy_object.sh
|
||||
source ./tests/commands/delete_bucket_tagging.sh
|
||||
source ./tests/commands/delete_object_tagging.sh
|
||||
source ./tests/commands/get_bucket_acl.sh
|
||||
source ./tests/commands/get_bucket_location.sh
|
||||
@@ -13,7 +14,10 @@ source ./tests/commands/get_object.sh
|
||||
source ./tests/commands/get_object_tagging.sh
|
||||
source ./tests/commands/list_buckets.sh
|
||||
source ./tests/commands/put_bucket_acl.sh
|
||||
source ./tests/commands/put_bucket_tagging.sh
|
||||
source ./tests/commands/put_object_tagging.sh
|
||||
source ./tests/commands/put_object.sh
|
||||
source ./tests/commands/put_public_access_block.sh
|
||||
|
||||
test_common_multipart_upload() {
|
||||
if [[ $# -ne 1 ]]; then
|
||||
@@ -47,14 +51,11 @@ test_common_create_delete_bucket() {
|
||||
fail "create/delete bucket test requires command type"
|
||||
fi
|
||||
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || local create_result=$?
|
||||
[[ $create_result -eq 0 ]] || fail "Failed to create bucket"
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "failed to create bucket"
|
||||
|
||||
bucket_exists "$1" "$BUCKET_ONE_NAME" || local exists_three=$?
|
||||
[[ $exists_three -eq 0 ]] || fail "Failed bucket existence check"
|
||||
bucket_exists "$1" "$BUCKET_ONE_NAME" || fail "failed bucket existence check"
|
||||
|
||||
delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME" || local delete_result_two=$?
|
||||
[[ $delete_result_two -eq 0 ]] || fail "Failed to delete bucket"
|
||||
delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME" || fail "failed to delete bucket"
|
||||
}
|
||||
|
||||
test_common_copy_object() {
|
||||
@@ -84,6 +85,7 @@ test_common_copy_object() {
|
||||
|
||||
delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
|
||||
delete_bucket_or_contents "$1" "$BUCKET_TWO_NAME"
|
||||
delete_test_files "$object_name" "$object_name-copy"
|
||||
}
|
||||
|
||||
test_common_put_object_with_data() {
|
||||
@@ -269,7 +271,7 @@ test_common_set_get_delete_bucket_tags() {
|
||||
|
||||
check_bucket_tags_empty "$1" "$BUCKET_ONE_NAME" || fail "error checking if bucket tags are empty"
|
||||
|
||||
put_bucket_tag "$1" "$BUCKET_ONE_NAME" $key $value
|
||||
put_bucket_tagging "$1" "$BUCKET_ONE_NAME" $key $value || fail "error putting bucket tags"
|
||||
get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || fail "Error getting bucket tags second time"
|
||||
|
||||
local tag_set_key
|
||||
@@ -285,7 +287,7 @@ test_common_set_get_delete_bucket_tags() {
|
||||
[[ $tag_set_key == "$key" ]] || fail "Key mismatch"
|
||||
[[ $tag_set_value == "$value" ]] || fail "Value mismatch"
|
||||
fi
|
||||
delete_bucket_tags "$1" "$BUCKET_ONE_NAME"
|
||||
delete_bucket_tagging "$1" "$BUCKET_ONE_NAME"
|
||||
|
||||
get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || fail "Error getting bucket tags third time"
|
||||
|
||||
@@ -303,15 +305,11 @@ test_common_set_get_object_tags() {
|
||||
local key="test_key"
|
||||
local value="test_value"
|
||||
|
||||
create_test_files "$bucket_file" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || local result=$?
|
||||
[[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
|
||||
put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket '$BUCKET_ONE_NAME'"
|
||||
create_test_files "$bucket_file" || fail "error creating test files"
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
|
||||
put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "Failed to add object to bucket '$BUCKET_ONE_NAME'"
|
||||
|
||||
get_object_tagging "$1" "$BUCKET_ONE_NAME" $bucket_file || local get_result=$?
|
||||
[[ $get_result -eq 0 ]] || fail "Error getting object tags"
|
||||
get_object_tagging "$1" "$BUCKET_ONE_NAME" $bucket_file || fail "Error getting object tags"
|
||||
if [[ $1 == 'aws' ]]; then
|
||||
tag_set=$(echo "$tags" | jq '.TagSet')
|
||||
[[ $tag_set == "[]" ]] || [[ $tag_set == "" ]] || fail "Error: tags not empty"
|
||||
@@ -319,9 +317,8 @@ test_common_set_get_object_tags() {
|
||||
fail "no tags found (tags: $tags)"
|
||||
fi
|
||||
|
||||
put_object_tag "$1" "$BUCKET_ONE_NAME" $bucket_file $key $value
|
||||
get_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" || local get_result_two=$?
|
||||
[[ $get_result_two -eq 0 ]] || fail "Error getting object tags"
|
||||
put_object_tagging "$1" "$BUCKET_ONE_NAME" $bucket_file $key $value || fail "error putting object tagging"
|
||||
get_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error getting object tags"
|
||||
if [[ $1 == 'aws' ]]; then
|
||||
tag_set_key=$(echo "$tags" | jq -r '.TagSet[0].Key')
|
||||
tag_set_value=$(echo "$tags" | jq -r '.TagSet[0].Value')
|
||||
@@ -399,26 +396,19 @@ test_common_delete_object_tagging() {
|
||||
tag_key="key"
|
||||
tag_value="value"
|
||||
|
||||
create_test_files "$bucket_file" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
create_test_files "$bucket_file" || fail "Error creating test files"
|
||||
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || local setup_result=$?
|
||||
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
|
||||
|
||||
put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
put_object "$1" "$test_file_folder"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "Failed to add object to bucket"
|
||||
|
||||
put_object_tag "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || put_result=$?
|
||||
[[ $put_result -eq 0 ]] || fail "failed to add tags to object"
|
||||
put_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || fail "failed to add tags to object"
|
||||
|
||||
get_and_verify_object_tags "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || get_result=$?
|
||||
[[ $get_result -eq 0 ]] || fail "failed to get tags"
|
||||
get_and_verify_object_tags "$1" "$BUCKET_ONE_NAME" "$bucket_file" "$tag_key" "$tag_value" || fail "failed to get tags"
|
||||
|
||||
delete_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" || delete_result=$?
|
||||
[[ $delete_result -eq 0 ]] || fail "error deleting object tagging"
|
||||
delete_object_tagging "$1" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error deleting object tagging"
|
||||
|
||||
check_object_tags_empty "$1" "$BUCKET_ONE_NAME" "$bucket_file" || get_result=$?
|
||||
[[ $get_result -eq 0 ]] || fail "failed to get tags"
|
||||
check_object_tags_empty "$1" "$BUCKET_ONE_NAME" "$bucket_file" || fail "failed to get tags"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$bucket_file"
|
||||
@@ -426,44 +416,93 @@ test_common_delete_object_tagging() {
|
||||
|
||||
test_common_get_bucket_location() {
|
||||
[[ $# -eq 1 ]] || fail "test common get bucket location missing command type"
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || local setup_result=$?
|
||||
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"
|
||||
get_bucket_location "aws" "$BUCKET_ONE_NAME"
|
||||
get_bucket_location "$1" "$BUCKET_ONE_NAME"
|
||||
# shellcheck disable=SC2154
|
||||
[[ $bucket_location == "null" ]] || [[ $bucket_location == "us-east-1" ]] || fail "wrong location: '$bucket_location'"
|
||||
}
|
||||
|
||||
test_put_bucket_acl_s3cmd() {
|
||||
if [[ $DIRECT != "true" ]]; then
|
||||
# https://github.com/versity/versitygw/issues/695
|
||||
skip
|
||||
fi
|
||||
setup_bucket "s3cmd" "$BUCKET_ONE_NAME" || fail "error creating bucket"
|
||||
put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred" || fail "error putting bucket ownership controls"
|
||||
|
||||
username="abcdefgh"
|
||||
if [[ $DIRECT != "true" ]]; then
|
||||
setup_user "$username" "HIJKLMN" "user" || fail "error creating user"
|
||||
fi
|
||||
sleep 5
|
||||
|
||||
get_bucket_acl "s3cmd" "$BUCKET_ONE_NAME" || fail "error retrieving acl"
|
||||
log 5 "Initial ACLs: $acl"
|
||||
acl_line=$(echo "$acl" | grep "ACL")
|
||||
user_id=$(echo "$acl_line" | awk '{print $2}')
|
||||
if [[ $DIRECT == "true" ]]; then
|
||||
[[ $user_id == "$DIRECT_DISPLAY_NAME:" ]] || fail "ID mismatch ($user_id, $DIRECT_DISPLAY_NAME)"
|
||||
else
|
||||
[[ $user_id == "$AWS_ACCESS_KEY_ID:" ]] || fail "ID mismatch ($user_id, $AWS_ACCESS_KEY_ID)"
|
||||
fi
|
||||
permission=$(echo "$acl_line" | awk '{print $3}')
|
||||
[[ $permission == "FULL_CONTROL" ]] || fail "Permission mismatch ($permission)"
|
||||
|
||||
if [[ $DIRECT == "true" ]]; then
|
||||
put_public_access_block_enable_public_acls "$BUCKET_ONE_NAME" || fail "error enabling public ACLs"
|
||||
fi
|
||||
put_bucket_canned_acl_s3cmd "$BUCKET_ONE_NAME" "--acl-public" || fail "error putting canned s3cmd ACL"
|
||||
|
||||
get_bucket_acl "s3cmd" "$BUCKET_ONE_NAME" || fail "error retrieving acl"
|
||||
log 5 "ACL after read put: $acl"
|
||||
acl_lines=$(echo "$acl" | grep "ACL")
|
||||
log 5 "ACL lines: $acl_lines"
|
||||
while IFS= read -r line; do
|
||||
lines+=("$line")
|
||||
done <<< "$acl_lines"
|
||||
log 5 "lines: ${lines[*]}"
|
||||
[[ ${#lines[@]} -eq 2 ]] || fail "unexpected number of ACL lines: ${#lines[@]}"
|
||||
anon_name=$(echo "${lines[1]}" | awk '{print $2}')
|
||||
anon_permission=$(echo "${lines[1]}" | awk '{print $3}')
|
||||
[[ $anon_name == "*anon*:" ]] || fail "unexpected anon name: $anon_name"
|
||||
[[ $anon_permission == "READ" ]] || fail "unexpected anon permission: $anon_permission"
|
||||
|
||||
delete_bucket_or_contents "s3cmd" "$BUCKET_ONE_NAME"
|
||||
}
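
A minimal sketch of a canned-ACL wrapper like put_bucket_canned_acl_s3cmd, assuming it simply forwards the flag used above to s3cmd's setacl command; the sketch's name and argument order are assumptions.

# hypothetical wrapper sketch around s3cmd canned ACLs
put_bucket_canned_acl_s3cmd_sketch() {
  local bucket=$1 canned_acl_flag=$2   # e.g. "--acl-public" as used above
  s3cmd --no-check-certificate setacl "s3://$bucket" "$canned_acl_flag"
}
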
|
||||
|
||||
test_common_put_bucket_acl() {
|
||||
[[ $# -eq 1 ]] || fail "test common put bucket acl missing command type"
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "error creating bucket"
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "error creating bucket"
|
||||
put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred" || fail "error putting bucket ownership controls"
|
||||
|
||||
if ! user_exists "ABCDEFG"; then
|
||||
create_user "ABCDEFG" "HIJKLMN" user || fail "error creating user"
|
||||
fi
|
||||
username="ABCDEFG"
|
||||
setup_user "$username" "HIJKLMN" "user" || fail "error creating user"
|
||||
|
||||
get_bucket_acl "$1" "$BUCKET_ONE_NAME" || local result=$?
|
||||
[[ $result -eq 0 ]] || fail "Error retrieving acl"
|
||||
get_bucket_acl "$1" "$BUCKET_ONE_NAME" || fail "error retrieving acl"
|
||||
|
||||
log 5 "Initial ACLs: $acl"
|
||||
id=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq '.Owner.ID')
|
||||
if [[ $id != '"'"$AWS_ACCESS_KEY_ID"'"' ]]; then
|
||||
# in some cases, ID is canonical user ID rather than AWS_ACCESS_KEY_ID
|
||||
canonical_id=$(aws --no-verify-ssl s3api list-buckets --query 'Owner.ID') || local list_result=$?
|
||||
[[ $list_result -eq 0 ]] || fail "error getting canonical ID: $canonical_id"
|
||||
id=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq -r '.Owner.ID' 2>&1) || fail "error getting ID: $id"
|
||||
if [[ $id != "$AWS_ACCESS_KEY_ID" ]]; then
|
||||
# for direct, ID is canonical user ID rather than AWS_ACCESS_KEY_ID
|
||||
canonical_id=$(aws --no-verify-ssl s3api list-buckets --query 'Owner.ID' 2>&1) || fail "error getting canonical ID: $canonical_id"
|
||||
[[ $id == "$canonical_id" ]] || fail "acl ID doesn't match AWS key or canonical ID"
|
||||
fi
|
||||
|
||||
acl_file="test-acl"
|
||||
create_test_files "$acl_file"
|
||||
|
||||
if [[ $DIRECT == "true" ]]; then
|
||||
grantee="{\"Type\": \"Group\", \"URI\": \"http://acs.amazonaws.com/groups/global/AllUsers\"}"
|
||||
else
|
||||
grantee="{\"ID\": \"$username\", \"Type\": \"CanonicalUser\"}"
|
||||
fi
|
||||
|
||||
cat <<EOF > "$test_file_folder"/"$acl_file"
|
||||
{
|
||||
"Grants": [
|
||||
{
|
||||
"Grantee": {
|
||||
"ID": "ABCDEFG",
|
||||
"Type": "CanonicalUser"
|
||||
},
|
||||
"Grantee": $grantee,
|
||||
"Permission": "READ"
|
||||
}
|
||||
],
|
||||
@@ -474,18 +513,12 @@ cat <<EOF > "$test_file_folder"/"$acl_file"
|
||||
EOF
|
||||
|
||||
log 6 "before 1st put acl"
|
||||
if [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
|
||||
put_bucket_acl "$1" "$BUCKET_ONE_NAME" "$test_file_folder"/"$acl_file" || fail "error putting first acl"
|
||||
else
|
||||
put_bucket_acl "$1" "$BUCKET_ONE_NAME" "ABCDEFG" || fail "error putting first acl"
|
||||
fi
|
||||
|
||||
get_bucket_acl "$1" "$BUCKET_ONE_NAME" || local result=$?
|
||||
[[ $result -eq 0 ]] || fail "Error retrieving second acl"
|
||||
put_bucket_acl_s3api "$1" "$BUCKET_ONE_NAME" "$test_file_folder"/"$acl_file" || fail "error putting first acl"
|
||||
get_bucket_acl "$1" "$BUCKET_ONE_NAME" || fail "error retrieving second ACL"
|
||||
|
||||
log 5 "Acls after 1st put: $acl"
|
||||
public_grants=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq -r '.Grants[0]')
|
||||
permission=$(echo "$public_grants" | jq -r '.Permission')
|
||||
public_grants=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq -r '.Grants[1]' 2>&1) || fail "error getting public grants: $public_grants"
|
||||
permission=$(echo "$public_grants" | jq -r '.Permission' 2>&1) || fail "error getting permission: $permission"
|
||||
[[ $permission == "READ" ]] || fail "incorrect permission ($permission)"
|
||||
|
||||
cat <<EOF > "$test_file_folder"/"$acl_file"
|
||||
@@ -493,7 +526,7 @@ cat <<EOF > "$test_file_folder"/"$acl_file"
|
||||
"Grants": [
|
||||
{
|
||||
"Grantee": {
|
||||
"ID": "ABCDEFG",
|
||||
"ID": "$username",
|
||||
"Type": "CanonicalUser"
|
||||
},
|
||||
"Permission": "FULL_CONTROL"
|
||||
@@ -505,17 +538,14 @@ cat <<EOF > "$test_file_folder"/"$acl_file"
|
||||
}
|
||||
EOF
|
||||
|
||||
put_bucket_acl "$1" "$BUCKET_ONE_NAME" "$test_file_folder"/"$acl_file" || local put_result=$?
|
||||
[[ $put_result -eq 0 ]] || fail "Error putting second acl"
|
||||
|
||||
get_bucket_acl "$1" "$BUCKET_ONE_NAME" || local result=$?
|
||||
[[ $result -eq 0 ]] || fail "Error retrieving second acl"
|
||||
put_bucket_acl_s3api "$1" "$BUCKET_ONE_NAME" "$test_file_folder"/"$acl_file" || fail "error putting second acl"
|
||||
get_bucket_acl "$1" "$BUCKET_ONE_NAME" || fail "error retrieving second ACL"
|
||||
|
||||
log 5 "Acls after 2nd put: $acl"
|
||||
public_grants=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq -r '.Grants')
|
||||
public_grant_length=$(echo "$public_grants" | jq 'length')
|
||||
[[ $public_grant_length -eq 1 ]] || fail "incorrect grant length for private ACL ($public_grant_length)"
|
||||
permission=$(echo "$public_grants" | jq -r '.[0].Permission')
|
||||
public_grants=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq -r '.Grants' 2>&1) || fail "error retrieving public grants: $public_grants"
|
||||
public_grant_length=$(echo "$public_grants" | jq -r 'length' 2>&1) || fail "Error retrieving public grant length: $public_grant_length"
|
||||
[[ $public_grant_length -eq 2 ]] || fail "incorrect grant length for private ACL ($public_grant_length)"
|
||||
permission=$(echo "$public_grants" | jq -r '.[0].Permission' 2>&1) || fail "Error retrieving permission: $permission"
|
||||
[[ $permission == "FULL_CONTROL" ]] || fail "incorrect permission ($permission)"
|
||||
|
||||
delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
|
||||
@@ -526,48 +556,55 @@ test_common_get_put_delete_bucket_policy() {
|
||||
|
||||
policy_file="policy_file"
|
||||
|
||||
create_test_files "$policy_file" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating policy file"
|
||||
create_test_files "$policy_file" || fail "error creating policy file"
|
||||
|
||||
effect="Allow"
|
||||
principal="*"
|
||||
#principal="*"
|
||||
if [[ $DIRECT == "true" ]]; then
|
||||
principal="{\"AWS\": \"arn:aws:iam::$DIRECT_AWS_USER_ID:user/s3user\"}"
|
||||
else
|
||||
principal="\"*\""
|
||||
fi
|
||||
action="s3:GetObject"
|
||||
resource="arn:aws:s3:::$BUCKET_ONE_NAME/*"
|
||||
|
||||
cat <<EOF > "$test_file_folder"/$policy_file
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "$effect",
|
||||
"Principal": "$principal",
|
||||
"Action": "$action",
|
||||
"Resource": "$resource"
|
||||
}
|
||||
]
|
||||
"Effect": "$effect",
|
||||
"Principal": $principal,
|
||||
"Action": "$action",
|
||||
"Resource": "$resource"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
log 5 "POLICY: $(cat "$test_file_folder/$policy_file")"
|
||||
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || local setup_result=$?
|
||||
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"
|
||||
setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
|
||||
|
||||
check_for_empty_policy "$1" "$BUCKET_ONE_NAME" || check_result=$?
|
||||
[[ $get_result -eq 0 ]] || fail "policy not empty"
|
||||
check_for_empty_policy "$1" "$BUCKET_ONE_NAME" || fail "policy not empty"
|
||||
|
||||
put_bucket_policy "$1" "$BUCKET_ONE_NAME" "$test_file_folder"/"$policy_file" || put_result=$?
|
||||
[[ $put_result -eq 0 ]] || fail "error putting bucket"
|
||||
put_bucket_policy "$1" "$BUCKET_ONE_NAME" "$test_file_folder"/"$policy_file" || fail "error putting bucket policy"
|
||||
|
||||
get_bucket_policy "$1" "$BUCKET_ONE_NAME" || local get_result=$?
|
||||
[[ $get_result -eq 0 ]] || fail "error getting bucket policy after setting"
|
||||
get_bucket_policy "$1" "$BUCKET_ONE_NAME" || fail "error getting bucket policy after setting"
|
||||
|
||||
log 5 "$bucket_policy"
|
||||
returned_effect=$(echo "$bucket_policy" | jq -r '.Statement[0].Effect')
|
||||
# shellcheck disable=SC2154
|
||||
log 5 "POLICY: $bucket_policy"
|
||||
statement=$(echo "$bucket_policy" | jq -r '.Statement[0]' 2>&1) || fail "error getting statement value: $statement"
|
||||
returned_effect=$(echo "$statement" | jq -r '.Effect' 2>&1) || fail "error getting effect: $returned_effect"
|
||||
[[ $effect == "$returned_effect" ]] || fail "effect mismatch ($effect, $returned_effect)"
|
||||
returned_principal=$(echo "$bucket_policy" | jq -r '.Statement[0].Principal')
|
||||
[[ $principal == "$returned_principal" ]] || fail "principal mismatch ($principal, $returned_principal)"
|
||||
returned_action=$(echo "$bucket_policy" | jq -r '.Statement[0].Action')
|
||||
returned_principal=$(echo "$statement" | jq -r '.Principal')
|
||||
if [[ -n $DIRECT ]] && arn=$(echo "$returned_principal" | jq -r '.AWS' 2>&1); then
|
||||
[[ $arn == "arn:aws:iam::$DIRECT_AWS_USER_ID:user/s3user" ]] || fail "arn mismatch"
|
||||
else
|
||||
[[ $principal == "\"$returned_principal\"" ]] || fail "principal mismatch ($principal, $returned_principal)"
|
||||
fi
|
||||
returned_action=$(echo "$statement" | jq -r '.Action')
|
||||
[[ $action == "$returned_action" ]] || fail "action mismatch ($action, $returned_action)"
|
||||
returned_resource=$(echo "$bucket_policy" | jq -r '.Statement[0].Resource')
|
||||
returned_resource=$(echo "$statement" | jq -r '.Resource')
|
||||
[[ $resource == "$returned_resource" ]] || fail "resource mismatch ($resource, $returned_resource)"
|
||||
|
||||
delete_bucket_policy "$1" "$BUCKET_ONE_NAME" || delete_result=$?
|
||||
|
||||
@@ -24,7 +24,14 @@ export RUN_MC=true
|
||||
test_common_create_delete_bucket "mc"
|
||||
}
|
||||
|
||||
# delete-bucket - test_create_delete_bucket
|
||||
# delete-bucket
|
||||
@test "test_delete_bucket" {
|
||||
if [[ $RECREATE_BUCKETS == "false" ]]; then
|
||||
skip "will not test bucket deletion in static bucket test config"
|
||||
fi
|
||||
setup_bucket "mc" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
|
||||
delete_bucket "mc" "$BUCKET_ONE_NAME" || fail "error deleting bucket"
|
||||
}
|
||||
|
||||
# delete-bucket-policy
|
||||
@test "test_get_put_delete_bucket_policy" {
|
||||
|
||||
@@ -39,3 +39,11 @@ source ./tests/test_common.sh
|
||||
@test "test_list_objects_file_count" {
|
||||
test_common_list_objects_file_count "s3"
|
||||
}
|
||||
|
||||
@test "test_delete_bucket" {
|
||||
if [[ $RECREATE_BUCKETS == "false" ]]; then
|
||||
skip "will not test bucket deletion in static bucket test config"
|
||||
fi
|
||||
setup_bucket "s3" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
|
||||
delete_bucket "s3" "$BUCKET_ONE_NAME" || fail "error deleting bucket"
|
||||
}
|
||||
|
||||
@@ -18,9 +18,9 @@ export RUN_USERS=true
|
||||
}
|
||||
|
||||
# copy-object
|
||||
#@test "test_copy_object" {
|
||||
# test_common_copy_object "s3cmd"
|
||||
#}
|
||||
@test "test_copy_object" {
|
||||
test_common_copy_object "s3cmd"
|
||||
}
|
||||
|
||||
# create-bucket
|
||||
@test "test_create_delete_bucket" {
|
||||
@@ -73,9 +73,9 @@ export RUN_USERS=true
|
||||
test_common_put_object_no_data "s3cmd"
|
||||
}
|
||||
|
||||
#@test "test_put_bucket_acl" {
|
||||
# test_common_put_bucket_acl "s3cmd"
|
||||
#}
|
||||
@test "test_put_bucket_acl" {
|
||||
test_put_bucket_acl_s3cmd
|
||||
}
|
||||
|
||||
# test listing buckets on versitygw
|
||||
@test "test_list_buckets_s3cmd" {
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
source ./tests/test_user_common.sh
|
||||
source ./tests/util_users.sh
|
||||
source ./tests/commands/get_object.sh
|
||||
source ./tests/commands/put_object.sh
|
||||
|
||||
export RUN_USERS=true
|
||||
|
||||
@@ -26,3 +28,88 @@ export RUN_USERS=true
|
||||
@test "test_userplus_operation_aws" {
|
||||
test_userplus_operation "aws"
|
||||
}
|
||||
|
||||
@test "test_user_get_object" {
|
||||
username="ABCDEFG"
|
||||
password="HIJKLMN"
|
||||
test_file="test_file"
|
||||
|
||||
setup_user "$username" "$password" "user" || fail "error creating user if nonexistent"
|
||||
create_test_files "$test_file" || fail "error creating test files"
|
||||
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
|
||||
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password"; then
|
||||
fail "able to get object despite not being bucket owner"
|
||||
fi
|
||||
change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$username" || fail "error changing bucket ownership"
|
||||
put_object "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "failed to add object to bucket"
|
||||
get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password" || fail "error getting object"
|
||||
}
|
||||
|
||||
@test "test_userplus_get_object" {
|
||||
username="ABCDEFG"
|
||||
password="HIJKLMN"
|
||||
test_file="test_file"
|
||||
|
||||
setup_user "$username" "$password" "admin" || fail "error creating user if nonexistent"
|
||||
create_test_files "$test_file" || fail "error creating test files"
|
||||
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
|
||||
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password"; then
|
||||
fail "able to get object despite not being bucket owner"
|
||||
fi
|
||||
change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$username" || fail "error changing bucket ownership"
|
||||
put_object "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "failed to add object to bucket"
|
||||
get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password" || fail "error getting object"
|
||||
}
|
||||
|
||||
@test "test_user_delete_object" {
|
||||
username="ABCDEFG"
|
||||
password="HIJKLMN"
|
||||
test_file="test_file"
|
||||
|
||||
setup_user "$username" "$password" "user" || fail "error creating user if nonexistent"
|
||||
create_test_files "$test_file" || fail "error creating test files"
|
||||
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
|
||||
if get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password"; then
|
||||
fail "able to get object despite not being bucket owner"
|
||||
fi
|
||||
change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$username" || fail "error changing bucket ownership"
|
||||
put_object "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" || fail "failed to add object to bucket"
|
||||
delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password" || fail "error deleting object"
|
||||
}
|
||||
|
||||
@test "test_admin_put_get_object" {
|
||||
username="ABCDEFG"
|
||||
password="HIJKLMN"
|
||||
test_file="test_file"
|
||||
|
||||
setup_user "$username" "$password" "admin" || fail "error creating user if nonexistent"
|
||||
create_test_file_with_size "$test_file" 10 || fail "error creating test file"
|
||||
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
|
||||
put_object_with_user "s3api" "$test_file_folder/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password" || fail "failed to add object to bucket"
|
||||
get_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy" "$username" "$password" || fail "error getting object"
|
||||
compare_files "$test_file_folder/$test_file" "$test_file_folder/$test_file-copy" || fail "files don't match"
|
||||
delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password" || fail "error deleting object"
|
||||
if get_object "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_folder/$test_file-copy"; then
|
||||
fail "file not successfully deleted"
|
||||
fi
|
||||
# shellcheck disable=SC2154
|
||||
[[ "$get_object_error" == *"NoSuchKey"* ]] || fail "unexpected error message: $get_object_error"
|
||||
delete_bucket_or_contents "s3api" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$test_file" "$test_file-copy"
|
||||
}
|
||||
|
||||
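test_admin_put_get_object checks the round trip with compare_files and then expects a NoSuchKey error once the object is gone. One plausible way such a comparison helper could be written is a checksum match, sketched below under the assumption that sha256sum is available; the suite's real compare_files may work differently.

```
# Hypothetical sketch of a compare_files-style check -- not the suite's implementation.
compare_files_sketch() {
  local sum_a sum_b
  sum_a=$(sha256sum "$1" | awk '{print $1}') || return 1
  sum_b=$(sha256sum "$2" | awk '{print $1}') || return 1
  # succeed only if both files hash to the same value
  [[ "$sum_a" == "$sum_b" ]]
}
```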
@test "test_user_create_multipart_upload" {
|
||||
username="ABCDEFG"
|
||||
password="HIJKLMN"
|
||||
test_file="test_file"
|
||||
|
||||
setup_user "$username" "$password" "user" || fail "error creating user if nonexistent"
|
||||
create_large_file "$test_file" || fail "error creating test file"
|
||||
setup_bucket "s3api" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
|
||||
change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$BUCKET_ONE_NAME" "$username" || fail "error changing bucket ownership"
|
||||
create_multipart_upload_with_user "$BUCKET_ONE_NAME" "dummy" "$username" "$password" || fail "unable to create multipart upload"
|
||||
}
|
||||
|
||||
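The multipart test only needs the upload to be created, not completed. A rough sketch of how a create_multipart_upload_with_user-style wrapper could issue the corresponding aws s3api call with the test user's credentials follows; the wrapper name and AWS_ENDPOINT_URL are assumed for illustration and may not match the suite's helper.

```
# Hypothetical sketch -- not the suite's real create_multipart_upload_with_user.
create_multipart_upload_with_user_sketch() {
  local bucket="$1" key="$2" user_key="$3" user_secret="$4"
  # sign the request as the test user; the returned UploadId is not used further here
  AWS_ACCESS_KEY_ID="$user_key" AWS_SECRET_ACCESS_KEY="$user_secret" \
    aws --endpoint-url "$AWS_ENDPOINT_URL" s3api create-multipart-upload \
      --bucket "$bucket" --key "$key"
}
```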
@@ -4,6 +4,7 @@ source ./tests/setup.sh
source ./tests/util_users.sh
source ./tests/util.sh
source ./tests/util_bucket_create.sh
source ./tests/commands/list_buckets.sh

test_admin_user() {
  if [[ $# -ne 1 ]]; then
@@ -15,31 +16,21 @@ test_admin_user() {
  admin_password="123456"
  user_password="789012"

  user_exists "$admin_username" || local admin_exists_result=$?
  if [[ $admin_exists_result -eq 0 ]]; then
    delete_user "$admin_username" || local delete_admin_result=$?
    [[ $delete_admin_result -eq 0 ]] || fail "failed to delete admin user"
  fi
  create_user "$admin_username" "$admin_password" "admin" || create_admin_result=$?
  [[ $create_admin_result -eq 0 ]] || fail "failed to create admin user"
  setup_user "$admin_username" "$admin_password" "admin" || fail "error setting up admin user"

  user_exists "$user_username" || local user_exists_result=$?
  if [[ $user_exists_result -eq 0 ]]; then
    delete_user "$user_username" || local delete_user_result=$?
    [[ $delete_user_result -eq 0 ]] || fail "failed to delete user user"
  if user_exists "$user_username"; then
    delete_user "$user_username" || fail "failed to delete user '$user_username'"
  fi
  create_user_with_user "$admin_username" "$admin_password" "$user_username" "$user_password" "user"
  create_user_with_user "$admin_username" "$admin_password" "$user_username" "$user_password" "user" || fail "failed to create user '$user_username'"

  setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"
  delete_bucket "aws" "versity-gwtest-admin-bucket" || local delete_result=$?
  [[ $delete_result -eq 0 ]] || fail "error deleting bucket if it exists"
  create_bucket_with_user "aws" "versity-gwtest-admin-bucket" "$admin_username" "$admin_password" || create_result_two=$?
  [[ $create_result_two -eq 0 ]] || fail "error creating bucket with user"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "error setting up bucket"
  delete_bucket "aws" "versity-gwtest-admin-bucket" || fail "error deleting bucket if it exists"
  create_bucket_with_user "aws" "versity-gwtest-admin-bucket" "$admin_username" "$admin_password" || fail "error creating bucket with admin user"

  bucket_one_found=false
  bucket_two_found=false
  list_buckets_with_user "aws" "$admin_username" "$admin_password"
  list_buckets_with_user "aws" "$admin_username" "$admin_password" || fail "error listing buckets with admin user"
  # shellcheck disable=SC2154
  for bucket in "${bucket_array[@]}"; do
    if [ "$bucket" == "$BUCKET_ONE_NAME" ]; then
      bucket_one_found=true
@@ -53,8 +44,7 @@ test_admin_user() {
  if [ $bucket_one_found == false ] || [ $bucket_two_found == false ]; then
    fail "not all expected buckets listed"
  fi
  change_bucket_owner "$admin_username" "$admin_password" "versity-gwtest-admin-bucket" "$user_username" || local change_result=$?
  [[ $change_result -eq 0 ]] || fail "error changing bucket owner"
  change_bucket_owner "$admin_username" "$admin_password" "versity-gwtest-admin-bucket" "$user_username" || fail "error changing bucket owner"

  delete_bucket "aws" "versity-gwtest-admin-bucket"
  delete_user "$user_username"
@@ -69,17 +59,11 @@ test_create_user_already_exists() {
  username="ABCDEG"
  password="123456"

  user_exists "$username" || local exists_result=$?
  if [[ $exists_result -eq 0 ]]; then
    delete_user "$username" || local delete_result=$?
    [[ $delete_result -eq 0 ]] || fail "failed to delete user '$username'"
  setup_user "$username" "123456" "admin" || fail "error setting up user"
  if create_user "$username" "123456" "admin"; then
    fail "'user already exists' error not returned"
  fi

  create_user "$username" "123456" "admin" || local create_result=$?
  [[ $create_result -eq 0 ]] || fail "error creating user"
  create_user "$username" "123456" "admin" || local create_result=$?
  [[ $create_result -eq 1 ]] || fail "'user already exists' error not returned"

  delete_bucket "aws" "versity-gwtest-admin-bucket"
  delete_user "$username"
}
@@ -92,31 +76,24 @@ test_user_user() {
  username="ABCDEG"
  password="123456"

  user_exists "$username" || local exists_result=$?
  if [[ $exists_result -eq 0 ]]; then
    delete_user "$username" || local delete_result=$?
    [[ $delete_result -eq 0 ]] || fail "failed to delete user '$username'"
  fi
  setup_user "$username" "$password" "user" || fail "error setting up user"
  delete_bucket "aws" "versity-gwtest-user-bucket"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "error setting up bucket '$BUCKET_ONE_NAME'"

  create_user "$username" "123456" "user" || local create_result=$?
  [[ $create_result -eq 0 ]] || fail "error creating user"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"

  create_bucket_with_user "aws" "versity-gwtest-user-bucket" "$username" "$password" || create_result_two=$?
  [[ $create_result_two -eq 1 ]] || fail "creating bucket with 'user' account failed to return error"
  if create_bucket_with_user "aws" "versity-gwtest-user-bucket" "$username" "$password"; then
    fail "creating bucket with 'user' account failed to return error"
  fi
  # shellcheck disable=SC2154
  [[ $error == *"Access Denied"* ]] || fail "error message '$error' doesn't contain 'Access Denied'"

  create_bucket "aws" "versity-gwtest-user-bucket" || create_result_three=$?
  [[ $create_result_three -eq 0 ]] || fail "creating bucket account returned error"
  create_bucket "aws" "versity-gwtest-user-bucket" || fail "error creating bucket"

  change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "versity-gwtest-user-bucket" "$username" || local change_result=$?
  [[ $change_result -eq 0 ]] || fail "error changing bucket owner"
  change_bucket_owner "$username" "$password" "versity-gwtest-user-bucket" "admin" || local change_result_two=$?
  [[ $change_result_two -eq 1 ]] || fail "user shouldn't be able to change bucket owner"
  change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "versity-gwtest-user-bucket" "$username" || fail "error changing bucket owner"
  if change_bucket_owner "$username" "$password" "versity-gwtest-user-bucket" "admin"; then
    fail "user shouldn't be able to change bucket owner"
  fi

  list_buckets_with_user "aws" "$username" "$password"
  list_buckets_with_user "aws" "$username" "$password" || fail "error listing buckets with user '$username'"
  bucket_found=false
  for bucket in "${bucket_array[@]}"; do
    if [ "$bucket" == "$BUCKET_ONE_NAME" ]; then
@@ -141,22 +118,13 @@ test_userplus_operation() {
  username="ABCDEG"
  password="123456"

  user_exists "$username" || local exists_result=$?
  if [[ $exists_result -eq 0 ]]; then
    delete_user "$username" || local delete_result=$?
    [[ $delete_result -eq 0 ]] || fail "failed to delete user '$username'"
  fi
  delete_bucket "aws" "versity-gwtest-userplus-bucket"
  setup_user "$username" "$password" "userplus" || fail "error creating user '$username'"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "error setting up bucket '$BUCKET_ONE_NAME'"

  create_user "$username" "123456" "userplus" || local create_result=$?
  [[ $create_result -eq 0 ]] || fail "error creating user"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"
  create_bucket_with_user "aws" "versity-gwtest-userplus-bucket" "$username" "$password" || fail "error creating bucket with user '$username'"

  create_bucket_with_user "aws" "versity-gwtest-userplus-bucket" "$username" "$password" || create_result_two=$?
  [[ $create_result_two -eq 0 ]] || fail "error creating bucket"

  list_buckets_with_user "aws" "$username" "$password"
  list_buckets_with_user "aws" "$username" "$password" || fail "error listing buckets with user '$username'"
  bucket_found=false
  for bucket in "${bucket_array[@]}"; do
    if [ "$bucket" == "$BUCKET_ONE_NAME" ]; then
@@ -169,10 +137,10 @@ test_userplus_operation() {
    fail "userplus-owned bucket not found in user list"
  fi

  change_bucket_owner "$username" "$password" "versity-gwtest-userplus-bucket" "admin" || local change_result_two=$?
  [[ $change_result_two -eq 1 ]] || fail "userplus shouldn't be able to change bucket owner"
  if change_bucket_owner "$username" "$password" "versity-gwtest-userplus-bucket" "admin"; then
    fail "userplus shouldn't be able to change bucket owner"
  fi

  delete_bucket "aws" "versity-gwtest-admin-bucket"
  delete_user "$username" || delete_result=$?
  [[ $delete_result -eq 0 ]] || fail "error deleting user"
  delete_user "$username"
}
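Throughout this file the old user_exists / delete_user / create_user boilerplate is collapsed into a single setup_user call. Built only from the helpers already visible in the diff, such a function could plausibly look like the sketch below; the suite's real setup_user in its utility scripts may differ.

```
# Hypothetical sketch -- the suite defines its own setup_user in its utility scripts.
setup_user_sketch() {
  local username="$1" password="$2" role="$3"
  # drop any stale account so the test always starts from a known state
  if user_exists "$username"; then
    delete_user "$username" || return 1
  fi
  create_user "$username" "$password" "$role"
}
```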