mirror of https://github.com/versity/versitygw.git
synced 2026-01-25 04:22:02 +00:00

Compare commits: v1.0.20 ... test/direc (11 commits)

| Commit SHA1 |
|---|
| 5e1b4d8011 |
| 82ea2723fa |
| 5e53c91090 |
| bcc60bc933 |
| 3e8d81a2a9 |
| 38edfa2a73 |
| 2e9e6887b1 |
| 352cd5dc94 |
| 6ae48f709b |
| 3cd61859b8 |
| 4f78382ae6 |
.github/workflows/azurite.yml (vendored): 4 changes
@@ -8,10 +8,10 @@ jobs:
   steps:
     - name: Checkout
-      uses: actions/checkout@v5
+      uses: actions/checkout@v4

     - name: Set up Go
-      uses: actions/setup-go@v6
+      uses: actions/setup-go@v5
       with:
         go-version: 'stable'
       id: go
.github/workflows/docker-bats.yml (vendored): 3 changes
@@ -8,12 +8,13 @@ jobs:
   steps:
     - name: Checkout
-      uses: actions/checkout@v5
+      uses: actions/checkout@v4

     - name: Build Docker Image
       run: |
         cp tests/.env.docker.default tests/.env.docker
         cp tests/.secrets.default tests/.secrets
+        # see https://github.com/versity/versitygw/issues/1034
         docker build \
           --build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
           --build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" \
.github/workflows/docker.yml (vendored): 2 changes
@@ -12,7 +12,7 @@ jobs:
       contents: read
     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v4

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
.github/workflows/functional.yml (vendored): 4 changes
@@ -9,10 +9,10 @@ jobs:
   steps:
     - name: Checkout
-      uses: actions/checkout@v5
+      uses: actions/checkout@v4

     - name: Set up Go
-      uses: actions/setup-go@v6
+      uses: actions/setup-go@v5
       with:
         go-version: 'stable'
       id: go
.github/workflows/go.yml (vendored): 8 changes
@@ -9,10 +9,10 @@ jobs:
   steps:

     - name: Check out code into the Go module directory
-      uses: actions/checkout@v5
+      uses: actions/checkout@v4

     - name: Set up Go
-      uses: actions/setup-go@v6
+      uses: actions/setup-go@v5
       with:
         go-version: 'stable'
       id: go
@@ -46,10 +46,10 @@ jobs:
   steps:

     - name: Check out code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v4

     - name: Set up Go
-      uses: actions/setup-go@v6
+      uses: actions/setup-go@v5
       with:
         go-version: 'stable'
.github/workflows/goreleaser.yml (vendored): 4 changes
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
@@ -20,7 +20,7 @@ jobs:
         run: git fetch --force --tags

       - name: Setup Go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v5
         with:
           go-version: stable
.github/workflows/host-style-tests.yml (vendored): 2 changes
@@ -7,7 +7,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v4

       - name: run host-style tests
         run: make test-host-style
.github/workflows/shellcheck.yml (vendored): 2 changes
@@ -9,7 +9,7 @@ jobs:
   steps:

     - name: Check out code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v4

     - name: Run checks
       run: |
.github/workflows/static.yml (vendored): 4 changes
@@ -9,12 +9,12 @@ jobs:
   steps:

     - name: Checkout
-      uses: actions/checkout@v5
+      uses: actions/checkout@v4
       with:
         fetch-depth: 1

     - name: Set up Go
-      uses: actions/setup-go@v6
+      uses: actions/setup-go@v5
       with:
         go-version: 'stable'
       id: go
.github/workflows/system.yml (vendored): 47 changes
@@ -15,140 +15,108 @@ jobs:
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "mc, posix, file count, non-static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "mc-file-count"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "REST, posix, non-static, base|acl|multipart|put-object, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "rest-base,rest-acl,rest-multipart,rest-put-object"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "REST, posix, non-static, chunked|checksum|versioning|bucket, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "rest-chunked,rest-checksum,rest-versioning,rest-bucket,rest-list-buckets,rest-create-bucket,rest-head-bucket"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "REST, posix, non-static, not implemented|rest-delete-bucket-ownership-controls|rest-delete-bucket-tagging, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "rest-not-implemented,rest-delete-bucket-ownership-controls,rest-delete-bucket-tagging"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "REST, posix, static, base|acl|multipart|put-object, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "rest-base,rest-acl,rest-multipart,rest-put-object"
           RECREATE_BUCKETS: "false"
           DELETE_BUCKETS_AFTER_TEST: "false"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "REST, posix, static, chunked|checksum|versioning|bucket, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "rest-chunked,rest-checksum,rest-versioning,rest-bucket,rest-list-buckets,rest-create-bucket,rest-head-bucket"
           RECREATE_BUCKETS: "false"
           DELETE_BUCKETS_AFTER_TEST: "false"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "REST, posix, static, not implemented|rest-delete-bucket-ownership-controls|rest-delete-bucket-tagging, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "rest-not-implemented,rest-delete-bucket-ownership-controls,rest-delete-bucket-tagging"
           RECREATE_BUCKETS: "false"
           DELETE_BUCKETS_AFTER_TEST: "false"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "REST, posix, static, rest-put-bucket-tagging|rest-get-bucket-location|rest-put-object-tagging, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "rest-put-bucket-tagging,rest-get-bucket-location,rest-put-object-tagging,rest-get-object-tagging,rest-list-object-versions"
           RECREATE_BUCKETS: "false"
           DELETE_BUCKETS_AFTER_TEST: "false"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "REST, posix, non-static, rest-put-bucket-tagging|rest-get-bucket-location|rest-put-object-tagging, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "rest-put-bucket-tagging,rest-get-bucket-location,rest-put-object-tagging,rest-get-object-tagging,rest-list-object-versions"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-west-1"
         - set: "s3, posix, non-file count, non-static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3-non-file-count"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3, posix, file count, non-static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3-file-count"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3api, posix, bucket|object|multipart, non-static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3api, posix, policy, non-static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3api-policy"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3api, posix, user, non-static, s3 IAM"
           IAM_TYPE: s3
           RUN_SET: "s3api-user"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3api, posix, bucket, static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3api-bucket"
           RECREATE_BUCKETS: "false"
           DELETE_BUCKETS_AFTER_TEST: "false"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3api, posix, multipart, static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3api-multipart"
           RECREATE_BUCKETS: "false"
           DELETE_BUCKETS_AFTER_TEST: "false"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3api, posix, object, static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3api-object"
           RECREATE_BUCKETS: "false"
           DELETE_BUCKETS_AFTER_TEST: "false"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3api, posix, policy, static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3api-policy"
           RECREATE_BUCKETS: "false"
           DELETE_BUCKETS_AFTER_TEST: "false"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3api, posix, user, static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3api-user"
           RECREATE_BUCKETS: "false"
           DELETE_BUCKETS_AFTER_TEST: "false"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         # TODO fix/debug s3 gateway
         #- set: "s3api, s3, multipart|object, non-static, folder IAM"
         #  IAM_TYPE: folder
@@ -166,34 +134,30 @@ jobs:
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3cmd, posix, non-user, non-static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3cmd-non-user"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "s3cmd, posix, user, non-static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3cmd-user"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
         - set: "setup/remove static buckets scripts"
           IAM_TYPE: folder
           RUN_SET: "setup-remove-static"
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
           AWS_REGION: "us-east-1"
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v5
+        uses: actions/checkout@v4

       - name: Set up Go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v5
         with:
           go-version: "stable"
         id: go
@@ -255,9 +219,9 @@ jobs:
           MC_ALIAS: versity
           LOG_LEVEL: 4
           GOCOVERDIR: ${{ github.workspace }}/cover
-          USERNAME_ONE: HIJKLMN
+          USERNAME_ONE: ABCDEFG
           PASSWORD_ONE: 1234567
-          USERNAME_TWO: OPQRSTU
+          USERNAME_TWO: HIJKLMN
           PASSWORD_TWO: 8901234
           TEST_FILE_FOLDER: ${{ github.workspace }}/versity-gwtest-files
           REMOVE_TEST_FILE_FOLDER: true
@@ -267,12 +231,11 @@ jobs:
           PYTHON_ENV_FOLDER: ${{ github.workspace }}/env
           AUTOGENERATE_USERS: true
           USER_AUTOGENERATION_PREFIX: github-actions-test-
-          AWS_REGION: ${{ matrix.AWS_REGION }}
         run: |
           make testbin
           export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST
           export AWS_SECRET_ACCESS_KEY=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmn
-          export AWS_REGION=$AWS_REGION
+          export AWS_REGION=us-east-1
           export AWS_ACCESS_KEY_ID_TWO=user
           export AWS_SECRET_ACCESS_KEY_TWO=pass
           export AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED

Dockerfile
@@ -23,16 +23,13 @@ RUN go build -ldflags "-X=main.Build=${BUILD} -X=main.BuildTime=${TIME} -X=main.
 FROM alpine:latest

-# These arguments can be overridden when building the image
+# These arguments can be overriden when building the image
-ARG IAM_DIR=/tmp/vgw
+ARG SETUP_DIR=/tmp/vgw

-RUN mkdir -p $IAM_DIR
+RUN mkdir -p $SETUP_DIR

-COPY --from=0 /app/cmd/versitygw/versitygw /usr/local/bin/versitygw
+COPY --from=0 /app/cmd/versitygw/versitygw /app/versitygw

-COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
-RUN chmod +x /usr/local/bin/docker-entrypoint.sh

-ENTRYPOINT [ "/usr/local/bin/docker-entrypoint.sh" ]
+ENTRYPOINT [ "/app/versitygw" ]
README.md: 23 changes
@@ -70,29 +70,6 @@ versitygw [global options] command [command options] [arguments...]
 ```
 The [global options](https://github.com/versity/versitygw/wiki/Global-Options) are specified before the backend type and the backend options are specified after.

-### Run the gateway in Docker
-
-Use the published image like the native binary by passing CLI arguments:
-
-```bash
-docker run --rm versity/versitygw:latest --version
-```
-
-When no command arguments are supplied, the container looks for `VGW_BACKEND` and optional `VGW_BACKEND_ARG`/`VGW_BACKEND_ARGS` environment variables to determine which backend to start. Backend-specific configuration continues to come from the existing environment flags (for example `ROOT_ACCESS_KEY`, `VGW_PORT`, and others).
-
-```bash
-docker run --rm \
-  -e ROOT_ACCESS_KEY=testuser \
-  -e ROOT_SECRET_KEY=secret \
-  -e VGW_BACKEND=posix \
-  -e VGW_BACKEND_ARG=/data \
-  -p 10000:7070 \
-  -v $(pwd)/data:/data \
-  versity/versitygw:latest
-```
-
-If you need to pass additional CLI options, set `VGW_ARGS` with a space-delimited list, or continue passing arguments directly to `docker run`.
-
 ***

 #### Versity gives you clarity and control over your archival storage, so you can allocate more resources to your core mission.
auth/acl.go: 12 changes
@@ -25,7 +25,6 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/versity/versitygw/backend"
-	"github.com/versity/versitygw/debuglogger"
 	"github.com/versity/versitygw/s3err"
 )
@@ -494,14 +493,3 @@ func UpdateBucketACLOwner(ctx context.Context, be backend.Backend, bucket, newOw
 	return be.DeleteBucketPolicy(ctx, bucket)
 }
-
-// ValidateCannedACL validates bucket canned acl value
-func ValidateCannedACL(acl string) error {
-	switch types.BucketCannedACL(acl) {
-	case types.BucketCannedACLPrivate, types.BucketCannedACLPublicRead, types.BucketCannedACLPublicReadWrite, "":
-		return nil
-	default:
-		debuglogger.Logf("invalid bucket canned acl: %v", acl)
-		return s3err.GetAPIError(s3err.ErrInvalidArgument)
-	}
-}
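For context on the helper removed here: it treats only `private`, `public-read`, `public-read-write`, or an empty string as valid canned ACL values. A minimal sketch of how a caller might use it; the loop and its sample header values are illustrative assumptions, not gateway handler code:

```go
package main

import (
	"fmt"

	"github.com/versity/versitygw/auth"
)

func main() {
	// Hypothetical values a PutBucketAcl handler might receive.
	for _, acl := range []string{"private", "public-read", "", "authenticated-read"} {
		if err := auth.ValidateCannedACL(acl); err != nil {
			fmt.Printf("%q rejected: %v\n", acl, err) // only "authenticated-read" is rejected
			continue
		}
		fmt.Printf("%q accepted\n", acl)
	}
}
```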
@@ -40,17 +40,14 @@ const (
 	policyErrInvalidFirstChar     = policyErr("Policies must be valid JSON and the first byte must be '{'")
 	policyErrEmptyStatement       = policyErr("Could not parse the policy: Statement is empty!")
 	policyErrMissingStatmentField = policyErr("Missing required field Statement")
-	policyErrInvalidVersion       = policyErr("The policy must contain a valid version string")
 )

 type BucketPolicy struct {
-	Version   PolicyVersion      `json:"Version"`
 	Statement []BucketPolicyItem `json:"Statement"`
 }

 func (bp *BucketPolicy) UnmarshalJSON(data []byte) error {
 	var tmp struct {
-		Version   *PolicyVersion
 		Statement *[]BucketPolicyItem `json:"Statement"`
 	}
@@ -63,22 +60,12 @@ func (bp *BucketPolicy) UnmarshalJSON(data []byte) error {
 		return policyErrMissingStatmentField
 	}

-	if tmp.Version == nil {
-		// bucket policy version should defualt to '2008-10-17'
-		bp.Version = PolicyVersion2008
-	} else {
-		bp.Version = *tmp.Version
-	}
-
 	// Assign the parsed value to the actual struct
 	bp.Statement = *tmp.Statement
 	return nil
 }

 func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
-	if !bp.Version.isValid() {
-		return policyErrInvalidVersion
-	}
-
 	for _, statement := range bp.Statement {
 		err := statement.Validate(bucket, iam)
 		if err != nil {
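The removed `UnmarshalJSON` branch defaults a missing `Version` to `2008-10-17`, S3's legacy default policy version. A self-contained sketch of that defaulting pattern with simplified type names; this is not the gateway source:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type policy struct {
	Version   string
	Statement []json.RawMessage
}

func (p *policy) UnmarshalJSON(data []byte) error {
	var tmp struct {
		Version   *string
		Statement *[]json.RawMessage
	}
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	if tmp.Statement == nil {
		return fmt.Errorf("Missing required field Statement")
	}
	if tmp.Version == nil {
		p.Version = "2008-10-17" // S3's legacy default policy version
	} else {
		p.Version = *tmp.Version
	}
	p.Statement = *tmp.Statement
	return nil
}

func main() {
	var p policy
	_ = json.Unmarshal([]byte(`{"Statement":[{}]}`), &p)
	fmt.Println(p.Version) // 2008-10-17
}
```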
@@ -38,20 +38,15 @@ const (
 	GetObjectAction                        Action = "s3:GetObject"
 	GetObjectVersionAction                 Action = "s3:GetObjectVersion"
 	DeleteObjectAction                     Action = "s3:DeleteObject"
-	DeleteObjectVersionAction              Action = "s3:DeleteObjectVersion"
 	GetObjectAclAction                     Action = "s3:GetObjectAcl"
 	GetObjectAttributesAction              Action = "s3:GetObjectAttributes"
-	GetObjectVersionAttributesAction       Action = "s3:GetObjectVersionAttributes"
 	PutObjectAclAction                     Action = "s3:PutObjectAcl"
 	RestoreObjectAction                    Action = "s3:RestoreObject"
 	GetBucketTaggingAction                 Action = "s3:GetBucketTagging"
 	PutBucketTaggingAction                 Action = "s3:PutBucketTagging"
 	GetObjectTaggingAction                 Action = "s3:GetObjectTagging"
-	GetObjectVersionTaggingAction          Action = "s3:GetObjectVersionTagging"
 	PutObjectTaggingAction                 Action = "s3:PutObjectTagging"
-	PutObjectVersionTaggingAction          Action = "s3:PutObjectVersionTagging"
 	DeleteObjectTaggingAction              Action = "s3:DeleteObjectTagging"
-	DeleteObjectVersionTaggingAction       Action = "s3:DeleteObjectVersionTagging"
 	ListBucketVersionsAction               Action = "s3:ListBucketVersions"
 	ListBucketAction                       Action = "s3:ListBucket"
 	GetBucketObjectLockConfigurationAction Action = "s3:GetBucketObjectLockConfiguration"
@@ -114,20 +109,15 @@ var supportedActionList = map[Action]struct{}{
 	GetObjectAction:                        {},
 	GetObjectVersionAction:                 {},
 	DeleteObjectAction:                     {},
-	DeleteObjectVersionAction:              {},
 	GetObjectAclAction:                     {},
 	GetObjectAttributesAction:              {},
-	GetObjectVersionAttributesAction:       {},
 	PutObjectAclAction:                     {},
 	RestoreObjectAction:                    {},
 	GetBucketTaggingAction:                 {},
 	PutBucketTaggingAction:                 {},
 	GetObjectTaggingAction:                 {},
-	GetObjectVersionTaggingAction:          {},
 	PutObjectTaggingAction:                 {},
-	PutObjectVersionTaggingAction:          {},
 	DeleteObjectTaggingAction:              {},
-	DeleteObjectVersionTaggingAction:       {},
 	ListBucketVersionsAction:               {},
 	ListBucketAction:                       {},
 	GetBucketObjectLockConfigurationAction: {},
@@ -173,30 +163,25 @@ var supportedActionList = map[Action]struct{}{
 }

 var supportedObjectActionList = map[Action]struct{}{
-	AbortMultipartUploadAction:       {},
-	ListMultipartUploadPartsAction:   {},
-	PutObjectAction:                  {},
-	GetObjectAction:                  {},
-	GetObjectVersionAction:           {},
-	DeleteObjectAction:               {},
-	DeleteObjectVersionAction:        {},
-	GetObjectAclAction:               {},
-	GetObjectAttributesAction:        {},
-	GetObjectVersionAttributesAction: {},
-	PutObjectAclAction:               {},
-	RestoreObjectAction:              {},
-	GetObjectTaggingAction:           {},
-	GetObjectVersionTaggingAction:    {},
-	PutObjectTaggingAction:           {},
-	PutObjectVersionTaggingAction:    {},
-	DeleteObjectTaggingAction:        {},
-	DeleteObjectVersionTaggingAction: {},
-	GetObjectLegalHoldAction:         {},
-	PutObjectLegalHoldAction:         {},
-	GetObjectRetentionAction:         {},
-	PutObjectRetentionAction:        {},
-	BypassGovernanceRetentionAction:  {},
-	AllActions:                       {},
+	AbortMultipartUploadAction:      {},
+	ListMultipartUploadPartsAction:  {},
+	PutObjectAction:                 {},
+	GetObjectAction:                 {},
+	GetObjectVersionAction:          {},
+	DeleteObjectAction:              {},
+	GetObjectAclAction:              {},
+	GetObjectAttributesAction:       {},
+	PutObjectAclAction:              {},
+	RestoreObjectAction:             {},
+	GetObjectTaggingAction:          {},
+	PutObjectTaggingAction:          {},
+	DeleteObjectTaggingAction:       {},
+	GetObjectLegalHoldAction:        {},
+	PutObjectLegalHoldAction:        {},
+	GetObjectRetentionAction:        {},
+	PutObjectRetentionAction:        {},
+	BypassGovernanceRetentionAction: {},
+	AllActions:                      {},
 }

 // Validates Action: it should either wildcard match with supported actions list or be in it
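The comment above states the validation rule: an action is accepted if it is in the supported list or wildcard-matches an entry. A minimal sketch of that rule using `path.Match`; the helper name and the tiny list are illustrative, not the gateway's implementation:

```go
package main

import (
	"fmt"
	"path"
)

var supported = []string{"s3:GetObject", "s3:PutObject", "s3:DeleteObject"}

// isActionValid reports whether act is a supported action or a
// wildcard pattern (e.g. "s3:Get*") matching at least one of them.
func isActionValid(act string) bool {
	for _, s := range supported {
		if s == act {
			return true
		}
		// path.Match supports '*' and '?' wildcards; S3 action names
		// contain no '/', so path-separator rules do not interfere.
		if ok, err := path.Match(act, s); err == nil && ok {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isActionValid("s3:GetObject")) // true: exact match
	fmt.Println(isActionValid("s3:Get*"))      // true: wildcard match
	fmt.Println(isActionValid("s3:ListParts")) // false
}
```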
@@ -1,54 +0,0 @@
-// Copyright 2023 Versity Software
-// This file is licensed under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package auth
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestPolicyVersion_isValid(t *testing.T) {
-	tests := []struct {
-		name  string // description of this test case
-		value string
-		want  bool
-	}{
-		{"valid 2008", "2008-10-17", true},
-		{"valid 2012", "2012-10-17", true},
-		{"invalid empty", "", false},
-		{"invalid 1", "invalid", false},
-		{"invalid 2", "2010-10-17", false},
-		{"invalid 3", "2006-00-12", false},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			got := PolicyVersion(tt.value).isValid()
-			assert.Equal(t, tt.want, got)
-		})
-	}
-}
auth/iam.go: 106 changes
@@ -45,12 +45,11 @@ func (r Role) IsValid() bool {
 // Account is a gateway IAM account
 type Account struct {
-	Access    string `json:"access"`
-	Secret    string `json:"secret"`
-	Role      Role   `json:"role"`
-	UserID    int    `json:"userID"`
-	GroupID   int    `json:"groupID"`
-	ProjectID int    `json:"projectID"`
+	Access  string `json:"access"`
+	Secret  string `json:"secret"`
+	Role    Role   `json:"role"`
+	UserID  int    `json:"userID"`
+	GroupID int    `json:"groupID"`
 }

 type ListUserAccountsResult struct {
@@ -59,11 +58,10 @@ type ListUserAccountsResult struct {
 // Mutable props, which could be changed when updating an IAM account
 type MutableProps struct {
-	Secret    *string `json:"secret"`
-	Role      Role    `json:"role"`
-	UserID    *int    `json:"userID"`
-	GroupID   *int    `json:"groupID"`
-	ProjectID *int    `json:"projectID"`
+	Secret  *string `json:"secret"`
+	Role    Role    `json:"role"`
+	UserID  *int    `json:"userID"`
+	GroupID *int    `json:"groupID"`
 }

 func (m MutableProps) Validate() error {
@@ -84,9 +82,6 @@ func updateAcc(acc *Account, props MutableProps) {
 	if props.UserID != nil {
 		acc.UserID = *props.UserID
 	}
-	if props.ProjectID != nil {
-		acc.ProjectID = *props.ProjectID
-	}
 	if props.Role != "" {
 		acc.Role = props.Role
 	}
@@ -112,47 +107,42 @@ var (
 )

 type Opts struct {
-	RootAccount                 Account
-	Dir                         string
-	LDAPServerURL               string
-	LDAPBindDN                  string
-	LDAPPassword                string
-	LDAPQueryBase               string
-	LDAPObjClasses              string
-	LDAPAccessAtr               string
-	LDAPSecretAtr               string
-	LDAPRoleAtr                 string
-	LDAPUserIdAtr               string
-	LDAPGroupIdAtr              string
-	LDAPProjectIdAtr            string
-	LDAPTLSSkipVerify           bool
-	VaultEndpointURL            string
-	VaultNamespace              string
-	VaultSecretStoragePath      string
-	VaultSecretStorageNamespace string
-	VaultAuthMethod             string
-	VaultAuthNamespace          string
-	VaultMountPath              string
-	VaultRootToken              string
-	VaultRoleId                 string
-	VaultRoleSecret             string
-	VaultServerCert             string
-	VaultClientCert             string
-	VaultClientCertKey          string
-	S3Access                    string
-	S3Secret                    string
-	S3Region                    string
-	S3Bucket                    string
-	S3Endpoint                  string
-	S3DisableSSlVerfiy          bool
-	CacheDisable                bool
-	CacheTTL                    int
-	CachePrune                  int
-	IpaHost                     string
-	IpaVaultName                string
-	IpaUser                     string
-	IpaPassword                 string
-	IpaInsecure                 bool
+	RootAccount            Account
+	Dir                    string
+	LDAPServerURL          string
+	LDAPBindDN             string
+	LDAPPassword           string
+	LDAPQueryBase          string
+	LDAPObjClasses         string
+	LDAPAccessAtr          string
+	LDAPSecretAtr          string
+	LDAPRoleAtr            string
+	LDAPUserIdAtr          string
+	LDAPGroupIdAtr         string
+	VaultEndpointURL       string
+	VaultSecretStoragePath string
+	VaultAuthMethod        string
+	VaultMountPath         string
+	VaultRootToken         string
+	VaultRoleId            string
+	VaultRoleSecret        string
+	VaultServerCert        string
+	VaultClientCert        string
+	VaultClientCertKey     string
+	S3Access               string
+	S3Secret               string
+	S3Region               string
+	S3Bucket               string
+	S3Endpoint             string
+	S3DisableSSlVerfiy     bool
+	CacheDisable           bool
+	CacheTTL               int
+	CachePrune             int
+	IpaHost                string
+	IpaVaultName           string
+	IpaUser                string
+	IpaPassword            string
+	IpaInsecure            bool
 }

 func New(o *Opts) (IAMService, error) {
@@ -166,7 +156,7 @@ func New(o *Opts) (IAMService, error) {
 	case o.LDAPServerURL != "":
 		svc, err = NewLDAPService(o.RootAccount, o.LDAPServerURL, o.LDAPBindDN, o.LDAPPassword,
 			o.LDAPQueryBase, o.LDAPAccessAtr, o.LDAPSecretAtr, o.LDAPRoleAtr, o.LDAPUserIdAtr,
-			o.LDAPGroupIdAtr, o.LDAPProjectIdAtr, o.LDAPObjClasses, o.LDAPTLSSkipVerify)
+			o.LDAPGroupIdAtr, o.LDAPObjClasses)
 		fmt.Printf("initializing LDAP IAM with %q\n", o.LDAPServerURL)
 	case o.S3Endpoint != "":
 		svc, err = NewS3(o.RootAccount, o.S3Access, o.S3Secret, o.S3Region, o.S3Bucket,
@@ -174,8 +164,8 @@ func New(o *Opts) (IAMService, error) {
 		fmt.Printf("initializing S3 IAM with '%v/%v'\n",
 			o.S3Endpoint, o.S3Bucket)
 	case o.VaultEndpointURL != "":
-		svc, err = NewVaultIAMService(o.RootAccount, o.VaultEndpointURL, o.VaultNamespace, o.VaultSecretStoragePath, o.VaultSecretStorageNamespace,
-			o.VaultAuthMethod, o.VaultAuthNamespace, o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
+		svc, err = NewVaultIAMService(o.RootAccount, o.VaultEndpointURL, o.VaultSecretStoragePath,
+			o.VaultAuthMethod, o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
 			o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
 		fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
 	case o.IpaHost != "":
@@ -194,12 +194,11 @@ func (s *IAMServiceInternal) ListUserAccounts() ([]Account, error) {
 	var accs []Account
 	for _, k := range keys {
 		accs = append(accs, Account{
-			Access:    k,
-			Secret:    conf.AccessAccounts[k].Secret,
-			Role:      conf.AccessAccounts[k].Role,
-			UserID:    conf.AccessAccounts[k].UserID,
-			GroupID:   conf.AccessAccounts[k].GroupID,
-			ProjectID: conf.AccessAccounts[k].ProjectID,
+			Access:  k,
+			Secret:  conf.AccessAccounts[k].Secret,
+			Role:    conf.AccessAccounts[k].Role,
+			UserID:  conf.AccessAccounts[k].UserID,
+			GroupID: conf.AccessAccounts[k].GroupID,
 		})
 	}
@@ -132,7 +132,6 @@ func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
 	userResult := struct {
 		Gidnumber []string
 		Uidnumber []string
-		PidNumber []string
 	}{}

 	err = ipa.rpc(req, &userResult)
@@ -140,25 +139,20 @@ func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
 		return Account{}, err
 	}

-	uid, err := parseToInt(userResult.Uidnumber, "userID")
+	uid, err := strconv.Atoi(userResult.Uidnumber[0])
 	if err != nil {
-		return Account{}, err
+		return Account{}, fmt.Errorf("ipa uid invalid: %w", err)
 	}
-	gid, err := parseToInt(userResult.Gidnumber, "groupID")
+	gid, err := strconv.Atoi(userResult.Gidnumber[0])
 	if err != nil {
-		return Account{}, err
-	}
-	pId, err := parseToInt(userResult.PidNumber, "projectID")
-	if err != nil {
-		return Account{}, err
+		return Account{}, fmt.Errorf("ipa gid invalid: %w", err)
 	}

 	account := Account{
-		Access:    access,
-		Role:      RoleUser,
-		UserID:    uid,
-		GroupID:   gid,
-		ProjectID: pId,
+		Access:  access,
+		Role:    RoleUser,
+		UserID:  uid,
+		GroupID: gid,
 	}

 	session_key := make([]byte, 16)
@@ -500,20 +494,3 @@ func (b *Base64Encoded) UnmarshalJSON(data []byte) error {
 	*b, err = base64.StdEncoding.DecodeString(intermediate)
 	return err
 }
-
-// parseToInt parses the first argument of input string slice
-// to an integer. If slice is empty, it defaults to 0
-func parseToInt(input []string, argName string) (int, error) {
-	if len(input) == 0 {
-		debuglogger.IAMLogf("empty %s slice: defaulting to 0", argName)
-		return 0, nil
-	}
-
-	id, err := strconv.Atoi(input[0])
-	if err != nil {
-		debuglogger.IAMLogf("failed to parse %s: %v", argName, err)
-		return 0, fmt.Errorf("invalid %s: %w", argName, err)
-	}
-
-	return id, nil
-}
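The removed `parseToInt` helper tolerates a missing attribute by defaulting to 0, unlike the plain `strconv.Atoi(x[0])` call that replaces it (which panics on an empty slice). A self-contained sketch of the same defaulting behavior, with the logging stripped out; illustrative only:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseFirstInt parses the first element of vals, defaulting to 0
// when the slice is empty (mirrors the removed parseToInt helper).
func parseFirstInt(vals []string, name string) (int, error) {
	if len(vals) == 0 {
		return 0, nil // attribute absent: default rather than error
	}
	id, err := strconv.Atoi(vals[0])
	if err != nil {
		return 0, fmt.Errorf("invalid %s: %w", name, err)
	}
	return id, nil
}

func main() {
	uid, err := parseFirstInt(nil, "userID")
	fmt.Println(uid, err) // 0 <nil>

	_, err = parseFirstInt([]string{"abc"}, "groupID")
	fmt.Println(err) // invalid groupID: ...
}
```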
auth/iam_ldap.go: 129 changes
@@ -15,9 +15,7 @@
 package auth

 import (
-	"crypto/tls"
 	"fmt"
-	"net/url"
 	"strconv"
 	"strings"
 	"sync"
@@ -28,82 +26,57 @@ import (
 )

 type LdapIAMService struct {
-	conn          *ldap.Conn
-	queryBase     string
-	objClasses    []string
-	accessAtr     string
-	secretAtr     string
-	roleAtr       string
-	groupIdAtr    string
-	userIdAtr     string
-	projectIdAtr  string
-	rootAcc       Account
-	url           string
-	bindDN        string
-	pass          string
-	tlsSkipVerify bool
-	mu            sync.Mutex
+	conn       *ldap.Conn
+	queryBase  string
+	objClasses []string
+	accessAtr  string
+	secretAtr  string
+	roleAtr    string
+	groupIdAtr string
+	userIdAtr  string
+	rootAcc    Account
+	url        string
+	bindDN     string
+	pass       string
+	mu         sync.Mutex
 }

 var _ IAMService = &LdapIAMService{}

-func NewLDAPService(rootAcc Account, ldapURL, bindDN, pass, queryBase, accAtr, secAtr, roleAtr, userIdAtr, groupIdAtr, projectIdAtr, objClasses string, tlsSkipVerify bool) (IAMService, error) {
-	if ldapURL == "" || bindDN == "" || pass == "" || queryBase == "" || accAtr == "" ||
-		secAtr == "" || roleAtr == "" || userIdAtr == "" || groupIdAtr == "" || projectIdAtr == "" || objClasses == "" {
+func NewLDAPService(rootAcc Account, url, bindDN, pass, queryBase, accAtr, secAtr, roleAtr, userIdAtr, groupIdAtr, objClasses string) (IAMService, error) {
+	if url == "" || bindDN == "" || pass == "" || queryBase == "" || accAtr == "" ||
+		secAtr == "" || roleAtr == "" || userIdAtr == "" || groupIdAtr == "" || objClasses == "" {
 		return nil, fmt.Errorf("required parameters list not fully provided")
 	}

-	conn, err := dialLDAP(ldapURL, tlsSkipVerify)
+	conn, err := ldap.DialURL(url)
 	if err != nil {
 		return nil, fmt.Errorf("failed to connect to LDAP server: %w", err)
 	}

 	err = conn.Bind(bindDN, pass)
 	if err != nil {
 		conn.Close()
 		return nil, fmt.Errorf("failed to bind to LDAP server %w", err)
 	}
 	return &LdapIAMService{
-		conn:          conn,
-		queryBase:     queryBase,
-		objClasses:    strings.Split(objClasses, ","),
-		accessAtr:     accAtr,
-		secretAtr:     secAtr,
-		roleAtr:       roleAtr,
-		userIdAtr:     userIdAtr,
-		groupIdAtr:    groupIdAtr,
-		projectIdAtr:  projectIdAtr,
-		rootAcc:       rootAcc,
-		url:           ldapURL,
-		bindDN:        bindDN,
-		pass:          pass,
-		tlsSkipVerify: tlsSkipVerify,
+		conn:       conn,
+		queryBase:  queryBase,
+		objClasses: strings.Split(objClasses, ","),
+		accessAtr:  accAtr,
+		secretAtr:  secAtr,
+		roleAtr:    roleAtr,
+		userIdAtr:  userIdAtr,
+		groupIdAtr: groupIdAtr,
+		rootAcc:    rootAcc,
+		url:        url,
+		bindDN:     bindDN,
+		pass:       pass,
 	}, nil
 }

-// dialLDAP establishes an LDAP connection with optional TLS configuration
-func dialLDAP(ldapURL string, tlsSkipVerify bool) (*ldap.Conn, error) {
-	u, err := url.Parse(ldapURL)
-	if err != nil {
-		return nil, fmt.Errorf("invalid LDAP URL: %w", err)
-	}
-
-	// For ldaps:// URLs, use DialURL with custom TLS config if needed
-	if u.Scheme == "ldaps" && tlsSkipVerify {
-		tlsConfig := &tls.Config{
-			InsecureSkipVerify: tlsSkipVerify,
-		}
-		return ldap.DialURL(ldapURL, ldap.DialWithTLSConfig(tlsConfig))
-	}
-
-	// For ldap:// or when TLS verification is enabled, use standard DialURL
-	return ldap.DialURL(ldapURL)
-}
-
 func (ld *LdapIAMService) reconnect() error {
 	ld.conn.Close()

-	conn, err := dialLDAP(ld.url, ld.tlsSkipVerify)
+	conn, err := ldap.DialURL(ld.url)
 	if err != nil {
 		return fmt.Errorf("failed to reconnect to LDAP server: %w", err)
 	}
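Note that the removed `dialLDAP` only applies the insecure TLS config when the URL scheme is `ldaps`; plain `ldap://` connections are dialed normally regardless of the flag. A hedged usage sketch placed in the same package so `dialLDAP` resolves; the server URLs are placeholders:

```go
package auth

import "log"

// exampleDialLDAP illustrates the removed dialLDAP helper.
func exampleDialLDAP() {
	// ldaps:// with tlsSkipVerify=true: certificate verification is skipped.
	conn, err := dialLDAP("ldaps://ldap.example.com:636", true)
	if err != nil {
		log.Fatalf("ldaps dial: %v", err)
	}
	defer conn.Close()

	// ldap:// ignores the flag and dials without TLS overrides.
	plain, err := dialLDAP("ldap://ldap.example.com:389", true)
	if err != nil {
		log.Fatalf("ldap dial: %v", err)
	}
	defer plain.Close()
}
```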
@@ -144,7 +117,6 @@ func (ld *LdapIAMService) CreateAccount(account Account) error {
 	userEntry.Attribute(ld.roleAtr, []string{string(account.Role)})
 	userEntry.Attribute(ld.groupIdAtr, []string{fmt.Sprint(account.GroupID)})
 	userEntry.Attribute(ld.userIdAtr, []string{fmt.Sprint(account.UserID)})
-	userEntry.Attribute(ld.projectIdAtr, []string{fmt.Sprint(account.ProjectID)})

 	err := ld.execute(func(c *ldap.Conn) error {
 		return c.Add(userEntry)
@@ -180,7 +152,7 @@ func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
 		0,
 		false,
 		ld.buildSearchFilter(access),
-		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.userIdAtr, ld.groupIdAtr, ld.projectIdAtr},
+		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.userIdAtr, ld.groupIdAtr},
 		nil,
 	)
@@ -219,19 +191,12 @@ func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
 		return Account{}, fmt.Errorf("invalid entry value for user-id %q: %w",
 			entry.GetAttributeValue(ld.userIdAtr), err)
 	}
-	projectID, err := strconv.Atoi(entry.GetAttributeValue(ld.projectIdAtr))
-	if err != nil {
-		return Account{}, fmt.Errorf("invalid entry value for project-id %q: %w",
-			entry.GetAttributeValue(ld.projectIdAtr), err)
-	}

 	return Account{
-		Access:    entry.GetAttributeValue(ld.accessAtr),
-		Secret:    entry.GetAttributeValue(ld.secretAtr),
-		Role:      Role(entry.GetAttributeValue(ld.roleAtr)),
-		GroupID:   groupId,
-		UserID:    userId,
-		ProjectID: projectID,
+		Access:  entry.GetAttributeValue(ld.accessAtr),
+		Secret:  entry.GetAttributeValue(ld.secretAtr),
+		Role:    Role(entry.GetAttributeValue(ld.roleAtr)),
+		GroupID: groupId,
+		UserID:  userId,
 	}, nil
 }
@@ -246,9 +211,6 @@ func (ld *LdapIAMService) UpdateUserAccount(access string, props MutableProps) e
 	if props.UserID != nil {
 		req.Replace(ld.userIdAtr, []string{fmt.Sprint(*props.UserID)})
 	}
-	if props.ProjectID != nil {
-		req.Replace(ld.projectIdAtr, []string{fmt.Sprint(*props.ProjectID)})
-	}
 	if props.Role != "" {
 		req.Replace(ld.roleAtr, []string{string(props.Role)})
 	}
@@ -286,7 +248,7 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
 		0,
 		false,
 		ld.buildSearchFilter(""),
-		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.groupIdAtr, ld.projectIdAtr, ld.userIdAtr},
+		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.groupIdAtr, ld.userIdAtr},
 		nil,
 	)
@@ -311,19 +273,12 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
 			return nil, fmt.Errorf("invalid entry value for user-id %q: %w",
 				el.GetAttributeValue(ld.userIdAtr), err)
 		}
-		projectID, err := strconv.Atoi(el.GetAttributeValue(ld.projectIdAtr))
-		if err != nil {
-			return nil, fmt.Errorf("invalid entry value for project-id %q: %w",
-				el.GetAttributeValue(ld.groupIdAtr), err)
-		}

 		result = append(result, Account{
-			Access:    el.GetAttributeValue(ld.accessAtr),
-			Secret:    el.GetAttributeValue(ld.secretAtr),
-			Role:      Role(el.GetAttributeValue(ld.roleAtr)),
-			GroupID:   groupId,
-			ProjectID: projectID,
-			UserID:    userId,
+			Access:  el.GetAttributeValue(ld.accessAtr),
+			Secret:  el.GetAttributeValue(ld.secretAtr),
+			Role:    Role(el.GetAttributeValue(ld.roleAtr)),
+			GroupID: groupId,
+			UserID:  userId,
 		})
 	}
@@ -205,12 +205,11 @@ func (s *IAMServiceS3) ListUserAccounts() ([]Account, error) {
 	var accs []Account
 	for _, k := range keys {
 		accs = append(accs, Account{
-			Access:    k,
-			Secret:    conf.AccessAccounts[k].Secret,
-			Role:      conf.AccessAccounts[k].Role,
-			UserID:    conf.AccessAccounts[k].UserID,
-			GroupID:   conf.AccessAccounts[k].GroupID,
-			ProjectID: conf.AccessAccounts[k].ProjectID,
+			Access:  k,
+			Secret:  conf.AccessAccounts[k].Secret,
+			Role:    conf.AccessAccounts[k].Role,
+			UserID:  conf.AccessAccounts[k].UserID,
+			GroupID: conf.AccessAccounts[k].GroupID,
 		})
 	}
@@ -38,39 +38,15 @@ type VaultIAMService struct {
 	creds schema.AppRoleLoginRequest
 }

-type VaultIAMNamespace struct {
-	Auth          string
-	SecretStorage string
-}
-
-// Resolve empty specific namespaces to the fallback.
-// Empty result means root namespace.
-func resolveVaultNamespaces(authNamespace, secretStorageNamespace, fallback string) VaultIAMNamespace {
-	ns := VaultIAMNamespace{
-		Auth:          authNamespace,
-		SecretStorage: secretStorageNamespace,
-	}
-
-	if ns.Auth == "" {
-		ns.Auth = fallback
-	}
-	if ns.SecretStorage == "" {
-		ns.SecretStorage = fallback
-	}
-
-	return ns
-}
-
 var _ IAMService = &VaultIAMService{}

-func NewVaultIAMService(rootAcc Account, endpoint, namespace, secretStoragePath, secretStorageNamespace,
-	authMethod, authNamespace, mountPath, rootToken, roleID, roleSecret, serverCert,
+func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
+	authMethod, mountPath, rootToken, roleID, roleSecret, serverCert,
 	clientCert, clientCertKey string) (IAMService, error) {
 	opts := []vault.ClientOption{
 		vault.WithAddress(endpoint),
 		vault.WithRequestTimeout(requestTimeout),
 	}

 	if serverCert != "" {
 		tls := vault.TLSConfiguration{}
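The removed namespace resolution is a plain fallback: each specific namespace defaults to the generic one, and an empty result means the Vault root namespace. A runnable illustration with a local copy of the logic (the namespace values are made up):

```go
package main

import "fmt"

type vaultNamespaces struct{ Auth, SecretStorage string }

// resolveNS is a local copy of the removed fallback logic, for illustration.
func resolveNS(auth, storage, fallback string) vaultNamespaces {
	if auth == "" {
		auth = fallback
	}
	if storage == "" {
		storage = fallback
	}
	return vaultNamespaces{auth, storage}
}

func main() {
	fmt.Println(resolveNS("teamA", "teamB", "org")) // {teamA teamB}
	fmt.Println(resolveNS("", "teamB", "org"))      // {org teamB}
	fmt.Println(resolveNS("", "", "org"))           // {org org}
	fmt.Println(resolveNS("", "", ""))              // { }: root namespace
}
```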
@@ -104,28 +80,6 @@ func NewVaultIAMService(rootAcc Account, endpoint, namespace, secretStoragePath,
 		kvReqOpts = append(kvReqOpts, vault.WithMountPath(mountPath))
 	}

-	// Resolve namespaces using optional generic fallback "namespace"
-	ns := resolveVaultNamespaces(authNamespace, secretStorageNamespace, namespace)
-
-	// Guard: AppRole tokens are namespace scoped. If using AppRole and namespaces differ, error early.
-	// Root token can span namespaces because each request carries X-Vault-Namespace.
-	if rootToken == "" && ns.Auth != "" && ns.SecretStorage != "" && ns.Auth != ns.SecretStorage {
-		return nil, fmt.Errorf(
-			"approle tokens are namespace scoped. auth namespace %q and secret storage namespace %q differ. "+
-				"use the same namespace or authenticate with a root token",
-			ns.Auth, ns.SecretStorage,
-		)
-	}
-
-	// Apply namespaces to the correct request option sets.
-	// For root token we do not need an auth namespace since we are not logging in via auth.
-	if rootToken == "" && ns.Auth != "" {
-		authReqOpts = append(authReqOpts, vault.WithNamespace(ns.Auth))
-	}
-	if ns.SecretStorage != "" {
-		kvReqOpts = append(kvReqOpts, vault.WithNamespace(ns.SecretStorage))
-	}
-
 	creds := schema.AppRoleLoginRequest{
 		RoleId:   roleID,
 		SecretId: roleSecret,
@@ -225,10 +179,6 @@ func (vt *VaultIAMService) CreateAccount(account Account) error {
 		if strings.Contains(err.Error(), "check-and-set") {
 			return ErrUserExists
 		}
-		if vault.IsErrorStatus(err, http.StatusForbidden) {
-			return fmt.Errorf("vault 403 permission denied on path %q. check KV mount path and policy. original: %w",
-				vt.secretStoragePath+"/"+account.Access, err)
-		}
 		return err
 	}
 	return nil
@@ -369,21 +319,12 @@ func parseVaultUserAccount(data map[string]any, access string) (acc Account, err
 	if err != nil {
 		return acc, errInvalidUser
 	}
-	projectIdJson, ok := usrAcc["projectID"].(json.Number)
-	if !ok {
-		return acc, errInvalidUser
-	}
-	projectID, err := projectIdJson.Int64()
-	if err != nil {
-		return acc, errInvalidUser
-	}

 	return Account{
-		Access:    acss,
-		Secret:    secret,
-		Role:      Role(role),
-		UserID:    int(userId),
-		GroupID:   int(groupId),
-		ProjectID: int(projectID),
+		Access:  acss,
+		Secret:  secret,
+		Role:    Role(role),
+		UserID:  int(userId),
+		GroupID: int(groupId),
 	}, nil
 }
@@ -41,7 +41,7 @@ func ParseBucketLockConfigurationInput(input []byte) ([]byte, error) {
 		return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
 	}

-	if lockConfig.ObjectLockEnabled != types.ObjectLockEnabledEnabled {
+	if lockConfig.ObjectLockEnabled != "" && lockConfig.ObjectLockEnabled != types.ObjectLockEnabledEnabled {
 		return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
 	}
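The relaxed check accepts configurations that omit `ObjectLockEnabled` entirely and rejects only explicit values other than `Enabled`. A minimal comparison of the two behaviors; the helper functions are illustrative, not gateway code:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// strict mirrors the old check; relaxed mirrors the new one.
func strict(v types.ObjectLockEnabled) bool { return v == types.ObjectLockEnabledEnabled }
func relaxed(v types.ObjectLockEnabled) bool {
	return v == "" || v == types.ObjectLockEnabledEnabled
}

func main() {
	for _, v := range []types.ObjectLockEnabled{"", "Enabled", "Disabled"} {
		fmt.Printf("%-10q strict=%v relaxed=%v\n", string(v), strict(v), relaxed(v))
	}
	// Only the empty value diverges: strict rejects it, relaxed accepts it.
}
```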
@@ -210,16 +210,7 @@ func ParseObjectLegalHoldOutput(status *bool) *s3response.GetObjectLegalHoldResu
 	}
 }

-func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass, isBucketPublic bool, be backend.Backend, isOverwrite bool) error {
-	if isOverwrite {
-		// if bucket versioning is enabled, any overwrite request
-		// should be enabled, as it leads to a new object version
-		// creation
-		res, err := be.GetBucketVersioning(ctx, bucket)
-		if err == nil && res.Status != nil && *res.Status == types.BucketVersioningStatusEnabled {
-			return nil
-		}
-	}
+func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass, isBucketPublic bool, be backend.Backend) error {
 	data, err := be.GetObjectLockConfiguration(ctx, bucket)
 	if err != nil {
 		if errors.Is(err, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)) {
@@ -281,35 +272,31 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
 		}

 		if retention.Mode != "" && retention.RetainUntilDate != nil {
-			if retention.RetainUntilDate.Before(time.Now()) {
-				// if the object retention is expired, the object
-				// is allowed for write operations(delete, modify)
-				return nil
-			}
-
-			switch retention.Mode {
-			case types.ObjectLockRetentionModeGovernance:
-				if !bypass {
-					return s3err.GetAPIError(s3err.ErrObjectLocked)
-				} else {
-					policy, err := be.GetBucketPolicy(ctx, bucket)
-					if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
-						return s3err.GetAPIError(s3err.ErrObjectLocked)
-					}
-					if err != nil {
-						return err
-					}
-					if isBucketPublic {
-						err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
-					} else {
-						err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
-					}
-					if err != nil {
-						return s3err.GetAPIError(s3err.ErrObjectLocked)
-					}
-				}
-			case types.ObjectLockRetentionModeCompliance:
-				return s3err.GetAPIError(s3err.ErrObjectLocked)
-			}
+			if retention.RetainUntilDate.After(time.Now()) {
+				switch retention.Mode {
+				case types.ObjectLockRetentionModeGovernance:
+					if !bypass {
+						return s3err.GetAPIError(s3err.ErrObjectLocked)
+					}
+					policy, err := be.GetBucketPolicy(ctx, bucket)
+					if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
+						return s3err.GetAPIError(s3err.ErrObjectLocked)
+					}
+					if err != nil {
+						return err
+					}
+					if isBucketPublic {
+						err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
+					} else {
+						err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
+					}
+					if err != nil {
+						return s3err.GetAPIError(s3err.ErrObjectLocked)
+					}
+				case types.ObjectLockRetentionModeCompliance:
+					return s3err.GetAPIError(s3err.ErrObjectLocked)
+				}
+			}
 		}
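Both versions implement the same S3 object lock rule set and differ mainly in how expired retention is short-circuited. The decision flow as a standalone sketch; the types are reduced to plain values and this is not the gateway function:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errLocked = errors.New("object locked")

// canModify distills the retention decision: expired retention always
// allows writes; GOVERNANCE needs the bypass flag (plus, in the real
// code, a bucket policy granting s3:BypassGovernanceRetention);
// COMPLIANCE can never be bypassed.
func canModify(mode string, retainUntil time.Time, bypass bool) error {
	if retainUntil.Before(time.Now()) {
		return nil // retention expired
	}
	switch mode {
	case "GOVERNANCE":
		if !bypass {
			return errLocked
		}
		return nil
	case "COMPLIANCE":
		return errLocked
	}
	return nil
}

func main() {
	future := time.Now().Add(24 * time.Hour)
	fmt.Println(canModify("GOVERNANCE", future, false)) // object locked
	fmt.Println(canModify("GOVERNANCE", future, true))  // <nil>
	fmt.Println(canModify("COMPLIANCE", future, true))  // object locked
}
```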
@@ -177,21 +177,7 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
 		meta[string(keyBucketLock)] = backend.GetPtrFromString(encodeBytes(defaultLockParsed))
 	}

-	tagging, err := backend.ParseCreateBucketTags(input.CreateBucketConfiguration.Tags)
-	if err != nil {
-		return err
-	}
-
-	if tagging != nil {
-		tags, err := json.Marshal(tagging)
-		if err != nil {
-			return fmt.Errorf("marshal tags: %w", err)
-		}
-
-		meta[string(keyTags)] = backend.GetPtrFromString(encodeBytes(tags))
-	}
-
-	_, err = az.client.CreateContainer(ctx, *input.Bucket, &container.CreateOptions{Metadata: meta})
+	_, err := az.client.CreateContainer(ctx, *input.Bucket, &container.CreateOptions{Metadata: meta})
 	if errors.Is(s3err.GetAPIError(s3err.ErrBucketAlreadyExists), azureErrToS3Err(err)) {
 		aclBytes, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyAclCapital))
 		if err != nil {
@@ -583,11 +569,6 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
 		}
 	}

-	if resp.TagCount != nil {
-		tagcount := int32(*resp.TagCount)
-		result.TagCount = &tagcount
-	}
-
 	return result, nil
 }
@@ -1090,7 +1071,7 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
 	}, nil
 }

-func (az *Azure) PutObjectTagging(ctx context.Context, bucket, object, _ string, tags map[string]string) error {
+func (az *Azure) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
 	client, err := az.getBlobClient(bucket, object)
 	if err != nil {
 		return err
@@ -1104,7 +1085,7 @@ func (az *Azure) PutObjectTagging(ctx context.Context, bucket, object, _ string,
 	return nil
 }

-func (az *Azure) GetObjectTagging(ctx context.Context, bucket, object, _ string) (map[string]string, error) {
+func (az *Azure) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
 	client, err := az.getBlobClient(bucket, object)
 	if err != nil {
 		return nil, err
@@ -1118,7 +1099,7 @@ func (az *Azure) GetObjectTagging(ctx context.Context, bucket, object, _ string)
 	return parseAzTags(tags.BlobTagSet), nil
 }

-func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object, _ string) error {
+func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
 	client, err := az.getBlobClient(bucket, object)
 	if err != nil {
 		return err
@@ -1663,6 +1644,24 @@ func (az *Azure) DeleteBucketCors(ctx context.Context, bucket string) error {
 }

+func (az *Azure) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
+	cfg, err := az.getContainerMetaData(ctx, bucket, string(keyBucketLock))
+	if err != nil {
+		return err
+	}
+
+	if len(cfg) == 0 {
+		return s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotAllowed)
+	}
+
+	var bucketLockCfg auth.BucketLockConfig
+	if err := json.Unmarshal(cfg, &bucketLockCfg); err != nil {
+		return fmt.Errorf("unmarshal object lock config: %w", err)
+	}
+
+	if !bucketLockCfg.Enabled {
+		return s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotAllowed)
+	}
+
+	return az.setContainerMetaData(ctx, bucket, string(keyBucketLock), config)
+}
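This method only accepts a new lock configuration when the container already carries a lock config with object lock enabled: the stored JSON gates the update. A small sketch of that gate; the struct mirrors the `Enabled` field checked above, and the JSON inputs are made up:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// lockConfig mirrors the Enabled field used in PutObjectLockConfiguration.
type lockConfig struct {
	Enabled bool
}

// allowed reports whether an update to the lock configuration is permitted.
func allowed(stored []byte) bool {
	if len(stored) == 0 {
		return false // bucket was created without object lock
	}
	var cfg lockConfig
	if err := json.Unmarshal(stored, &cfg); err != nil {
		return false
	}
	return cfg.Enabled
}

func main() {
	fmt.Println(allowed(nil))                         // false
	fmt.Println(allowed([]byte(`{"Enabled":false}`))) // false
	fmt.Println(allowed([]byte(`{"Enabled":true}`)))  // true
}
```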
@@ -83,9 +83,9 @@ type Backend interface {
 	DeleteBucketTagging(_ context.Context, bucket string) error

 	// object tagging operations
-	GetObjectTagging(_ context.Context, bucket, object, versionId string) (map[string]string, error)
-	PutObjectTagging(_ context.Context, bucket, object, versionId string, tags map[string]string) error
-	DeleteObjectTagging(_ context.Context, bucket, object, versionId string) error
+	GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error)
+	PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error
+	DeleteObjectTagging(_ context.Context, bucket, object string) error

 	// object lock operations
 	PutObjectLockConfiguration(_ context.Context, bucket string, config []byte) error
@@ -251,13 +251,13 @@ func (BackendUnsupported) DeleteBucketTagging(_ context.Context, bucket string)
 	return s3err.GetAPIError(s3err.ErrNotImplemented)
 }

-func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object, versionId string) (map[string]string, error) {
+func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
 	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
 }
-func (BackendUnsupported) PutObjectTagging(_ context.Context, bucket, object, versionId string, tags map[string]string) error {
+func (BackendUnsupported) PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error {
 	return s3err.GetAPIError(s3err.ErrNotImplemented)
 }
-func (BackendUnsupported) DeleteObjectTagging(_ context.Context, bucket, object, versionId string) error {
+func (BackendUnsupported) DeleteObjectTagging(_ context.Context, bucket, object string) error {
 	return s3err.GetAPIError(s3err.ErrNotImplemented)
 }
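Dropping `versionId` changes the Backend interface contract, so every backend must match the shorter signatures exactly. A minimal sketch of a conforming partial implementation; the in-memory map is illustrative only and not a real backend:

```go
package main

import (
	"context"
	"fmt"
)

// tagBackend sketches the reduced, version-unaware tagging contract.
type tagBackend struct {
	tags map[string]map[string]string // "bucket/object" -> tag set
}

func (b *tagBackend) PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error {
	b.tags[bucket+"/"+object] = tags
	return nil
}

func (b *tagBackend) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
	return b.tags[bucket+"/"+object], nil
}

func (b *tagBackend) DeleteObjectTagging(_ context.Context, bucket, object string) error {
	delete(b.tags, bucket+"/"+object)
	return nil
}

func main() {
	b := &tagBackend{tags: map[string]map[string]string{}}
	_ = b.PutObjectTagging(context.Background(), "bkt", "obj", map[string]string{"env": "dev"})
	got, _ := b.GetObjectTagging(context.Background(), "bkt", "obj")
	fmt.Println(got) // map[env:dev]
}
```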
@@ -317,60 +317,14 @@ func ParseObjectTags(tagging string) (map[string]string, error) {
 	return tagSet, nil
 }

-// ParseCreateBucketTags parses and validates the bucket
-// tagging from CreateBucket input
-func ParseCreateBucketTags(tagging []types.Tag) (map[string]string, error) {
-	if len(tagging) == 0 {
-		return nil, nil
-	}
-
-	tagset := make(map[string]string, len(tagging))
-
-	if len(tagging) > 50 {
-		return nil, s3err.GetAPIError(s3err.ErrBucketTaggingLimited)
-	}
-
-	for _, tag := range tagging {
-		// validate tag key length
-		key := GetStringFromPtr(tag.Key)
-		if len(key) == 0 || len(key) > 128 {
-			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
-		}
-
-		// validate tag key string chars
-		if !isValidTagComponent(key) {
-			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
-		}
-
-		// validate tag value length
-		value := GetStringFromPtr(tag.Value)
-		if len(value) > 256 {
-			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
-		}
-
-		// validate tag value string chars
-		if !isValidTagComponent(value) {
-			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
-		}
-
-		// make sure there are no duplicate keys
-		_, ok := tagset[key]
-		if ok {
-			return nil, s3err.GetAPIError(s3err.ErrDuplicateTagKey)
-		}
-
-		tagset[key] = value
-	}
-
-	return tagset, nil
-}
-
-// tag component (key/value) name rule regexp
-// https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_Tag.html
-var validTagComponent = regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`)
+var validTagComponent = regexp.MustCompile(`^[a-zA-Z0-9:/_.\-+ ]+$`)

-// isValidTagComponent validates the tag component(key/value) name
+// isValidTagComponent matches strings which contain letters, decimal digits,
+// and special chars: '/', '_', '-', '+', '.', ' ' (space)
 func isValidTagComponent(str string) bool {
-	if str == "" {
-		return true
-	}
 	return validTagComponent.Match([]byte(str))
 }
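The two sides validate tag characters differently: the Unicode-aware pattern follows the AWS tag grammar (letters, spaces, digits, and `_.:/=+-@`), while the simpler ASCII pattern is stricter. A quick runnable comparison; the sample strings are chosen to show the divergence:

```go
package main

import (
	"fmt"
	"regexp"
)

var (
	unicodeRule = regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`)
	asciiRule   = regexp.MustCompile(`^[a-zA-Z0-9:/_.\-+ ]+$`)
)

func main() {
	for _, s := range []string{"env", "coût", "owner@team", ""} {
		fmt.Printf("%-12q unicode=%v ascii=%v\n",
			s, unicodeRule.MatchString(s), asciiRule.MatchString(s))
	}
	// "coût" and "owner@team" pass the Unicode rule but fail the ASCII one;
	// "" passes only the Unicode rule (the ASCII pattern requires 1+ chars).
}
```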
@@ -616,19 +570,3 @@ func EvaluateObjectDeletePreconditions(etag string, modTime time.Time, size int6
 	return nil
 }

-// IsValidDirectoryName returns true if the string is a valid name
-// for a directory
-func IsValidDirectoryName(name string) bool {
-	// directories may not contain a path separator
-	if strings.ContainsRune(name, '/') {
-		return false
-	}
-
-	// directories may not contain null character
-	if strings.ContainsRune(name, 0) {
-		return false
-	}
-
-	return true
-}
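For reference, the removed helper rejects exactly two characters: the path separator and NUL. A standalone equivalent with sample inputs; the function name here is local to the sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// validDirName mirrors the removed IsValidDirectoryName check.
func validDirName(name string) bool {
	return !strings.ContainsRune(name, '/') && !strings.ContainsRune(name, 0)
}

func main() {
	fmt.Println(validDirName("photos"))      // true
	fmt.Println(validDirName("a/b"))         // false: contains '/'
	fmt.Println(validDirName("bad\x00name")) // false: contains NUL
}
```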
File diff suppressed because it is too large
@@ -285,14 +285,11 @@ func (s *S3Proxy) GetBucketVersioning(ctx context.Context, bucket string) (s3res
 	out, err := s.client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
 		Bucket: &bucket,
 	})
-	if err != nil {
-		return s3response.GetBucketVersioningOutput{}, handleError(err)
-	}

 	return s3response.GetBucketVersioningOutput{
 		Status:    &out.Status,
 		MFADelete: &out.MFADelete,
-	}, nil
+	}, handleError(err)
 }

 func (s *S3Proxy) ListObjectVersions(ctx context.Context, input *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
@@ -1096,9 +1093,6 @@ func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAt
 	}

 	out, err := s.client.GetObjectAttributes(ctx, input)
-	if err != nil {
-		return s3response.GetObjectAttributesResponse{}, handleError(err)
-	}

 	parts := s3response.ObjectParts{}
 	objParts := out.ObjectParts
@@ -1131,7 +1125,7 @@ func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAt
|
||||
StorageClass: out.StorageClass,
|
||||
ObjectParts: &parts,
|
||||
Checksum: out.Checksum,
|
||||
}, nil
|
||||
}, handleError(err)
|
||||
}
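// Aside: the proxy methods share one pattern — forward the call to the AWS
// SDK client, then map any SDK error through handleError into the gateway's
// s3err error space. A hedged sketch of that pattern (the method name is
// illustrative; handleError stands in for the real mapper):
func (s *S3Proxy) headBucketExample(ctx context.Context, bucket string) error {
	_, err := s.client.HeadBucket(ctx, &s3.HeadBucketInput{
		Bucket: &bucket,
	})
	// handleError translates SDK/API errors into s3err API errors.
	return handleError(err)
}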

func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
@@ -1451,7 +1445,7 @@ func (s *S3Proxy) PutBucketAcl(ctx context.Context, bucket string, data []byte)
	return handleError(s.putMetaBucketObj(ctx, bucket, data, metaPrefixAcl))
}

func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object, versionId string, tags map[string]string) error {
func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
	if bucket == s.metaBucket {
		return s3err.GetAPIError(s3err.ErrAccessDenied)
	}
@@ -1466,22 +1460,20 @@ func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object, versionI
	}

	_, err := s.client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
		Bucket:    &bucket,
		Key:       &object,
		VersionId: &versionId,
		Tagging:   tagging,
		Bucket:  &bucket,
		Key:     &object,
		Tagging: tagging,
	})
	return handleError(err)
}

func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object, versionId string) (map[string]string, error) {
func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
	if bucket == s.metaBucket {
		return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
	}
	output, err := s.client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket:    &bucket,
		Key:       &object,
		VersionId: &versionId,
		Bucket: &bucket,
		Key:    &object,
	})
	if err != nil {
		return nil, handleError(err)
@@ -1495,14 +1487,13 @@ func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object, versionI
	return tags, nil
}

func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object, versionId string) error {
func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
	if bucket == s.metaBucket {
		return s3err.GetAPIError(s3err.ErrAccessDenied)
	}
	_, err := s.client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
		Bucket:    &bucket,
		Key:       &object,
		VersionId: &versionId,
		Bucket: &bucket,
		Key:    &object,
	})
	return handleError(err)
}
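// Aside: the elided hunk of GetObjectTagging converts the SDK TagSet into the
// map the gateway returns. A sketch of that conversion, assuming the SDK
// types package is imported (the function name is illustrative):
func tagSetToMap(tagSet []types.Tag) map[string]string {
	tags := make(map[string]string, len(tagSet))
	for _, t := range tagSet {
		var k, v string
		if t.Key != nil {
			k = *t.Key
		}
		if t.Value != nil {
			v = *t.Value
		}
		tags[k] = v
	}
	return tags
}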

@@ -15,9 +15,24 @@
package scoutfs

import (
	"context"
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/pkg/xattr"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/backend/posix"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
)

// ScoutfsOpts are the options for the ScoutFS backend
@@ -26,8 +41,6 @@ type ScoutfsOpts struct {
	ChownUID bool
	// ChownGID sets the GID of the object to the GID of the user on PUT
	ChownGID bool
	// SetProjectID sets the Project ID of the bucket/object to the project ID of the user on PUT
	SetProjectID bool
	// BucketLinks enables symlinks to directories to be treated as buckets
	BucketLinks bool
	// VersioningDir sets the version directory to enable object versioning
@@ -38,10 +51,322 @@ type ScoutfsOpts struct {
	GlacierMode bool
	// DisableNoArchive prevents setting noarchive on temporary files
	DisableNoArchive bool
	// ValidateBucketNames enables minimal bucket name validation to prevent
	// incorrect access to the filesystem. This is only needed if the
	// frontend is not already validating bucket names.
	ValidateBucketNames bool
}

type ScoutFS struct {
	*posix.Posix
	rootfd  *os.File
	rootdir string

	// glaciermode enables the following behavior:
	// GET object: if file offline, return invalid object state
	// HEAD object: if file offline, set obj storage class to GLACIER
	// if file offline and staging, x-amz-restore: ongoing-request="true"
	// if file offline and not staging, x-amz-restore: ongoing-request="false"
	// if file online, x-amz-restore: ongoing-request="false", expiry-date="Fri, 2 Dec 2050 00:00:00 GMT"
	// note: this expiry-date is not used but provided for client glacier compatibility
	// ListObjects: if file offline, set obj storage class to GLACIER
	// RestoreObject: add batch stage request to file
	glaciermode bool

	// disableNoArchive is used to disable setting scoutam noarchive flag
	// on multipart parts. This is enabled by default to prevent archive
	// copies of temporary multipart parts.
	disableNoArchive bool
}

var _ backend.Backend = &ScoutFS{}

const (
	stageComplete      = "ongoing-request=\"false\", expiry-date=\"Fri, 2 Dec 2050 00:00:00 GMT\""
	stageInProgress    = "true"
	stageNotInProgress = "false"
)

const (
	// ScoutFS special xattr types
	systemPrefix = "scoutfs.hide."
	onameAttr    = systemPrefix + "objname"
	flagskey     = systemPrefix + "sam_flags"
	stagecopykey = systemPrefix + "sam_stagereq"
)

const (
	// ScoutAM Flags

	// Staging - file requested stage
	Staging uint64 = 1 << iota
	// StageFail - all copies failed to stage
	StageFail
	// NoArchive - no archive copies of file should be made
	NoArchive
	// ExtCacheRequested means file policy requests Ext Cache
	ExtCacheRequested
	// ExtCacheDone means this file ext cache copy has been
	// created already (and possibly pruned, so may not exist)
	ExtCacheDone
)
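// Aside: the ScoutAM flags form a bitmask that is stored JSON-encoded in an
// xattr; testing and setting a flag is plain mask arithmetic, shown here in
// isolation (helper names are illustrative):
func hasFlag(flags, flag uint64) bool {
	return flags&flag == flag
}

func addFlag(flags, flag uint64) uint64 {
	return flags | flag
}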

func (s *ScoutFS) Shutdown() {
	s.Posix.Shutdown()
	s.rootfd.Close()
	_ = s.rootdir
}

func (*ScoutFS) String() string {
	return "ScoutFS Gateway"
}

func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
	out, err := s.Posix.UploadPart(ctx, input)
	if err != nil {
		return nil, err
	}

	if !s.disableNoArchive {
		sum := sha256.Sum256([]byte(*input.Key))
		partPath := filepath.Join(
			*input.Bucket,                        // bucket
			posix.MetaTmpMultipartDir,            // temp multipart dir
			fmt.Sprintf("%x", sum),               // hashed objname
			*input.UploadId,                      // upload id
			fmt.Sprintf("%v", *input.PartNumber), // part number
		)

		err = setNoArchive(partPath)
		if err != nil {
			return nil, fmt.Errorf("set noarchive: %w", err)
		}
	}

	return out, err
}
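// Aside: the temporary part path hashes the object key with SHA-256 so that
// arbitrary key names map to a fixed-width directory entry. A standalone
// sketch of that construction (the directory name here is illustrative; the
// real one comes from posix.MetaTmpMultipartDir):
package main

import (
	"crypto/sha256"
	"fmt"
	"path/filepath"
)

func main() {
	bucket, key, uploadID, part := "mybucket", "photos/cat.jpg", "upload-123", 2
	sum := sha256.Sum256([]byte(key))
	p := filepath.Join(bucket, ".tmp-multipart", fmt.Sprintf("%x", sum),
		uploadID, fmt.Sprintf("%v", part))
	fmt.Println(p)
}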

// CompleteMultipartUpload scoutfs complete upload uses scoutfs move blocks
// ioctl to not have to read and copy the part data to the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
	return s.Posix.CompleteMultipartUploadWithCopy(ctx, input, moveData)
}

func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
	res, err := s.Posix.HeadObject(ctx, input)
	if err != nil {
		return nil, err
	}

	if s.glaciermode {
		objPath := filepath.Join(*input.Bucket, *input.Key)

		stclass := types.StorageClassStandard
		requestOngoing := ""

		requestOngoing = stageComplete

		// Check if there are any offline extents associated with this file.
		// If so, we will set storage class to glacier.
		st, err := statMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			stclass = types.StorageClassGlacier
			requestOngoing = stageNotInProgress

			ok, err := isStaging(objPath)
			if errors.Is(err, fs.ErrNotExist) {
				return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
			}
			if err != nil {
				return nil, fmt.Errorf("check stage status: %w", err)
			}
			if ok {
				requestOngoing = stageInProgress
			}
		}

		res.Restore = &requestOngoing
		res.StorageClass = stclass
	}

	return res, nil
}

func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
	bucket := *input.Bucket
	object := *input.Key

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return nil, fmt.Errorf("stat bucket: %w", err)
	}

	objPath := filepath.Join(bucket, object)

	fi, err := os.Stat(objPath)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if errors.Is(err, syscall.ENAMETOOLONG) {
		return nil, s3err.GetAPIError(s3err.ErrKeyTooLong)
	}
	if err != nil {
		return nil, fmt.Errorf("stat object: %w", err)
	}

	if strings.HasSuffix(object, "/") && !fi.IsDir() {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}

	if s.glaciermode {
		// Check if there are any offline extents associated with this file.
		// If so, we will return the InvalidObjectState error.
		st, err := statMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidObjectState)
		}
	}

	return s.Posix.GetObject(ctx, input)
}

func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
	if s.glaciermode {
		return s.Posix.ListObjectsParametrized(ctx, input, s.glacierFileToObj)
	} else {
		return s.Posix.ListObjects(ctx, input)
	}
}

func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
	if s.glaciermode {
		return s.Posix.ListObjectsV2Parametrized(ctx, input, s.glacierFileToObj)
	} else {
		return s.Posix.ListObjectsV2(ctx, input)
	}
}

// FileToObj function for ListObject calls that adds a Glacier storage class if the file is offline
func (s *ScoutFS) glacierFileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
	posixFileToObj := s.Posix.FileToObj(bucket, fetchOwner)

	return func(path string, d fs.DirEntry) (s3response.Object, error) {
		res, err := posixFileToObj(path, d)
		if err != nil || d.IsDir() {
			return res, err
		}
		objPath := filepath.Join(bucket, path)
		// Check if there are any offline extents associated with this file.
		// If so, we will return the Glacier storage class
		st, err := statMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return s3response.Object{}, backend.ErrSkipObj
		}
		if err != nil {
			return s3response.Object{}, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			res.StorageClass = types.ObjectStorageClassGlacier
		}
		return res, nil
	}
}
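// Aside: glacierFileToObj is a decorator over the posix FileToObj callback —
// run the base conversion, then override the storage class when the file has
// offline extents. The wrapping pattern in isolation (types reduced to a
// minimal illustrative form):
package main

import "fmt"

type object struct{ storageClass string }

type getObjFunc func(path string) (object, error)

func withGlacierClass(base getObjFunc, isOffline func(string) bool) getObjFunc {
	return func(path string) (object, error) {
		obj, err := base(path)
		if err != nil {
			return obj, err
		}
		if isOffline(path) {
			obj.storageClass = "GLACIER"
		}
		return obj, nil
	}
}

func main() {
	base := func(path string) (object, error) { return object{storageClass: "STANDARD"}, nil }
	fn := withGlacierClass(base, func(string) bool { return true })
	obj, _ := fn("bucket/obj")
	fmt.Println(obj.storageClass)
}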

// RestoreObject will set stage request on file if offline and do nothing if
// file is online
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
	bucket := *input.Bucket
	object := *input.Key

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return fmt.Errorf("stat bucket: %w", err)
	}

	err = setStaging(filepath.Join(bucket, object))
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if err != nil {
		return fmt.Errorf("stage object: %w", err)
	}

	return nil
}

func isStaging(objname string) (bool, error) {
	b, err := xattr.Get(objname, flagskey)
	if err != nil && !isNoAttr(err) {
		return false, err
	}

	var flags uint64
	if !isNoAttr(err) {
		err = json.Unmarshal(b, &flags)
		if err != nil {
			return false, err
		}
	}

	return flags&Staging == Staging, nil
}

func setFlag(objname string, flag uint64) error {
	b, err := xattr.Get(objname, flagskey)
	if err != nil && !isNoAttr(err) {
		return err
	}

	var oldflags uint64
	if !isNoAttr(err) {
		err = json.Unmarshal(b, &oldflags)
		if err != nil {
			return err
		}
	}

	newflags := oldflags | flag

	if newflags == oldflags {
		// no flags change, just return
		return nil
	}

	b, err = json.Marshal(&newflags)
	if err != nil {
		return err
	}

	return xattr.Set(objname, flagskey, b)
}

func setStaging(objname string) error {
	return setFlag(objname, Staging)
}

func setNoArchive(objname string) error {
	return setFlag(objname, NoArchive)
}

func isNoAttr(err error) bool {
	xerr, ok := err.(*xattr.Error)
	if ok && xerr.Err == xattr.ENOATTR {
		return true
	}
	return false
}
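// Aside: the helpers above persist a uint64 bitmask as JSON in a hidden
// xattr. A minimal round-trip using the same github.com/pkg/xattr calls
// (the path is illustrative; the attribute name matches flagskey above):
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pkg/xattr"
)

func main() {
	const attr = "scoutfs.hide.sam_flags"
	path := "/mnt/scoutfs/bucket/obj" // illustrative

	b, _ := json.Marshal(uint64(1)) // Staging bit
	if err := xattr.Set(path, attr, b); err != nil {
		fmt.Println("set:", err)
		return
	}

	b, err := xattr.Get(path, attr)
	if err != nil {
		fmt.Println("get:", err)
		return
	}
	var flags uint64
	_ = json.Unmarshal(b, &flags)
	fmt.Printf("flags: %b\n", flags)
}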

@@ -17,70 +17,24 @@
package scoutfs

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/pkg/xattr"
	"github.com/versity/scoutfs-go"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/backend/meta"
	"github.com/versity/versitygw/backend/posix"
	"github.com/versity/versitygw/debuglogger"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
)

type ScoutFS struct {
	*posix.Posix
	rootfd  *os.File
	rootdir string

	// glaciermode enables the following behavior:
	// GET object: if file offline, return invalid object state
	// HEAD object: if file offline, set obj storage class to GLACIER
	// if file offline and staging, x-amz-restore: ongoing-request="true"
	// if file offline and not staging, x-amz-restore: ongoing-request="false"
	// if file online, x-amz-restore: ongoing-request="false", expiry-date="Fri, 2 Dec 2050 00:00:00 GMT"
	// note: this expiry-date is not used but provided for client glacier compatibility
	// ListObjects: if file offline, set obj storage class to GLACIER
	// RestoreObject: add batch stage request to file
	glaciermode bool

	// disableNoArchive is used to disable setting scoutam noarchive flag
	// on multipart parts. This is enabled by default to prevent archive
	// copies of temporary multipart parts.
	disableNoArchive bool

	// enable posix level bucket name validations, not needed if the
	// frontend handlers are already validating bucket names
	validateBucketName bool

	// projectIDEnabled enables setting projectid of new buckets and objects
	// to the account project id when non-0
	projectIDEnabled bool
}

func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
	metastore := meta.XattrMeta{}

	p, err := posix.New(rootdir, metastore, posix.PosixOpts{
		ChownUID:            opts.ChownUID,
		ChownGID:            opts.ChownGID,
		BucketLinks:         opts.BucketLinks,
		NewDirPerm:          opts.NewDirPerm,
		VersioningDir:       opts.VersioningDir,
		ValidateBucketNames: opts.ValidateBucketNames,
		ChownUID:      opts.ChownUID,
		ChownGID:      opts.ChownGID,
		BucketLinks:   opts.BucketLinks,
		NewDirPerm:    opts.NewDirPerm,
		VersioningDir: opts.VersioningDir,
	})
	if err != nil {
		return nil, err
@@ -91,491 +45,50 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
		return nil, fmt.Errorf("open %v: %w", rootdir, err)
	}

	setProjectID := opts.SetProjectID
	if opts.SetProjectID {
		setProjectID = fGetFormatVersion(f).AtLeast(versionScoutFsV2)
		if !setProjectID {
			fmt.Println("WARNING:")
			fmt.Println("Disabling ProjectIDs for unsupported FS format version")
			fmt.Println("See documentation for format version upgrades")
		}
	}

	return &ScoutFS{
		Posix:            p,
		rootfd:           f,
		rootdir:          rootdir,
		glaciermode:      opts.GlacierMode,
		disableNoArchive: opts.DisableNoArchive,
		projectIDEnabled: setProjectID,
	}, nil
}

const (
	stageComplete      = "ongoing-request=\"false\", expiry-date=\"Fri, 2 Dec 2050 00:00:00 GMT\""
	stageInProgress    = "true"
	stageNotInProgress = "false"
)

const (
	// ScoutFS special xattr types
	systemPrefix = "scoutfs.hide."
	flagskey     = systemPrefix + "sam_flags"
)

const (
	// ScoutAM Flags

	// Staging - file requested stage
	Staging uint64 = 1 << iota
	// StageFail - all copies failed to stage
	StageFail
	// NoArchive - no archive copies of file should be made
	NoArchive
	// ExtCacheRequested means file policy requests Ext Cache
	ExtCacheRequested
	// ExtCacheDone means this file ext cache copy has been
	// created already (and possibly pruned, so may not exist)
	ExtCacheDone
)

func (s *ScoutFS) Shutdown() {
	s.Posix.Shutdown()
	s.rootfd.Close()
}

func (*ScoutFS) String() string {
	return "ScoutFS Gateway"
}

func (s *ScoutFS) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, acl []byte) error {
	err := s.Posix.CreateBucket(ctx, input, acl)
func moveData(from *os.File, to *os.File) error {
	// May fail if the files are not 4K aligned; check for alignment
	ffi, err := from.Stat()
	if err != nil {
		return err
		return fmt.Errorf("stat from: %v", err)
	}

	if s.projectIDEnabled {
		acct, ok := ctx.Value("account").(auth.Account)
		if !ok {
			acct = auth.Account{}
		}

		if !isValidProjectID(acct.ProjectID) {
			// early return to avoid the open if we don't have a valid
			// project id
			return nil
		}

		f, err := os.Open(*input.Bucket)
		if err != nil {
			debuglogger.InernalError(fmt.Errorf("create bucket %q set project id - open: %v",
				*input.Bucket, err))
			return nil
		}

		err = s.setProjectID(f, acct.ProjectID)
		f.Close()
		if err != nil {
			debuglogger.InernalError(fmt.Errorf("create bucket %q set project id: %v",
				*input.Bucket, err))
		}
	}

	return nil
}

func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
	res, err := s.Posix.HeadObject(ctx, input)
	tfi, err := to.Stat()
	if err != nil {
		return nil, err
		return fmt.Errorf("stat to: %v", err)
	}
	if ffi.Size()%4096 != 0 || tfi.Size()%4096 != 0 {
		return os.ErrInvalid
	}

	if s.glaciermode {
		objPath := filepath.Join(*input.Bucket, *input.Key)

		stclass := types.StorageClassStandard
		requestOngoing := ""

		requestOngoing = stageComplete

		// Check if there are any offline extents associated with this file.
		// If so, we will set storage class to glacier.
		st, err := scoutfs.StatMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			stclass = types.StorageClassGlacier
			requestOngoing = stageNotInProgress

			ok, err := isStaging(objPath)
			if errors.Is(err, fs.ErrNotExist) {
				return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
			}
			if err != nil {
				return nil, fmt.Errorf("check stage status: %w", err)
			}
			if ok {
				requestOngoing = stageInProgress
			}
		}

		res.Restore = &requestOngoing
		res.StorageClass = stclass
	}

	return res, nil
}

func (s *ScoutFS) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
	}

	return s.Posix.PutObjectWithPostFunc(ctx, po, func(f *os.File) error {
		err := s.setProjectID(f, acct.ProjectID)
		if err != nil {
			debuglogger.InernalError(fmt.Errorf("put object %v/%v set project id: %v",
				filepath.Join(*po.Bucket, *po.Key), acct.ProjectID, err))
		}

		return nil
	})
}

func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
	}

	return s.Posix.UploadPartWithPostFunc(ctx, input,
		func(f *os.File) error {
			if !s.disableNoArchive {
				err := setNoArchive(f)
				if err != nil {
					return fmt.Errorf("set noarchive: %w", err)
				}
			}

			err := s.setProjectID(f, acct.ProjectID)
			if err != nil {
				return fmt.Errorf("set project id %v: %w", acct.ProjectID, err)
			}

			return nil
		})
}

// CompleteMultipartUpload scoutfs complete upload uses scoutfs move blocks
// ioctl to not have to read and copy the part data to the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
	}

	return s.Posix.CompleteMultipartUploadWithCopy(ctx, input,
		func(from *os.File, to *os.File) error {
			// May fail if the files are not 4K aligned; check for alignment
			ffi, err := from.Stat()
			if err != nil {
				return fmt.Errorf("complete-mpu stat from: %w", err)
			}
			tfi, err := to.Stat()
			if err != nil {
				return fmt.Errorf("complete-mpu stat to: %w", err)
			}
			if ffi.Size()%4096 != 0 || tfi.Size()%4096 != 0 {
				return os.ErrInvalid
			}

			err = s.setProjectID(to, acct.ProjectID)
			if err != nil {
				debuglogger.InernalError(fmt.Errorf("complete-mpu %q/%q set project id %v: %v",
					*input.Bucket, *input.Key, acct.ProjectID, err))
			}

			err = scoutfs.MoveData(from, to)
			if err != nil {
				return fmt.Errorf("complete-mpu movedata: %w", err)
			}

			return nil
		})
}
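// Aside: the move-blocks fast path only works when both files are 4 KiB
// aligned, hence the size%4096 guard before scoutfs.MoveData. The guard in
// isolation (the function name is illustrative):
func aligned4k(f *os.File) (bool, error) {
	fi, err := f.Stat()
	if err != nil {
		return false, err
	}
	return fi.Size()%4096 == 0, nil
}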

func (s *ScoutFS) isBucketValid(bucket string) bool {
	if !s.validateBucketName {
		return true
	}

	return backend.IsValidDirectoryName(bucket)
}

func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
	bucket := *input.Bucket
	object := *input.Key

	if !s.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	err = scoutfs.MoveData(from, to)
	if err != nil {
		return nil, fmt.Errorf("stat bucket: %w", err)
		debuglogger.Logf("ScoutFs MoveData failed: %v", err)
	}
	return err
}

	objPath := filepath.Join(bucket, object)

	fi, err := os.Stat(objPath)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if errors.Is(err, syscall.ENAMETOOLONG) {
		return nil, s3err.GetAPIError(s3err.ErrKeyTooLong)
	}
func statMore(path string) (stat, error) {
	st, err := scoutfs.StatMore(path)
	if err != nil {
		return nil, fmt.Errorf("stat object: %w", err)
		return stat{}, err
	}
	var s stat

	if strings.HasSuffix(object, "/") && !fi.IsDir() {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	s.Meta_seq = st.Meta_seq
	s.Data_seq = st.Data_seq
	s.Data_version = st.Data_version
	s.Online_blocks = st.Online_blocks
	s.Offline_blocks = st.Offline_blocks
	s.Crtime_sec = st.Crtime_sec
	s.Crtime_nsec = st.Crtime_nsec

	if s.glaciermode {
		// Check if there are any offline extents associated with this file.
		// If so, we will return the InvalidObjectState error.
		st, err := scoutfs.StatMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidObjectState)
		}
	}

	return s.Posix.GetObject(ctx, input)
}

func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
	if s.glaciermode {
		return s.Posix.ListObjectsParametrized(ctx, input, s.glacierFileToObj)
	} else {
		return s.Posix.ListObjects(ctx, input)
	}
}

func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
	if s.glaciermode {
		return s.Posix.ListObjectsV2Parametrized(ctx, input, s.glacierFileToObj)
	} else {
		return s.Posix.ListObjectsV2(ctx, input)
	}
}

// FileToObj function for ListObject calls that adds a Glacier storage class if the file is offline
func (s *ScoutFS) glacierFileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
	posixFileToObj := s.Posix.FileToObj(bucket, fetchOwner)

	return func(path string, d fs.DirEntry) (s3response.Object, error) {
		res, err := posixFileToObj(path, d)
		if err != nil || d.IsDir() {
			return res, err
		}
		objPath := filepath.Join(bucket, path)
		// Check if there are any offline extents associated with this file.
		// If so, we will return the Glacier storage class
		st, err := scoutfs.StatMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return s3response.Object{}, backend.ErrSkipObj
		}
		if err != nil {
			return s3response.Object{}, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			res.StorageClass = types.ObjectStorageClassGlacier
		}
		return res, nil
	}
}

// RestoreObject will set stage request on file if offline and do nothing if
// file is online
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
	bucket := *input.Bucket
	object := *input.Key

	if !s.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return fmt.Errorf("stat bucket: %w", err)
	}

	err = setStaging(filepath.Join(bucket, object))
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if err != nil {
		return fmt.Errorf("stage object: %w", err)
	}

	return nil
}

func isStaging(objname string) (bool, error) {
	b, err := xattr.Get(objname, flagskey)
	if err != nil && !isNoAttr(err) {
		return false, err
	}

	var flags uint64
	if !isNoAttr(err) {
		err = json.Unmarshal(b, &flags)
		if err != nil {
			return false, err
		}
	}

	return flags&Staging == Staging, nil
}

func setFlag(objname string, flag uint64) error {
	f, err := os.Open(objname)
	if err != nil {
		return err
	}
	defer f.Close()

	return fsetFlag(f, flag)
}

func fsetFlag(f *os.File, flag uint64) error {
	b, err := xattr.FGet(f, flagskey)
	if err != nil && !isNoAttr(err) {
		return err
	}

	var oldflags uint64
	if !isNoAttr(err) {
		err = json.Unmarshal(b, &oldflags)
		if err != nil {
			return err
		}
	}

	newflags := oldflags | flag

	if newflags == oldflags {
		// no flags change, just return
		return nil
	}

	b, err = json.Marshal(&newflags)
	if err != nil {
		return err
	}

	return xattr.FSet(f, flagskey, b)
}

func setStaging(objname string) error {
	return setFlag(objname, Staging)
}

func setNoArchive(f *os.File) error {
	return fsetFlag(f, NoArchive)
}

func isNoAttr(err error) bool {
	xerr, ok := err.(*xattr.Error)
	if ok && xerr.Err == xattr.ENOATTR {
		return true
	}
	return false
}

func (s *ScoutFS) setProjectID(f *os.File, proj int) error {
	if s.projectIDEnabled && isValidProjectID(proj) {
		err := scoutfs.SetProjectID(f, uint64(proj))
		if err != nil {
			return fmt.Errorf("set project id: %w", err)
		}
	}
	return nil
}

func isValidProjectID(proj int) bool {
	return proj > 0
}

const (
	sysscoutfs    = "/sys/fs/scoutfs/"
	formatversion = "format_version"
)

// fGetFormatVersion returns the ScoutFS format version reported by sysfs
func fGetFormatVersion(f *os.File) scoutFsVersion {
	fsid, err := scoutfs.GetIDs(f)
	if err != nil {
		return versionScoutFsNotScoutFS
	}

	path := filepath.Join(sysscoutfs, fsid.ShortID, formatversion)
	buf, err := os.ReadFile(path)
	if err != nil {
		return versionScoutFsUnknown
	}

	str := strings.TrimSpace(string(buf))
	vers, err := strconv.Atoi(str)
	if err != nil {
		return versionScoutFsUnknown
	}

	return scoutFsVersion(vers)
}

const (
	// versionScoutFsUnknown is unknown version
	versionScoutFsUnknown scoutFsVersion = iota
	// versionScoutFsV1 is version 1
	versionScoutFsV1
	// versionScoutFsV2 is version 2
	versionScoutFsV2
	// versionScoutFsMin is minimum scoutfs version
	versionScoutFsMin = versionScoutFsV1
	// versionScoutFsMax is maximum scoutfs version
	versionScoutFsMax = versionScoutFsV2
	// versionScoutFsNotScoutFS means the target FS is not scoutfs
	versionScoutFsNotScoutFS = versionScoutFsMax + 1
)

// scoutFsVersion is a ScoutFS format version
type scoutFsVersion int

// AtLeast returns true if version is valid and at least b
func (a scoutFsVersion) AtLeast(b scoutFsVersion) bool {
	return a.IsValid() && a >= b
}

func (a scoutFsVersion) IsValid() bool {
	return a >= versionScoutFsMin && a <= versionScoutFsMax
	return s, nil
}
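// Aside: AtLeast is an ordered comparison that also rejects the sentinel
// values (versionScoutFsUnknown, versionScoutFsNotScoutFS), so feature gates
// read naturally, e.g.:
//
//	if fGetFormatVersion(f).AtLeast(versionScoutFsV2) {
//		// filesystem supports project IDs
//	}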

@@ -17,15 +17,23 @@
package scoutfs

import (
	"errors"
	"fmt"

	"github.com/versity/versitygw/backend"
	"os"
)

type ScoutFS struct {
	backend.BackendUnsupported
}

func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
	return nil, fmt.Errorf("scoutfs only available on linux")
}

var (
	errNotSupported = errors.New("not supported")
)

func moveData(_, _ *os.File) error {
	return errNotSupported
}

func statMore(_ string) (stat, error) {
	return stat{}, errNotSupported
}

@@ -12,21 +12,14 @@
// specific language governing permissions and limitations
// under the License.

package auth
package scoutfs

type PolicyVersion string

const (
	PolicyVersion2008 PolicyVersion = "2008-10-17"
	PolicyVersion2012 PolicyVersion = "2012-10-17"
)

// isValid checks if the policy version is valid or not
func (pv PolicyVersion) isValid() bool {
	switch pv {
	case PolicyVersion2008, PolicyVersion2012:
		return true
	default:
		return false
	}
type stat struct {
	Meta_seq       uint64
	Data_seq       uint64
	Data_version   uint64
	Online_blocks  uint64
	Offline_blocks uint64
	Crtime_sec     uint64
	Crtime_nsec    uint32
}
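// Aside: with the stat wrapper above, offline detection reduces to a single
// field check; a small helper sketch (the name is illustrative, reusing the
// statMore helper from the diff):
func isOffline(path string) (bool, error) {
	st, err := statMore(path)
	if err != nil {
		return false, err
	}
	return st.Offline_blocks != 0, nil
}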
@@ -82,11 +82,6 @@ func adminCommand() *cli.Command {
					Usage:   "groupID for the new user",
					Aliases: []string{"gi"},
				},
				&cli.IntFlag{
					Name:    "project-id",
					Usage:   "projectID for the new user",
					Aliases: []string{"pi"},
				},
			},
		},
		{
@@ -120,11 +115,6 @@ func adminCommand() *cli.Command {
					Usage:   "groupID for the new user",
					Aliases: []string{"gi"},
				},
				&cli.IntFlag{
					Name:    "project-id",
					Usage:   "projectID for the new user",
					Aliases: []string{"pi"},
				},
			},
		},
		{
@@ -224,7 +214,7 @@ func initHTTPClient() *http.Client {

func createUser(ctx *cli.Context) error {
	access, secret, role := ctx.String("access"), ctx.String("secret"), ctx.String("role")
	userID, groupID, projectID := ctx.Int("user-id"), ctx.Int("group-id"), ctx.Int("project-id")
	userID, groupID := ctx.Int("user-id"), ctx.Int("group-id")
	if access == "" || secret == "" {
		return fmt.Errorf("invalid input parameters for the new user access/secret keys")
	}
@@ -233,12 +223,11 @@ func createUser(ctx *cli.Context) error {
	}

	acc := auth.Account{
		Access:    access,
		Secret:    secret,
		Role:      auth.Role(role),
		UserID:    userID,
		GroupID:   groupID,
		ProjectID: projectID,
		Access:  access,
		Secret:  secret,
		Role:    auth.Role(role),
		UserID:  userID,
		GroupID: groupID,
	}

	accxml, err := xml.Marshal(acc)
@@ -327,14 +316,7 @@ func deleteUser(ctx *cli.Context) error {
}

func updateUser(ctx *cli.Context) error {
	access, secret, userId, groupId, projectID, role :=
		ctx.String("access"),
		ctx.String("secret"),
		ctx.Int("user-id"),
		ctx.Int("group-id"),
		ctx.Int("projectID"),
		auth.Role(ctx.String("role"))

	access, secret, userId, groupId, role := ctx.String("access"), ctx.String("secret"), ctx.Int("user-id"), ctx.Int("group-id"), auth.Role(ctx.String("role"))
	props := auth.MutableProps{}
	if ctx.IsSet("role") {
		if !role.IsValid() {
@@ -351,9 +333,6 @@ func updateUser(ctx *cli.Context) error {
	if ctx.IsSet("group-id") {
		props.GroupID = &groupId
	}
	if ctx.IsSet("project-id") {
		props.ProjectID = &projectID
	}

	propsxml, err := xml.Marshal(props)
	if err != nil {
@@ -454,10 +433,10 @@ const (
func printAcctTable(accs []auth.Account) {
	w := new(tabwriter.Writer)
	w.Init(os.Stdout, minwidth, tabwidth, padding, padchar, flags)
	fmt.Fprintln(w, "Account\tRole\tUserID\tGroupID\tProjectID")
	fmt.Fprintln(w, "-------\t----\t------\t-------\t---------")
	fmt.Fprintln(w, "Account\tRole\tUserID\tGroupID")
	fmt.Fprintln(w, "-------\t----\t------\t-------")
	for _, acc := range accs {
		fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\n", acc.Access, acc.Role, acc.UserID, acc.GroupID, acc.ProjectID)
		fmt.Fprintf(w, "%v\t%v\t%v\t%v\n", acc.Access, acc.Role, acc.UserID, acc.GroupID)
	}
	fmt.Fprintln(w)
	w.Flush()
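// Aside: the account table relies on text/tabwriter for column alignment; a
// minimal standalone version of the same pattern (values are illustrative):
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 2, 8, 2, ' ', 0)
	fmt.Fprintln(w, "Account\tRole\tUserID\tGroupID")
	fmt.Fprintln(w, "-------\t----\t------\t-------")
	fmt.Fprintf(w, "%v\t%v\t%v\t%v\n", "alice", "admin", 1000, 1000)
	w.Flush()
}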

@@ -32,63 +32,56 @@ import (
	"github.com/versity/versitygw/metrics"
	"github.com/versity/versitygw/s3api"
	"github.com/versity/versitygw/s3api/middlewares"
	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3event"
	"github.com/versity/versitygw/s3log"
)

var (
	port, admPort string
	rootUserAccess string
	rootUserSecret string
	region string
	admCertFile, admKeyFile string
	certFile, keyFile string
	kafkaURL, kafkaTopic, kafkaKey string
	natsURL, natsTopic string
	rabbitmqURL, rabbitmqExchange string
	rabbitmqRoutingKey string
	eventWebhookURL string
	eventConfigFilePath string
	logWebhookURL, accessLog string
	adminLogFile string
	healthPath string
	virtualDomain string
	debug bool
	keepAlive bool
	pprof string
	quiet bool
	readonly bool
	disableStrictBucketNames bool
	iamDir string
	ldapURL, ldapBindDN, ldapPassword string
	ldapQueryBase, ldapObjClasses string
	ldapAccessAtr, ldapSecAtr, ldapRoleAtr string
	ldapUserIdAtr, ldapGroupIdAtr string
	ldapProjectIdAtr string
	ldapTLSSkipVerify bool
	vaultEndpointURL, vaultNamespace string
	vaultSecretStoragePath string
	vaultSecretStorageNamespace string
	vaultAuthMethod, vaultAuthNamespace string
	vaultMountPath string
	vaultRootToken, vaultRoleId string
	vaultRoleSecret, vaultServerCert string
	vaultClientCert, vaultClientCertKey string
	s3IamAccess, s3IamSecret string
	s3IamRegion, s3IamBucket string
	s3IamEndpoint string
	s3IamSslNoVerify bool
	iamCacheDisable bool
	iamCacheTTL int
	iamCachePrune int
	metricsService string
	statsdServers string
	dogstatsServers string
	ipaHost, ipaVaultName string
	ipaUser, ipaPassword string
	ipaInsecure bool
	iamDebug bool
	port, admPort string
	rootUserAccess string
	rootUserSecret string
	region string
	admCertFile, admKeyFile string
	certFile, keyFile string
	kafkaURL, kafkaTopic, kafkaKey string
	natsURL, natsTopic string
	rabbitmqURL, rabbitmqExchange string
	rabbitmqRoutingKey string
	eventWebhookURL string
	eventConfigFilePath string
	logWebhookURL, accessLog string
	adminLogFile string
	healthPath string
	virtualDomain string
	debug bool
	keepAlive bool
	pprof string
	quiet bool
	readonly bool
	iamDir string
	ldapURL, ldapBindDN, ldapPassword string
	ldapQueryBase, ldapObjClasses string
	ldapAccessAtr, ldapSecAtr, ldapRoleAtr string
	ldapUserIdAtr, ldapGroupIdAtr string
	vaultEndpointURL, vaultSecretStoragePath string
	vaultAuthMethod, vaultMountPath string
	vaultRootToken, vaultRoleId string
	vaultRoleSecret, vaultServerCert string
	vaultClientCert, vaultClientCertKey string
	s3IamAccess, s3IamSecret string
	s3IamRegion, s3IamBucket string
	s3IamEndpoint string
	s3IamSslNoVerify bool
	iamCacheDisable bool
	iamCacheTTL int
	iamCachePrune int
	metricsService string
	statsdServers string
	dogstatsServers string
	ipaHost, ipaVaultName string
	ipaUser, ipaPassword string
	ipaInsecure bool
	iamDebug bool
)

var (
@@ -406,54 +399,24 @@ func initFlags() []cli.Flag {
			EnvVars:     []string{"VGW_IAM_LDAP_GROUP_ID_ATR"},
			Destination: &ldapGroupIdAtr,
		},
		&cli.StringFlag{
			Name:        "iam-ldap-project-id-atr",
			Usage:       "ldap server user project id attribute name",
			EnvVars:     []string{"VGW_IAM_LDAP_PROJECT_ID_ATR"},
			Destination: &ldapProjectIdAtr,
		},
		&cli.BoolFlag{
			Name:        "iam-ldap-tls-skip-verify",
			Usage:       "disable TLS certificate verification for LDAP connections (insecure, for self-signed certificates)",
			EnvVars:     []string{"VGW_IAM_LDAP_TLS_SKIP_VERIFY"},
			Destination: &ldapTLSSkipVerify,
		},
		&cli.StringFlag{
			Name:        "iam-vault-endpoint-url",
			Usage:       "vault server url",
			EnvVars:     []string{"VGW_IAM_VAULT_ENDPOINT_URL"},
			Destination: &vaultEndpointURL,
		},
		&cli.StringFlag{
			Name:        "iam-vault-namespace",
			Usage:       "vault server namespace",
			EnvVars:     []string{"VGW_IAM_VAULT_NAMESPACE"},
			Destination: &vaultNamespace,
		},
		&cli.StringFlag{
			Name:        "iam-vault-secret-storage-path",
			Usage:       "vault server secret storage path",
			EnvVars:     []string{"VGW_IAM_VAULT_SECRET_STORAGE_PATH"},
			Destination: &vaultSecretStoragePath,
		},
		&cli.StringFlag{
			Name:        "iam-vault-secret-storage-namespace",
			Usage:       "vault server secret storage namespace",
			EnvVars:     []string{"VGW_IAM_VAULT_SECRET_STORAGE_NAMESPACE"},
			Destination: &vaultSecretStorageNamespace,
		},
		&cli.StringFlag{
			Name:        "iam-vault-auth-method",
			Usage:       "vault server auth method",
			EnvVars:     []string{"VGW_IAM_VAULT_AUTH_METHOD"},
			Destination: &vaultAuthMethod,
		},
		&cli.StringFlag{
			Name:        "iam-vault-auth-namespace",
			Usage:       "vault server auth namespace",
			EnvVars:     []string{"VGW_IAM_VAULT_AUTH_NAMESPACE"},
			Destination: &vaultAuthNamespace,
		},
		&cli.StringFlag{
			Name:  "iam-vault-mount-path",
			Usage: "vault server mount path",
@@ -573,12 +536,6 @@ func initFlags() []cli.Flag {
			EnvVars:     []string{"VGW_READ_ONLY"},
			Destination: &readonly,
		},
		&cli.BoolFlag{
			Name:        "disable-strict-bucket-names",
			Usage:       "allow relaxed bucket naming (disables strict validation checks)",
			EnvVars:     []string{"VGW_DISABLE_STRICT_BUCKET_NAMES"},
			Destination: &disableStrictBucketNames,
		},
		&cli.StringFlag{
			Name:  "metrics-service-name",
			Usage: "service name tag for metrics, hostname if blank",
@@ -638,8 +595,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
		return fmt.Errorf("root user access and secret key must be provided")
	}

	utils.SetBucketNameValidationStrict(!disableStrictBucketNames)

	if pprof != "" {
		// listen on specified port for pprof debug
		// point browser to http://<ip:port>/debug/pprof/
@@ -695,46 +650,41 @@ func runGateway(ctx context.Context, be backend.Backend) error {
			Secret: rootUserSecret,
			Role:   auth.RoleAdmin,
		},
		Dir: iamDir,
		LDAPServerURL: ldapURL,
		LDAPBindDN: ldapBindDN,
		LDAPPassword: ldapPassword,
		LDAPQueryBase: ldapQueryBase,
		LDAPObjClasses: ldapObjClasses,
		LDAPAccessAtr: ldapAccessAtr,
		LDAPSecretAtr: ldapSecAtr,
		LDAPRoleAtr: ldapRoleAtr,
		LDAPUserIdAtr: ldapUserIdAtr,
		LDAPGroupIdAtr: ldapGroupIdAtr,
		LDAPProjectIdAtr: ldapProjectIdAtr,
		LDAPTLSSkipVerify: ldapTLSSkipVerify,
		VaultEndpointURL: vaultEndpointURL,
		VaultNamespace: vaultNamespace,
		VaultSecretStoragePath: vaultSecretStoragePath,
		VaultSecretStorageNamespace: vaultSecretStorageNamespace,
		VaultAuthMethod: vaultAuthMethod,
		VaultAuthNamespace: vaultAuthNamespace,
		VaultMountPath: vaultMountPath,
		VaultRootToken: vaultRootToken,
		VaultRoleId: vaultRoleId,
		VaultRoleSecret: vaultRoleSecret,
		VaultServerCert: vaultServerCert,
		VaultClientCert: vaultClientCert,
		VaultClientCertKey: vaultClientCertKey,
		S3Access: s3IamAccess,
		S3Secret: s3IamSecret,
		S3Region: s3IamRegion,
		S3Bucket: s3IamBucket,
		S3Endpoint: s3IamEndpoint,
		S3DisableSSlVerfiy: s3IamSslNoVerify,
		CacheDisable: iamCacheDisable,
		CacheTTL: iamCacheTTL,
		CachePrune: iamCachePrune,
		IpaHost: ipaHost,
		IpaVaultName: ipaVaultName,
		IpaUser: ipaUser,
		IpaPassword: ipaPassword,
		IpaInsecure: ipaInsecure,
		Dir: iamDir,
		LDAPServerURL: ldapURL,
		LDAPBindDN: ldapBindDN,
		LDAPPassword: ldapPassword,
		LDAPQueryBase: ldapQueryBase,
		LDAPObjClasses: ldapObjClasses,
		LDAPAccessAtr: ldapAccessAtr,
		LDAPSecretAtr: ldapSecAtr,
		LDAPRoleAtr: ldapRoleAtr,
		LDAPUserIdAtr: ldapUserIdAtr,
		LDAPGroupIdAtr: ldapGroupIdAtr,
		VaultEndpointURL: vaultEndpointURL,
		VaultSecretStoragePath: vaultSecretStoragePath,
		VaultAuthMethod: vaultAuthMethod,
		VaultMountPath: vaultMountPath,
		VaultRootToken: vaultRootToken,
		VaultRoleId: vaultRoleId,
		VaultRoleSecret: vaultRoleSecret,
		VaultServerCert: vaultServerCert,
		VaultClientCert: vaultClientCert,
		VaultClientCertKey: vaultClientCertKey,
		S3Access: s3IamAccess,
		S3Secret: s3IamSecret,
		S3Region: s3IamRegion,
		S3Bucket: s3IamBucket,
		S3Endpoint: s3IamEndpoint,
		S3DisableSSlVerfiy: s3IamSslNoVerify,
		CacheDisable: iamCacheDisable,
		CacheTTL: iamCacheTTL,
		CachePrune: iamCachePrune,
		IpaHost: ipaHost,
		IpaVaultName: ipaVaultName,
		IpaUser: ipaUser,
		IpaPassword: ipaPassword,
		IpaInsecure: ipaInsecure,
	})
	if err != nil {
		return fmt.Errorf("setup iam: %w", err)
@@ -848,36 +798,31 @@ Loop:
	}
	saveErr := err

	// first shut down the s3api and admin servers
	// as they have a dependency on the other modules
	err = srv.ShutDown()
	if err != nil {
		fmt.Fprintf(os.Stderr, "shutdown api server: %v\n", err)
	}

	if admSrv != nil {
		err := admSrv.Shutdown()
		if err != nil {
			fmt.Fprintf(os.Stderr, "shutdown admin server: %v\n", err)
		}
	}

	be.Shutdown()

	err = iam.Shutdown()
	if err != nil {
		if saveErr == nil {
			saveErr = err
		}
		fmt.Fprintf(os.Stderr, "shutdown iam: %v\n", err)
	}

	if loggers.S3Logger != nil {
		err := loggers.S3Logger.Shutdown()
		if err != nil {
			if saveErr == nil {
				saveErr = err
			}
			fmt.Fprintf(os.Stderr, "shutdown s3 logger: %v\n", err)
		}
	}
	if loggers.AdminLogger != nil {
		err := loggers.AdminLogger.Shutdown()
		if err != nil {
			if saveErr == nil {
				saveErr = err
			}
			fmt.Fprintf(os.Stderr, "shutdown admin logger: %v\n", err)
		}
	}
@@ -885,6 +830,9 @@ Loop:
	if evSender != nil {
		err := evSender.Close()
		if err != nil {
			if saveErr == nil {
				saveErr = err
			}
			fmt.Fprintf(os.Stderr, "close event sender: %v\n", err)
		}
	}
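// Aside: the shutdown path aggregates errors with a first-error-wins pattern —
// every component is still shut down, but only the first failure is returned.
// The pattern in isolation (a sketch, not the gateway's actual helper):
func shutdownAll(closers ...func() error) error {
	var saveErr error
	for _, c := range closers {
		if err := c(); err != nil {
			if saveErr == nil {
				saveErr = err
			}
			fmt.Fprintf(os.Stderr, "shutdown: %v\n", err)
		}
	}
	return saveErr
}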

@@ -32,9 +32,8 @@ func pluginCommand() *cli.Command {
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name:    "config",
				Usage:   "location of the plugin config file",
				Usage:   "location of the config file",
				Aliases: []string{"c"},
				EnvVars: []string{"VGW_PLUGIN_CONFIG"},
			},
		},
	}

@@ -120,13 +120,12 @@ func runPosix(ctx *cli.Context) error {
	}

	opts := posix.PosixOpts{
		ChownUID:            chownuid,
		ChownGID:            chowngid,
		BucketLinks:         bucketlinks,
		VersioningDir:       versioningDir,
		NewDirPerm:          fs.FileMode(dirPerms),
		ForceNoTmpFile:      forceNoTmpFile,
		ValidateBucketNames: disableStrictBucketNames,
		ChownUID:       chownuid,
		ChownGID:       chowngid,
		BucketLinks:    bucketlinks,
		VersioningDir:  versioningDir,
		NewDirPerm:     fs.FileMode(dirPerms),
		ForceNoTmpFile: forceNoTmpFile,
	}

	var ms meta.MetadataStorer

@@ -26,7 +26,6 @@ import (
var (
	glacier bool
	disableNoArchive bool
	setProjectID bool
)

func scoutfsCommand() *cli.Command {
@@ -67,12 +66,6 @@ move interfaces as well as support for tiered filesystems.`,
			EnvVars:     []string{"VGW_CHOWN_GID"},
			Destination: &chowngid,
		},
		&cli.BoolFlag{
			Name:        "projectid",
			Usage:       "set project id on newly created buckets, files, and directories to client account ProjectID",
			EnvVars:     []string{"VGW_SET_PROJECT_ID"},
			Destination: &setProjectID,
		},
		&cli.BoolFlag{
			Name:  "bucketlinks",
			Usage: "allow symlinked directories at bucket level to be treated as buckets",
@@ -120,8 +113,6 @@ func runScoutfs(ctx *cli.Context) error {
	opts.NewDirPerm = fs.FileMode(dirPerms)
	opts.DisableNoArchive = disableNoArchive
	opts.VersioningDir = versioningDir
	opts.ValidateBucketNames = disableStrictBucketNames
	opts.SetProjectID = setProjectID

	be, err := scoutfs.New(ctx.Args().Get(0), opts)
	if err != nil {

@@ -39,7 +39,6 @@ var (
	versioningEnabled bool
	azureTests bool
	tlsStatus bool
	parallel bool
)

func testCommand() *cli.Command {
@@ -116,12 +115,6 @@ func initTestCommands() []*cli.Command {
			Destination: &azureTests,
			Aliases:     []string{"azure"},
		},
		&cli.BoolFlag{
			Name:        "parallel",
			Usage:       "executes the tests concurrently",
			Destination: &parallel,
			Aliases:     []string{"p"},
		},
	},
},
{
@@ -311,9 +304,9 @@ func initTestCommands() []*cli.Command {
	}, extractIntTests()...)
}

type testFunc func(*integration.TestState)
type testFunc func(*integration.S3Conf)

func getAction(tf testFunc) func(ctx *cli.Context) error {
func getAction(tf testFunc) func(*cli.Context) error {
	return func(ctx *cli.Context) error {
		opts := []integration.Option{
			integration.WithAccess(awsID),
@@ -336,14 +329,12 @@ func getAction(tf testFunc) func(ctx *cli.Context) error {
		}

		s := integration.NewS3Conf(opts...)
		ts := integration.NewTestState(ctx.Context, s, parallel)
		tf(ts)
		ts.Wait()
		tf(s)

		fmt.Println()
		fmt.Println("RAN:", integration.RunCount.Load(), "PASS:", integration.PassCount.Load(), "FAIL:", integration.FailCount.Load())
		if integration.FailCount.Load() > 0 {
			return fmt.Errorf("test failed with %v errors", integration.FailCount.Load())
		fmt.Println("RAN:", integration.RunCount, "PASS:", integration.PassCount, "FAIL:", integration.FailCount)
		if integration.FailCount > 0 {
			return fmt.Errorf("test failed with %v errors", integration.FailCount)
		}
		return nil
	}
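// Aside: the new parallel test path calls RunCount.Load() etc., which suggests
// the counters became atomics so concurrent tests can tally safely. The
// underlying pattern with sync/atomic (names and values are illustrative):
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var runCount, passCount, failCount atomic.Int64

func record(pass bool) {
	runCount.Add(1)
	if pass {
		passCount.Add(1)
	} else {
		failCount.Add(1)
	}
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			record(i%3 != 0)
		}(i)
	}
	wg.Wait()
	fmt.Println("RAN:", runCount.Load(), "PASS:", passCount.Load(), "FAIL:", failCount.Load())
}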

@@ -1,51 +0,0 @@
#!/bin/sh
set -e

BIN="${VGW_BINARY:-/usr/local/bin/versitygw}"

if [ ! -x "$BIN" ]; then
	echo "Entrypoint error: versitygw binary not found at $BIN" >&2
	exit 1
fi

# If arguments were provided, run them directly for backward compatibility.
if [ "$#" -gt 0 ]; then
	exec "$BIN" "$@"
fi

backend="${VGW_BACKEND:-}"
if [ -z "$backend" ]; then
	cat >&2 <<'EOF'
No command arguments were provided and VGW_BACKEND is unset.
Set VGW_BACKEND to one of: posix, scoutfs, s3, azure, plugin
or pass explicit arguments to the container to run the versitygw command directly.
EOF
	exit 1
fi

case "$backend" in
	posix|scoutfs|s3|azure|plugin)
		;;
	*)
		echo "VGW_BACKEND invalid backend (was '$backend')." >&2
		exit 1
		;;
esac

set -- "$backend"

if [ -n "${VGW_BACKEND_ARG:-}" ]; then
	set -- "$@" "$VGW_BACKEND_ARG"
fi

if [ -n "${VGW_BACKEND_ARGS:-}" ]; then
	# shellcheck disable=SC2086
	set -- "$@" ${VGW_BACKEND_ARGS}
fi

if [ -n "${VGW_ARGS:-}" ]; then
	# shellcheck disable=SC2086
	set -- "$@" ${VGW_ARGS}
fi

exec "$BIN" "$@"
@@ -23,8 +23,7 @@
# VersityGW Required Options #
##############################

# VGW_BACKEND must be defined, and must be one of: posix, scoutfs, s3, azure,
# or plugin
# VGW_BACKEND must be defined, and must be one of: posix, scoutfs, or s3
# This defines the backend that the VGW will use for data access.
VGW_BACKEND=posix

@@ -120,12 +119,6 @@ ROOT_SECRET_ACCESS_KEY=
# https://<VGW_ENDPOINT>/<bucket>
#VGW_VIRTUAL_DOMAIN=

# By default, versitygw will enforce similar bucket naming rules as described
# in https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
# Set to true to allow legacy or non-DNS-compliant bucket names by skipping
# strict validation checks.
#VGW_DISABLE_STRICT_BUCKET_NAMES=false

###############
# Access Logs #
###############

@@ -279,11 +272,6 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_IAM_LDAP_ROLE_ATR=
#VGW_IAM_LDAP_USER_ID_ATR=
#VGW_IAM_LDAP_GROUP_ID_ATR=
# Disable TLS certificate verification for LDAP connections (insecure, allows
# self-signed certificates). This should only be used in testing environments
# or when using self-signed certificates. The default is false (verification
# enabled).
#VGW_IAM_LDAP_TLS_SKIP_VERIFY=false

# The FreeIPA options will enable the FreeIPA IAM service with accounts stored
# in an external FreeIPA service. Currently the FreeIPA IAM service only

@@ -445,11 +433,6 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_CHOWN_UID=false
#VGW_CHOWN_GID=false

# The VGW_SET_PROJECT_ID option will enable setting account defined ProjectID
# for newly created buckets, files, and directories if the account ProjectID
# is greater than 0 and the filesystem format version supports project IDs.
#VGW_SET_PROJECT_ID=false

# The VGW_BUCKET_LINKS option will enable the gateway to treat symbolic links
# to directories at the top level gateway directory as buckets.
#VGW_BUCKET_LINKS=false

@@ -497,48 +480,3 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_S3_DISABLE_CHECKSUM=false
#VGW_S3_SSL_SKIP_VERIFY=false
#VGW_S3_DEBUG=false

#########
# azure #
#########

# The azure backend allows the gateway to store objects in Azure Blob Storage.
# Buckets created through the gateway map to blob containers within the
# configured storage account. This backend is useful when existing workflows
# expect an S3-compatible interface while data resides in Azure.

# When the azure backend is selected, configure credentials with one of the
# following approaches:
# - Shared key: Define AZ_ACCOUNT_NAME with the storage account name and
#   AZ_ACCESS_KEY with the corresponding account key.
# - SAS token: Set AZ_SAS_TOKEN to an account or container scoped SAS token.
#   Provide AZ_ENDPOINT if the token does not implicitly define the endpoint.
# - Default Azure credentials: Leave AZ_ACCOUNT_NAME and AZ_ACCESS_KEY blank
#   and configure the standard Azure identity environment variables supported
#   by the DefaultAzureCredential chain (e.g. AZURE_CLIENT_ID, AZURE_TENANT_ID,
#   AZURE_CLIENT_SECRET, managed identity, etc.).
# Use AZ_ENDPOINT to override the service URL (for example when targeting
# Azurite or a sovereign cloud). If unset, it defaults to
# https://<account>.blob.core.windows.net/ when an account name is provided.
#AZ_ACCOUNT_NAME=
#AZ_ACCESS_KEY=
#AZ_SAS_TOKEN=
#AZ_ENDPOINT=
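
# Example azure settings (assumed values for a local Azurite instance, using
# the emulator's published well-known development account and key):
#AZ_ACCOUNT_NAME=devstoreaccount1
#AZ_ACCESS_KEY=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
#AZ_ENDPOINT=http://127.0.0.1:10000/devstoreaccount1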

##########
# plugin #
##########

# The plugin backend loads a Go plugin shared object that exposes a variable
# named "Backend" of type *plugins.BackendPlugin. The gateway uses the
# exported constructor to create the backend implementation at runtime.

# Set VGW_BACKEND_ARG to the absolute path of the compiled plugin (.so) file.
# The path must be readable by the gateway service account and remain stable
# across restarts.
#VGW_BACKEND_ARG=/usr/lib/versitygw/plugins/example.so

# Provide the plugin-specific configuration file path via VGW_PLUGIN_CONFIG.
# The gateway automatically forwards this value to the plugin backend when it
# starts up.
#VGW_PLUGIN_CONFIG=/etc/versitygw.d/example-plugin.conf
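
# For reference, a minimal plugin source sketch; only the exported symbol
# name and its pointer type come from the description above, the fields of
# plugins.BackendPlugin and the build command are assumptions:
#
#   // example.go: go build -buildmode=plugin -o example.so example.go
#   package main
#
#   import "github.com/versity/versitygw/plugins"
#
#   // Backend is the symbol the gateway resolves from the shared object.
#   var Backend *plugins.BackendPlugin = &plugins.BackendPlugin{
#       // backend constructor wiring goes here (assumed API)
#   }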

@@ -17,7 +17,7 @@ Group=root

EnvironmentFile=/etc/versitygw.d/%i.conf

ExecStart=/bin/bash -c 'if [[ ! ("${VGW_BACKEND}" == "posix" || "${VGW_BACKEND}" == "scoutfs" || "${VGW_BACKEND}" == "s3" || "${VGW_BACKEND}" == "azure" || "${VGW_BACKEND}" == "plugin") ]]; then echo "VGW_BACKEND environment variable ${VGW_BACKEND} not set to valid backend type"; exit 1; fi && exec /usr/bin/versitygw "$VGW_BACKEND" "$VGW_BACKEND_ARG"'
ExecStart=/bin/bash -c 'if [[ ! ("${VGW_BACKEND}" == "posix" || "${VGW_BACKEND}" == "scoutfs" || "${VGW_BACKEND}" == "s3") ]]; then echo "VGW_BACKEND environment variable not set to one of posix, scoutfs, or s3"; exit 1; fi && exec /usr/bin/versitygw "$VGW_BACKEND" "$VGW_BACKEND_ARG"'

# Let systemd restart this service always
Restart=always

84
go.mod

@@ -5,21 +5,20 @@ go 1.24.0

toolchain go1.24.1

require (
    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3
    github.com/DataDog/datadog-go/v5 v5.8.2
    github.com/aws/aws-sdk-go-v2 v1.41.0
    github.com/aws/aws-sdk-go-v2/service/s3 v1.94.0
    github.com/aws/smithy-go v1.24.0
    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0
    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
    github.com/DataDog/datadog-go/v5 v5.7.1
    github.com/aws/aws-sdk-go-v2 v1.39.0
    github.com/aws/aws-sdk-go-v2/service/s3 v1.88.1
    github.com/aws/smithy-go v1.23.0
    github.com/davecgh/go-spew v1.1.1
    github.com/go-ldap/ldap/v3 v3.4.12
    github.com/gofiber/fiber/v2 v2.52.10
    github.com/go-ldap/ldap/v3 v3.4.11
    github.com/gofiber/fiber/v2 v2.52.9
    github.com/google/go-cmp v0.7.0
    github.com/google/uuid v1.6.0
    github.com/hashicorp/vault-client-go v0.4.3
    github.com/minio/crc64nvme v1.1.1
    github.com/nats-io/nats.go v1.47.0
    github.com/nats-io/nats.go v1.46.0
    github.com/oklog/ulid/v2 v2.1.1
    github.com/pkg/xattr v0.4.12
    github.com/rabbitmq/amqp091-go v1.10.0
@@ -27,65 +26,62 @@ require (
    github.com/smira/go-statsd v1.3.4
    github.com/stretchr/testify v1.11.1
    github.com/urfave/cli/v2 v2.27.7
    github.com/valyala/fasthttp v1.68.0
    github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b
    golang.org/x/sync v0.19.0
    golang.org/x/sys v0.39.0
    github.com/valyala/fasthttp v1.66.0
    github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
    golang.org/x/sync v0.17.0
    golang.org/x/sys v0.36.0
)

require (
    github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
    github.com/Azure/go-ntlmssp v0.1.0 // indirect
    github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
    github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
    github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect
    github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.30.7 // indirect
    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect
    github.com/clipperhouse/stringish v0.1.1 // indirect
    github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 // indirect
    github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 // indirect
    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 // indirect
    github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
    github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
    github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
    github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
    github.com/hashicorp/go-rootcerts v1.0.2 // indirect
    github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
    github.com/klauspost/cpuid/v2 v2.3.0 // indirect
    github.com/kylelemons/godebug v1.1.0 // indirect
    github.com/mitchellh/go-homedir v1.1.0 // indirect
    github.com/nats-io/nkeys v0.4.12 // indirect
    github.com/nats-io/nkeys v0.4.11 // indirect
    github.com/nats-io/nuid v1.0.1 // indirect
    github.com/pierrec/lz4/v4 v4.1.22 // indirect
    github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/ryanuber/go-glob v1.0.0 // indirect
    golang.org/x/crypto v0.46.0 // indirect
    golang.org/x/net v0.48.0 // indirect
    golang.org/x/text v0.32.0 // indirect
    golang.org/x/time v0.14.0 // indirect
    golang.org/x/crypto v0.42.0 // indirect
    golang.org/x/net v0.44.0 // indirect
    golang.org/x/text v0.29.0 // indirect
    golang.org/x/time v0.13.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
)

require (
    github.com/andybalholm/brotli v1.2.0 // indirect
    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
    github.com/aws/aws-sdk-go-v2/config v1.32.5
    github.com/aws/aws-sdk-go-v2/credentials v1.19.5
    github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.16
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
    github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 // indirect
    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect
    github.com/aws/aws-sdk-go-v2/config v1.31.9
    github.com/aws/aws-sdk-go-v2/credentials v1.18.13
    github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.7
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 // indirect
    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 // indirect
    github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
    github.com/klauspost/compress v1.18.2 // indirect
    github.com/klauspost/compress v1.18.0 // indirect
    github.com/mattn/go-colorable v0.1.14 // indirect
    github.com/mattn/go-isatty v0.0.20 // indirect
    github.com/mattn/go-runewidth v0.0.19 // indirect
    github.com/mattn/go-runewidth v0.0.16 // indirect
    github.com/rivo/uniseg v0.4.7 // indirect
    github.com/russross/blackfriday/v2 v2.1.0 // indirect
    github.com/valyala/bytebufferpool v1.0.0 // indirect
    github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect

173
go.sum

@@ -1,74 +1,68 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 h1:ZJJNFaQ86GVKQ9ehwqyAFE6pIfyicpuJ8IkVaPBc6/4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3/go.mod h1:URuDvhmATVKqHBH9/0nOiNKk0+YcwfQ3WkK5PqHKxc8=
github.com/Azure/go-ntlmssp v0.1.0 h1:DjFo6YtWzNqNvQdrwEyr/e4nhU3vRiwenz5QX7sFz+A=
github.com/Azure/go-ntlmssp v0.1.0/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/DataDog/datadog-go/v5 v5.8.2 h1:9IEfH1Mw9AjWwhAMqCAkhbxjuJeMxm2ARX2VdgL+ols=
github.com/DataDog/datadog-go/v5 v5.8.2/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/DataDog/datadog-go/v5 v5.7.1 h1:dNhEwKaO3LJhGYKajl2DjobArfa5R9YF72z3Dy+PH3k=
github.com/DataDog/datadog-go/v5 v5.7.1/go.mod h1:CA9Ih6tb3jtxk+ps1xvTnxmhjr7ldE8TiwrZyrm31ss=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
github.com/aws/aws-sdk-go-v2/config v1.32.5 h1:pz3duhAfUgnxbtVhIK39PGF/AHYyrzGEyRD9Og0QrE8=
github.com/aws/aws-sdk-go-v2/config v1.32.5/go.mod h1:xmDjzSUs/d0BB7ClzYPAZMmgQdrodNjPPhd6bGASwoE=
github.com/aws/aws-sdk-go-v2/credentials v1.19.5 h1:xMo63RlqP3ZZydpJDMBsH9uJ10hgHYfQFIk1cHDXrR4=
github.com/aws/aws-sdk-go-v2/credentials v1.19.5/go.mod h1:hhbH6oRcou+LpXfA/0vPElh/e0M3aFeOblE1sssAAEk=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.16 h1:NkjoiJoSpZqzsRcpM6rlk5AOCLro8JkK8UqekAm/hxM=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.16/go.mod h1:l2736DvrgbOinD65Ksh8fc/WQHBBlvsd+0/ZaxtsmGY=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
github.com/aws/aws-sdk-go-v2/service/s3 v1.94.0 h1:SWTxh/EcUCDVqi/0s26V6pVUq0BBG7kx0tDTmF/hCgA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.94.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.7 h1:eYnlt6QxnFINKzwxP5/Ucs1vkG7VT3Iezmvfgc2waUw=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.7/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4=
github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/aws/aws-sdk-go-v2 v1.39.0 h1:xm5WV/2L4emMRmMjHFykqiA4M/ra0DJVSWUkDyBjbg4=
github.com/aws/aws-sdk-go-v2 v1.39.0/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00=
github.com/aws/aws-sdk-go-v2/config v1.31.9 h1:Q+9hVk8kmDGlC7XcDout/vs0FZhHnuPCPv+TRAYDans=
github.com/aws/aws-sdk-go-v2/config v1.31.9/go.mod h1:OpMrPn6rRbHKU4dAVNCk/EQx8sEQJI7hl9GZZ5u/Y+U=
github.com/aws/aws-sdk-go-v2/credentials v1.18.13 h1:gkpEm65/ZfrGJ3wbFH++Ki7DyaWtsWbK9idX6OXCo2E=
github.com/aws/aws-sdk-go-v2/credentials v1.18.13/go.mod h1:eVTHz1yI2/WIlXTE8f70mcrSxNafXD5sJpTIM9f+kmo=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 h1:Is2tPmieqGS2edBnmOJIbdvOA6Op+rRpaYR60iBAwXM=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7/go.mod h1:F1i5V5421EGci570yABvpIXgRIBPb5JM+lSkHF6Dq5w=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.7 h1:HWLRV4xlO15SsHs295AqwTGNwYG3kP6vAjw2OleUdX8=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.7/go.mod h1:MWZrPol/xFvU6gyQ/gxqgsjufcbetFNE9gzSXPTLofw=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 h1:UCxq0X9O3xrlENdKf1r9eRJoKz/b0AfGkpp3a7FPlhg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7/go.mod h1:rHRoJUNUASj5Z/0eqI4w32vKvC7atoWR0jC+IkmVH8k=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 h1:Y6DTZUn7ZUC4th9FMBbo8LVE+1fyq3ofw+tRwkUd3PY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7/go.mod h1:x3XE6vMnU9QvHN/Wrx2s44kwzV2o2g5x/siw4ZUJ9g8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7 h1:BszAktdUo2xlzmYHjWMq70DqJ7cROM8iBd3f6hrpuMQ=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7/go.mod h1:XJ1yHki/P7ZPuG4fd3f0Pg/dSGA2cTQBCLw82MH2H48=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7 h1:zmZ8qvtE9chfhBPuKB2aQFxW5F/rpwXUgmcVCgQzqRw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7/go.mod h1:vVYfbpd2l+pKqlSIDIOgouxNsGu5il9uDp0ooWb0jys=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 h1:mLgc5QIgOy26qyh5bvW+nDoAppxgn3J2WV3m9ewq7+8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7/go.mod h1:wXb/eQnqt8mDQIQTTmcw58B5mYGxzLGZGK8PWNFZ0BA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7 h1:u3VbDKUCWarWiU+aIUK4gjTr/wQFXV17y3hgNno9fcA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7/go.mod h1:/OuMQwhSyRapYxq6ZNpPer8juGNrB4P5Oz8bZ2cgjQE=
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.1 h1:+RpGuaQ72qnU83qBKVwxkznewEdAGhIWo/PQCmkhhog=
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.1/go.mod h1:xajPTguLoeQMAOE44AAP2RQoUhF8ey1g5IFHARv71po=
github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 h1:7PKX3VYsZ8LUWceVRuv0+PU+E7OtQb1lgmi5vmUE9CM=
github.com/aws/aws-sdk-go-v2/service/sso v1.29.3/go.mod h1:Ql6jE9kyyWI5JHn+61UT/Y5Z0oyVJGmgmJbZD5g4unY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5 h1:gBBZmSuIySGqDLtXdZiYpwyzbJKXQD2jjT0oDY6ywbo=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5/go.mod h1:XclEty74bsGBCr1s0VSaA11hQ4ZidK4viWK7rRfO88I=
github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 h1:PR00NXRYgY4FWHqOGx3fC3lhVKjsp1GdloDv2ynMSd8=
github.com/aws/aws-sdk-go-v2/service/sts v1.38.4/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8=
github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -78,10 +72,10 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4=
github.com/go-ldap/ldap/v3 v3.4.12/go.mod h1:+SPAGcTtOfmGsCb3h1RFiq4xpp4N636G75OEace8lNo=
github.com/gofiber/fiber/v2 v2.52.10 h1:jRHROi2BuNti6NYXmZ6gbNSfT3zj/8c0xy94GOU5elY=
github.com/gofiber/fiber/v2 v2.52.10/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU=
github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM=
github.com/gofiber/fiber/v2 v2.52.9 h1:YjKl5DOiyP3j0mO61u3NTmK7or8GzzWzCFzkboyP5cw=
github.com/gofiber/fiber/v2 v2.52.9/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
@@ -117,10 +111,8 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -131,16 +123,14 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.47.0 h1:YQdADw6J/UfGUd2Oy6tn4Hq6YHxCaJrVKayxxFqYrgM=
github.com/nats-io/nats.go v1.47.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.12 h1:nssm7JKOG9/x4J8II47VWCL1Ds29avyiQDRn0ckMvDc=
github.com/nats-io/nkeys v0.4.12/go.mod h1:MT59A1HYcjIcyQDJStTfaOY6vhy9XTUjOFo+SVsvpBg=
github.com/nats-io/nats.go v1.46.0 h1:iUcX+MLT0HHXskGkz+Sg20sXrPtJLsOojMDTDzOHSb8=
github.com/nats-io/nats.go v1.46.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
@@ -157,6 +147,9 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -183,10 +176,10 @@ github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.68.0 h1:v12Nx16iepr8r9ySOwqI+5RBJ/DqTxhOy1HrHoDFnok=
github.com/valyala/fasthttp v1.68.0/go.mod h1:5EXiRfYQAoiO/khu4oU9VISC/eVY6JqmSpPJoHCKsz4=
github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b h1:kuqsuYRMG1c6YXBAQvWO7CiurlpYtjDJWI6oZ2K/ZZE=
github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/valyala/fasthttp v1.66.0 h1:M87A0Z7EayeyNaV6pfO3tUTUiYO0dZfEJnRGXTVNuyU=
github.com/valyala/fasthttp v1.66.0/go.mod h1:Y4eC+zwoocmXSVCB1JmhNbYtS7tZPRI2ztPB72EVObs=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44 h1:Wx1o3pNrCzsHIIDyZ2MLRr6tF/1FhAr7HNDn80QqDWE=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
@@ -202,18 +195,18 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -224,15 +217,15 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=

11
runtests.sh

@@ -16,6 +16,7 @@ echo "Generating TLS certificate and key in the cert.pem and key.pem files"
openssl genpkey -algorithm RSA -out key.pem -pkeyopt rsa_keygen_bits:2048
openssl req -new -x509 -key key.pem -out cert.pem -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"

echo "Running the sdk test over http"
# run server in background, not versioning-enabled
# port: 7070 (default)
@@ -32,7 +33,7 @@ fi

# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow --parallel; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow; then
    echo "full flow tests failed"
    kill $GW_PID
    exit 1
@@ -69,7 +70,7 @@ fi

# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 full-flow --parallel; then
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 full-flow; then
    echo "full flow tests failed"
    kill $GW_HTTPS_PID
    exit 1
@@ -89,6 +90,7 @@ fi

kill $GW_HTTPS_PID

echo "Running the sdk test over http against the versioning-enabled gateway"
# run server in background, versioning-enabled
# port: 7072
@@ -106,7 +108,7 @@ fi

# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 full-flow -vs --parallel; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 full-flow -vs; then
    echo "versioning-enabled full-flow tests failed"
    kill $GW_VS_PID
    exit 1
@@ -138,7 +140,7 @@ fi

# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 full-flow -vs --parallel; then
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 full-flow -vs; then
    echo "versioning-enabled full-flow tests failed"
    kill $GW_VS_HTTPS_PID
    exit 1
@@ -160,3 +162,4 @@ exit 0
# go tool covdata percent -i=/tmp/covdata
# go tool covdata textfmt -i=/tmp/covdata -o profile.txt
# go tool cover -html=profile.txt

@@ -35,42 +35,42 @@ func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMSe
    // CreateUser admin api
    app.Patch("/create-user",
        controllers.ProcessHandlers(ctrl.CreateUser, metrics.ActionAdminCreateUser, services,
            middlewares.VerifyV4Signature(root, iam, region, false, true),
            middlewares.VerifyV4Signature(root, iam, region, false),
            middlewares.IsAdmin(metrics.ActionAdminCreateUser),
        ))

    // DeleteUsers admin api
    app.Patch("/delete-user",
        controllers.ProcessHandlers(ctrl.DeleteUser, metrics.ActionAdminDeleteUser, services,
            middlewares.VerifyV4Signature(root, iam, region, false, true),
            middlewares.VerifyV4Signature(root, iam, region, false),
            middlewares.IsAdmin(metrics.ActionAdminDeleteUser),
        ))

    // UpdateUser admin api
    app.Patch("/update-user",
        controllers.ProcessHandlers(ctrl.UpdateUser, metrics.ActionAdminUpdateUser, services,
            middlewares.VerifyV4Signature(root, iam, region, false, true),
            middlewares.VerifyV4Signature(root, iam, region, false),
            middlewares.IsAdmin(metrics.ActionAdminUpdateUser),
        ))

    // ListUsers admin api
    app.Patch("/list-users",
        controllers.ProcessHandlers(ctrl.ListUsers, metrics.ActionAdminListUsers, services,
            middlewares.VerifyV4Signature(root, iam, region, false, true),
            middlewares.VerifyV4Signature(root, iam, region, false),
            middlewares.IsAdmin(metrics.ActionAdminListUsers),
        ))

    // ChangeBucketOwner admin api
    app.Patch("/change-bucket-owner",
        controllers.ProcessHandlers(ctrl.ChangeBucketOwner, metrics.ActionAdminChangeBucketOwner, services,
            middlewares.VerifyV4Signature(root, iam, region, false, true),
            middlewares.VerifyV4Signature(root, iam, region, false),
            middlewares.IsAdmin(metrics.ActionAdminChangeBucketOwner),
        ))

    // ListBucketsAndOwners admin api
    app.Patch("/list-buckets",
        controllers.ProcessHandlers(ctrl.ListBuckets, metrics.ActionAdminListBuckets, services,
            middlewares.VerifyV4Signature(root, iam, region, false, true),
            middlewares.VerifyV4Signature(root, iam, region, false),
            middlewares.IsAdmin(metrics.ActionAdminListBuckets),
        ))
}

@@ -100,8 +100,3 @@ func (sa *S3AdminServer) Serve() (err error) {
    }
    return sa.app.Listen(sa.port)
}

// Shutdown gracefully shuts down the server with a context timeout
func (sa S3AdminServer) Shutdown() error {
    return sa.app.ShutdownWithTimeout(shutDownDuration)
}

@@ -59,7 +59,7 @@ var _ backend.Backend = &BackendMock{}
//    DeleteObjectFunc: func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
//        panic("mock out the DeleteObject method")
//    },
//    DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) error {
//    DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string) error {
//        panic("mock out the DeleteObjectTagging method")
//    },
//    DeleteObjectsFunc: func(contextMoqParam context.Context, deleteObjectsInput *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
@@ -101,7 +101,7 @@ var _ backend.Backend = &BackendMock{}
//    GetObjectRetentionFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) ([]byte, error) {
//        panic("mock out the GetObjectRetention method")
//    },
//    GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) (map[string]string, error) {
//    GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string) (map[string]string, error) {
//        panic("mock out the GetObjectTagging method")
//    },
//    HeadBucketFunc: func(contextMoqParam context.Context, headBucketInput *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
@@ -164,7 +164,7 @@ var _ backend.Backend = &BackendMock{}
//    PutObjectRetentionFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string, retention []byte) error {
//        panic("mock out the PutObjectRetention method")
//    },
//    PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string, tags map[string]string) error {
//    PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error {
//        panic("mock out the PutObjectTagging method")
//    },
//    RestoreObjectFunc: func(contextMoqParam context.Context, restoreObjectInput *s3.RestoreObjectInput) error {
@@ -229,7 +229,7 @@ type BackendMock struct {
    DeleteObjectFunc func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)

    // DeleteObjectTaggingFunc mocks the DeleteObjectTagging method.
    DeleteObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) error
    DeleteObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string) error

    // DeleteObjectsFunc mocks the DeleteObjects method.
    DeleteObjectsFunc func(contextMoqParam context.Context, deleteObjectsInput *s3.DeleteObjectsInput) (s3response.DeleteResult, error)
@@ -271,7 +271,7 @@ type BackendMock struct {
    GetObjectRetentionFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) ([]byte, error)

    // GetObjectTaggingFunc mocks the GetObjectTagging method.
    GetObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) (map[string]string, error)
    GetObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string) (map[string]string, error)

    // HeadBucketFunc mocks the HeadBucket method.
    HeadBucketFunc func(contextMoqParam context.Context, headBucketInput *s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
@@ -334,7 +334,7 @@ type BackendMock struct {
    PutObjectRetentionFunc func(contextMoqParam context.Context, bucket string, object string, versionId string, retention []byte) error

    // PutObjectTaggingFunc mocks the PutObjectTagging method.
    PutObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, versionId string, tags map[string]string) error
    PutObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error

    // RestoreObjectFunc mocks the RestoreObject method.
    RestoreObjectFunc func(contextMoqParam context.Context, restoreObjectInput *s3.RestoreObjectInput) error
@@ -452,8 +452,6 @@ type BackendMock struct {
            Bucket string
            // Object is the object argument value.
            Object string
            // VersionId is the versionId argument value.
            VersionId string
        }
        // DeleteObjects holds details about calls to the DeleteObjects method.
        DeleteObjects []struct {
@@ -562,8 +560,6 @@ type BackendMock struct {
            Bucket string
            // Object is the object argument value.
            Object string
            // VersionId is the versionId argument value.
            VersionId string
        }
        // HeadBucket holds details about calls to the HeadBucket method.
        HeadBucket []struct {
@@ -737,8 +733,6 @@ type BackendMock struct {
            Bucket string
            // Object is the object argument value.
            Object string
            // VersionId is the versionId argument value.
            VersionId string
            // Tags is the tags argument value.
            Tags map[string]string
        }
@@ -1274,7 +1268,7 @@ func (mock *BackendMock) DeleteObjectCalls() []struct {
}

// DeleteObjectTagging calls DeleteObjectTaggingFunc.
func (mock *BackendMock) DeleteObjectTagging(contextMoqParam context.Context, bucket string, object string, versionId string) error {
func (mock *BackendMock) DeleteObjectTagging(contextMoqParam context.Context, bucket string, object string) error {
    if mock.DeleteObjectTaggingFunc == nil {
        panic("BackendMock.DeleteObjectTaggingFunc: method is nil but Backend.DeleteObjectTagging was just called")
    }
@@ -1282,17 +1276,15 @@ func (mock *BackendMock) DeleteObjectTagging(contextMoqParam context.Context, bu
        ContextMoqParam context.Context
        Bucket          string
        Object          string
        VersionId       string
    }{
        ContextMoqParam: contextMoqParam,
        Bucket:          bucket,
        Object:          object,
        VersionId:       versionId,
    }
    mock.lockDeleteObjectTagging.Lock()
    mock.calls.DeleteObjectTagging = append(mock.calls.DeleteObjectTagging, callInfo)
    mock.lockDeleteObjectTagging.Unlock()
    return mock.DeleteObjectTaggingFunc(contextMoqParam, bucket, object, versionId)
    return mock.DeleteObjectTaggingFunc(contextMoqParam, bucket, object)
}

// DeleteObjectTaggingCalls gets all the calls that were made to DeleteObjectTagging.
@@ -1303,13 +1295,11 @@ func (mock *BackendMock) DeleteObjectTaggingCalls() []struct {
    ContextMoqParam context.Context
    Bucket          string
    Object          string
    VersionId       string
} {
    var calls []struct {
        ContextMoqParam context.Context
        Bucket          string
        Object          string
        VersionId       string
    }
    mock.lockDeleteObjectTagging.RLock()
    calls = mock.calls.DeleteObjectTagging
@@ -1802,7 +1792,7 @@ func (mock *BackendMock) GetObjectRetentionCalls() []struct {
}

// GetObjectTagging calls GetObjectTaggingFunc.
func (mock *BackendMock) GetObjectTagging(contextMoqParam context.Context, bucket string, object string, versionId string) (map[string]string, error) {
func (mock *BackendMock) GetObjectTagging(contextMoqParam context.Context, bucket string, object string) (map[string]string, error) {
    if mock.GetObjectTaggingFunc == nil {
        panic("BackendMock.GetObjectTaggingFunc: method is nil but Backend.GetObjectTagging was just called")
    }
@@ -1810,17 +1800,15 @@ func (mock *BackendMock) GetObjectTagging(contextMoqParam context.Context, bucke
        ContextMoqParam context.Context
        Bucket          string
        Object          string
        VersionId       string
    }{
        ContextMoqParam: contextMoqParam,
        Bucket:          bucket,
        Object:          object,
        VersionId:       versionId,
    }
    mock.lockGetObjectTagging.Lock()
    mock.calls.GetObjectTagging = append(mock.calls.GetObjectTagging, callInfo)
    mock.lockGetObjectTagging.Unlock()
    return mock.GetObjectTaggingFunc(contextMoqParam, bucket, object, versionId)
    return mock.GetObjectTaggingFunc(contextMoqParam, bucket, object)
}

// GetObjectTaggingCalls gets all the calls that were made to GetObjectTagging.
@@ -1831,13 +1819,11 @@ func (mock *BackendMock) GetObjectTaggingCalls() []struct {
    ContextMoqParam context.Context
    Bucket          string
    Object          string
    VersionId       string
} {
    var calls []struct {
        ContextMoqParam context.Context
        Bucket          string
        Object          string
        VersionId       string
    }
    mock.lockGetObjectTagging.RLock()
    calls = mock.calls.GetObjectTagging
@@ -2614,7 +2600,7 @@ func (mock *BackendMock) PutObjectRetentionCalls() []struct {
}

// PutObjectTagging calls PutObjectTaggingFunc.
func (mock *BackendMock) PutObjectTagging(contextMoqParam context.Context, bucket string, object string, versionId string, tags map[string]string) error {
func (mock *BackendMock) PutObjectTagging(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error {
    if mock.PutObjectTaggingFunc == nil {
        panic("BackendMock.PutObjectTaggingFunc: method is nil but Backend.PutObjectTagging was just called")
    }
@@ -2622,19 +2608,17 @@ func (mock *BackendMock) PutObjectTagging(contextMoqParam context.Context, bucke
        ContextMoqParam context.Context
        Bucket          string
        Object          string
        VersionId       string
        Tags            map[string]string
    }{
        ContextMoqParam: contextMoqParam,
        Bucket:          bucket,
        Object:          object,
        VersionId:       versionId,
        Tags:            tags,
    }
    mock.lockPutObjectTagging.Lock()
    mock.calls.PutObjectTagging = append(mock.calls.PutObjectTagging, callInfo)
    mock.lockPutObjectTagging.Unlock()
    return mock.PutObjectTaggingFunc(contextMoqParam, bucket, object, versionId, tags)
    return mock.PutObjectTaggingFunc(contextMoqParam, bucket, object, tags)
}

// PutObjectTaggingCalls gets all the calls that were made to PutObjectTagging.
@@ -2645,14 +2629,12 @@ func (mock *BackendMock) PutObjectTaggingCalls() []struct {
    ContextMoqParam context.Context
    Bucket          string
    Object          string
    VersionId       string
    Tags            map[string]string
} {
    var calls []struct {
        ContextMoqParam context.Context
        Bucket          string
        Object          string
        VersionId       string
        Tags            map[string]string
    }
    mock.lockPutObjectTagging.RLock()

@@ -41,6 +41,7 @@ type S3ApiController struct {

const (
    // time constants
    iso8601Format             = "20060102T150405Z"
    iso8601TimeFormatExtended = "Mon Jan _2 15:04:05 2006"
    timefmt                   = "Mon, 02 Jan 2006 15:04:05 GMT"

@@ -207,32 +208,12 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
            s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
    }

    // At this point, the S3 action has succeeded in the backend and
    // the event has already occurred. This means the S3 event must be sent,
    // even if unexpected issues arise while further parsing the response payload.
    if svc.EventSender != nil && opts.EventName != "" {
        svc.EventSender.SendEvent(ctx, s3event.EventMeta{
            BucketOwner: opts.BucketOwner,
            ObjectSize:  opts.ObjectSize,
            ObjectETag:  opts.ObjectETag,
            VersionId:   opts.VersionId,
            EventName:   opts.EventName,
        })
    }

    if opts.Status == 0 {
        opts.Status = http.StatusOK
    }

    // if no data payload is provided, send the response status
    if response.Data == nil {
        if svc.Logger != nil {
            svc.Logger.Log(ctx, nil, []byte{}, s3log.LogMeta{
                Action:      s3action,
                BucketOwner: opts.BucketOwner,
                ObjectSize:  opts.ObjectSize,
            })
        }
        ctx.Status(opts.Status)
        return nil
    }
@@ -246,13 +227,6 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
    } else {
        if responseBytes, err = xml.Marshal(response.Data); err != nil {
            debuglogger.Logf("Internal Error, %v", err)
            if svc.Logger != nil {
                svc.Logger.Log(ctx, err, nil, s3log.LogMeta{
                    Action:      s3action,
                    BucketOwner: opts.BucketOwner,
                    ObjectSize:  opts.ObjectSize,
                })
            }
            return ctx.Status(http.StatusInternalServerError).Send(s3err.GetAPIErrorResponse(
                s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
        }
@@ -262,19 +236,29 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
        }
    }

    if svc.Logger != nil {
        svc.Logger.Log(ctx, nil, responseBytes, s3log.LogMeta{
            Action:      s3action,
            BucketOwner: opts.BucketOwner,
            ObjectSize:  opts.ObjectSize,
        })
    }

    if svc.EventSender != nil {
        svc.EventSender.SendEvent(ctx, s3event.EventMeta{
            BucketOwner: opts.BucketOwner,
            ObjectSize:  opts.ObjectSize,
            ObjectETag:  opts.ObjectETag,
            VersionId:   opts.VersionId,
            EventName:   opts.EventName,
        })
    }

    if ok {
        if len(responseBytes) > 0 {
            ctx.Response().Header.Set("Content-Length", fmt.Sprint(len(responseBytes)))
        }

        if svc.Logger != nil {
            svc.Logger.Log(ctx, nil, responseBytes, s3log.LogMeta{
                Action:      s3action,
                BucketOwner: opts.BucketOwner,
                ObjectSize:  opts.ObjectSize,
            })
        }

        return ctx.Send(responseBytes)
    }

@@ -282,13 +266,6 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
    if msglen > maxXMLBodyLen {
        debuglogger.Logf("XML encoded body len %v exceeds max len %v",
            msglen, maxXMLBodyLen)
        if svc.Logger != nil {
            svc.Logger.Log(ctx, err, []byte{}, s3log.LogMeta{
                Action:      s3action,
                BucketOwner: opts.BucketOwner,
                ObjectSize:  opts.ObjectSize,
            })
        }
        ctx.Status(http.StatusInternalServerError)

        return ctx.Send(s3err.GetAPIErrorResponse(
@@ -301,14 +278,6 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
    // Set the Content-Length header
    ctx.Response().Header.SetContentLength(msglen)

    if svc.Logger != nil {
        svc.Logger.Log(ctx, nil, responseBytes, s3log.LogMeta{
            Action:      s3action,
            BucketOwner: opts.BucketOwner,
            ObjectSize:  opts.ObjectSize,
        })
    }

    return ctx.Send(res)
}

@@ -317,8 +286,6 @@ func SetResponseHeaders(ctx *fiber.Ctx, headers map[string]*string) {
    if headers == nil {
        return
    }

    ctx.Response().Header.DisableNormalizing()
    for key, val := range headers {
        if val == nil || *val == "" {
            continue

@@ -15,13 +15,10 @@
package controllers

import (
    "errors"

    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/auth"
    "github.com/versity/versitygw/s3api/utils"
    "github.com/versity/versitygw/s3err"
)

func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {
@@ -45,9 +42,6 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {
    })
    if err != nil {
        return &Response{
            Headers: map[string]*string{
                "x-amz-bucket-region": utils.GetStringPtr(region),
            },
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
@@ -60,17 +54,6 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {
    })

    if err != nil {
        if errors.Is(err, s3err.GetAPIError(s3err.ErrAccessDenied)) {
            return &Response{
                // access denied for head object still returns region header
                Headers: map[string]*string{
                    "x-amz-bucket-region": utils.GetStringPtr(region),
                },
                MetaOpts: &MetaOptions{
                    BucketOwner: parsedAcl.Owner,
                },
            }, err
        }
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
@@ -80,8 +63,8 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {

    return &Response{
        Headers: map[string]*string{
            "x-amz-access-point-alias": utils.GetStringPtr("false"),
            "x-amz-bucket-region":      utils.GetStringPtr(region),
            "X-Amz-Access-Point-Alias": utils.GetStringPtr("false"),
            "X-Amz-Bucket-Region":      utils.GetStringPtr(region),
        },
        MetaOpts: &MetaOptions{
            BucketOwner: parsedAcl.Owner,
@@ -48,9 +48,6 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
|
||||
},
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-bucket-region": utils.GetStringPtr(region),
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
},
|
||||
@@ -101,8 +98,8 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
|
||||
output: testOutput{
|
||||
response: &Response{
|
||||
Headers: map[string]*string{
|
||||
"x-amz-access-point-alias": utils.GetStringPtr("false"),
|
||||
"x-amz-bucket-region": utils.GetStringPtr(region),
|
||||
"X-Amz-Access-Point-Alias": utils.GetStringPtr("false"),
|
||||
"X-Amz-Bucket-Region": utils.GetStringPtr(region),
|
||||
},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: "root",
|
||||
|
||||
@@ -67,7 +67,7 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
}
|
||||
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, dObj.Objects, bypass, IsBucketPublic, c.be, false)
|
||||
err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, dObj.Objects, bypass, IsBucketPublic, c.be)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
|
||||
@@ -15,6 +15,7 @@
package controllers

import (
	"bytes"
	"encoding/xml"
	"errors"
	"fmt"
@@ -270,6 +271,37 @@ func (c S3ApiController) PutBucketCors(ctx *fiber.Ctx) (*Response, error) {
		}, err
	}

	algo, checksums, err := utils.ParseChecksumHeadersAndSdkAlgo(ctx)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	if algo != "" {
		rdr, err := utils.NewHashReader(bytes.NewReader(body), checksums[algo], utils.HashType(strings.ToLower(string(algo))))
		if err != nil {
			return &Response{
				MetaOpts: &MetaOptions{
					BucketOwner: parsedAcl.Owner,
				},
			}, err
		}

		// Read back into the same body buffer to avoid duplicating the data
		_, err = rdr.Read(body)
		if err != nil {
			debuglogger.Logf("failed to read hash calculation data: %v", err)
			return &Response{
				MetaOpts: &MetaOptions{
					BucketOwner: parsedAcl.Owner,
				},
			}, err
		}
	}

	err = c.be.PutBucketCors(ctx.Context(), bucket, body)
	return &Response{
		MetaOpts: &MetaOptions{
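utils.NewHashReader itself is not shown in this diff; the sketch below illustrates the underlying check it performs for a SHA-256 checksum, assuming AWS's base64 digest encoding for the x-amz-checksum-sha256 header value:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
)

// verifySHA256 reads the full body through a hash and compares the
// base64-encoded digest with the client-provided checksum value.
func verifySHA256(body []byte, expected string) error {
	h := sha256.New()
	if _, err := io.Copy(h, bytes.NewReader(body)); err != nil {
		return err
	}
	got := base64.StdEncoding.EncodeToString(h.Sum(nil))
	if got != expected {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, expected)
	}
	return nil
}

func main() {
	body := []byte("<CORSConfiguration/>")
	sum := sha256.Sum256(body)
	fmt.Println(verifySHA256(body, base64.StdEncoding.EncodeToString(sum[:]))) // <nil>
}
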
@@ -352,15 +384,6 @@ func (c S3ApiController) PutBucketAcl(ctx *fiber.Ctx) (*Response, error) {
		}, err
	}

	err = auth.ValidateCannedACL(acl)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	ownership, err := c.be.GetBucketOwnershipControls(ctx.Context(), bucket)
	if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrOwnershipControlsNotFound)) {
		return &Response{
@@ -428,6 +451,14 @@
			AccessControlPolicy: &accessControlPolicy,
		}
	} else if acl != "" {
		if acl != "private" && acl != "public-read" && acl != "public-read-write" {
			debuglogger.Logf("invalid acl: %q", acl)
			return &Response{
				MetaOpts: &MetaOptions{
					BucketOwner: parsedAcl.Owner,
				},
			}, s3err.GetAPIError(s3err.ErrInvalidRequest)
		}
		if grants != "" {
			debuglogger.Logf("invalid request: %q (grants) %q (acl)",
				grants, acl)
@@ -501,28 +532,14 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
	// validate the bucket name
	if ok := utils.IsValidBucketName(bucket); !ok {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: acct.Access,
			},
			MetaOpts: &MetaOptions{},
		}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	// validate bucket canned acl
	err := auth.ValidateCannedACL(acl)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: acct.Access,
			},
		}, err
	}

	// validate the object ownership value
	if ok := utils.IsValidOwnership(objectOwnership); !ok {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: acct.Access,
			},
			MetaOpts: &MetaOptions{},
		}, s3err.APIError{
			Code: "InvalidArgument",
			Description: fmt.Sprintf("Invalid x-amz-object-ownership header: %v", objectOwnership),
@@ -548,32 +565,6 @@
		}, s3err.GetAPIError(s3err.ErrBothCannedAndHeaderGrants)
	}

	var body s3response.CreateBucketConfiguration
	if len(ctx.Body()) != 0 {
		// request body is optional for CreateBucket
		err := xml.Unmarshal(ctx.Body(), &body)
		if err != nil {
			debuglogger.Logf("failed to parse the request body: %v", err)
			return &Response{
				MetaOpts: &MetaOptions{
					BucketOwner: acct.Access,
				},
			}, s3err.GetAPIError(s3err.ErrMalformedXML)
		}

		if body.LocationConstraint != "" {
			region := utils.ContextKeyRegion.Get(ctx).(string)
			if body.LocationConstraint != region {
				debuglogger.Logf("invalid location constraint: %s", body.LocationConstraint)
				return &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: acct.Access,
					},
				}, s3err.GetAPIError(s3err.ErrInvalidLocationConstraint)
			}
		}
	}

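For reference, the optional CreateBucket body unmarshaled above follows the standard S3 CreateBucketConfiguration shape; a minimal example payload (the region value is illustrative):

// Illustrative CreateBucket request body; the handler rejects it unless
// LocationConstraint matches the gateway's configured region.
const createBucketBody = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    <LocationConstraint>us-west-1</LocationConstraint>
</CreateBucketConfiguration>`
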
	defACL := auth.ACL{
		Owner: acct.Access,
	}
@@ -603,9 +594,6 @@
			Bucket: &bucket,
			ObjectOwnership: objectOwnership,
			ObjectLockEnabledForBucket: &lockEnabled,
			CreateBucketConfiguration: &types.CreateBucketConfiguration{
				Tags: body.TagSet,
			},
		}, updAcl)
	return &Response{
		MetaOpts: &MetaOptions{

@@ -528,6 +528,22 @@ func TestS3ApiController_PutBucketCors(t *testing.T) {
				err: s3err.GetUnsopportedCORSMethodErr("invalid_method"),
			},
		},
		{
			name: "invalid checksum algo",
			input: testInput{
				locals: defaultLocals,
				body: validBody,
				headers: map[string]string{
					"X-Amz-Sdk-Checksum-Algorithm": "invalid_algo",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{BucketOwner: "root"},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm),
			},
		},
		{
			name: "backend error",
			input: testInput{
@@ -695,11 +711,6 @@ func TestS3ApiController_CreateBucket(t *testing.T) {
		Role: auth.RoleUser,
	}

	invLocConstBody, err := xml.Marshal(s3response.CreateBucketConfiguration{
		LocationConstraint: "us-west-1",
	})
	assert.NoError(t, err)

	tests := []struct {
		name string
		input testInput
@@ -729,62 +740,11 @@
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: adminAcc.Access,
					},
					MetaOpts: &MetaOptions{},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidBucketName),
			},
		},
		{
			name: "malformed body",
			input: testInput{
				locals: map[utils.ContextKey]any{
					utils.ContextKeyAccount: adminAcc,
				},
				body: []byte("invalid_body"),
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{BucketOwner: adminAcc.Access},
				},
				err: s3err.GetAPIError(s3err.ErrMalformedXML),
			},
		},

		{
			name: "invalid canned acl",
			input: testInput{
				locals: map[utils.ContextKey]any{
					utils.ContextKeyAccount: adminAcc,
				},
				headers: map[string]string{
					"x-amz-acl": "invalid_acl",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{BucketOwner: adminAcc.Access},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidArgument),
			},
		},
		{
			name: "invalid location constraint",
			input: testInput{
				locals: map[utils.ContextKey]any{
					utils.ContextKeyAccount: adminAcc,
					utils.ContextKeyRegion: "us-east-1",
				},
				body: invLocConstBody,
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{BucketOwner: adminAcc.Access},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidLocationConstraint),
			},
		},
		{
			name: "invalid ownership",
			input: testInput{
@@ -797,9 +757,7 @@
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: adminAcc.Access,
					},
					MetaOpts: &MetaOptions{},
				},
				err: s3err.APIError{
					Code: "InvalidArgument",
@@ -1101,7 +1059,7 @@ func TestS3ApiController_PutBucketAcl(t *testing.T) {
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidArgument),
				err: s3err.GetAPIError(s3err.ErrInvalidRequest),
			},
		},
		{

@@ -30,17 +30,11 @@ import (
func (c S3ApiController) DeleteObjectTagging(ctx *fiber.Ctx) (*Response, error) {
	bucket := ctx.Params("bucket")
	key := strings.TrimPrefix(ctx.Path(), fmt.Sprintf("/%s/", bucket))
	versionId := ctx.Query("versionId")
	acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
	isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
	isBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
	parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)

	action := auth.DeleteObjectTaggingAction
	if versionId != "" {
		action = auth.DeleteObjectVersionTaggingAction
	}

	err := auth.VerifyAccess(ctx.Context(), c.be,
		auth.AccessOptions{
			Readonly: c.readonly,
@@ -50,7 +44,7 @@ func (c S3ApiController) DeleteObjectTagging(ctx *fiber.Ctx) (*Response, error)
			Acc: acct,
			Bucket: bucket,
			Object: key,
			Action: action,
			Action: auth.DeleteObjectTaggingAction,
			IsPublicRequest: isBucketPublic,
		})
	if err != nil {
@@ -61,16 +55,7 @@
		}, err
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	err = c.be.DeleteObjectTagging(ctx.Context(), bucket, key, versionId)
	err = c.be.DeleteObjectTagging(ctx.Context(), bucket, key)
	return &Response{
		MetaOpts: &MetaOptions{
			Status: http.StatusNoContent,
@@ -139,10 +124,7 @@ func (c S3ApiController) DeleteObject(ctx *fiber.Ctx) (*Response, error) {
	isBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
	parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)

	action := auth.DeleteObjectAction
	if versionId != "" {
		action = auth.DeleteObjectVersionAction
	}
	// TODO: check the s3:DeleteObjectVersion policy in case a user tries to delete a version of an object

	err := auth.VerifyAccess(ctx.Context(), c.be,
		auth.AccessOptions{
@@ -153,7 +135,7 @@
			Acc: acct,
			Bucket: bucket,
			Object: key,
			Action: action,
			Action: auth.DeleteObjectAction,
			IsPublicRequest: isBucketPublic,
		})
	if err != nil {
@@ -164,15 +146,6 @@
		}, err
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	err = auth.CheckObjectAccess(
		ctx.Context(),
		bucket,
@@ -186,7 +159,6 @@
		bypass,
		isBucketPublic,
		c.be,
		false,
	)
	if err != nil {
		return &Response{

@@ -45,23 +45,6 @@ func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid versionId",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "backend returns error",
			input: testInput{
@@ -98,7 +81,7 @@
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			be := &BackendMock{
				DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object, versionId string) error {
				DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object string) error {
					return tt.input.beErr
				},
				GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
@@ -116,8 +99,7 @@
				tt.output.response,
				tt.output.err,
				ctxInputs{
					locals: tt.input.locals,
					queries: tt.input.queries,
					locals: tt.input.locals,
				})
		})
	}
@@ -224,23 +206,6 @@ func TestS3ApiController_DeleteObject(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid versionId",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "object locked",
			input: testInput{
@@ -324,8 +289,7 @@
				tt.output.response,
				tt.output.err,
				ctxInputs{
					locals: tt.input.locals,
					queries: tt.input.queries,
					locals: tt.input.locals,
				})
		})
	}

@@ -35,17 +35,11 @@ import (
func (c S3ApiController) GetObjectTagging(ctx *fiber.Ctx) (*Response, error) {
	bucket := ctx.Params("bucket")
	key := strings.TrimPrefix(ctx.Path(), fmt.Sprintf("/%s/", bucket))
	versionId := ctx.Query("versionId")
	acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
	isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
	parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
	isPublicBucket := utils.ContextKeyPublicBucket.IsSet(ctx)

	action := auth.GetObjectTaggingAction
	if versionId != "" {
		action = auth.GetObjectVersionTaggingAction
	}

	err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
		Readonly: c.readonly,
		Acl: parsedAcl,
@@ -54,7 +48,7 @@
		Acc: acct,
		Bucket: bucket,
		Object: key,
		Action: action,
		Action: auth.GetObjectTaggingAction,
		IsPublicRequest: isPublicBucket,
	})
	if err != nil {
@@ -65,16 +59,7 @@
		}, err
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	data, err := c.be.GetObjectTagging(ctx.Context(), bucket, key, versionId)
	data, err := c.be.GetObjectTagging(ctx.Context(), bucket, key)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
@@ -96,7 +81,7 @@
		MetaOpts: &MetaOptions{
			BucketOwner: parsedAcl.Owner,
		},
	}, nil
	}, err
}

func (c S3ApiController) GetObjectRetention(ctx *fiber.Ctx) (*Response, error) {
@@ -128,15 +113,6 @@ func (c S3ApiController) GetObjectRetention(ctx *fiber.Ctx) (*Response, error) {
		}, err
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	data, err := c.be.GetObjectRetention(ctx.Context(), bucket, key, versionId)
	if err != nil {
		return &Response{
@@ -184,15 +160,6 @@ func (c S3ApiController) GetObjectLegalHold(ctx *fiber.Ctx) (*Response, error) {
		}, err
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	data, err := c.be.GetObjectLegalHold(ctx.Context(), bucket, key, versionId)
	return &Response{
		Data: auth.ParseObjectLegalHoldOutput(data),
@@ -326,11 +293,6 @@ func (c S3ApiController) GetObjectAttributes(ctx *fiber.Ctx) (*Response, error)
	parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
	isPublicBucket := utils.ContextKeyPublicBucket.IsSet(ctx)

	action := auth.GetObjectAttributesAction
	if versionId != "" {
		action = auth.GetObjectVersionAttributesAction
	}

	err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
		Readonly: c.readonly,
		Acl: parsedAcl,
@@ -339,7 +301,7 @@
		Acc: acct,
		Bucket: bucket,
		Object: key,
		Action: action,
		Action: auth.GetObjectAttributesAction,
		IsPublicRequest: isPublicBucket,
	})
	if err != nil {
@@ -350,15 +312,6 @@
		}, err
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	// parse max parts
	maxParts, err := utils.ParseUint(maxPartsStr)
	if err != nil {
@@ -502,15 +455,6 @@ func (c S3ApiController) GetObject(ctx *fiber.Ctx) (*Response, error) {
		partNumber = &partNumberQuery
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	// validate the checksum mode
	if checksumMode != "" && checksumMode != types.ChecksumModeEnabled {
		debuglogger.Logf("invalid x-amz-checksum-mode header value: %v", checksumMode)

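The utils.ValidateVersionId helper removed in these hunks is not itself shown in the diff, so the following is only a hedged sketch of what such a validator might check. The ids the tests accept ("01BX5ZZKBKACTAV9WEVGEMMVRZ") are ULID-shaped, so this version checks for the 26-character Crockford base32 alphabet; the real helper may differ:

package main

import "strings"

const crockford = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"

// validVersionID is a hypothetical stand-in for utils.ValidateVersionId.
func validVersionID(id string) bool {
	if id == "" {
		return true // an absent versionId query means "latest version"
	}
	if len(id) != 26 {
		return false
	}
	for _, c := range id {
		if !strings.ContainsRune(crockford, c) {
			return false
		}
	}
	return true
}
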
@@ -52,23 +52,6 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid versionId",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "backend returns error",
			input: testInput{
@@ -112,7 +95,7 @@
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			be := &BackendMock{
				GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object, versionId string) (map[string]string, error) {
				GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object string) (map[string]string, error) {
					return tt.input.beRes.(map[string]string), tt.input.beErr
				},
				GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
@@ -130,9 +113,8 @@
				tt.output.response,
				tt.output.err,
				ctxInputs{
					locals: tt.input.locals,
					body: tt.input.body,
					queries: tt.input.queries,
					locals: tt.input.locals,
					body: tt.input.body,
				})
		})
	}
@@ -165,23 +147,6 @@ func TestS3ApiController_GetObjectRetention(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid versionId",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "backend returns error",
			input: testInput{
@@ -253,9 +218,8 @@
				tt.output.response,
				tt.output.err,
				ctxInputs{
					locals: tt.input.locals,
					body: tt.input.body,
					queries: tt.input.queries,
					locals: tt.input.locals,
					body: tt.input.body,
				})
		})
	}
@@ -285,23 +249,6 @@ func TestS3ApiController_GetObjectLegalHold(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid versionId",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "backend returns error",
			input: testInput{
@@ -358,9 +305,8 @@
				tt.output.response,
				tt.output.err,
				ctxInputs{
					locals: tt.input.locals,
					body: tt.input.body,
					queries: tt.input.queries,
					locals: tt.input.locals,
					body: tt.input.body,
				})
		})
	}
@@ -609,23 +555,6 @@ func TestS3ApiController_GetObjectAttributes(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid versionId",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "invalid max parts",
			input: testInput{
@@ -734,7 +663,6 @@
					locals: tt.input.locals,
					body: tt.input.body,
					headers: tt.input.headers,
					queries: tt.input.queries,
				})
		})
	}
@@ -765,23 +693,6 @@ func TestS3ApiController_GetObject(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid versionId",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "invalid checksum mode",
			input: testInput{
@@ -846,7 +757,7 @@
					"Range": "100-200",
				},
				queries: map[string]string{
					"versionId": "01BX5ZZKBKACTAV9WEVGEMMVRZ",
					"versionId": "versionId",
				},
				locals: defaultLocals,
				beRes: &s3.GetObjectOutput{

@@ -80,15 +80,6 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) (*Response, error) {
		partNumber = &partNumberQuery
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	checksumMode := types.ChecksumMode(strings.ToUpper(ctx.Get("x-amz-checksum-mode")))
	if checksumMode != "" && checksumMode != types.ChecksumModeEnabled {
		debuglogger.Logf("invalid x-amz-checksum-mode header value: %v", checksumMode)
@@ -135,31 +126,30 @@

	return &Response{
		Headers: map[string]*string{
			"ETag": res.ETag,
			"x-amz-restore": res.Restore,
			"accept-ranges": res.AcceptRanges,
			"Content-Range": res.ContentRange,
			"Content-Disposition": res.ContentDisposition,
			"Content-Encoding": res.ContentEncoding,
			"Content-Language": res.ContentLanguage,
			"Cache-Control": res.CacheControl,
			"Content-Length": utils.ConvertPtrToStringPtr(res.ContentLength),
			"Content-Type": res.ContentType,
			"Expires": res.ExpiresString,
			"ETag": res.ETag,
			"Last-Modified": utils.FormatDatePtrToString(res.LastModified, timefmt),
			"x-amz-restore": res.Restore,
			"accept-ranges": res.AcceptRanges,
			"x-amz-checksum-crc32": res.ChecksumCRC32,
			"x-amz-checksum-crc64nvme": res.ChecksumCRC64NVME,
			"x-amz-checksum-crc32c": res.ChecksumCRC32C,
			"x-amz-checksum-sha1": res.ChecksumSHA1,
			"x-amz-checksum-sha256": res.ChecksumSHA256,
			"Content-Type": res.ContentType,
			"x-amz-version-id": res.VersionId,
			"Content-Length": utils.ConvertPtrToStringPtr(res.ContentLength),
			"x-amz-mp-parts-count": utils.ConvertPtrToStringPtr(res.PartsCount),
			"x-amz-object-lock-mode": utils.ConvertToStringPtr(res.ObjectLockMode),
			"x-amz-object-lock-legal-hold": utils.ConvertToStringPtr(res.ObjectLockLegalHoldStatus),
			"x-amz-storage-class": utils.ConvertToStringPtr(res.StorageClass),
			"x-amz-checksum-type": utils.ConvertToStringPtr(res.ChecksumType),
			"x-amz-object-lock-retain-until-date": utils.FormatDatePtrToString(res.ObjectLockRetainUntilDate, time.RFC3339),
			"x-amz-tagging-count": utils.ConvertPtrToStringPtr(res.TagCount),
			"Last-Modified": utils.FormatDatePtrToString(res.LastModified, timefmt),
		},
		MetaOpts: &MetaOptions{
			BucketOwner: parsedAcl.Owner,

@@ -51,30 +51,13 @@ func TestS3ApiController_HeadObject(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid versionId",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "invalid part number",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"partNumber": "-4",
					"versionId": "01BX5ZZKBKACTAV9WEVGEMMVRZ",
					"versionId": "id",
				},
			},
			output: testOutput{
@@ -164,7 +147,6 @@
					"x-amz-checksum-type": nil,
					"x-amz-object-lock-retain-until-date": nil,
					"Last-Modified": nil,
					"x-amz-tagging-count": nil,
					"Content-Type": utils.GetStringPtr("application/xml"),
					"Content-Length": utils.GetStringPtr("100"),
				},

@@ -305,7 +305,7 @@ func (c S3ApiController) CompleteMultipartUpload(ctx *fiber.Ctx) (*Response, err
		mpuObjectSize = &val
	}

	checksums, err := utils.ParseCompleteMpChecksumHeaders(ctx)
	checksums, err := utils.ParseChecksumHeaders(ctx)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
@@ -325,15 +325,6 @@

	ifMatch, ifNoneMatch := utils.ParsePreconditionMatchHeaders(ctx)

	err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, isBucketPublic, c.be, true)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	res, versid, err := c.be.CompleteMultipartUpload(ctx.Context(),
		&s3.CompleteMultipartUploadInput{
			Bucket: &bucket,

@@ -479,30 +479,13 @@ func TestS3ApiController_CompleteMultipartUpload(t *testing.T) {
				err: s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type"),
			},
		},
		{
			name: "object is locked",
			input: testInput{
				locals: defaultLocals,
				body: validMpBody,
				extraMockErr: s3err.GetAPIError(s3err.ErrObjectLocked),
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrObjectLocked),
			},
		},
		{
			name: "backend returns error",
			input: testInput{
				locals: defaultLocals,
				body: validMpBody,
				beErr: s3err.GetAPIError(s3err.ErrNoSuchBucket),
				beRes: s3response.CompleteMultipartUploadResult{},
				extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
				locals: defaultLocals,
				body: validMpBody,
				beErr: s3err.GetAPIError(s3err.ErrNoSuchBucket),
				beRes: s3response.CompleteMultipartUploadResult{},
			},
			output: testOutput{
				response: &Response{
@@ -531,7 +514,6 @@
				headers: map[string]string{
					"X-Amz-Mp-Object-Size": "3",
				},
				extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
			},
			output: testOutput{
				response: &Response{
@@ -560,12 +542,6 @@
				GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
					return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
				},
				GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
					return nil, tt.input.extraMockErr
				},
				GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
					return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
				},
			}

			ctrl := S3ApiController{

@@ -36,17 +36,11 @@ import (
func (c S3ApiController) PutObjectTagging(ctx *fiber.Ctx) (*Response, error) {
	bucket := ctx.Params("bucket")
	key := strings.TrimPrefix(ctx.Path(), fmt.Sprintf("/%s/", bucket))
	versionId := ctx.Query("versionId")
	acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
	isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
	IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
	parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)

	action := auth.PutObjectTaggingAction
	if versionId != "" {
		action = auth.PutObjectVersionTaggingAction
	}

	err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
		Readonly: c.readonly,
		Acl: parsedAcl,
@@ -55,7 +49,7 @@
		Acc: acct,
		Bucket: bucket,
		Object: key,
		Action: action,
		Action: auth.PutObjectTaggingAction,
		IsPublicRequest: IsBucketPublic,
	})
	if err != nil {
@@ -66,15 +60,6 @@
		}, err
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	tagging, err := utils.ParseTagging(ctx.Body(), utils.TagLimitObject)
	if err != nil {
		return &Response{
@@ -84,7 +69,7 @@
		}, err
	}

	err = c.be.PutObjectTagging(ctx.Context(), bucket, key, versionId, tagging)
	err = c.be.PutObjectTagging(ctx.Context(), bucket, key, tagging)
	return &Response{
		MetaOpts: &MetaOptions{
			BucketOwner: parsedAcl.Owner,
@@ -103,7 +88,7 @@ func (c S3ApiController) PutObjectRetention(ctx *fiber.Ctx) (*Response, error) {
	IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
	parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)

	err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
	if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
		Readonly: c.readonly,
		Acl: parsedAcl,
		AclPermission: auth.PermissionWrite,
@@ -113,17 +98,7 @@
		Object: key,
		Action: auth.PutObjectRetentionAction,
		IsPublicRequest: IsBucketPublic,
	})
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
	}); err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
@@ -178,7 +153,7 @@ func (c S3ApiController) PutObjectLegalHold(ctx *fiber.Ctx) (*Response, error) {
	IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
	parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)

	err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
	if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
		Readonly: c.readonly,
		Acl: parsedAcl,
		AclPermission: auth.PermissionWrite,
@@ -188,17 +163,7 @@
		Object: key,
		Action: auth.PutObjectLegalHoldAction,
		IsPublicRequest: IsBucketPublic,
	})
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	err = utils.ValidateVersionId(versionId)
	if err != nil {
	}); err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
@@ -225,7 +190,7 @@
		}, s3err.GetAPIError(s3err.ErrMalformedXML)
	}

	err = c.be.PutObjectLegalHold(ctx.Context(), bucket, key, versionId, legalHold.Status == types.ObjectLockLegalHoldStatusOn)
	err := c.be.PutObjectLegalHold(ctx.Context(), bucket, key, versionId, legalHold.Status == types.ObjectLockLegalHoldStatusOn)
	return &Response{
		MetaOpts: &MetaOptions{
			BucketOwner: parsedAcl.Owner,
@@ -389,15 +354,6 @@ func (c S3ApiController) UploadPartCopy(ctx *fiber.Ctx) (*Response, error) {
		}, err
	}

	if len(ctx.Request().Body()) != 0 {
		debuglogger.Logf("expected empty request body")
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, s3err.GetAPIError(s3err.ErrNonEmptyRequestBody)
	}

	if partNumber < minPartNumber || partNumber > maxPartNumber {
		debuglogger.Logf("invalid part number: %d", partNumber)
		return &Response{
@@ -534,15 +490,6 @@ func (c S3ApiController) CopyObject(ctx *fiber.Ctx) (*Response, error) {
		}, err
	}

	if len(ctx.Request().Body()) != 0 {
		debuglogger.Logf("expected empty request body")
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, s3err.GetAPIError(s3err.ErrNonEmptyRequestBody)
	}

	metadata := utils.GetUserMetaData(&ctx.Request().Header)

	if metaDirective != "" && metaDirective != types.MetadataDirectiveCopy && metaDirective != types.MetadataDirectiveReplace {
@@ -584,15 +531,6 @@

	preconditionHdrs := utils.ParsePreconditionHeaders(ctx, utils.WithCopySource())

	err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, false, c.be, true)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{
				BucketOwner: parsedAcl.Owner,
			},
		}, err
	}

	res, err := c.be.CopyObject(ctx.Context(),
		s3response.CopyObjectInput{
			Bucket: &bucket,
@@ -691,7 +629,7 @@ func (c S3ApiController) PutObject(ctx *fiber.Ctx) (*Response, error) {
		}, err
	}

	err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, IsBucketPublic, c.be, true)
	err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, IsBucketPublic, c.be)
	if err != nil {
		return &Response{
			MetaOpts: &MetaOptions{

@@ -64,23 +64,6 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid versionId",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "invalid request body",
			input: testInput{
@@ -132,7 +115,7 @@
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			be := &BackendMock{
				PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object, versionId string, tags map[string]string) error {
				PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object string, tags map[string]string) error {
					return tt.input.beErr
				},
				GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
@@ -150,9 +133,8 @@
				tt.output.response,
				tt.output.err,
				ctxInputs{
					locals: tt.input.locals,
					body: tt.input.body,
					queries: tt.input.queries,
					locals: tt.input.locals,
					body: tt.input.body,
				})
		})
	}
@@ -189,23 +171,6 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid versionId",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "invalid request body",
			input: testInput{
@@ -297,7 +262,6 @@
					locals: tt.input.locals,
					body: tt.input.body,
					headers: tt.input.headers,
					queries: tt.input.queries,
				})
		})
	}
@@ -334,23 +298,6 @@ func TestS3ApiController_PutObjectLegalHold(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid request body",
			input: testInput{
				locals: defaultLocals,
				queries: map[string]string{
					"versionId": "invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "invalid request body",
			input: testInput{
@@ -433,9 +380,8 @@
				tt.output.response,
				tt.output.err,
				ctxInputs{
					locals: tt.input.locals,
					body: tt.input.body,
					queries: tt.input.queries,
					locals: tt.input.locals,
					body: tt.input.body,
				})
		})
	}
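The "invalid request body" cases above feed bodies that fail to unmarshal into the legal-hold structure; for contrast, a well-formed PutObjectLegalHold payload follows the standard S3 shape:

// Standard S3 legal-hold body; Status is either ON or OFF. The handler above
// compares the parsed Status against types.ObjectLockLegalHoldStatusOn.
const legalHoldBody = `<LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    <Status>ON</Status>
</LegalHold>`
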
@@ -633,26 +579,6 @@ func TestS3ApiController_UploadPartCopy(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrAccessDenied),
			},
		},
		{
			name: "invalid copy source: invalid versionId",
			input: testInput{
				locals: defaultLocals,
				headers: map[string]string{
					"X-Amz-Copy-Source": "bucket/object?versionId=invalid_versionId",
				},
				queries: map[string]string{
					"partNumber": "2",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "invalid copy source",
			input: testInput{
@@ -673,27 +599,6 @@
				err: s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding),
			},
		},
		{
			name: "non empty request body",
			input: testInput{
				locals: defaultLocals,
				headers: map[string]string{
					"X-Amz-Copy-Source": "bucket/object",
				},
				queries: map[string]string{
					"partNumber": "2",
				},
				body: []byte("body"),
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrNonEmptyRequestBody),
			},
		},
		{
			name: "invalid part number",
			input: testInput{
@@ -791,7 +696,6 @@
					locals: tt.input.locals,
					headers: tt.input.headers,
					queries: tt.input.queries,
					body: tt.input.body,
				})
		})
	}
@@ -913,41 +817,6 @@ func TestS3ApiController_CopyObject(t *testing.T) {
				err: s3err.GetAPIError(s3err.ErrInvalidCopySourceBucket),
			},
		},
		{
			name: "invalid copy source: versionId",
			input: testInput{
				locals: defaultLocals,
				headers: map[string]string{
					"X-Amz-Copy-Source": "bucket/object?versionId=invalid_versionId",
				},
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
			},
		},
		{
			name: "non empty request body",
			input: testInput{
				locals: defaultLocals,
				headers: map[string]string{
					"X-Amz-Copy-Source": "bucket/object",
				},
				body: []byte("body"),
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrNonEmptyRequestBody),
			},
		},
		{
			name: "invalid metadata directive",
			input: testInput{
@@ -1022,24 +891,6 @@
				err: s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders),
			},
		},
		{
			name: "object is locked",
			input: testInput{
				locals: defaultLocals,
				headers: map[string]string{
					"X-Amz-Copy-Source": "bucket/object",
				},
				extraMockErr: s3err.GetAPIError(s3err.ErrObjectLocked),
			},
			output: testOutput{
				response: &Response{
					MetaOpts: &MetaOptions{
						BucketOwner: "root",
					},
				},
				err: s3err.GetAPIError(s3err.ErrObjectLocked),
			},
		},
		{
			name: "backend returns error",
			input: testInput{
@@ -1049,7 +900,6 @@
				headers: map[string]string{
					"X-Amz-Copy-Source": "bucket/object",
				},
				extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
			},
			output: testOutput{
				response: &Response{
@@ -1080,7 +930,6 @@
						ETag: utils.GetStringPtr("ETag"),
					},
				},
				extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
			},
			output: testOutput{
				response: &Response{
@@ -1110,12 +959,6 @@
				GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
					return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
				},
				GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
					return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
				},
				GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
					return nil, tt.input.extraMockErr
				},
			}

			ctrl := S3ApiController{
@@ -1130,7 +973,6 @@
				ctxInputs{
					locals: tt.input.locals,
					headers: tt.input.headers,
					body: tt.input.body,
				})
		})
	}
@@ -1332,9 +1174,6 @@ func TestS3ApiController_PutObject(t *testing.T) {
				GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
					return nil, tt.input.extraMockErr
				},
				GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
					return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
				},
			}

			ctrl := S3ApiController{

@@ -20,7 +20,6 @@ import (
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3err"
)

// ParseAcl retrieves the bucket ACL and stores it in the context locals
@@ -43,16 +42,6 @@ func ParseAcl(be backend.Backend) fiber.Handler {
			parsedAcl.Owner = utils.ContextKeyRootAccessKey.Get(ctx).(string)
		}

		// if the expected bucket owner doesn't match the bucket owner,
		// the gateway should return AccessDenied.
		// This header appears in all actions except 'CreateBucket' and 'ListBuckets'.
		// 'ParseACL' is also applied to all actions except for 'CreateBucket' and 'ListBuckets',
		// so it's a natural place to check the expected bucket owner.
		bucketOwner := ctx.Get("X-Amz-Expected-Bucket-Owner")
		if bucketOwner != "" && bucketOwner != parsedAcl.Owner {
			return s3err.GetAPIError(s3err.ErrAccessDenied)
		}

		utils.ContextKeyParsedAcl.Set(ctx, parsedAcl)
		return nil
	}

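An illustrative client-side view of the check above (the endpoint and account id are hypothetical): when the header is present and wrong, ParseAcl short-circuits with AccessDenied before the controller runs.

package main

import "net/http"

func main() {
	req, err := http.NewRequest(http.MethodHead, "http://localhost:7070/my-bucket", nil)
	if err != nil {
		panic(err)
	}
	// Must equal the bucket owner's access key, or the gateway denies the request.
	req.Header.Set("X-Amz-Expected-Bucket-Owner", "root")
	_, _ = http.DefaultClient.Do(req)
}
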
@@ -37,7 +37,7 @@ type RootUserConfig struct {
	Secret string
}

func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string, streamBody bool, requireContentSha256 bool) fiber.Handler {
func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string, streamBody bool) fiber.Handler {
	acct := accounts{root: root, iam: iam}

	return func(ctx *fiber.Ctx) error {
@@ -109,17 +109,9 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string,
		}

		hashPayload := ctx.Get("X-Amz-Content-Sha256")
		if requireContentSha256 && hashPayload == "" {
			return s3err.GetAPIError(s3err.ErrMissingContentSha256)
		}
		if !utils.IsValidSh256PayloadHeader(hashPayload) {
			return s3err.GetAPIError(s3err.ErrInvalidSHA256Paylod)
		}
		// the streaming payload type is allowed only in PutObject and UploadPart
		// e.g. STREAMING-UNSIGNED-PAYLOAD-TRAILER
		if !streamBody && utils.IsStreamingPayload(hashPayload) {
			return s3err.GetAPIError(s3err.ErrInvalidSHA256PayloadUsage)
		}
		if streamBody {
			// for streaming PUT actions, authorization is deferred
			// until end of stream due to need to get length and

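utils.IsStreamingPayload is not shown in this diff; the hedged sketch below illustrates the kind of check it likely performs, using the documented SigV4 streaming values of X-Amz-Content-Sha256 (the function body is an assumption, not the gateway's code):

package main

// isStreamingPayload reports whether the X-Amz-Content-Sha256 value names
// a chunked streaming payload rather than a plain digest.
func isStreamingPayload(v string) bool {
	switch v {
	case "STREAMING-UNSIGNED-PAYLOAD-TRAILER",
		"STREAMING-AWS4-HMAC-SHA256-PAYLOAD",
		"STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER":
		return true
	}
	return false
}
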
@@ -22,83 +22,17 @@ import (
	"github.com/versity/versitygw/s3api/utils"
)

// ChecksumReader extends io.Reader with checksum-related metadata.
// It is used to differentiate normal readers from readers that can
// report a checksum and the algorithm used to produce it.
type ChecksumReader interface {
	io.Reader
	Algorithm() string
	Checksum() string
}

// NewChecksumReader wraps a stackedReader and returns a reader that
// preserves checksum behavior when the *original* bodyReader implemented
// ChecksumReader.
//
// If bodyReader already supports ChecksumReader, we wrap stackedReader
// with MockChecksumReader so that reading continues from stackedReader,
// but Algorithm() and Checksum() still delegate to the underlying reader.
//
// If bodyReader is not a ChecksumReader, we simply return stackedReader.
func NewChecksumReader(bodyReader io.Reader, stackedReader io.Reader) io.Reader {
	_, ok := bodyReader.(ChecksumReader)
	if ok {
		return &MockChecksumReader{rdr: stackedReader}
	}

	return stackedReader
}

// MockChecksumReader is a wrapper around an io.Reader that forwards Read()
// but also conditionally exposes checksum metadata if the underlying reader
// implements the ChecksumReader interface.
type MockChecksumReader struct {
	rdr io.Reader
}

// Read simply forwards data reads to the underlying reader.
func (rr *MockChecksumReader) Read(buffer []byte) (int, error) {
	return rr.rdr.Read(buffer)
}

// Algorithm returns the checksum algorithm used by the underlying reader,
// but only if the wrapped reader implements ChecksumReader.
func (rr *MockChecksumReader) Algorithm() string {
	r, ok := rr.rdr.(ChecksumReader)
	if ok {
		return r.Algorithm()
	}

	return ""
}

// Checksum returns the checksum value from the underlying reader,
// if it implements ChecksumReader. Otherwise returns an empty string.
func (rr *MockChecksumReader) Checksum() string {
	r, ok := rr.rdr.(ChecksumReader)
	if ok {
		return r.Checksum()
	}

	return ""
}

var _ ChecksumReader = &MockChecksumReader{}

func wrapBodyReader(ctx *fiber.Ctx, wr func(io.Reader) io.Reader) {
	rdr, ok := utils.ContextKeyBodyReader.Get(ctx).(io.Reader)
	r, ok := utils.ContextKeyBodyReader.Get(ctx).(io.Reader)
	if !ok {
		rdr = ctx.Request().BodyStream()
		r = ctx.Request().BodyStream()
		// Override the body reader with an empty reader to prevent panics
		// in case of unexpected or malformed HTTP requests.
		if rdr == nil {
			rdr = bytes.NewBuffer([]byte{})
		if r == nil {
			r = bytes.NewBuffer([]byte{})
		}
	}

	r := wr(rdr)
	// Ensure checksum behavior is stacked if the original body reader had it.
	r = NewChecksumReader(rdr, r)

	r = wr(r)
	utils.ContextKeyBodyReader.Set(ctx, r)
}

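A hedged usage sketch of the readers above (fakeBody is hypothetical, purely for illustration): wrapping preserves the ChecksumReader interface through the stack, and the wrapper's Algorithm/Checksum delegate to the stacked reader when that reader is itself checksum-aware.

package middlewares

import (
	"fmt"
	"io"
	"strings"
)

// fakeBody is a hypothetical checksum-aware body used only for this example.
type fakeBody struct{ io.Reader }

func (fakeBody) Algorithm() string { return "crc32" }
func (fakeBody) Checksum() string  { return "AAAAAA==" }

func ExampleNewChecksumReader() {
	body := fakeBody{strings.NewReader("payload")} // implements ChecksumReader
	plain := strings.NewReader("payload")          // does not

	r1 := NewChecksumReader(body, io.LimitReader(body, 4))
	_, ok1 := r1.(ChecksumReader) // true: the wrapper keeps the interface visible
	r2 := NewChecksumReader(plain, io.LimitReader(plain, 4))
	_, ok2 := r2.(ChecksumReader) // false: plain readers pass through untouched

	fmt.Println(ok1, ok2)
	// Output: true false
}
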
@@ -1,121 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
    "bytes"
    "encoding/base64"
    "io"
    "strings"

    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/s3api/utils"
    "github.com/versity/versitygw/s3err"
)

// VerifyChecksums parses, validates, and calculates the
// Content-MD5 and x-amz-checksum-* headers.
// Additionally, it ensures that the request body is not empty
// for actions that require a non-empty body. For large data actions (PutObject, UploadPart),
// it wraps the body reader to handle Content-MD5:
// the x-amz-checksum-* headers are explicitly processed by the backend.
func VerifyChecksums(streamBody bool, requireBody bool, requireChecksum bool) fiber.Handler {
    return func(ctx *fiber.Ctx) error {
        md5sum := ctx.Get("Content-Md5")

        if streamBody {
            // for large data actions (PutObject, UploadPart)
            // only stack the md5 reader, as x-amz-checksum-*
            // calculation is explicitly handled in the backend
            if md5sum == "" {
                return nil
            }

            if !isValidMD5(md5sum) {
                return s3err.GetAPIError(s3err.ErrInvalidDigest)
            }

            var err error
            wrapBodyReader(ctx, func(r io.Reader) io.Reader {
                r, err = utils.NewHashReader(r, md5sum, utils.HashTypeMd5)
                return r
            })
            if err != nil {
                return err
            }
            return nil
        }

        body := ctx.Body()
        if requireBody && len(body) == 0 {
            return s3err.GetAPIError(s3err.ErrMissingRequestBody)
        }

        var rdr io.Reader
        var err error
        if md5sum != "" {
            if !isValidMD5(md5sum) {
                return s3err.GetAPIError(s3err.ErrInvalidDigest)
            }

            rdr, err = utils.NewHashReader(bytes.NewReader(body), md5sum, utils.HashTypeMd5)
            if err != nil {
                return err
            }
        }

        // parse and validate checksum headers
        algo, checksums, err := utils.ParseChecksumHeadersAndSdkAlgo(ctx)
        if err != nil {
            return err
        }

        if algo != "" {
            r, err := utils.NewHashReader(bytes.NewReader(body), checksums[algo], utils.HashType(strings.ToLower(string(algo))))
            if err != nil {
                return err
            }

            if rdr != nil {
                // combine both md5 and the checksum readers
                rdr = io.MultiReader(rdr, r)
            } else {
                rdr = r
            }
        }

        if rdr == nil && requireChecksum {
            return s3err.GetAPIError(s3err.ErrChecksumRequired)
        }

        if rdr != nil {
            _, err = io.Copy(io.Discard, rdr)
            if err != nil {
                return err
            }
        }

        return nil
    }
}

func isValidMD5(s string) bool {
    decoded, err := base64.StdEncoding.DecodeString(s)
    if err != nil {
        return false
    }

    return len(decoded) == 16
}
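The deleted middleware above validates buffered bodies by building hash readers and then draining them with io.Copy(io.Discard, ...), so verification happens as a side effect of reading. A standalone Go sketch of that drain-to-validate pattern follows; hashReader here is illustrative only, and the gateway's utils.NewHashReader is assumed to be a more general version of the same idea.

package main

import (
    "bytes"
    "crypto/md5"
    "encoding/base64"
    "fmt"
    "hash"
    "io"
)

// hashReader hashes everything read through it and checks the expected
// digest once the underlying reader is exhausted.
type hashReader struct {
    r        io.Reader
    h        hash.Hash
    expected string // base64 of the expected digest
}

func (hr *hashReader) Read(p []byte) (int, error) {
    n, err := hr.r.Read(p)
    hr.h.Write(p[:n])
    if err == io.EOF {
        got := base64.StdEncoding.EncodeToString(hr.h.Sum(nil))
        if got != hr.expected {
            return n, fmt.Errorf("BadDigest: got %s, want %s", got, hr.expected)
        }
    }
    return n, err
}

func main() {
    body := []byte("hello")
    sum := md5.Sum(body)
    expected := base64.StdEncoding.EncodeToString(sum[:])

    rdr := &hashReader{r: bytes.NewReader(body), h: md5.New(), expected: expected}
    // Drain the reader; validation happens as a side effect, as above.
    if _, err := io.Copy(io.Discard, rdr); err != nil {
        fmt.Println("checksum error:", err)
        return
    }
    fmt.Println("Content-MD5 verified:", expected)
}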
68
s3api/middlewares/md5.go
Normal file
@@ -0,0 +1,68 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
    "crypto/md5"
    "encoding/base64"
    "io"

    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/s3api/utils"
    "github.com/versity/versitygw/s3err"
)

func VerifyMD5Body(streamBody bool) fiber.Handler {
    return func(ctx *fiber.Ctx) error {
        incomingSum := ctx.Get("Content-Md5")
        if incomingSum == "" {
            return nil
        }

        if !isValidMD5(incomingSum) {
            return s3err.GetAPIError(s3err.ErrInvalidDigest)
        }

        if streamBody {
            var err error
            wrapBodyReader(ctx, func(r io.Reader) io.Reader {
                r, err = utils.NewHashReader(r, incomingSum, utils.HashTypeMd5)
                return r
            })
            if err != nil {
                return err
            }
            return nil
        }

        sum := md5.Sum(ctx.Body())
        calculatedSum := utils.Base64SumString(sum[:])

        if incomingSum != calculatedSum {
            return s3err.GetAPIError(s3err.ErrBadDigest)
        }

        return nil
    }
}

func isValidMD5(s string) bool {
    decoded, err := base64.StdEncoding.DecodeString(s)
    if err != nil {
        return false
    }

    return len(decoded) == 16
}
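For the buffered path above, the Content-MD5 header value is simply the base64 encoding of the raw 16-byte MD5 digest of the body. A minimal standalone illustration (utils.Base64SumString is assumed to do the equivalent encoding):

package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
)

// contentMD5 returns the value a client would send in the Content-MD5
// header: the base64 encoding of the raw 16-byte MD5 digest of the body.
func contentMD5(body []byte) string {
    sum := md5.Sum(body)
    return base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
    body := []byte("hello world")
    header := contentMD5(body)

    // Server-side check, mirroring the buffered path above: recompute
    // and compare; a mismatch maps to a BadDigest-style error.
    if contentMD5(body) != header {
        fmt.Println("BadDigest")
        return
    }
    fmt.Println("digest ok:", header)
}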
@@ -30,7 +30,7 @@ import (

// AuthorizePublicBucketAccess checks if the bucket grants public
// access to anonymous requesters
func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPermission auth.Action, permission auth.Permission, region string, streamBody bool) fiber.Handler {
func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPermission auth.Action, permission auth.Permission, streamBody bool) fiber.Handler {
    return func(ctx *fiber.Ctx) error {
        // skip for authenticated requests
        if utils.IsPresignedURLAuth(ctx) || ctx.Get("Authorization") != "" {
@@ -59,11 +59,6 @@ func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPerm
        bucket, object := parsePath(ctx.Path())
        err := auth.VerifyPublicAccess(ctx.Context(), be, policyPermission, permission, bucket, object)
        if err != nil {
            if s3action == metrics.ActionHeadBucket {
                // add the bucket region header for HeadBucket
                // if anonymous access is denied
                ctx.Response().Header.Add("x-amz-bucket-region", region)
            }
            return err
        }

@@ -79,10 +74,6 @@ func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPerm

        if streamBody {
            if utils.IsUnsignedStreamingPayload(payloadHash) {
                cLength, err := utils.ParseDecodedContentLength(ctx)
                if err != nil {
                    return err
                }
                // stack an unsigned streaming payload reader
                checksumType, err := utils.ExtractChecksumType(ctx)
                if err != nil {
@@ -91,7 +82,7 @@ func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPerm

                wrapBodyReader(ctx, func(r io.Reader) io.Reader {
                    var cr io.Reader
                    cr, err = utils.NewUnsignedChunkReader(r, checksumType, cLength)
                    cr, err = utils.NewUnsignedChunkReader(r, checksumType)
                    return cr
                })

@@ -112,15 +103,13 @@ func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPerm
            }
        }

        if payloadHash != "" {
            // Calculate the hash of the request payload
            hashedPayload := sha256.Sum256(ctx.Body())
            hexPayload := hex.EncodeToString(hashedPayload[:])
        // Calculate the hash of the request payload
        hashedPayload := sha256.Sum256(ctx.Body())
        hexPayload := hex.EncodeToString(hashedPayload[:])

            // Compare the calculated hash with the hash provided
            if payloadHash != hexPayload {
                return s3err.GetAPIError(s3err.ErrContentSHA256Mismatch)
            }
        // Compare the calculated hash with the hash provided
        if payloadHash != hexPayload {
            return s3err.GetAPIError(s3err.ErrContentSHA256Mismatch)
        }

        return nil

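A standalone Go sketch of the x-amz-content-sha256 comparison performed in the hunk above; the error name printed here is for illustration only.

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

func main() {
    body := []byte("example payload")

    // Clients send hex(SHA-256(body)) as x-amz-content-sha256.
    sum := sha256.Sum256(body)
    clientHash := hex.EncodeToString(sum[:])

    // Server side, mirroring the hunk above: recompute and compare.
    recomputed := sha256.Sum256(body)
    if clientHash != hex.EncodeToString(recomputed[:]) {
        fmt.Println("XAmzContentSHA256Mismatch")
        return
    }
    fmt.Println("payload hash ok:", clientHash)
}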
489
s3api/router.go
File diff suppressed because it is too large
@@ -19,7 +19,6 @@ import (
    "errors"
    "net/http"
    "strings"
    "time"

    "github.com/gofiber/fiber/v2"
    "github.com/gofiber/fiber/v2/middleware/logger"
@@ -36,10 +35,6 @@ import (
    "github.com/versity/versitygw/s3log"
)

const (
    shutDownDuration = time.Second * 10
)

type S3ApiServer struct {
    app     *fiber.App
    backend backend.Backend
@@ -172,11 +167,6 @@ func (sa *S3ApiServer) Serve() (err error) {
    return sa.app.Listen(sa.port)
}

// ShutDown gracefully shuts down the server with a context timeout
func (sa *S3ApiServer) ShutDown() error {
    return sa.app.ShutdownWithTimeout(shutDownDuration)
}

// stackTraceHandler stores the system panics
// in the context locals
func stackTraceHandler(ctx *fiber.Ctx, e any) {
@@ -199,13 +189,6 @@ func globalErrorHandler(ctx *fiber.Ctx, er error) error {
            ctx.Status(http.StatusBadRequest)
            return nil
        }
        if strings.Contains(fiberErr.Message, "error when reading request headers") {
            // This error means fiber failed to parse the incoming request,
            // which is a malformed one. Return a BadRequest in this case
            err := s3err.GetAPIError(s3err.ErrCannotParseHTTPRequest)
            ctx.Status(err.HTTPStatusCode)
            return ctx.Send(s3err.GetAPIErrorResponse(err, "", "", ""))
        }
    }

    // additionally log the internal error

@@ -160,32 +160,22 @@ func IsStreamingPayload(str string) bool {
        pt == payloadTypeStreamingSignedTrailer
}

// ParseDecodedContentLength extracts and validates the
// 'x-amz-decoded-content-length' from fiber context
func ParseDecodedContentLength(ctx *fiber.Ctx) (int64, error) {
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
    decContLengthStr := ctx.Get("X-Amz-Decoded-Content-Length")
    if decContLengthStr == "" {
        debuglogger.Logf("missing required header 'X-Amz-Decoded-Content-Length'")
        return 0, s3err.GetAPIError(s3err.ErrMissingContentLength)
        return nil, s3err.GetAPIError(s3err.ErrMissingContentLength)
    }
    decContLength, err := strconv.ParseInt(decContLengthStr, 10, 64)
    //TODO: not sure if InvalidRequest should be returned in this case
    if err != nil {
        debuglogger.Logf("invalid value for 'X-Amz-Decoded-Content-Length': %v", decContLengthStr)
        return 0, s3err.GetAPIError(s3err.ErrMissingContentLength)
        return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
    }

    if decContLength > maxObjSizeLimit {
        debuglogger.Logf("the object size exceeds the allowed limit: (size): %v, (limit): %v", decContLength, int64(maxObjSizeLimit))
        return 0, s3err.GetAPIError(s3err.ErrEntityTooLarge)
    }

    return decContLength, nil
}

func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
    cLength, err := ParseDecodedContentLength(ctx)
    if err != nil {
        return nil, err
        debuglogger.Logf("the object size exceeds the allowed limit: (size): %v, (limit): %v", decContLength, maxObjSizeLimit)
        return nil, s3err.GetAPIError(s3err.ErrEntityTooLarge)
    }

    contentSha256 := payloadType(ctx.Get("X-Amz-Content-Sha256"))
@@ -199,10 +189,14 @@ func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secr
    if err != nil {
        return nil, err
    }
    if contentSha256 != payloadTypeStreamingSigned && checksumType == "" {
        debuglogger.Logf("empty value for required trailer header 'X-Amz-Trailer': %v", checksumType)
        return nil, s3err.GetAPIError(s3err.ErrTrailerHeaderNotSupported)
    }

    switch contentSha256 {
    case payloadTypeStreamingUnsignedTrailer:
        return NewUnsignedChunkReader(r, checksumType, cLength)
        return NewUnsignedChunkReader(r, checksumType)
    case payloadTypeStreamingSignedTrailer:
        return NewSignedChunkReader(r, authdata, region, secret, date, checksumType)
    case payloadTypeStreamingSigned:

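A standalone sketch of the decoded-content-length validation shown in the hunk above. The maxObjSizeLimit value used here is an illustrative assumption, not the gateway's configured bound, and the error strings stand in for the s3err codes.

package main

import (
    "errors"
    "fmt"
    "strconv"
)

const maxObjSizeLimit = int64(5) * 1024 * 1024 * 1024 * 1024 // assumed 5 TiB cap

func parseDecodedContentLength(header string) (int64, error) {
    if header == "" {
        return 0, errors.New("MissingContentLength")
    }
    n, err := strconv.ParseInt(header, 10, 64)
    if err != nil || n < 0 {
        return 0, errors.New("InvalidRequest")
    }
    if n > maxObjSizeLimit {
        return 0, errors.New("EntityTooLarge")
    }
    return n, nil
}

func main() {
    n, err := parseDecodedContentLength("1048576")
    fmt.Println(n, err) // 1048576 <nil>
}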
@@ -39,6 +39,7 @@ import (
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html

const (
    chunkHdrDelim = "\r\n"
    zeroLenSig    = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
    awsV4         = "AWS4"
    awsS3Service  = "s3"

@@ -21,6 +21,7 @@ import (
    "crypto/sha256"
    "encoding/base64"
    "errors"
    "fmt"
    "hash"
    "hash/crc32"
    "hash/crc64"
@@ -29,65 +30,38 @@ import (
    "strconv"
    "strings"

    "github.com/aws/aws-sdk-go-v2/service/s3/types"
    "github.com/versity/versitygw/debuglogger"
    "github.com/versity/versitygw/s3err"
)

var (
    trailerDelim         = []byte{'\n', '\r', '\n'}
    minChunkSize   int64 = 8192
    errMalformedEncoding = errors.New("malformed chunk encoding")
    trailerDelim         = []byte{'\n', '\r', '\n'}
    errMalformedEncoding = errors.New("malformed chunk encoding")
)

type UnsignedChunkReader struct {
    reader         *bufio.Reader
    checksumType   checksumType
    parsedChecksum string
    hasher         hash.Hash
    stash          []byte
    offset         int
    // this data is necessary for 'InvalidChunkSizeError' error
    // TODO: add 'Chunk' and 'BadChunkSize' in the error
    chunkSizes []int64
    cLength    int64
    // This data is necessary for the decoded content length mismatch error
    // TODO: add 'NumberBytesExpected' and 'NumberBytesProvided' in the error
    dataRead int64
    reader           *bufio.Reader
    checksumType     checksumType
    expectedChecksum string
    hasher           hash.Hash
    stash            []byte
    offset           int
}

func NewUnsignedChunkReader(r io.Reader, ct checksumType, decContentLength int64) (*UnsignedChunkReader, error) {
    var hasher hash.Hash
    var err error
    if ct != "" {
        hasher, err = getHasher(ct)
    }
func NewUnsignedChunkReader(r io.Reader, ct checksumType) (*UnsignedChunkReader, error) {
    hasher, err := getHasher(ct)
    if err != nil {
        debuglogger.Logf("failed to initialize hash calculator: %v", err)
        return nil, err
    }

    debuglogger.Infof("initializing unsigned chunk reader")
    return &UnsignedChunkReader{
        reader:       bufio.NewReader(r),
        checksumType: ct,
        stash:        make([]byte, 0),
        hasher:       hasher,
        chunkSizes:   []int64{},
        cLength:      decContentLength,
    }, nil
}

// Algorithm returns the checksum algorithm
func (ucr *UnsignedChunkReader) Algorithm() string {
    return strings.TrimPrefix(string(ucr.checksumType), "x-amz-checksum-")
}

// Checksum returns the parsed trailing checksum
func (ucr *UnsignedChunkReader) Checksum() string {
    return ucr.parsedChecksum
}

func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
    // First read any stashed data
    if len(ucr.stash) != 0 {
@@ -109,26 +83,15 @@ func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
            return 0, err
        }

        ucr.dataRead += chunkSize

        if chunkSize == 0 {
            // Stop parsing payloads once the 0-sized chunk is reached
            break
        }
        var rdr io.Reader = ucr.reader
        if ucr.hasher != nil {
            rdr = io.TeeReader(ucr.reader, ucr.hasher)
        }
        rdr := io.TeeReader(ucr.reader, ucr.hasher)
        payload := make([]byte, chunkSize)
        // Read and cache the payload
        _, err = io.ReadFull(rdr, payload)
        if err != nil {
            // the chunk size is not 0, so if io.EOF is returned
            // it means the body is incomplete
            if errors.Is(err, io.EOF) {
                debuglogger.Logf("unexpected EOF when reading chunk data")
                return 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
            }
            debuglogger.Logf("failed to read chunk data: %v", err)
            return 0, err
        }
@@ -153,11 +116,6 @@ func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
        }
    }

    if ucr.cLength != ucr.dataRead {
        debuglogger.Logf("number of bytes expected: (%v), number of bytes read: (%v)", ucr.cLength, ucr.dataRead)
        return 0, s3err.GetAPIError(s3err.ErrContentLengthMismatch)
    }

    // Read and validate trailers
    if err := ucr.readTrailer(); err != nil {
        debuglogger.Logf("failed to read trailer: %v", err)
@@ -172,11 +130,14 @@ func (ucr *UnsignedChunkReader) readAndSkip(data ...byte) error {
    for _, d := range data {
        b, err := ucr.reader.ReadByte()
        if err != nil {
            return s3err.GetAPIError(s3err.ErrIncompleteBody)
            if err == io.EOF {
                return io.ErrUnexpectedEOF
            }
            return err
        }

        if b != d {
            return s3err.GetAPIError(s3err.ErrIncompleteBody)
            return errMalformedEncoding
        }
    }

@@ -185,94 +146,53 @@ func (ucr *UnsignedChunkReader) readAndSkip(data ...byte) error {

// Extracts the chunk size from the payload
func (ucr *UnsignedChunkReader) extractChunkSize() (int64, error) {
    line, err := ucr.reader.ReadString('\r')
    line, err := ucr.reader.ReadString('\n')
    if err != nil {
        debuglogger.Logf("failed to parse chunk size: %v", err)
        return 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
        return 0, errMalformedEncoding
    }

    err = ucr.readAndSkip('\n')
    if err != nil {
        debuglogger.Logf("failed to read the second byte (\\n) after chunk size")
        return 0, err
    }

    line = strings.TrimSpace(line)

    chunkSize, err := strconv.ParseInt(line, 16, 64)
    if err != nil {
        debuglogger.Logf("failed to convert chunk size: %v", err)
        return 0, s3err.GetAPIError(s3err.ErrIncompleteBody)
        return 0, errMalformedEncoding
    }

    if !ucr.isValidChunkSize(chunkSize) {
        return chunkSize, s3err.GetAPIError(s3err.ErrInvalidChunkSize)
    }

    ucr.chunkSizes = append(ucr.chunkSizes, chunkSize)

    debuglogger.Infof("chunk size extracted: %v", chunkSize)

    return chunkSize, nil
}

// isValidChunkSize checks if the parsed chunk size is valid:
// every chunk size except for the last one
// should be at least 8192 bytes
func (ucr *UnsignedChunkReader) isValidChunkSize(size int64) bool {
    if len(ucr.chunkSizes) == 0 {
        // any valid number is valid as a first chunk size
        return true
    }

    lastChunkSize := ucr.chunkSizes[len(ucr.chunkSizes)-1]
    // any chunk size, except the last one, should be at least 8192 bytes
    if size != 0 && lastChunkSize < minChunkSize {
        debuglogger.Logf("invalid chunk size %v", lastChunkSize)
        return false
    }

    return true
}

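To make the format the reader above parses concrete, here is a sketch in Go that builds a STREAMING-UNSIGNED-PAYLOAD-TRAILER style body: hex chunk sizes, a terminating zero-size chunk, then a trailing checksum header. The delimiters follow the AWS sigv4-streaming document referenced earlier; treat this as illustrative rather than byte-exact.

package main

import (
    "encoding/base64"
    "encoding/binary"
    "fmt"
    "hash/crc32"
    "strings"
)

func buildUnsignedChunkedBody(data []byte) string {
    var b strings.Builder

    // one data chunk: "<size in hex>\r\n<bytes>\r\n"
    fmt.Fprintf(&b, "%x\r\n", len(data))
    b.Write(data)
    b.WriteString("\r\n")

    // terminating zero-size chunk
    b.WriteString("0\r\n")

    // trailing checksum: base64 of the big-endian CRC32 of the raw payload
    sum := make([]byte, 4)
    binary.BigEndian.PutUint32(sum, crc32.ChecksumIEEE(data))
    fmt.Fprintf(&b, "x-amz-checksum-crc32:%s\r\n\r\n", base64.StdEncoding.EncodeToString(sum))

    return b.String()
}

func main() {
    fmt.Printf("%q\n", buildUnsignedChunkedBody([]byte("hello world")))
}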
// Reads and validates the trailer at the end
func (ucr *UnsignedChunkReader) readTrailer() error {
    var trailerBuffer bytes.Buffer
    var hasChecksum bool

    for {
        v, err := ucr.reader.ReadByte()
        if err != nil {
            return s3err.GetAPIError(s3err.ErrIncompleteBody)
            debuglogger.Logf("failed to read byte: %v", err)
            if err == io.EOF {
                return io.ErrUnexpectedEOF
            }
            return err
        }
        if v != '\r' {
            hasChecksum = true
            trailerBuffer.WriteByte(v)
            continue
        }

        if !hasChecksum {
            // in case the payload doesn't contain a trailer,
            // the first 2 bytes (\r\n) have been read:
            // only read the last byte: \n
            err := ucr.readAndSkip('\n')
            if err != nil {
                debuglogger.Logf("failed to read chunk last byte: \\n: %v", err)
                return s3err.GetAPIError(s3err.ErrIncompleteBody)
            }

            break
        }

        var tmp [3]byte
        _, err = io.ReadFull(ucr.reader, tmp[:])
        if err != nil {
            debuglogger.Logf("failed to read chunk ending: \\n\\r\\n: %v", err)
            return s3err.GetAPIError(s3err.ErrIncompleteBody)
            if err == io.EOF {
                return io.ErrUnexpectedEOF
            }
            return err
        }
        if !bytes.Equal(tmp[:], trailerDelim) {
            debuglogger.Logf("incorrect trailer delimiter: (expected): \\n\\r\\n, (got): %q", tmp[:])
            return s3err.GetAPIError(s3err.ErrIncompleteBody)
            return errMalformedEncoding
        }
        break
    }
@@ -280,35 +200,20 @@ func (ucr *UnsignedChunkReader) readTrailer() error {
    // Parse the trailer
    trailerHeader := trailerBuffer.String()
    trailerHeader = strings.TrimSpace(trailerHeader)
    if trailerHeader == "" {
        if ucr.checksumType != "" {
            debuglogger.Logf("expected %s checksum in the payload, but it's missing", ucr.checksumType)
            return s3err.GetAPIError(s3err.ErrMalformedTrailer)
        }

        return nil
    }
    trailerHeaderParts := strings.Split(trailerHeader, ":")
    if len(trailerHeaderParts) != 2 {
        debuglogger.Logf("invalid trailer header parts: %v", trailerHeaderParts)
        return s3err.GetAPIError(s3err.ErrMalformedTrailer)
        return errMalformedEncoding
    }

    checksumKey := checksumType(trailerHeaderParts[0])
    checksum := trailerHeaderParts[1]

    if !checksumKey.isValid() {
        debuglogger.Logf("invalid checksum header key: %s", checksumKey)
        return s3err.GetAPIError(s3err.ErrMalformedTrailer)
    if trailerHeaderParts[0] != string(ucr.checksumType) {
        debuglogger.Logf("invalid checksum type: %v", trailerHeaderParts[0])
        //TODO: handle the error
        return errMalformedEncoding
    }

    if checksumKey != ucr.checksumType {
        debuglogger.Logf("incorrect checksum type (expected): %s, (actual): %s", ucr.checksumType, checksumKey)
        return s3err.GetAPIError(s3err.ErrMalformedTrailer)
    }

    ucr.parsedChecksum = checksum
    debuglogger.Infof("parsed the trailing header:\n%v:%v", checksumKey, checksum)
    ucr.expectedChecksum = trailerHeaderParts[1]
    debuglogger.Infof("parsed the trailing header:\n%v:%v", trailerHeaderParts[0], trailerHeaderParts[1])

    // Validate checksum
    return ucr.validateChecksum()
@@ -316,30 +221,17 @@ func (ucr *UnsignedChunkReader) readTrailer() error {

// Validates the trailing checksum sent at the end
func (ucr *UnsignedChunkReader) validateChecksum() error {
    algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(ucr.checksumType), "x-amz-checksum-")))
    // validate the checksum
    if !IsValidChecksum(ucr.parsedChecksum, algo) {
        debuglogger.Logf("invalid checksum: (algo): %s, (checksum): %s", algo, ucr.parsedChecksum)
        return s3err.GetInvalidTrailingChecksumHeaderErr(string(ucr.checksumType))
    }
    csum := ucr.hasher.Sum(nil)
    checksum := base64.StdEncoding.EncodeToString(csum)

    checksum := ucr.calculateChecksum()

    // compare the calculated and parsed checksums
    if checksum != ucr.parsedChecksum {
        debuglogger.Logf("incorrect checksum: (expected): %v, (got): %v", ucr.parsedChecksum, checksum)
        return s3err.GetChecksumBadDigestErr(algo)
    if checksum != ucr.expectedChecksum {
        debuglogger.Logf("incorrect checksum: (expected): %v, (got): %v", ucr.expectedChecksum, checksum)
        return fmt.Errorf("actual checksum: %v, expected checksum: %v", checksum, ucr.expectedChecksum)
    }

    return nil
}

// calculateChecksum calculates the checksum with the unsigned reader hasher
func (ucr *UnsignedChunkReader) calculateChecksum() string {
    csum := ucr.hasher.Sum(nil)
    return base64.StdEncoding.EncodeToString(csum)
}

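An illustrative analogue of the getHasher call used throughout this file: map a trailing checksum header name to a hash.Hash. Only stdlib hashes are shown here; the gateway's own implementation also covers algorithms like CRC64-NVME, whose table is not in the standard library.

package main

import (
    "crypto/sha1"
    "crypto/sha256"
    "fmt"
    "hash"
    "hash/crc32"
)

func getHasher(trailer string) (hash.Hash, error) {
    switch trailer {
    case "x-amz-checksum-crc32":
        return crc32.NewIEEE(), nil
    case "x-amz-checksum-sha1":
        return sha1.New(), nil
    case "x-amz-checksum-sha256":
        return sha256.New(), nil
    default:
        return nil, fmt.Errorf("unsupported trailer %q", trailer)
    }
}

func main() {
    h, err := getHasher("x-amz-checksum-crc32")
    if err != nil {
        panic(err)
    }
    h.Write([]byte("abc"))
    fmt.Printf("%x\n", h.Sum(nil)) // CRC32 of "abc"
}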
// Returns the hash calculator based on the hash type provided
func getHasher(ct checksumType) (hash.Hash, error) {
    switch ct {

@@ -26,12 +26,10 @@ import (
    "regexp"
    "strconv"
    "strings"
    "sync/atomic"
    "time"

    "github.com/aws/aws-sdk-go-v2/service/s3/types"
    "github.com/gofiber/fiber/v2"
    "github.com/oklog/ulid/v2"
    "github.com/valyala/fasthttp"
    "github.com/versity/versitygw/debuglogger"
    "github.com/versity/versitygw/s3err"
@@ -43,23 +41,13 @@ var (
    bucketNameIpRegexp = regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`)
)

var strictBucketNameValidation atomic.Bool

func init() {
    strictBucketNameValidation.Store(true)
}

func SetBucketNameValidationStrict(strict bool) {
    strictBucketNameValidation.Store(strict)
}

func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
    metadata = make(map[string]string)
    headers.DisableNormalizing()
    for key, value := range headers.AllInOrder() {
        hKey := string(key)
        if strings.HasPrefix(strings.ToLower(hKey), "x-amz-meta-") {
            trimmedKey := strings.ToLower(hKey[11:])
            trimmedKey := hKey[11:]
            headerValue := string(value)
            metadata[trimmedKey] = headerValue
        }
@@ -178,7 +166,7 @@ func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, cont
func SetMetaHeaders(ctx *fiber.Ctx, meta map[string]string) {
    ctx.Response().Header.DisableNormalizing()
    for key, val := range meta {
        ctx.Response().Header.Set(fmt.Sprintf("x-amz-meta-%s", key), val)
        ctx.Response().Header.Set(fmt.Sprintf("X-Amz-Meta-%s", key), val)
    }
    ctx.Response().Header.EnableNormalizing()
}
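A standalone sketch of the user-metadata extraction above: take every "x-amz-meta-*" header, strip the 11-byte prefix, and keep the remainder as the key. This sketch lower-cases the key, matching one of the two variants shown in the hunk; the other variant preserves the original case with header normalizing disabled.

package main

import (
    "fmt"
    "strings"
)

func userMetadata(headers map[string]string) map[string]string {
    meta := make(map[string]string)
    for k, v := range headers {
        if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
            meta[strings.ToLower(k[11:])] = v
        }
    }
    return meta
}

func main() {
    h := map[string]string{"X-Amz-Meta-Color": "blue", "Content-Type": "text/plain"}
    fmt.Println(userMetadata(h)) // map[color:blue]
}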
@@ -221,10 +209,6 @@ func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser, bodysize int) {
}

func IsValidBucketName(bucket string) bool {
    if !strictBucketNameValidation.Load() {
        return true
    }

    if len(bucket) < 3 || len(bucket) > 63 {
        debuglogger.Logf("bucket name length should be in 3-63 range, got: %v\n", len(bucket))
        return false
@@ -462,35 +446,18 @@ func ParseCalculatedChecksumHeaders(ctx *fiber.Ctx) (ChecksumValues, error) {
    return checksums, nil
}

// ParseCompleteMpChecksumHeaders parses and validates
// the 'CompleteMultipartUpload' x-amz-checksum-x headers
// by supporting both 'checksum' and 'checksum-<part_length>' formats
func ParseCompleteMpChecksumHeaders(ctx *fiber.Ctx) (ChecksumValues, error) {
// ParseChecksumHeaders parses/validates x-amz-checksum-x headers key/values
func ParseChecksumHeaders(ctx *fiber.Ctx) (ChecksumValues, error) {
    // first parse/validate 'x-amz-checksum-x' headers
    checksums, err := ParseCalculatedChecksumHeaders(ctx)
    if err != nil {
        return checksums, err
    }

    // check if the values are valid
    for al, val := range checksums {
        algo := strings.ToLower(string(al))
        if al != types.ChecksumAlgorithmCrc64nvme {
            chParts := strings.Split(val, "-")
            if len(chParts) > 2 {
                debuglogger.Logf("invalid checksum header: x-amz-checksum-%s: %s", algo, val)
                return checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", algo))
            }
            if len(chParts) == 2 {
                _, err := strconv.ParseInt(chParts[1], 10, 32)
                if err != nil {
                    debuglogger.Logf("invalid checksum header: x-amz-checksum-%s: %s", algo, val)
                    return checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", algo))
                }
                val = chParts[0]
            }
        }
        if !IsValidChecksum(val, al) {
            return checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", algo))
            return checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(al))))
        }
    }

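A minimal sketch of the composite checksum format accepted above for CompleteMultipartUpload: either "<base64>" or "<base64>-<part count>", with at most one dash and an integer suffix. The function name is illustrative, and the CRC64-NVME exemption shown in the hunk is omitted here for brevity.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

func splitCompositeChecksum(v string) (string, int, error) {
    parts := strings.Split(v, "-")
    switch len(parts) {
    case 1:
        return parts[0], 0, nil
    case 2:
        n, err := strconv.ParseInt(parts[1], 10, 32)
        if err != nil {
            return "", 0, fmt.Errorf("invalid part count %q", parts[1])
        }
        return parts[0], int(n), nil
    default:
        return "", 0, fmt.Errorf("invalid composite checksum %q", v)
    }
}

func main() {
    v, n, err := splitCompositeChecksum("4ZBMiA==-2")
    fmt.Println(v, n, err) // 4ZBMiA== 2 <nil>
}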
@@ -512,44 +479,14 @@ func ParseChecksumHeadersAndSdkAlgo(ctx *fiber.Ctx) (types.ChecksumAlgorithm, Ch
    return sdkAlgorithm, checksums, err
}

trailer := strings.ToUpper(ctx.Get("X-Amz-Trailer"))

if len(checksums) != 0 && trailer != "" {
    // both x-amz-trailer and one of x-amz-checksum-* is not allowed
    debuglogger.Logf("x-amz-checksum-* header is used with x-amz-trailer: trailer: %s", trailer)
    return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
}

trailerAlgo := strings.TrimPrefix(trailer, "X-AMZ-CHECKSUM-")

if sdkAlgorithm != "" {
    if len(checksums) == 0 && trailerAlgo == "" {
        // in case x-amz-sdk-algorithm is specified, but no corresponding
        // x-amz-checksum-* or x-amz-trailer is sent
        debuglogger.Logf("'x-amz-sdk-checksum-algorithm : %s' is used without corresponding x-amz-checksum-* header", sdkAlgorithm)
        return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrChecksumSDKAlgoMismatch)
    }

    if trailerAlgo != "" && string(sdkAlgorithm) != trailerAlgo {
        // x-amz-sdk-checksum-algorithm and x-amz-trailer should match
        debuglogger.Logf("x-amz-sdk-checksum-algorithm: (%s) and x-amz-trailer: (%s) don't match", sdkAlgorithm, trailerAlgo)
        return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr("x-amz-sdk-checksum-algorithm")
    }
}

if trailerAlgo != "" {
    sdkAlgorithm = types.ChecksumAlgorithm(trailerAlgo)
}

for al, val := range checksums {
    if !IsValidChecksum(val, al) {
        return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(al))))
    }

    // If any other checksum value is provided,
    // rather than x-amz-sdk-checksum-algorithm
    if sdkAlgorithm != "" && sdkAlgorithm != al {
        return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr("x-amz-sdk-checksum-algorithm")
        return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
    }
    sdkAlgorithm = al
}
@@ -706,11 +643,6 @@ const (
    TagLimitObject TagLimit = 10
)

// The tag key/value validation pattern comes from
// AWS S3 docs
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_Tag.html
var tagRule = regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`)

// Parses and validates tagging
func ParseTagging(data []byte, limit TagLimit) (map[string]string, error) {
    var tagging s3response.TaggingInput
@@ -735,30 +667,18 @@ func ParseTagging(data []byte, limit TagLimit) (map[string]string, error) {
    tagSet := make(map[string]string, tLen)

    for _, tag := range tagging.TagSet.Tags {
        // validate tag key length
        // validate tag key
        if len(tag.Key) == 0 || len(tag.Key) > 128 {
            debuglogger.Logf("tag key length should satisfy 0 < len(key) <= 128, key: %v", tag.Key)
            return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
        }

        // validate tag key string chars
        if !tagRule.MatchString(tag.Key) {
            debuglogger.Logf("invalid tag key: %s", tag.Key)
            return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
        }

        // validate tag value length
        // validate tag value
        if len(tag.Value) > 256 {
            debuglogger.Logf("tag value too long: (length): %v, (value): %v", len(tag.Value), tag.Value)
            return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
        }

        // validate tag value string chars
        if !tagRule.MatchString(tag.Value) {
            debuglogger.Logf("invalid tag value: %s", tag.Value)
            return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
        }

        // make sure there are no duplicate keys
        _, ok := tagSet[tag.Key]
        if ok {
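The tag character rule quoted above allows Unicode letters, separators, and numbers plus the punctuation set _.:/=+-@. A quick standalone demonstration of the same regexp:

package main

import (
    "fmt"
    "regexp"
)

var tagRule = regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`)

func main() {
    fmt.Println(tagRule.MatchString("env:prod/us-east-1")) // true
    fmt.Println(tagRule.MatchString("bad\ttag"))           // false, tab is a control char, not a separator
}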
@@ -838,7 +758,7 @@ func ValidateCopySource(copysource string) error {

    // cut till the versionId as it's the only query param
    // that is recognized in copy source
    object, versionId, _ := strings.Cut(rest, "?versionId=")
    object, _, _ := strings.Cut(rest, "?versionId=")

    // objects containing '../', '...../' ... are considered valid in AWS,
    // but for security purposes these should be considered invalid
@@ -848,12 +768,6 @@ func ValidateCopySource(copysource string) error {
        return s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)
    }

    // validate the versionId
    err = ValidateVersionId(versionId)
    if err != nil {
        return err
    }

    return nil
}

@@ -873,17 +787,3 @@ func ApplyOverride(original, override *string) *string {
    }
    return original
}

// ValidateVersionId checks if the input versionId is 'ulid' compatible
func ValidateVersionId(versionId string) error {
    if versionId == "" || versionId == "null" {
        return nil
    }
    _, err := ulid.Parse(versionId)
    if err != nil {
        debuglogger.Logf("invalid versionId: %s", versionId)
        return s3err.GetAPIError(s3err.ErrInvalidVersionId)
    }

    return nil
}

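A quick standalone check of the ULID-compatibility rule above: a valid ULID is 26 characters of Crockford base32, so the test vector used later in this diff parses while an arbitrary string does not.

package main

import (
    "fmt"

    "github.com/oklog/ulid/v2"
)

func main() {
    _, err := ulid.Parse("01BX5ZZKBKACTAV9WEVGEMMVRZ")
    fmt.Println(err) // <nil>

    _, err = ulid.Parse("not-a-ulid")
    fmt.Println(err != nil) // true
}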
@@ -231,28 +231,6 @@ func TestIsValidBucketName(t *testing.T) {
    }
}

func TestSetBucketNameValidationStrict(t *testing.T) {
    SetBucketNameValidationStrict(true)
    t.Cleanup(func() {
        SetBucketNameValidationStrict(true)
    })

    invalidBucket := "Invalid_Bucket"
    if IsValidBucketName(invalidBucket) {
        t.Fatalf("expected %q to be invalid with strict validation", invalidBucket)
    }

    SetBucketNameValidationStrict(false)
    if !IsValidBucketName(invalidBucket) {
        t.Fatalf("expected %q to be accepted when strict validation disabled", invalidBucket)
    }

    SetBucketNameValidationStrict(true)
    if IsValidBucketName(invalidBucket) {
        t.Fatalf("expected %q to be invalid after re-enabling strict validation", invalidBucket)
    }
}

func TestParseUint(t *testing.T) {
    type args struct {
        str string
@@ -955,15 +933,12 @@ func TestValidateCopySource(t *testing.T) {
        {"invalid object name 3", "bucket", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
        {"invalid object name 4", "bucket/../foo/dir/../../../", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
        {"invalid object name 5", "bucket/.?versionId=smth", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
        // invalid versionId
        {"invalid versionId 1", "bucket/object?versionId=invalid", s3err.GetAPIError(s3err.ErrInvalidVersionId)},
        {"invalid versionId 2", "bucket/object?versionId=01BX5ZZKBKACTAV9WEVGEMMV", s3err.GetAPIError(s3err.ErrInvalidVersionId)},
        // success
        {"no error 1", "bucket/object", nil},
        {"no error 2", "bucket/object/key", nil},
        {"no error 3", "bucket/4*&(*&(89765))", nil},
        {"no error 4", "bucket/foo/../bar", nil},
        {"no error 5", "bucket/foo/bar/baz?versionId=01BX5ZZKBKACTAV9WEVGEMMVRZ", nil},
        {"no error 5", "bucket/foo/bar/baz?versionId=id", nil},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {

@@ -87,8 +87,6 @@ const (
    ErrInvalidPartOrder
    ErrInvalidCompleteMpPartNumber
    ErrInternalError
    ErrNonEmptyRequestBody
    ErrIncompleteBody
    ErrInvalidCopyDest
    ErrInvalidCopySourceRange
    ErrInvalidCopySourceBucket
@@ -99,7 +97,6 @@ const (
    ErrDuplicateTagKey
    ErrBucketTaggingLimited
    ErrObjectTaggingLimited
    ErrCannotParseHTTPRequest
    ErrInvalidURLEncodedTagging
    ErrInvalidAuthHeader
    ErrUnsupportedAuthorizationType
@@ -119,15 +116,11 @@ const (
    ErrSignatureDoesNotMatch
    ErrContentSHA256Mismatch
    ErrInvalidSHA256Paylod
    ErrInvalidSHA256PayloadUsage
    ErrUnsupportedAnonymousSignedStreaming
    ErrMissingContentLength
    ErrContentLengthMismatch
    ErrInvalidAccessKeyID
    ErrRequestNotReadyYet
    ErrMissingDateHeader
    ErrGetUploadsWithKey
    ErrCopySourceNotAllowed
    ErrInvalidRequest
    ErrAuthNotSetup
    ErrNotImplemented
@@ -162,11 +155,7 @@ const (
    ErrInvalidVersionId
    ErrNoSuchVersion
    ErrSuspendedVersioningNotAllowed
    ErrMissingRequestBody
    ErrMultipleChecksumHeaders
    ErrChecksumSDKAlgoMismatch
    ErrChecksumRequired
    ErrMissingContentSha256
    ErrInvalidChecksumAlgorithm
    ErrInvalidChecksumPart
    ErrChecksumTypeWithAlgo
@@ -179,10 +168,6 @@ const (
    ErrMissingCORSOrigin
    ErrCORSIsNotEnabled
    ErrNotModified
    ErrInvalidLocationConstraint
    ErrInvalidArgument
    ErrMalformedTrailer
    ErrInvalidChunkSize

    // Non-AWS errors
    ErrExistingObjectIsDirectory
@@ -327,16 +312,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
        Description:    "We encountered an internal error, please try again.",
        HTTPStatusCode: http.StatusInternalServerError,
    },
    ErrNonEmptyRequestBody: {
        Code:           "InvalidRequest",
        Description:    "The request included a body. Requests of this type must not include a non-empty body.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrIncompleteBody: {
        Code:           "IncompleteBody",
        Description:    "The request body terminated unexpectedly",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrInvalidPart: {
        Code:        "InvalidPart",
        Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
@@ -407,11 +382,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
        Description:    "Object tags cannot be greater than 10",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrCannotParseHTTPRequest: {
        Code:           "BadRequest",
        Description:    "An error occurred when parsing the HTTP request.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrInvalidURLEncodedTagging: {
        Code:        "InvalidArgument",
        Description: "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates.",
@@ -512,11 +482,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
        Description:    "x-amz-content-sha256 must be UNSIGNED-PAYLOAD, STREAMING-UNSIGNED-PAYLOAD-TRAILER, STREAMING-AWS4-HMAC-SHA256-PAYLOAD, STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER, STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD, STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER or a valid sha256 value.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrInvalidSHA256PayloadUsage: {
        Code:           "InvalidRequest",
        Description:    "The value of x-amz-content-sha256 header is invalid.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrUnsupportedAnonymousSignedStreaming: {
        Code:        "InvalidRequest",
        Description: "Anonymous requests don't support this x-amz-content-sha256 value. Please use UNSIGNED-PAYLOAD or STREAMING-UNSIGNED-PAYLOAD-TRAILER.",
@@ -527,26 +492,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
        Description:    "You must provide the Content-Length HTTP header.",
        HTTPStatusCode: http.StatusLengthRequired,
    },
    ErrContentLengthMismatch: {
        Code:           "IncompleteBody",
        Description:    "You did not provide the number of bytes specified by the Content-Length HTTP header",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrMissingDateHeader: {
        Code:           "AccessDenied",
        Description:    "AWS authentication requires a valid Date or x-amz-date header.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrGetUploadsWithKey: {
        Code:           "InvalidRequest",
        Description:    "Key is not expected for the GET method ?uploads subresource",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrCopySourceNotAllowed: {
        Code:           "InvalidArgument",
        Description:    "You can only specify a copy source header for copy requests.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrInvalidRequest: {
        Code:        "InvalidRequest",
        Description: "Invalid Request.",
@@ -599,7 +549,7 @@ var errorCodeResponse = map[ErrorCode]APIError{
    },
    ErrObjectLockConfigurationNotAllowed: {
        Code:           "InvalidBucketState",
        Description:    "Versioning must be 'Enabled' on the bucket to apply an Object Lock configuration",
        Description:    "Object Lock configuration cannot be enabled on existing buckets.",
        HTTPStatusCode: http.StatusConflict,
    },
    ErrObjectLocked: {
@@ -717,26 +667,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
        Description:    "An Object Lock configuration is present on this bucket, so the versioning state cannot be changed.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrMissingRequestBody: {
        Code:           "MissingRequestBodyError",
        Description:    "Request Body is empty",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrChecksumSDKAlgoMismatch: {
        Code:           "InvalidRequest",
        Description:    "x-amz-sdk-checksum-algorithm specified, but no corresponding x-amz-checksum-* or x-amz-trailer headers were found.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrChecksumRequired: {
        Code:           "InvalidRequest",
        Description:    "Missing required header for this request: Content-MD5 OR x-amz-checksum-*",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrMissingContentSha256: {
        Code:           "InvalidRequest",
        Description:    "Missing required header for this request: x-amz-content-sha256",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrMultipleChecksumHeaders: {
        Code:        "InvalidRequest",
        Description: "Expecting a single x-amz-checksum- header. Multiple checksum Types are not allowed.",
@@ -802,26 +732,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
        Description:    "Not Modified",
        HTTPStatusCode: http.StatusNotModified,
    },
    ErrInvalidLocationConstraint: {
        Code:           "InvalidLocationConstraint",
        Description:    "The specified location-constraint is not valid",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrInvalidArgument: {
        Code:           "InvalidArgument",
        Description:    "",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrMalformedTrailer: {
        Code:           "MalformedTrailerError",
        Description:    "The request contained trailing data that was not well-formed or did not conform to our published schema.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrInvalidChunkSize: {
        Code:           "InvalidChunkSizeError",
        Description:    "Only the last chunk is allowed to have a size less than 8192 bytes",
        HTTPStatusCode: http.StatusBadRequest,
    },

    // non aws errors
    ErrExistingObjectIsDirectory: {
@@ -956,7 +866,7 @@ func GetChecksumBadDigestErr(algo types.ChecksumAlgorithm) APIError {
func GetChecksumSchemaMismatchErr(algo types.ChecksumAlgorithm, t types.ChecksumType) APIError {
    return APIError{
        Code:           "InvalidRequest",
        Description:    fmt.Sprintf("The %v checksum type cannot be used with the %v checksum algorithm.", strings.ToUpper(string(t)), strings.ToLower(string(algo))),
        Description:    fmt.Sprintf("The %v checksum type cannot be used with the %v checksum algorithm.", algo, strings.ToLower(string(t))),
        HTTPStatusCode: http.StatusBadRequest,
    }
}

@@ -29,7 +29,7 @@ import (
)

const (
    logFileMode = 0644
    logFileMode = 0600
    timeFormat  = "02/January/2006:15:04:05 -0700"
)

@@ -45,12 +45,12 @@ var _ AuditLogger = &FileLogger{}

// InitFileLogger initializes audit logs to local file
func InitFileLogger(logname string) (AuditLogger, error) {
    f, err := os.OpenFile(logname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, logFileMode)
    f, err := os.OpenFile(logname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
    if err != nil {
        return nil, fmt.Errorf("open log: %w", err)
    }

    fmt.Fprintf(f, "log starts %v\n", time.Now())
    f.WriteString(fmt.Sprintf("log starts %v\n", time.Now()))

    return &FileLogger{logfile: logname, f: f}, nil
}

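A standalone sketch of the audit-log open shown above: append mode, create if missing, write-only. The tightened 0600 mode restricts the log to its owner, which is the usual choice for audit data.

package main

import (
    "fmt"
    "os"
    "time"
)

func main() {
    f, err := os.OpenFile("audit.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
    if err != nil {
        fmt.Println("open log:", err)
        return
    }
    defer f.Close()

    // fmt.Fprintf writes directly to the file, avoiding the intermediate
    // string allocation of the WriteString(Sprintf(...)) form.
    fmt.Fprintf(f, "log starts %v\n", time.Now())
}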
@@ -728,8 +728,3 @@ type LocationConstraint struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint"`
    Value   string   `xml:",chardata"`
}

type CreateBucketConfiguration struct {
    LocationConstraint string
    TagSet             []types.Tag `xml:"Tags>Tag"`
}

@@ -92,7 +92,6 @@ complete_multipart_upload_rest_invalid_checksum() {
  if ! check_param_count_v2 "bucket, key, upload ID, parts payload, type, algorithm, correct hash" 7 $#; then
    return 1
  fi
  log 5 "bucket name: $1"
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$3" PARTS="$4" CHECKSUM_TYPE="$5" CHECKSUM_ALGORITHM="$6" CHECKSUM_HASH="$7" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/complete_multipart_upload.sh 2>&1); then
    log 2 "error completing multipart upload: $result"
    return 1

@@ -48,31 +48,6 @@ create_bucket() {
  return 0
}

create_bucket_invalid_name() {
  if [ $# -ne 1 ]; then
    log 2 "create bucket w/invalid name missing command type"
    return 1
  fi
  local exit_code=0
  if [[ $1 == "aws" ]] || [[ $1 == 's3' ]]; then
    bucket_create_error=$(aws --no-verify-ssl s3 mb "s3://" 2>&1) || exit_code=$?
  elif [[ $1 == 's3api' ]]; then
    bucket_create_error=$(aws --no-verify-ssl s3api create-bucket --bucket "s3://" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    bucket_create_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb "s3://" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    bucket_create_error=$(mc --insecure mb "$MC_ALIAS/." 2>&1) || exit_code=$?
  else
    log 2 "invalid command type $1"
    return 1
  fi
  if [ $exit_code -eq 0 ]; then
    log 2 "error: bucket should have not been created but was"
    return 1
  fi
  echo "$bucket_create_error"
}

create_bucket_with_user() {
  log 6 "create_bucket_with_user"
  if ! check_param_count "create_bucket_with_user" "command type, bucket, access ID, secret key" 4 $#; then
@@ -150,19 +125,3 @@ create_bucket_rest_expect_success() {
  fi
  return 0
}

create_bucket_rest() {
  if ! check_param_count "create_bucket_rest" "bucket name" 1 $#; then
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$BUCKET_ONE_NAME" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/create_bucket.sh 2>&1); then
    log 2 "error creating bucket: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    bucket_create_error="$(cat "$TEST_FILE_FOLDER/result.txt")"
    log 2 "expected '200', was '$result' ($bucket_create_error)"
    return 1
  fi
  return 0
}

@@ -81,19 +81,4 @@ get_bucket_location_mc() {
  # shellcheck disable=SC2034
  bucket_location=$(echo "$info" | grep -o 'Location:.*' | awk '{print $2}')
  return 0
}

get_bucket_location_rest() {
  if ! check_param_count_v2 "bucket, callback" 2 $#; then
    return 1
  fi
  if ! send_rest_go_command "200" "-bucketName" "$1" "-method" "GET" "-query" "location=" "-awsRegion" "$AWS_REGION"; then
    log 2 "error sending rest go command"
    return 1
  fi
  if [ "$2" != "" ] && ! "$2" "$TEST_FILE_FOLDER/result.txt"; then
    log 2 "callback error"
    return 1
  fi
  return 0
}
@@ -109,7 +109,6 @@ get_bucket_policy_rest() {
  if ! check_param_count "get_bucket_policy_rest" "bucket" 1 $#; then
    return 1
  fi
  log 5 "aws region: $AWS_REGION"
  if ! get_bucket_policy_rest_expect_code "$1" "200"; then
    log 2 "error getting REST bucket policy"
    return 1

@@ -60,17 +60,15 @@ put_object_tagging_rest() {
  current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
  aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
  header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
  content_md5=$(echo -n "$tagging" | openssl dgst -binary -md5 | openssl base64)
  # shellcheck disable=SC2154
  canonical_request="PUT
/$1/$2
tagging=
content-md5:$content_md5
host:$aws_endpoint_url_address
x-amz-content-sha256:$payload_hash
x-amz-date:$current_date_time

content-md5;host;x-amz-content-sha256;x-amz-date
host;x-amz-content-sha256;x-amz-date
$payload_hash"

  if ! generate_sts_string "$current_date_time" "$canonical_request"; then
@@ -80,8 +78,7 @@ $payload_hash"
  get_signature
  # shellcheck disable=SC2154
  reply=$(send_command curl -ks -w "%{http_code}" -X PUT "$header://$aws_endpoint_url_address/$1/$2?tagging" \
    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
    -H "Content-MD5: $content_md5" \
    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
    -H "x-amz-content-sha256: $payload_hash" \
    -H "x-amz-date: $current_date_time" \
    -d "$tagging" -o "$TEST_FILE_FOLDER"/put_tagging_error.txt 2>&1)

@@ -96,7 +96,7 @@ upload_part_rest_without_upload_id() {
    log 2 "error uploading part $i: $result"
    return 1
  fi
  if ! check_rest_expected_error "$result" "$TEST_FILE_FOLDER/response.txt" "400" "InvalidArgument" "does not accept partNumber without uploadId"; then
  if ! check_rest_expected_error "$result" "$TEST_FILE_FOLDER/response.txt" "405" "MethodNotAllowed" "method is not allowed"; then
    log 2 "error checking error"
    return 1
  fi

@@ -110,7 +110,7 @@ test_multipart_upload_with_checksum() {
    log 2 "error calculating multipart checksum"
    return 1
  fi
  if ! complete_multipart_upload_with_checksum "$bucket_name" "$2" "$TEST_FILE_FOLDER/$2" "$upload_id" 2 "$3" "$4"; then
  if ! complete_multipart_upload_with_checksum "$1" "$2" "$TEST_FILE_FOLDER/$2" "$upload_id" 2 "$3" "$4"; then
    log 2 "error completing multipart upload"
    return 1
  fi
@@ -125,7 +125,7 @@ test_complete_multipart_upload_unneeded_algorithm_parameter() {
    log 2 "error performing multipart upload with checksum before completion"
    return 1
  fi
  if ! complete_multipart_upload_rest_nonexistent_param "$bucket_name" "$2" "$upload_id" "$parts_payload"; then
  if ! complete_multipart_upload_rest_nonexistent_param "$1" "$2" "$upload_id" "$parts_payload"; then
    log 2 "error completing multipart upload with nonexistent param"
    return 1
  fi
@@ -144,7 +144,7 @@ test_complete_multipart_upload_incorrect_checksum() {
    log 2 "error calculating multipart checksum"
    return 1
  fi
  if ! complete_multipart_upload_rest_incorrect_checksum "$bucket_name" "$2" "$upload_id" "$parts_payload" "$3" "$4" "$checksum"; then
  if ! complete_multipart_upload_rest_incorrect_checksum "$1" "$2" "$upload_id" "$parts_payload" "$3" "$4" "$checksum"; then
    log 2 "error completing multipart upload with nonexistent param"
    return 1
  fi
@@ -159,7 +159,7 @@ test_complete_multipart_upload_invalid_checksum() {
    log 2 "error performing multipart upload with checksum before completion"
    return 1
  fi
  if ! complete_multipart_upload_rest_invalid_checksum "$bucket_name" "$2" "$upload_id" "$parts_payload" "$3" "$4" "wrong"; then
  if ! complete_multipart_upload_rest_invalid_checksum "$1" "$2" "$upload_id" "$parts_payload" "$3" "$4" "wrong"; then
    log 2 "error completing multipart upload with nonexistent param"
    return 1
  fi

@@ -1,35 +0,0 @@
#!/usr/bin/env bash

# Copyright 2025 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

source ./tests/util/util_mc.sh
source ./tests/logger.sh

create_and_check_bucket_invalid_name() {
  if ! check_param_count_v2 "client" 1 $#; then
    return 1
  fi
  if ! create_bucket_invalid_name "$1"; then
    log 2 "error creating bucket with invalid name"
    return 1
  fi

  # shellcheck disable=SC2154
  if [[ "$bucket_create_error" != *"Invalid bucket name "* ]] && [[ "$bucket_create_error" != *"Bucket name cannot"* ]]; then
    log 2 "unexpected error: $bucket_create_error"
    return 1
  fi
  return 0
}
@@ -17,7 +17,6 @@
|
||||
source ./tests/drivers/delete_bucket/delete_bucket_rest.sh
|
||||
source ./tests/drivers/get_bucket_acl/get_bucket_acl_rest.sh
|
||||
source ./tests/drivers/get_object/get_object_rest.sh
|
||||
source ./tests/drivers/list_objects/list_objects_rest.sh
|
||||
source ./tests/drivers/put_bucket_acl/put_bucket_acl_rest.sh
|
||||
source ./tests/drivers/put_object/put_object_rest.sh
|
||||
source ./tests/drivers/user.sh
|
||||
@@ -135,7 +134,6 @@ get_bucket_prefix() {
|
||||
}
|
||||
|
||||
setup_bucket_v2() {
|
||||
log 6 "setup_bucket_v2 '$1'"
|
||||
if ! check_param_count_v2 "bucket prefix or name" 1 $#; then
|
||||
return 1
|
||||
fi
|
||||
@@ -143,7 +141,6 @@ setup_bucket_v2() {
|
||||
log 2 "error getting prefix: $prefix"
|
||||
return 1
|
||||
fi
|
||||
log 5 "bucket prefix: $prefix"
|
||||
if ! bucket_cleanup_if_bucket_exists_v2 "$prefix"; then
|
||||
log 2 "error cleaning up bucket(s), if it/they exist(s)"
|
||||
return 1
|
||||
@@ -158,41 +155,12 @@ setup_bucket_v2() {
|
||||
return 0
|
||||
}
|
||||
|
||||
# params: client, bucket name(s)
|
||||
# return 0 for success, 1 for failure
|
||||
setup_buckets() {
|
||||
if ! check_param_count_gt "minimum of 1 bucket name" 1 $#; then
|
||||
return 1
|
||||
fi
|
||||
for name in "$@"; do
|
||||
if ! setup_bucket "$name"; then
|
||||
log 2 "error setting up bucket $name"
|
||||
return 1
|
||||
fi
|
||||
done
|
||||
return 0
|
||||
}
|
||||
|
||||
setup_buckets_v2() {
|
||||
if ! check_param_count_gt "minimum of 1 bucket name" 1 $#; then
|
||||
return 1
|
||||
fi
|
||||
for name in "$@"; do
|
||||
if ! setup_bucket_v2 "$name"; then
|
||||
log 2 "error setting up bucket $name"
|
||||
return 1
|
||||
fi
|
||||
done
|
||||
return 0
|
||||
}
|
||||
|
||||
get_bucket_name() {
|
||||
if ! check_param_count_v2 "bucket" 1 $#; then
|
||||
return 1
|
||||
fi
|
||||
if [ "$RECREATE_BUCKETS" == "false" ]; then
|
||||
echo "$1"
|
||||
return 0
|
||||
fi
|
||||
echo "$1-$(date +%Y%m%d%H%M%S)"
|
||||
}
|
||||
@@ -226,6 +194,10 @@ setup_bucket_object_lock_enabled() {
    log 2 "error cleaning up bucket"
    return 1
  fi
  if [ "$DIRECT" == "true" ] && [ "$RECREATE_BUCKETS" == "true" ]; then
    log 2 "bucket not confirmed as deleted"
    return 1
  fi

  # in static bucket config, bucket will still exist
  if ! bucket_exists "$1"; then
@@ -234,77 +206,8 @@ setup_bucket_object_lock_enabled() {
      return 1
    fi
  fi
  return 0
}

send_curl_command_create_bucket_expect_error() {
  if ! check_param_count_gt "response code, error code, message, params" 4 $#; then
    return 1
  fi
  if ! send_curl_command_create_bucket_expect_error_callback "$1" "$2" "$3" "" "${@:4}"; then
    log 2 "error sending curl create bucket command"
    return 1
  fi
  return 0
}

send_curl_command_create_bucket_expect_error_callback() {
  if ! check_param_count_gt "response code, error code, message, callback, params" 5 $#; then
    return 1
  fi
  if ! bucket_name=$(get_bucket_name "$BUCKET_ONE_NAME" 2>&1); then
    log 2 "error getting bucket name from '$BUCKET_ONE_NAME': $bucket_name"
    return 1
  fi
  if ! send_rest_go_command_expect_error "$1" "$2" "$3" "-bucketName" "$bucket_name" "-commandType" "createBucket" "${@:5}"; then
    log 2 "error sending rest go command"
    return 1
  fi
  if [ "$4" != "" ] && ! "$4" "$TEST_FILE_FOLDER/result.txt"; then
    log 2 "callback error"
    return 1
  fi
  return 0
}

send_invalid_location_constraint_check_error() {
  if ! check_param_count_v2 "invalid param" 1 $#; then
    return 1
  fi
  invalid_location_constraint="$1"
  if ! send_curl_command_create_bucket_expect_error_callback "400" "InvalidLocationConstraint" "The specified location-constraint is not valid" \
    "check_location_constraint_param" "-locationConstraint" "$invalid_location_constraint"; then
    log 2 "error sending curl command and checking callback"
    return 1
  fi
  return 0
}

check_location_constraint_param() {
  if ! check_param_count_v2 "file" 1 $#; then
    return 1
  fi
  if ! check_error_parameter "$1" "LocationConstraint" "$invalid_location_constraint"; then
    log 2 "location constraint mismatch"
    return 1
  fi
  return 0
}
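
An illustrative negative test built on the pair above (the region string is a made-up
invalid value):

  if ! send_invalid_location_constraint_check_error "not-a-region"; then
    log 2 "invalid location constraint test failed"
    return 1
  fi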

create_bucket_and_run_command() {
  if ! check_param_count_gt "bucket, command, params" 2 $#; then
    return 1
  fi
  if ! bucket_name=$(get_bucket_name "$1" 2>&1); then
    log 2 "error creating bucket '$1': $bucket_name"
    return 1
  fi
  if ! setup_bucket "$bucket_name"; then
    log 2 "error setting up bucket"
    return 1
  fi
  if ! "$2" "$bucket_name" "${@:3}"; then
    log 2 "error running command on bucket"
    if [ "$DIRECT" == "true" ]; then
      log 2 "bucket not confirmed as created"
      return 1
    fi
    return 0

@@ -14,12 +14,8 @@
# specific language governing permissions and limitations
# under the License.

source ./tests/commands/get_bucket_location.sh
source ./tests/commands/list_buckets.sh
source ./tests/drivers/get_bucket_location/get_bucket_location_rest.sh
source ./tests/drivers/head_bucket/head_bucket_rest.sh
source ./tests/drivers/list_buckets/list_buckets_rest.sh
source ./tests/drivers/user.sh

delete_buckets_with_prefix() {
  if ! check_param_count_v2 "bucket prefix" 1 $#; then
@@ -133,12 +129,6 @@ delete_bucket_recursive() {
    return 1
  fi

  if ! location=$(get_bucket_location_rest "$1" "parse_bucket_location" 2>&1); then
    log 2 "error getting bucket location: $location"
    return 1
  fi
  log 5 "location: $location"

  if ! reset_bucket "$1"; then
    log 2 "error clearing bucket (s3api)"
    return 1

@@ -1,31 +0,0 @@
#!/usr/bin/env bats

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

delete_tags_and_verify_deletion() {
  if ! check_param_count_v2 "bucket name" 1 $#; then
    return 1
  fi
  if ! send_rest_go_command "204" \
    "-method" "DELETE" "-bucketName" "$1" "-query" "tagging="; then
    log 2 "error sending tag deletion command"
    return 1
  fi
  if ! verify_no_bucket_tags_rest "$1"; then
    log 2 "error verifying no bucket tags"
    return 1
  fi
  return 0
}
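
A hedged caller sketch (assumes BUCKET_ONE_NAME is set, as elsewhere in these tests):

  if ! delete_tags_and_verify_deletion "$BUCKET_ONE_NAME"; then
    log 2 "tag deletion round-trip failed"
    return 1
  fi
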
@@ -15,7 +15,6 @@
# under the License.

source ./tests/drivers/params.sh
source ./tests/drivers/put_bucket_ownership_controls/put_bucket_ownership_controls_rest.sh

setup_bucket_and_file() {
  if ! check_param_count_v2 "bucket, file name" 2 $#; then
@@ -88,7 +87,7 @@ setup_bucket_and_files_base() {
}

setup_bucket_and_large_file_base() {
-  if ! check_param_count_v2 "bucket, file name, function" 3 $#; then
+  if ! check_param_count "setup_bucket_and_large_file" "bucket, file name, function" 3 $#; then
    return 1
  fi
  if ! "$3" "$1"; then
@@ -103,10 +102,10 @@
}

setup_bucket_and_large_file() {
-  if ! check_param_count_v2 "bucket, file name" 2 $#; then
+  if ! check_param_count "setup_bucket_and_large_file" "bucket, file name" 2 $#; then
    return 1
  fi
-  if ! setup_bucket_and_large_file_base "$1" "$2" "setup_bucket"; then
+  if ! setup_bucket_and_large_file "$1" "$2" "setup_bucket"; then
    log 2 "error setting up bucket and large file"
    return 1
  fi
@@ -114,7 +113,7 @@ setup_bucket_and_large_file() {
}

setup_bucket_and_large_file_v2() {
-  if ! check_param_count_v2 "bucket, file name" 2 $#; then
+  if ! check_param_count "setup_bucket_and_large_file" "bucket, file name" 2 $#; then
    return 1
  fi
  if ! setup_bucket_and_large_file_base "$1" "$2" "setup_bucket_v2"; then
@@ -123,31 +122,3 @@ setup_bucket_and_large_file_v2() {
  fi
  return 0
}

chunked_upload_trailer_success() {
  if ! check_param_count_v2 "checksum" 1 $#; then
    return 1
  fi
  if ! bucket_name=$(get_bucket_name "$BUCKET_ONE_NAME" 2>&1); then
    log 2 "error getting bucket name: $bucket_name"
    return 1
  fi
  if ! setup_bucket "$bucket_name"; then
    log 2 "error setting up bucket"
    return 1
  fi
  test_file="test-file"
  if ! create_test_file "$test_file" 10000; then
    log 2 "error creating test file"
    return 1
  fi
  if ! put_object_chunked_trailer_success "$TEST_FILE_FOLDER/$test_file" "$bucket_name" "$test_file" "$1"; then
    log 2 "error performing chunked upload w/trailer"
    return 1
  fi
  if ! download_and_compare_file "$TEST_FILE_FOLDER/$test_file" "$bucket_name" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy"; then
    log 2 "error downloading and comparing file"
    return 1
  fi
  return 0
}
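
An illustrative invocation (the checksum argument's exact format here is an
assumption; the helper just forwards it to put_object_chunked_trailer_success):

  if ! chunked_upload_trailer_success "crc32"; then
    log 2 "chunked upload w/trailer test failed"
    return 1
  fi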

@@ -49,7 +49,7 @@ get_check_acl_after_first_put() {
    log 2 "'get_check_acl_after_first_put' requires client, bucket"
    return 1
  fi
-  if ! get_bucket_acl "$1" "$2"; then
+  if ! get_bucket_acl "$1" "$BUCKET_ONE_NAME"; then
    log 2 "error retrieving second ACL"
    return 1
  fi
@@ -74,7 +74,7 @@ get_check_acl_after_second_put() {
    log 2 "'get_check_acl_after_second_put' requires client, bucket"
    return 1
  fi
-  if ! get_bucket_acl "$1" "$2"; then
+  if ! get_bucket_acl "$1" "$BUCKET_ONE_NAME"; then
    log 2 "error retrieving third ACL"
    return 1
  fi

@@ -1,31 +0,0 @@
#!/usr/bin/env bash

# Copyright 2025 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

get_check_bucket_location_various() {
  if ! check_param_count_v2 "client, bucket" 2 $#; then
    return 1
  fi
  if ! get_bucket_location "$1" "$2"; then
    log 2 "error getting bucket location"
    return 1
  fi
  # shellcheck disable=SC2154
  if [[ $bucket_location != "null" ]] && [[ $bucket_location != "us-east-1" ]]; then
    log 2 "wrong location: '$bucket_location'"
    return 1
  fi
  return 0
}
@@ -1,61 +0,0 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

source ./tests/drivers/xml.sh

get_check_bucket_location() {
  if ! check_param_count_v2 "bucket name, expected value" 2 $#; then
    return 1
  fi
  expected_location="$2"
  if [ "$expected_location" == "us-east-1" ]; then
    expected_location=""
  fi
  if ! get_bucket_location_rest "$1" "check_location_constraint"; then
    log 2 "error getting and checking bucket location"
    return 1
  fi
  return 0
}

check_location_constraint() {
  if ! check_param_count_v2 "file" 1 $#; then
    return 1
  fi
  log 5 "location constraint: $(cat "$1")"
  if ! location_constraint=$(get_element_text "$1" "LocationConstraint" 2>&1); then
    log 2 "error getting location constraint: $location_constraint"
    return 1
  fi
  if [ "$location_constraint" != "$expected_location" ]; then
    log 2 "expected location constraint of '$expected_location', was '$location_constraint'"
    return 1
  fi
  return 0
}

parse_bucket_location() {
  if ! check_param_count_v2 "file" 1 $#; then
    return 1
  fi
  log 5 "data: $(cat "$1")"
  if ! location_constraint=$(get_element_text "$1" "LocationConstraint" 2>&1); then
    log 2 "error getting location constraint: $location_constraint"
    return 1
  fi
  echo "$location_constraint"
  return 0
}
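
A sketch of the callback pattern these helpers share: get_bucket_location_rest writes
the response body to a file and hands that file to the named callback, so callers can
either assert on it (check_location_constraint) or capture it (parse_bucket_location):

  # illustrative; mirrors the delete_bucket_recursive hunk above
  if ! location=$(get_bucket_location_rest "versity-bucket" "parse_bucket_location" 2>&1); then
    log 2 "error getting bucket location: $location"
    return 1
  fi
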
@@ -1,44 +0,0 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

check_bucket_tags_empty() {
  if ! check_param_count_v2 "command type, bucket" 2 $#; then
    return 2
  fi
  if ! get_bucket_tagging "$1" "$2"; then
    log 2 "failed to get tags"
    return 2
  fi
  check_tags_empty "$1" || local check_result=$?
  # shellcheck disable=SC2086
  return $check_result
}

verify_no_bucket_tags() {
  if ! check_param_count_v2 "command type, bucket" 2 $#; then
    return 1
  fi
  if ! get_bucket_tagging "$1" "$2"; then
    log 2 "error retrieving bucket tagging"
    return 1
  fi
  # shellcheck disable=SC2154
  if [[ "$tags" != "" ]]; then
    log 2 "tags should be empty, but are: $tags"
    return 1
  fi
  return 0
}
@@ -1,119 +0,0 @@
#!/usr/bin/env bats

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# params: username, password, bucket, expected key, expected value
# return 0 for success, 1 for failure
get_and_check_bucket_tags_with_user() {
  log 6 "get_and_check_bucket_tags_with_user"
  if ! check_param_count_v2 "username, password, bucket, expected key, expected value" 5 $#; then
    return 1
  fi
  if ! get_bucket_tagging_with_user "$1" "$2" "$3"; then
    log 2 "error retrieving bucket tagging"
    return 1
  fi
  # shellcheck disable=SC2154
  log 5 "TAGS: $tags"
  if ! tag=$(echo "$tags" | jq -r ".TagSet[0]" 2>&1); then
    log 2 "error getting tag: $tag"
    return 1
  fi
  if ! key=$(echo "$tag" | jq -r ".Key" 2>&1); then
    log 2 "error getting key: $key"
    return 1
  fi
  if [ "$key" != "$4" ]; then
    log 2 "key mismatch ($key, $4)"
    return 1
  fi
  if ! value=$(echo "$tag" | jq -r ".Value" 2>&1); then
    log 2 "error getting value: $value"
    return 1
  fi
  if [ "$value" != "$5" ]; then
    log 2 "value mismatch ($value, $5)"
    return 1
  fi
  return 0
}

# params: bucket, expected tag key, expected tag value
# fail on error
get_and_check_bucket_tags() {
  if ! check_param_count_v2 "bucket, expected key, expected value" 3 $#; then
    return 1
  fi
  if ! get_and_check_bucket_tags_with_user "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$1" "$2" "$3"; then
    log 2 "error getting and checking bucket tags with user"
    return 1
  fi
  return 0
}

add_verify_bucket_tags_rest() {
  if ! check_param_count_v2 "bucket, expected key, expected value" 3 $#; then
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" TAG_KEY="$2" TAG_VALUE="$3" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/put_bucket_tagging.sh); then
    log 2 "error putting bucket tags: $result"
    return 1
  fi
  if [ "$result" != "204" ]; then
    log 2 "expected response code of '204', was '$result' (error: $(cat "$TEST_FILE_FOLDER/result.txt"))"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/bucket_tagging.txt" ./tests/rest_scripts/get_bucket_tagging.sh); then
    log 2 "error listing bucket tags: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    log 2 "expected response code of '200', was '$result' (error: $(cat "$TEST_FILE_FOLDER/bucket_tagging.txt"))"
    return 1
  fi
  log 5 "tags: $(cat "$TEST_FILE_FOLDER/bucket_tagging.txt")"
  if ! key=$(xmllint --xpath '//*[local-name()="Key"]/text()' "$TEST_FILE_FOLDER/bucket_tagging.txt" 2>&1); then
    log 2 "error retrieving key: $key"
    return 1
  fi
  if [ "$key" != "$2" ]; then
    log 2 "key mismatch (expected '$2', actual '$key')"
    return 1
  fi
  if ! value=$(xmllint --xpath '//*[local-name()="Value"]/text()' "$TEST_FILE_FOLDER/bucket_tagging.txt" 2>&1); then
    log 2 "error retrieving value: $value"
    return 1
  fi
  if [ "$value" != "$3" ]; then
    log 2 "value mismatch (expected '$3', actual '$value')"
    return 1
  fi
  return 0
}
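
An illustrative round-trip through the helper above (the tag key and value are
arbitrary example strings):

  if ! add_verify_bucket_tags_rest "$BUCKET_ONE_NAME" "environment" "test"; then
    log 2 "bucket tag round-trip failed"
    return 1
  fi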

verify_no_bucket_tags_rest() {
  if ! check_param_count_v2 "bucket" 1 $#; then
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/bucket_tagging.txt" ./tests/rest_scripts/get_bucket_tagging.sh); then
    log 2 "error listing bucket tags: $result"
    return 1
  fi
  if [ "$result" != "404" ]; then
    log 2 "expected response code of '404', was '$result' (error: $(cat "$TEST_FILE_FOLDER/bucket_tagging.txt"))"
    return 1
  fi
  return 0
}
@@ -1,126 +0,0 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

source ./tests/commands/get_object_tagging.sh
source ./tests/drivers/get_object_tagging/get_object_tagging_rest.sh
source ./tests/drivers/get_object_tagging/get_object_tagging_s3api.sh
source ./tests/drivers/tags.sh

get_and_verify_object_tags() {
  if ! check_param_count_v2 "command type, bucket, key, tag key, tag value" 5 $#; then
    return 1
  fi
  get_object_tagging "$1" "$2" "$3" || get_result=$?
  if [[ $get_result -ne 0 ]]; then
    log 2 "failed to get tags"
    return 1
  fi
  if [[ $1 == 'aws' ]]; then
    tag_set_key=$(echo "$tags" | jq '.TagSet[0].Key')
    tag_set_value=$(echo "$tags" | jq '.TagSet[0].Value')
    if [[ $tag_set_key != '"'$4'"' ]]; then
      log 2 "Key mismatch ($tag_set_key, \"$4\")"
      return 1
    fi
    if [[ $tag_set_value != '"'$5'"' ]]; then
      log 2 "Value mismatch ($tag_set_value, \"$5\")"
      return 1
    fi
  else
    read -r tag_set_key tag_set_value <<< "$(echo "$tags" | awk 'NR==2 {print $1, $3}')"
    [[ $tag_set_key == "$4" ]] || fail "Key mismatch"
    [[ $tag_set_value == "$5" ]] || fail "Value mismatch"
  fi
  return 0
}

verify_no_object_tags() {
  if ! check_param_count_v2 "command type, bucket, key" 3 $#; then
    return 1
  fi
  result=0
  get_object_tagging "$1" "$2" "$3" || result=$?
  if [ $result == 1 ]; then
    if [ "$1" == 'rest' ]; then
      return 0
    fi
    log 2 "error getting object tagging"
    return 1
  fi
  if [[ "$1" == 'aws' ]] || [ "$1" == 's3api' ]; then
    if ! tag_set=$(echo "$tags" | jq '.TagSet' 2>&1); then
      log 2 "error getting tag set: $tag_set"
      return 1
    fi
    if [[ $tag_set != "[]" ]] && [[ $tag_set != "" ]]; then
      log 2 "tags not empty ($tag_set)"
      return 1
    fi
  elif [[ $tags != *"No tags found"* ]] && [[ $tags != "" ]]; then
    log 2 "tags not empty (tags: $tags)"
    return 1
  fi
  return 0
}

check_verify_object_tags() {
  if ! check_param_count_v2 "command type, bucket, key, tag key, tag value" 5 $#; then
    return 1
  fi
  if ! get_object_tagging "$1" "$2" "$3"; then
    log 2 "error getting object tags"
    return 1
  fi
  if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
    if ! parse_object_tags_s3api; then
      log 2 "error parsing object tags"
      return 1
    fi
  elif [ "$1" == 'rest' ]; then
    if ! parse_object_tags_rest "$TEST_FILE_FOLDER/object_tags.txt"; then
      log 2 "error parsing object tags"
      return 1
    fi
  elif [[ $1 == 'mc' ]]; then
    read -r tag_set_key tag_set_value <<< "$(echo "$tags" | awk 'NR==2 {print $1, $3}')"
  else
    log 2 "unrecognized client for check_verify_object_tags: $1"
    return 1
  fi
  if [[ $tag_set_key != "$4" ]]; then
    log 2 "Key mismatch ($tag_set_key, $4)"
    return 1
  fi
  if [[ $tag_set_value != "$5" ]]; then
    log 2 "Value mismatch ($tag_set_value, $5)"
    return 1
  fi
  return 0
}

check_object_tags_empty() {
  if ! check_param_count_v2 "command type, bucket, key" 3 $#; then
    return 2
  fi
  if ! get_object_tagging "$1" "$2" "$3"; then
    log 2 "failed to get tags"
    return 2
  fi
  check_tags_empty "$1" || local check_result=$?
  # shellcheck disable=SC2086
  return $check_result
}
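
A usage sketch tying the object-tag checks together (client, bucket, and key values
are placeholders):

  if ! get_and_verify_object_tags "s3api" "versity-bucket" "test-file" "key1" "value1"; then
    log 2 "object tag verification failed"
    return 1
  fi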

@@ -1,142 +0,0 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

source ./tests/drivers/put_object_tagging/put_object_tagging_rest.sh

parse_object_tags_rest() {
  if ! check_param_count_v2 "data file" 1 $#; then
    return 1
  fi
  log 5 "object tags: $(cat "$1")"
  if ! tag_set_key=$(get_element_text "$1" "Tagging" "TagSet" "Tag" "Key" 2>&1); then
    log 2 "error getting key: $tag_set_key"
    return 1
  fi
  if ! tag_set_value=$(get_element_text "$1" "Tagging" "TagSet" "Tag" "Value" 2>&1); then
    log 2 "error getting value: $tag_set_value"
    return 1
  fi
  return 0
}

get_check_object_tags_single_set_go() {
  if ! check_param_count_gt "bucket, key, expected tag key, expected tag value, params" 4 $#; then
    return 1
  fi
  expected_key="$3"
  expected_value="$4"
  if ! send_rest_go_command_callback "200" "check_object_tags_single_set" "-bucketName" "$1" "-objectKey" "$2" "-method" "GET" \
    "-query" "tagging=" "${@:5}"; then
    log 2 "error sending go command or callback error"
    return 1
  fi
}

check_object_tags_single_set() {
  if ! check_param_count_v2 "data file" 1 $#; then
    return 1
  fi
  if ! parse_object_tags_rest "$1"; then
    log 2 "error parsing object tags"
    return 1
  fi
  if [ "$tag_set_key" != "$expected_key" ]; then
    log 2 "key mismatch, expected '$expected_key', was '$tag_set_key'"
    return 1
  fi
  if [ "$tag_set_value" != "$expected_value" ]; then
    log 2 "value mismatch, expected '$expected_value', was '$tag_set_value'"
    return 1
  fi
  return 0
}

check_for_empty_tagset() {
  if ! check_param_count_v2 "data file" 1 $#; then
    return 1
  fi
  if ! check_for_empty_element "$1" "Tagging" "TagSet"; then
    log 2 "error checking for empty XML element"
    return 1
  fi
  return 0
}

get_check_object_tags_empty() {
  if ! check_param_count_v2 "bucket name, key" 2 $#; then
    return 1
  fi
  if ! send_rest_go_command_callback "200" "check_for_empty_tagset" "-bucketName" "$1" "-objectKey" "$2" \
    "-method" "GET" "-query" "tagging="; then
    log 2 "error sending get object tagging command"
    return 1
  fi
  return 0
}

check_header_version_id() {
  if ! check_param_count_v2 "data file" 1 $#; then
    return 1
  fi
  if ! check_for_header_key_and_value "$1" "x-amz-version-id" "$version_id"; then
    log 2 "error checking for x-amz-version-id header"
    return 1
  fi
  return 0
}

add_version_tags_check_version_id() {
  if ! check_param_count_v2 "bucket name, key" 2 $#; then
    return 1
  fi
  if ! tag_old_version "$1" "$2"; then
    log 2 "error tagging old version"
    return 1
  fi
  if ! send_rest_go_command_callback "200" "check_header_version_id" "-bucketName" "$1" "-objectKey" "$2" "-debug" "-logFile" "signature.log" \
    "-method" "GET" "-query" "tagging=&versionId=$version_id" "-tagKey" "key" "-tagValue" "value" "-contentMD5"; then
    log 2 "error tagging object"
    return 1
  fi
  return 0
}

check_invalid_version_id_error() {
  if ! check_param_count_v2 "data file" 1 $#; then
    return 1
  fi
  if ! check_error_parameter "$1" "ArgumentName" "versionId"; then
    return 1
  fi
  if ! check_error_parameter "$1" "ArgumentValue" "$invalid_version_id"; then
    return 1
  fi
  return 0
}

get_object_tagging_invalid_version_id() {
  if ! check_param_count_v2 "bucket name, key" 2 $#; then
    return 1
  fi
  invalid_version_id="$2"
  if ! send_rest_go_command_expect_error_callback "400" "InvalidArgument" "Invalid version id specified" "check_invalid_version_id_error" \
    "-bucketName" "$1" "-objectKey" "$2" "-debug" "-logFile" "signature.log" \
    "-method" "GET" "-query" "tagging=&versionId=$invalid_version_id" "-tagKey" "key" "-tagValue" "value" "-contentMD5"; then
    log 2 "error tagging object"
    return 1
  fi
  return 0
}
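
An illustrative negative test (note the helper reuses "$2" as both the object key and
the invalid version id):

  if ! get_object_tagging_invalid_version_id "versity-bucket" "not-a-real-version-id"; then
    log 2 "invalid version id test failed"
    return 1
  fi
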
@@ -1,33 +0,0 @@
#!/usr/bin/env bash

# Copyright 2025 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

bucket_exists() {
  if ! check_param_count "bucket_exists" "bucket name" 1 $#; then
    return 2
  fi
  local exists=0
  head_bucket "rest" "$1" || exists=$?
  log 5 "bucket exists response code: $exists"
  # shellcheck disable=SC2181
  if [ $exists -eq 2 ]; then
    log 2 "unexpected error checking if bucket exists"
    return 2
  fi
  if [ $exists -eq 0 ]; then
    return 0
  fi
  return 1
}
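
The three-way return (0 exists, 1 missing, 2 unexpected error) is typically consumed
like this (sketch):

  exists=0
  bucket_exists "versity-bucket" || exists=$?
  if [ $exists -eq 2 ]; then
    log 2 "unexpected error checking bucket"
  elif [ $exists -eq 1 ]; then
    log 5 "bucket does not exist"
  fi
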
@@ -1,53 +0,0 @@
#!/usr/bin/env bash

# Copyright 2025 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

bucket_is_accessible() {
  if ! check_param_count "bucket_is_accessible" "bucket" 1 $#; then
    return 2
  fi
  local exit_code=0
  local error
  error=$(aws --no-verify-ssl s3api head-bucket --bucket "$1" 2>&1) || exit_code="$?"
  if [ $exit_code -eq 0 ]; then
    return 0
  fi
  if [[ "$error" == *"500"* ]]; then
    return 1
  fi
  log 2 "Error checking bucket accessibility: $error"
  return 2
}

check_for_empty_region() {
  if ! check_param_count "check_for_empty_region" "bucket" 1 $#; then
    return 2
  fi
  if ! head_bucket "s3api" "$BUCKET_ONE_NAME"; then
    log 2 "error getting bucket info"
    return 1
  fi
  # shellcheck disable=SC2154
  log 5 "INFO: $bucket_info"
  if ! region=$(echo "$bucket_info" | grep -v "InsecureRequestWarning" | jq -r ".BucketRegion" 2>&1); then
    log 2 "error getting region: $region"
    return 1
  fi
  if [[ $region == "" ]]; then
    log 2 "empty bucket region"
    return 1
  fi
  return 0
}
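
A sketch of how these checks compose in a test (return code 2 distinguishes hard
errors from a plain "not accessible"):

  if ! bucket_is_accessible "$BUCKET_ONE_NAME"; then
    log 2 "bucket is not accessible"
    return 1
  fi
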
Some files were not shown because too many files have changed in this diff.