Mirror of https://github.com/versity/versitygw.git (synced 2026-01-30 06:52:04 +00:00)

Compare commits: 125 commits
Commits in this comparison (SHA1):

3c2b4c6452, 11bd58c39e, 30d8474b17, 3a65521b48, 5ac5705b72,
1d0a1d8261, ac0884a1dc, 3c3e9dd8b1, 874e838dcc, 8a43d1cd18,
4740372ce2, 5226f0dc61, 9f54a25519, b629f5d707, 371dccfde9,
05f8225577, 8466d06371, eae11b44c5, 12bfd4220b, fc03472d60,
971ae7845d, 8bb4bcba63, 72a4e40038, a64733bfbe, 743cb03808,
5c3cef65e2, 8bb34b3b6e, 77459720ba, 59312f880f, fe9384164c,
8d2eeebce3, c06463424a, efe4ccb5ec, a6e8752b33, c3c39e4022,
9a01185be9, 7744dacced, 4345420e12, d05f25f277, d174819eac,
9bde1ddb3a, 1c488422bc, 8a733b8cbf, a93cf3f403, 326de3b010,
2a51b0cc70, 8c3e49d0bb, 559d636846, 045bdec60c, ee67b41a98,
ff973c279f, adbf8e138c, 12f4920c8d, d63b5818f1, dff3eb0887,
69a3483269, d256ea5929, ebf7a030cc, 2bf4ccd244, 7e44a5e703,
b5bea5b659, 2a4a0f000a, 2dd442c24d, 27dc84b5fd, 932f1c9da7,
24679a82ac, fa2023c18f, bfe090df38, 9c6a09260a, 6ea9950ead,
5bc6852f2c, d39685947d, af550c8f80, d15d348226, 66e29d4aa4,
cdc4358257, ce6193b191, 8bb22debad, 64f50cc504, e2534afafe,
341d51107c, 707af47769, 40da4a31d3, 874165cdcf, e750cf9718,
c158dfeb0d, 4c3965d87e, 5c084b8452, 78cf20075f, a4dc837f54,
a7d83b42fd, 54bd4ec841, 9ae68076c1, 13fdbaf35a, 45f55c2283,
7aa733ae9e, bef297f6ad, 25cde72fa3, 48f438b1e0, a606e57bbd,
53dea3bb0d, 703c7cdc8b, 62ca9b6ff3, 1ec629c38d, 5ef61af6f3,
fe660d5b9c, 068b04ec62, 54e2c39df1, caa7ca0f90, dac2460eb3,
d3c56dbfc1, 6cf3b93a83, df74e7fde6, 6a34f3a848, 6b64783db7,
b5b823c47b, a057a254c1, f435880fe8, ebdda06633, 221592fbab,
8c1327d1e8, 70ebe00f7c, ca6a92bb84, 51e54874a8, 6176d9eb46
.github/workflows/docker-bats.yml (vendored, 1 change)
@@ -14,7 +14,6 @@ jobs:
        run: |
          cp tests/.env.docker.default tests/.env.docker
          cp tests/.secrets.default tests/.secrets
          # see https://github.com/versity/versitygw/issues/1034
          docker build \
            --build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
            --build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" \
.github/workflows/system.yml (vendored, 45 changes)
@@ -39,6 +39,36 @@ jobs:
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
+        - set: "REST, posix, static, base|acl|multipart|put-object, folder IAM"
+          IAM_TYPE: folder
+          RUN_SET: "rest-base,rest-acl,rest-multipart,rest-put-object"
+          RECREATE_BUCKETS: "false"
+          DELETE_BUCKETS_AFTER_TEST: "false"
+          BACKEND: "posix"
+        - set: "REST, posix, static, chunked|checksum|versioning|bucket, folder IAM"
+          IAM_TYPE: folder
+          RUN_SET: "rest-chunked,rest-checksum,rest-versioning,rest-bucket,rest-list-buckets,rest-create-bucket,rest-head-bucket"
+          RECREATE_BUCKETS: "false"
+          DELETE_BUCKETS_AFTER_TEST: "false"
+          BACKEND: "posix"
+        - set: "REST, posix, static, not implemented|rest-delete-bucket-ownership-controls|rest-delete-bucket-tagging, folder IAM"
+          IAM_TYPE: folder
+          RUN_SET: "rest-not-implemented,rest-delete-bucket-ownership-controls,rest-delete-bucket-tagging"
+          RECREATE_BUCKETS: "false"
+          DELETE_BUCKETS_AFTER_TEST: "false"
+          BACKEND: "posix"
+        - set: "REST, posix, static, rest-put-bucket-tagging, folder IAM"
+          IAM_TYPE: folder
+          RUN_SET: "rest-put-bucket-tagging"
+          RECREATE_BUCKETS: "false"
+          DELETE_BUCKETS_AFTER_TEST: "false"
+          BACKEND: "posix"
+        - set: "REST, posix, non-static, rest-put-bucket-tagging, folder IAM"
+          IAM_TYPE: folder
+          RUN_SET: "rest-put-bucket-tagging"
+          RECREATE_BUCKETS: "true"
+          DELETE_BUCKETS_AFTER_TEST: "true"
+          BACKEND: "posix"
         - set: "s3, posix, non-file count, non-static, folder IAM"
           IAM_TYPE: folder
           RUN_SET: "s3-non-file-count"

@@ -128,6 +158,12 @@ jobs:
           RECREATE_BUCKETS: "true"
           DELETE_BUCKETS_AFTER_TEST: "true"
           BACKEND: "posix"
+        - set: "setup/remove static buckets scripts"
+          IAM_TYPE: folder
+          RUN_SET: "setup-remove-static"
+          RECREATE_BUCKETS: "true"
+          DELETE_BUCKETS_AFTER_TEST: "true"
+          BACKEND: "posix"
     steps:
       - name: Check out code into the Go module directory
         uses: actions/checkout@v4

@@ -195,9 +231,9 @@ jobs:
       MC_ALIAS: versity
       LOG_LEVEL: 4
       GOCOVERDIR: ${{ github.workspace }}/cover
-      USERNAME_ONE: ABCDEFG
+      USERNAME_ONE: HIJKLMN
       PASSWORD_ONE: 1234567
-      USERNAME_TWO: HIJKLMN
+      USERNAME_TWO: OPQRSTU
       PASSWORD_TWO: 8901234
       TEST_FILE_FOLDER: ${{ github.workspace }}/versity-gwtest-files
       REMOVE_TEST_FILE_FOLDER: true

@@ -229,7 +265,10 @@ jobs:
           BYPASS_ENV_FILE=true ${{ github.workspace }}/tests/run.sh $RUN_SET

       - name: Time report
-        run: cat ${{ github.workspace }}/time.log
+        run: |
+          if [ -e ${{ github.workspace }}/time.log ]; then
+            cat ${{ github.workspace }}/time.log
+          fi

       - name: Coverage report
         run: |
Dockerfile

@@ -23,13 +23,16 @@ RUN go build -ldflags "-X=main.Build=${BUILD} -X=main.BuildTime=${TIME} -X=main.

 FROM alpine:latest

-# These arguments can be overriden when building the image
+# These arguments can be overridden when building the image
-ARG IAM_DIR=/tmp/vgw
+ARG SETUP_DIR=/tmp/vgw

-RUN mkdir -p $IAM_DIR
+RUN mkdir -p $SETUP_DIR

-COPY --from=0 /app/cmd/versitygw/versitygw /app/versitygw
+COPY --from=0 /app/cmd/versitygw/versitygw /usr/local/bin/versitygw

-ENTRYPOINT [ "/app/versitygw" ]
+COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
+RUN chmod +x /usr/local/bin/docker-entrypoint.sh
+
+ENTRYPOINT [ "/usr/local/bin/docker-entrypoint.sh" ]
README.md (23 changes)
@@ -70,6 +70,29 @@ versitygw [global options] command [command options] [arguments...]
 ```
 The [global options](https://github.com/versity/versitygw/wiki/Global-Options) are specified before the backend type and the backend options are specified after.

+### Run the gateway in Docker
+
+Use the published image like the native binary by passing CLI arguments:
+
+```bash
+docker run --rm versity/versitygw:latest --version
+```
+
+When no command arguments are supplied, the container looks for `VGW_BACKEND` and optional `VGW_BACKEND_ARG`/`VGW_BACKEND_ARGS` environment variables to determine which backend to start. Backend-specific configuration continues to come from the existing environment flags (for example `ROOT_ACCESS_KEY`, `VGW_PORT`, and others).
+
+```bash
+docker run --rm \
+  -e ROOT_ACCESS_KEY=testuser \
+  -e ROOT_SECRET_KEY=secret \
+  -e VGW_BACKEND=posix \
+  -e VGW_BACKEND_ARG=/data \
+  -p 10000:7070 \
+  -v $(pwd)/data:/data \
+  versity/versitygw:latest
+```
+
+If you need to pass additional CLI options, set `VGW_ARGS` with a space-delimited list, or continue passing arguments directly to `docker run`.
+
 ***

 #### Versity gives you clarity and control over your archival storage, so you can allocate more resources to your core mission.
auth/acl.go (12 changes)
@@ -25,6 +25,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/versity/versitygw/backend"
+	"github.com/versity/versitygw/debuglogger"
 	"github.com/versity/versitygw/s3err"
 )

@@ -493,3 +494,14 @@ func UpdateBucketACLOwner(ctx context.Context, be backend.Backend, bucket, newOw

 	return be.DeleteBucketPolicy(ctx, bucket)
 }
+
+// ValidateCannedACL validates bucket canned acl value
+func ValidateCannedACL(acl string) error {
+	switch types.BucketCannedACL(acl) {
+	case types.BucketCannedACLPrivate, types.BucketCannedACLPublicRead, types.BucketCannedACLPublicReadWrite, "":
+		return nil
+	default:
+		debuglogger.Logf("invalid bucket canned acl: %v", acl)
+		return s3err.GetAPIError(s3err.ErrInvalidArgument)
+	}
+}
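For reference, a minimal sketch of how the new ValidateCannedACL helper behaves at a call site; the values below are illustrative, not taken from the gateway code:

package main

import (
	"fmt"

	"github.com/versity/versitygw/auth"
)

func main() {
	// "private" is one of the accepted canned ACLs above, so this returns nil.
	if err := auth.ValidateCannedACL("private"); err != nil {
		fmt.Println("unexpected error:", err)
	}

	// "authenticated-read" is not in the accepted set, so the helper logs
	// the value and returns the InvalidArgument API error.
	if err := auth.ValidateCannedACL("authenticated-read"); err != nil {
		fmt.Println("rejected:", err)
	}
}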
@@ -40,14 +40,17 @@ const (
 	policyErrInvalidFirstChar     = policyErr("Policies must be valid JSON and the first byte must be '{'")
 	policyErrEmptyStatement       = policyErr("Could not parse the policy: Statement is empty!")
 	policyErrMissingStatmentField = policyErr("Missing required field Statement")
+	policyErrInvalidVersion       = policyErr("The policy must contain a valid version string")
 )

 type BucketPolicy struct {
 	Version   PolicyVersion      `json:"Version"`
 	Statement []BucketPolicyItem `json:"Statement"`
 }

 func (bp *BucketPolicy) UnmarshalJSON(data []byte) error {
 	var tmp struct {
 		Version   *PolicyVersion
 		Statement *[]BucketPolicyItem `json:"Statement"`
 	}

@@ -60,12 +63,22 @@ func (bp *BucketPolicy) UnmarshalJSON(data []byte) error {
 		return policyErrMissingStatmentField
 	}

 	// Assign the parsed value to the actual struct
+	if tmp.Version == nil {
+		// bucket policy version should default to '2008-10-17'
+		bp.Version = PolicyVersion2008
+	} else {
+		bp.Version = *tmp.Version
+	}
+
 	bp.Statement = *tmp.Statement
 	return nil
 }

 func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
+	if !bp.Version.isValid() {
+		return policyErrInvalidVersion
+	}
+
 	for _, statement := range bp.Statement {
 		err := statement.Validate(bucket, iam)
 		if err != nil {
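A small illustrative snippet (not repository code) of the defaulting behavior the reworked UnmarshalJSON introduces. It assumes empty-Statement validation happens later, in Validate, so the unmarshal itself succeeds and only the Version defaulting path is exercised:

package auth

import (
	"encoding/json"
	"fmt"
)

// demoVersionDefault shows that a policy document without a "Version"
// field picks up the 2008-10-17 default instead of being rejected.
func demoVersionDefault() {
	var bp BucketPolicy
	err := json.Unmarshal([]byte(`{"Statement": []}`), &bp)
	fmt.Println(bp.Version, err) // expected: 2008-10-17 <nil>
}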
@@ -38,15 +38,20 @@ const (
 	GetObjectAction                  Action = "s3:GetObject"
 	GetObjectVersionAction           Action = "s3:GetObjectVersion"
 	DeleteObjectAction               Action = "s3:DeleteObject"
+	DeleteObjectVersionAction        Action = "s3:DeleteObjectVersion"
 	GetObjectAclAction               Action = "s3:GetObjectAcl"
 	GetObjectAttributesAction        Action = "s3:GetObjectAttributes"
+	GetObjectVersionAttributesAction Action = "s3:GetObjectVersionAttributes"
 	PutObjectAclAction               Action = "s3:PutObjectAcl"
 	RestoreObjectAction              Action = "s3:RestoreObject"
 	GetBucketTaggingAction           Action = "s3:GetBucketTagging"
 	PutBucketTaggingAction           Action = "s3:PutBucketTagging"
 	GetObjectTaggingAction           Action = "s3:GetObjectTagging"
+	GetObjectVersionTaggingAction    Action = "s3:GetObjectVersionTagging"
 	PutObjectTaggingAction           Action = "s3:PutObjectTagging"
+	PutObjectVersionTaggingAction    Action = "s3:PutObjectVersionTagging"
 	DeleteObjectTaggingAction        Action = "s3:DeleteObjectTagging"
+	DeleteObjectVersionTaggingAction Action = "s3:DeleteObjectVersionTagging"
 	ListBucketVersionsAction         Action = "s3:ListBucketVersions"
 	ListBucketAction                 Action = "s3:ListBucket"
 	GetBucketObjectLockConfigurationAction Action = "s3:GetBucketObjectLockConfiguration"

@@ -109,15 +114,20 @@ var supportedActionList = map[Action]struct{}{
 	GetObjectAction:                  {},
 	GetObjectVersionAction:           {},
 	DeleteObjectAction:               {},
+	DeleteObjectVersionAction:        {},
 	GetObjectAclAction:               {},
 	GetObjectAttributesAction:        {},
+	GetObjectVersionAttributesAction: {},
 	PutObjectAclAction:               {},
 	RestoreObjectAction:              {},
 	GetBucketTaggingAction:           {},
 	PutBucketTaggingAction:           {},
 	GetObjectTaggingAction:           {},
+	GetObjectVersionTaggingAction:    {},
 	PutObjectTaggingAction:           {},
+	PutObjectVersionTaggingAction:    {},
 	DeleteObjectTaggingAction:        {},
+	DeleteObjectVersionTaggingAction: {},
 	ListBucketVersionsAction:         {},
 	ListBucketAction:                 {},
 	GetBucketObjectLockConfigurationAction: {},

@@ -163,25 +173,30 @@ var supportedActionList = map[Action]struct{}{
 }

 var supportedObjectActionList = map[Action]struct{}{
-	AbortMultipartUploadAction:      {},
-	ListMultipartUploadPartsAction:  {},
-	PutObjectAction:                 {},
-	GetObjectAction:                 {},
-	GetObjectVersionAction:          {},
-	DeleteObjectAction:              {},
-	GetObjectAclAction:              {},
-	GetObjectAttributesAction:       {},
-	PutObjectAclAction:              {},
-	RestoreObjectAction:             {},
-	GetObjectTaggingAction:          {},
-	PutObjectTaggingAction:          {},
-	DeleteObjectTaggingAction:       {},
-	GetObjectLegalHoldAction:        {},
-	PutObjectLegalHoldAction:        {},
-	GetObjectRetentionAction:        {},
-	PutObjectRetentionAction:        {},
-	BypassGovernanceRetentionAction: {},
-	AllActions:                      {},
+	AbortMultipartUploadAction:       {},
+	ListMultipartUploadPartsAction:   {},
+	PutObjectAction:                  {},
+	GetObjectAction:                  {},
+	GetObjectVersionAction:           {},
+	DeleteObjectAction:               {},
+	DeleteObjectVersionAction:        {},
+	GetObjectAclAction:               {},
+	GetObjectAttributesAction:        {},
+	GetObjectVersionAttributesAction: {},
+	PutObjectAclAction:               {},
+	RestoreObjectAction:              {},
+	GetObjectTaggingAction:           {},
+	GetObjectVersionTaggingAction:    {},
+	PutObjectTaggingAction:           {},
+	PutObjectVersionTaggingAction:    {},
+	DeleteObjectTaggingAction:        {},
+	DeleteObjectVersionTaggingAction: {},
+	GetObjectLegalHoldAction:         {},
+	PutObjectLegalHoldAction:         {},
+	GetObjectRetentionAction:         {},
+	PutObjectRetentionAction:         {},
+	BypassGovernanceRetentionAction:  {},
+	AllActions:                       {},
 }

 // Validates Action: it should either wildcard match with supported actions list or be in it
@@ -12,14 +12,21 @@
 // specific language governing permissions and limitations
 // under the License.

-package scoutfs
+package auth

-type stat struct {
-	Meta_seq       uint64
-	Data_seq       uint64
-	Data_version   uint64
-	Online_blocks  uint64
-	Offline_blocks uint64
-	Crtime_sec     uint64
-	Crtime_nsec    uint32
+type PolicyVersion string
+
+const (
+	PolicyVersion2008 PolicyVersion = "2008-10-17"
+	PolicyVersion2012 PolicyVersion = "2012-10-17"
+)
+
+// isValid checks if the policy version is valid or not
+func (pv PolicyVersion) isValid() bool {
+	switch pv {
+	case PolicyVersion2008, PolicyVersion2012:
+		return true
+	default:
+		return false
+	}
+}
auth/bucket_policy_version_test.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+// Copyright 2023 Versity Software
+// This file is licensed under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// Copyright 2023 Versity Software
+// This file is licensed under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package auth
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestPolicyVersion_isValid(t *testing.T) {
+	tests := []struct {
+		name  string // description of this test case
+		value string
+		want  bool
+	}{
+		{"valid 2008", "2008-10-17", true},
+		{"valid 2012", "2012-10-17", true},
+		{"invalid empty", "", false},
+		{"invalid 1", "invalid", false},
+		{"invalid 2", "2010-10-17", false},
+		{"invalid 3", "2006-00-12", false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := PolicyVersion(tt.value).isValid()
+			assert.Equal(t, tt.want, got)
+		})
+	}
+}
auth/iam.go (106 changes)
@@ -45,11 +45,12 @@ func (r Role) IsValid() bool {

 // Account is a gateway IAM account
 type Account struct {
-	Access  string `json:"access"`
-	Secret  string `json:"secret"`
-	Role    Role   `json:"role"`
-	UserID  int    `json:"userID"`
-	GroupID int    `json:"groupID"`
+	Access    string `json:"access"`
+	Secret    string `json:"secret"`
+	Role      Role   `json:"role"`
+	UserID    int    `json:"userID"`
+	GroupID   int    `json:"groupID"`
+	ProjectID int    `json:"projectID"`
 }

 type ListUserAccountsResult struct {

@@ -58,10 +59,11 @@ type ListUserAccountsResult struct {

 // Mutable props, which could be changed when updating an IAM account
 type MutableProps struct {
-	Secret  *string `json:"secret"`
-	Role    Role    `json:"role"`
-	UserID  *int    `json:"userID"`
-	GroupID *int    `json:"groupID"`
+	Secret    *string `json:"secret"`
+	Role      Role    `json:"role"`
+	UserID    *int    `json:"userID"`
+	GroupID   *int    `json:"groupID"`
+	ProjectID *int    `json:"projectID"`
 }

 func (m MutableProps) Validate() error {

@@ -82,6 +84,9 @@ func updateAcc(acc *Account, props MutableProps) {
 	if props.UserID != nil {
 		acc.UserID = *props.UserID
 	}
+	if props.ProjectID != nil {
+		acc.ProjectID = *props.ProjectID
+	}
 	if props.Role != "" {
 		acc.Role = props.Role
 	}

@@ -107,42 +112,47 @@ var (
 )

 type Opts struct {
-	RootAccount            Account
-	Dir                    string
-	LDAPServerURL          string
-	LDAPBindDN             string
-	LDAPPassword           string
-	LDAPQueryBase          string
-	LDAPObjClasses         string
-	LDAPAccessAtr          string
-	LDAPSecretAtr          string
-	LDAPRoleAtr            string
-	LDAPUserIdAtr          string
-	LDAPGroupIdAtr         string
-	VaultEndpointURL       string
-	VaultSecretStoragePath string
-	VaultAuthMethod        string
-	VaultMountPath         string
-	VaultRootToken         string
-	VaultRoleId            string
-	VaultRoleSecret        string
-	VaultServerCert        string
-	VaultClientCert        string
-	VaultClientCertKey     string
-	S3Access               string
-	S3Secret               string
-	S3Region               string
-	S3Bucket               string
-	S3Endpoint             string
-	S3DisableSSlVerfiy     bool
-	CacheDisable           bool
-	CacheTTL               int
-	CachePrune             int
-	IpaHost                string
-	IpaVaultName           string
-	IpaUser                string
-	IpaPassword            string
-	IpaInsecure            bool
+	RootAccount                 Account
+	Dir                         string
+	LDAPServerURL               string
+	LDAPBindDN                  string
+	LDAPPassword                string
+	LDAPQueryBase               string
+	LDAPObjClasses              string
+	LDAPAccessAtr               string
+	LDAPSecretAtr               string
+	LDAPRoleAtr                 string
+	LDAPUserIdAtr               string
+	LDAPGroupIdAtr              string
+	LDAPProjectIdAtr            string
+	LDAPTLSSkipVerify           bool
+	VaultEndpointURL            string
+	VaultNamespace              string
+	VaultSecretStoragePath      string
+	VaultSecretStorageNamespace string
+	VaultAuthMethod             string
+	VaultAuthNamespace          string
+	VaultMountPath              string
+	VaultRootToken              string
+	VaultRoleId                 string
+	VaultRoleSecret             string
+	VaultServerCert             string
+	VaultClientCert             string
+	VaultClientCertKey          string
+	S3Access                    string
+	S3Secret                    string
+	S3Region                    string
+	S3Bucket                    string
+	S3Endpoint                  string
+	S3DisableSSlVerfiy          bool
+	CacheDisable                bool
+	CacheTTL                    int
+	CachePrune                  int
+	IpaHost                     string
+	IpaVaultName                string
+	IpaUser                     string
+	IpaPassword                 string
+	IpaInsecure                 bool
 }

 func New(o *Opts) (IAMService, error) {

@@ -156,7 +166,7 @@ func New(o *Opts) (IAMService, error) {
 	case o.LDAPServerURL != "":
 		svc, err = NewLDAPService(o.RootAccount, o.LDAPServerURL, o.LDAPBindDN, o.LDAPPassword,
 			o.LDAPQueryBase, o.LDAPAccessAtr, o.LDAPSecretAtr, o.LDAPRoleAtr, o.LDAPUserIdAtr,
-			o.LDAPGroupIdAtr, o.LDAPObjClasses)
+			o.LDAPGroupIdAtr, o.LDAPProjectIdAtr, o.LDAPObjClasses, o.LDAPTLSSkipVerify)
 		fmt.Printf("initializing LDAP IAM with %q\n", o.LDAPServerURL)
 	case o.S3Endpoint != "":
 		svc, err = NewS3(o.RootAccount, o.S3Access, o.S3Secret, o.S3Region, o.S3Bucket,

@@ -164,8 +174,8 @@ func New(o *Opts) (IAMService, error) {
 		fmt.Printf("initializing S3 IAM with '%v/%v'\n",
 			o.S3Endpoint, o.S3Bucket)
 	case o.VaultEndpointURL != "":
-		svc, err = NewVaultIAMService(o.RootAccount, o.VaultEndpointURL, o.VaultSecretStoragePath,
-			o.VaultAuthMethod, o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
+		svc, err = NewVaultIAMService(o.RootAccount, o.VaultEndpointURL, o.VaultNamespace, o.VaultSecretStoragePath, o.VaultSecretStorageNamespace,
+			o.VaultAuthMethod, o.VaultAuthNamespace, o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
 			o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
 		fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
 	case o.IpaHost != "":

@@ -194,11 +194,12 @@ func (s *IAMServiceInternal) ListUserAccounts() ([]Account, error) {
 	var accs []Account
 	for _, k := range keys {
 		accs = append(accs, Account{
-			Access:  k,
-			Secret:  conf.AccessAccounts[k].Secret,
-			Role:    conf.AccessAccounts[k].Role,
-			UserID:  conf.AccessAccounts[k].UserID,
-			GroupID: conf.AccessAccounts[k].GroupID,
+			Access:    k,
+			Secret:    conf.AccessAccounts[k].Secret,
+			Role:      conf.AccessAccounts[k].Role,
+			UserID:    conf.AccessAccounts[k].UserID,
+			GroupID:   conf.AccessAccounts[k].GroupID,
+			ProjectID: conf.AccessAccounts[k].ProjectID,
 		})
 	}

@@ -132,6 +132,7 @@ func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
 	userResult := struct {
 		Gidnumber []string
 		Uidnumber []string
+		PidNumber []string
 	}{}

 	err = ipa.rpc(req, &userResult)

@@ -147,12 +148,17 @@ func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
 	if err != nil {
 		return Account{}, fmt.Errorf("ipa gid invalid: %w", err)
 	}
+	pId, err := strconv.Atoi(userResult.PidNumber[0])
+	if err != nil {
+		return Account{}, fmt.Errorf("ipa pid invalid: %w", err)
+	}

 	account := Account{
-		Access:  access,
-		Role:    RoleUser,
-		UserID:  uid,
-		GroupID: gid,
+		Access:    access,
+		Role:      RoleUser,
+		UserID:    uid,
+		GroupID:   gid,
+		ProjectID: pId,
 	}

 	session_key := make([]byte, 16)
auth/iam_ldap.go (129 changes)
@@ -15,7 +15,9 @@
 package auth

 import (
+	"crypto/tls"
 	"fmt"
+	"net/url"
 	"strconv"
 	"strings"
 	"sync"

@@ -26,57 +28,82 @@ import (
 )

 type LdapIAMService struct {
-	conn       *ldap.Conn
-	queryBase  string
-	objClasses []string
-	accessAtr  string
-	secretAtr  string
-	roleAtr    string
-	groupIdAtr string
-	userIdAtr  string
-	rootAcc    Account
-	url        string
-	bindDN     string
-	pass       string
-	mu         sync.Mutex
+	conn          *ldap.Conn
+	queryBase     string
+	objClasses    []string
+	accessAtr     string
+	secretAtr     string
+	roleAtr       string
+	groupIdAtr    string
+	userIdAtr     string
+	projectIdAtr  string
+	rootAcc       Account
+	url           string
+	bindDN        string
+	pass          string
+	tlsSkipVerify bool
+	mu            sync.Mutex
 }

 var _ IAMService = &LdapIAMService{}

-func NewLDAPService(rootAcc Account, url, bindDN, pass, queryBase, accAtr, secAtr, roleAtr, userIdAtr, groupIdAtr, objClasses string) (IAMService, error) {
-	if url == "" || bindDN == "" || pass == "" || queryBase == "" || accAtr == "" ||
-		secAtr == "" || roleAtr == "" || userIdAtr == "" || groupIdAtr == "" || objClasses == "" {
+func NewLDAPService(rootAcc Account, ldapURL, bindDN, pass, queryBase, accAtr, secAtr, roleAtr, userIdAtr, groupIdAtr, projectIdAtr, objClasses string, tlsSkipVerify bool) (IAMService, error) {
+	if ldapURL == "" || bindDN == "" || pass == "" || queryBase == "" || accAtr == "" ||
+		secAtr == "" || roleAtr == "" || userIdAtr == "" || groupIdAtr == "" || projectIdAtr == "" || objClasses == "" {
 		return nil, fmt.Errorf("required parameters list not fully provided")
 	}
-	conn, err := ldap.DialURL(url)
+
+	conn, err := dialLDAP(ldapURL, tlsSkipVerify)
 	if err != nil {
 		return nil, fmt.Errorf("failed to connect to LDAP server: %w", err)
 	}

 	err = conn.Bind(bindDN, pass)
 	if err != nil {
 		conn.Close()
 		return nil, fmt.Errorf("failed to bind to LDAP server %w", err)
 	}
 	return &LdapIAMService{
-		conn:       conn,
-		queryBase:  queryBase,
-		objClasses: strings.Split(objClasses, ","),
-		accessAtr:  accAtr,
-		secretAtr:  secAtr,
-		roleAtr:    roleAtr,
-		userIdAtr:  userIdAtr,
-		groupIdAtr: groupIdAtr,
-		rootAcc:    rootAcc,
-		url:        url,
-		bindDN:     bindDN,
-		pass:       pass,
+		conn:          conn,
+		queryBase:     queryBase,
+		objClasses:    strings.Split(objClasses, ","),
+		accessAtr:     accAtr,
+		secretAtr:     secAtr,
+		roleAtr:       roleAtr,
+		userIdAtr:     userIdAtr,
+		groupIdAtr:    groupIdAtr,
+		projectIdAtr:  projectIdAtr,
+		rootAcc:       rootAcc,
+		url:           ldapURL,
+		bindDN:        bindDN,
+		pass:          pass,
+		tlsSkipVerify: tlsSkipVerify,
 	}, nil
 }

+// dialLDAP establishes an LDAP connection with optional TLS configuration
+func dialLDAP(ldapURL string, tlsSkipVerify bool) (*ldap.Conn, error) {
+	u, err := url.Parse(ldapURL)
+	if err != nil {
+		return nil, fmt.Errorf("invalid LDAP URL: %w", err)
+	}
+
+	// For ldaps:// URLs, use DialURL with custom TLS config if needed
+	if u.Scheme == "ldaps" && tlsSkipVerify {
+		tlsConfig := &tls.Config{
+			InsecureSkipVerify: tlsSkipVerify,
+		}
+		return ldap.DialURL(ldapURL, ldap.DialWithTLSConfig(tlsConfig))
+	}
+
+	// For ldap:// or when TLS verification is enabled, use standard DialURL
+	return ldap.DialURL(ldapURL)
+}
+
 func (ld *LdapIAMService) reconnect() error {
 	ld.conn.Close()

-	conn, err := ldap.DialURL(ld.url)
+	conn, err := dialLDAP(ld.url, ld.tlsSkipVerify)
 	if err != nil {
 		return fmt.Errorf("failed to reconnect to LDAP server: %w", err)
 	}

@@ -117,6 +144,7 @@ func (ld *LdapIAMService) CreateAccount(account Account) error {
 	userEntry.Attribute(ld.roleAtr, []string{string(account.Role)})
 	userEntry.Attribute(ld.groupIdAtr, []string{fmt.Sprint(account.GroupID)})
 	userEntry.Attribute(ld.userIdAtr, []string{fmt.Sprint(account.UserID)})
+	userEntry.Attribute(ld.projectIdAtr, []string{fmt.Sprint(account.ProjectID)})

 	err := ld.execute(func(c *ldap.Conn) error {
 		return c.Add(userEntry)

@@ -152,7 +180,7 @@ func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
 		0,
 		false,
 		ld.buildSearchFilter(access),
-		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.userIdAtr, ld.groupIdAtr},
+		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.userIdAtr, ld.groupIdAtr, ld.projectIdAtr},
 		nil,
 	)

@@ -191,12 +219,19 @@ func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
 		return Account{}, fmt.Errorf("invalid entry value for user-id %q: %w",
 			entry.GetAttributeValue(ld.userIdAtr), err)
 	}
+	projectID, err := strconv.Atoi(entry.GetAttributeValue(ld.projectIdAtr))
+	if err != nil {
+		return Account{}, fmt.Errorf("invalid entry value for project-id %q: %w",
+			entry.GetAttributeValue(ld.projectIdAtr), err)
+	}

 	return Account{
-		Access:  entry.GetAttributeValue(ld.accessAtr),
-		Secret:  entry.GetAttributeValue(ld.secretAtr),
-		Role:    Role(entry.GetAttributeValue(ld.roleAtr)),
-		GroupID: groupId,
-		UserID:  userId,
+		Access:    entry.GetAttributeValue(ld.accessAtr),
+		Secret:    entry.GetAttributeValue(ld.secretAtr),
+		Role:      Role(entry.GetAttributeValue(ld.roleAtr)),
+		GroupID:   groupId,
+		UserID:    userId,
+		ProjectID: projectID,
 	}, nil
 }

@@ -211,6 +246,9 @@ func (ld *LdapIAMService) UpdateUserAccount(access string, props MutableProps) e
 	if props.UserID != nil {
 		req.Replace(ld.userIdAtr, []string{fmt.Sprint(*props.UserID)})
 	}
+	if props.ProjectID != nil {
+		req.Replace(ld.projectIdAtr, []string{fmt.Sprint(*props.ProjectID)})
+	}
 	if props.Role != "" {
 		req.Replace(ld.roleAtr, []string{string(props.Role)})
 	}

@@ -248,7 +286,7 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
 		0,
 		false,
 		ld.buildSearchFilter(""),
-		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.groupIdAtr, ld.userIdAtr},
+		[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr, ld.groupIdAtr, ld.projectIdAtr, ld.userIdAtr},
 		nil,
 	)

@@ -273,12 +311,19 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
 		return nil, fmt.Errorf("invalid entry value for user-id %q: %w",
 			el.GetAttributeValue(ld.userIdAtr), err)
 	}
+	projectID, err := strconv.Atoi(el.GetAttributeValue(ld.projectIdAtr))
+	if err != nil {
+		return nil, fmt.Errorf("invalid entry value for project-id %q: %w",
+			el.GetAttributeValue(ld.groupIdAtr), err)
+	}

 	result = append(result, Account{
-		Access:  el.GetAttributeValue(ld.accessAtr),
-		Secret:  el.GetAttributeValue(ld.secretAtr),
-		Role:    Role(el.GetAttributeValue(ld.roleAtr)),
-		GroupID: groupId,
-		UserID:  userId,
+		Access:    el.GetAttributeValue(ld.accessAtr),
+		Secret:    el.GetAttributeValue(ld.secretAtr),
+		Role:      Role(el.GetAttributeValue(ld.roleAtr)),
+		GroupID:   groupId,
+		ProjectID: projectID,
+		UserID:    userId,
 	})
 }
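To illustrate the widened constructor, a hedged sketch of a call site with the new project-id attribute and TLS flag. All attribute names, DNs, and credentials here are made up for illustration; only the parameter order follows the signature above:

package main

import (
	"fmt"
	"log"

	"github.com/versity/versitygw/auth"
)

func main() {
	// Root account values are placeholders.
	rootAcc := auth.Account{Access: "root", Secret: "rootsecret"}

	svc, err := auth.NewLDAPService(
		rootAcc,
		"ldaps://ldap.example.com:636", // ldaps:// takes the custom TLS path in dialLDAP
		"cn=admin,dc=example,dc=com",   // bindDN
		"adminpass",                    // bind password
		"ou=users,dc=example,dc=com",   // queryBase
		"uid",                          // access attribute
		"userPassword",                 // secret attribute
		"employeeType",                 // role attribute
		"uidNumber",                    // user id attribute
		"gidNumber",                    // group id attribute
		"businessCategory",             // project id attribute (new parameter)
		"inetOrgPerson,posixAccount",   // object classes
		true,                           // tlsSkipVerify: accept self-signed certs (testing only)
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("LDAP IAM ready: %T\n", svc)
}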
@@ -205,11 +205,12 @@ func (s *IAMServiceS3) ListUserAccounts() ([]Account, error) {
 	var accs []Account
 	for _, k := range keys {
 		accs = append(accs, Account{
-			Access:  k,
-			Secret:  conf.AccessAccounts[k].Secret,
-			Role:    conf.AccessAccounts[k].Role,
-			UserID:  conf.AccessAccounts[k].UserID,
-			GroupID: conf.AccessAccounts[k].GroupID,
+			Access:    k,
+			Secret:    conf.AccessAccounts[k].Secret,
+			Role:      conf.AccessAccounts[k].Role,
+			UserID:    conf.AccessAccounts[k].UserID,
+			GroupID:   conf.AccessAccounts[k].GroupID,
+			ProjectID: conf.AccessAccounts[k].ProjectID,
 		})
 	}
@@ -38,15 +38,39 @@ type VaultIAMService struct {
 	creds schema.AppRoleLoginRequest
 }

+type VaultIAMNamespace struct {
+	Auth          string
+	SecretStorage string
+}
+
+// Resolve empty specific namespaces to the fallback.
+// Empty result means root namespace.
+func resolveVaultNamespaces(authNamespace, secretStorageNamespace, fallback string) VaultIAMNamespace {
+	ns := VaultIAMNamespace{
+		Auth:          authNamespace,
+		SecretStorage: secretStorageNamespace,
+	}
+
+	if ns.Auth == "" {
+		ns.Auth = fallback
+	}
+	if ns.SecretStorage == "" {
+		ns.SecretStorage = fallback
+	}
+
+	return ns
+}
+
 var _ IAMService = &VaultIAMService{}

-func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
-	authMethod, mountPath, rootToken, roleID, roleSecret, serverCert,
+func NewVaultIAMService(rootAcc Account, endpoint, namespace, secretStoragePath, secretStorageNamespace,
+	authMethod, authNamespace, mountPath, rootToken, roleID, roleSecret, serverCert,
 	clientCert, clientCertKey string) (IAMService, error) {
 	opts := []vault.ClientOption{
 		vault.WithAddress(endpoint),
 		vault.WithRequestTimeout(requestTimeout),
 	}

 	if serverCert != "" {
 		tls := vault.TLSConfiguration{}

@@ -80,6 +104,28 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
 		kvReqOpts = append(kvReqOpts, vault.WithMountPath(mountPath))
 	}

+	// Resolve namespaces using optional generic fallback "namespace"
+	ns := resolveVaultNamespaces(authNamespace, secretStorageNamespace, namespace)
+
+	// Guard: AppRole tokens are namespace scoped. If using AppRole and namespaces differ, error early.
+	// Root token can span namespaces because each request carries X-Vault-Namespace.
+	if rootToken == "" && ns.Auth != "" && ns.SecretStorage != "" && ns.Auth != ns.SecretStorage {
+		return nil, fmt.Errorf(
+			"approle tokens are namespace scoped. auth namespace %q and secret storage namespace %q differ. "+
+				"use the same namespace or authenticate with a root token",
+			ns.Auth, ns.SecretStorage,
+		)
+	}
+
+	// Apply namespaces to the correct request option sets.
+	// For root token we do not need an auth namespace since we are not logging in via auth.
+	if rootToken == "" && ns.Auth != "" {
+		authReqOpts = append(authReqOpts, vault.WithNamespace(ns.Auth))
+	}
+	if ns.SecretStorage != "" {
+		kvReqOpts = append(kvReqOpts, vault.WithNamespace(ns.SecretStorage))
+	}
+
 	creds := schema.AppRoleLoginRequest{
 		RoleId:   roleID,
 		SecretId: roleSecret,

@@ -179,6 +225,10 @@ func (vt *VaultIAMService) CreateAccount(account Account) error {
 	if strings.Contains(err.Error(), "check-and-set") {
 		return ErrUserExists
 	}
+	if vault.IsErrorStatus(err, http.StatusForbidden) {
+		return fmt.Errorf("vault 403 permission denied on path %q. check KV mount path and policy. original: %w",
+			vt.secretStoragePath+"/"+account.Access, err)
+	}
 	return err
 }
 	return nil

@@ -319,12 +369,21 @@ func parseVaultUserAccount(data map[string]any, access string) (acc Account, err
 	if err != nil {
 		return acc, errInvalidUser
 	}
+	projectIdJson, ok := usrAcc["projectID"].(json.Number)
+	if !ok {
+		return acc, errInvalidUser
+	}
+	projectID, err := projectIdJson.Int64()
+	if err != nil {
+		return acc, errInvalidUser
+	}

 	return Account{
-		Access:  acss,
-		Secret:  secret,
-		Role:    Role(role),
-		UserID:  int(userId),
-		GroupID: int(groupId),
+		Access:    acss,
+		Secret:    secret,
+		Role:      Role(role),
+		UserID:    int(userId),
+		GroupID:   int(groupId),
+		ProjectID: int(projectID),
 	}, nil
 }
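A small sketch of the namespace-resolution rule that resolveVaultNamespaces implements. The function is unexported, so this would have to live inside the auth package (for example in a test file); the namespace values are made up:

package auth

import "fmt"

// demoResolveNamespaces illustrates the fallback behavior shown above.
func demoResolveNamespaces() {
	// Specific namespaces are kept as given.
	ns := resolveVaultNamespaces("team-a/auth", "team-a/kv", "team-a")
	fmt.Println(ns.Auth, ns.SecretStorage) // team-a/auth team-a/kv

	// Empty specific values fall back to the generic namespace.
	ns = resolveVaultNamespaces("", "", "team-a")
	fmt.Println(ns.Auth, ns.SecretStorage) // team-a team-a

	// All empty means the root namespace.
	ns = resolveVaultNamespaces("", "", "")
	fmt.Println(ns.Auth, ns.SecretStorage) // prints two empty strings
}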
@@ -24,6 +24,7 @@ import (

 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/versity/versitygw/backend"
+	"github.com/versity/versitygw/debuglogger"
 	"github.com/versity/versitygw/s3err"
 	"github.com/versity/versitygw/s3response"
 )

@@ -40,7 +41,7 @@ func ParseBucketLockConfigurationInput(input []byte) ([]byte, error) {
 		return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
 	}

-	if lockConfig.ObjectLockEnabled != "" && lockConfig.ObjectLockEnabled != types.ObjectLockEnabledEnabled {
+	if lockConfig.ObjectLockEnabled != types.ObjectLockEnabledEnabled {
 		return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
 	}

@@ -92,28 +93,101 @@ func ParseBucketLockConfigurationOutput(input []byte) (*types.ObjectLockConfigur
 	return result, nil
 }

-func ParseObjectLockRetentionInput(input []byte) ([]byte, error) {
+func ParseObjectLockRetentionInput(input []byte) (*s3response.PutObjectRetentionInput, error) {
 	var retention s3response.PutObjectRetentionInput
 	if err := xml.Unmarshal(input, &retention); err != nil {
 		debuglogger.Logf("invalid object lock retention request body: %v", err)
 		return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
 	}

 	if retention.RetainUntilDate.Before(time.Now()) {
 		debuglogger.Logf("object lock retain until date must be in the future")
 		return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
 	}
 	switch retention.Mode {
 	case types.ObjectLockRetentionModeCompliance:
 	case types.ObjectLockRetentionModeGovernance:
 	default:
 		debuglogger.Logf("invalid object lock retention mode: %s", retention.Mode)
 		return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
 	}

-	return json.Marshal(retention)
+	return &retention, nil
 }

+func ParseObjectLockRetentionInputToJSON(input *s3response.PutObjectRetentionInput) ([]byte, error) {
+	data, err := json.Marshal(input)
+	if err != nil {
+		debuglogger.Logf("parse object lock retention to JSON: %v", err)
+		return nil, fmt.Errorf("parse object lock retention: %w", err)
+	}
+
+	return data, nil
+}
+
+// IsObjectLockRetentionPutAllowed checks if the object lock retention PUT request
+// is allowed against the current state of the object lock
+func IsObjectLockRetentionPutAllowed(ctx context.Context, be backend.Backend, bucket, object, versionId, userAccess string, input *s3response.PutObjectRetentionInput, bypass bool) error {
+	ret, err := be.GetObjectRetention(ctx, bucket, object, versionId)
+	if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)) {
+		// if object lock configuration is not set
+		// allow the retention modification without any checks
+		return nil
+	}
+	if err != nil {
+		debuglogger.Logf("failed to get object retention: %v", err)
+		return err
+	}
+
+	retention, err := ParseObjectLockRetentionOutput(ret)
+	if err != nil {
+		return err
+	}
+
+	if retention.Mode == input.Mode {
+		// if retention mode is the same
+		// the operation is allowed
+		return nil
+	}
+
+	if retention.Mode == types.ObjectLockRetentionModeCompliance {
+		// COMPLIANCE mode is by definition not allowed to modify
+		debuglogger.Logf("object lock retention change request from 'COMPLIANCE' to 'GOVERNANCE' is not allowed")
+		return s3err.GetAPIError(s3err.ErrObjectLocked)
+	}
+
+	if !bypass {
+		// if x-amz-bypass-governance-retention is not provided
+		// return error: object is locked
+		debuglogger.Logf("object lock retention mode change is not allowed and bypass governence is not forced")
+		return s3err.GetAPIError(s3err.ErrObjectLocked)
+	}
+
+	// the last case left, when user tries to change
+	// from 'GOVERNANCE' to 'COMPLIANCE' with
+	// 'x-amz-bypass-governance-retention' header
+	// first we need to check if user has 's3:BypassGovernanceRetention'
+	policy, err := be.GetBucketPolicy(ctx, bucket)
+	if err != nil {
+		// if it fails to get the policy, return object is locked
+		debuglogger.Logf("failed to get the bucket policy: %v", err)
+		return s3err.GetAPIError(s3err.ErrObjectLocked)
+	}
+	err = VerifyBucketPolicy(policy, userAccess, bucket, object, BypassGovernanceRetentionAction)
+	if err != nil {
+		// if user doesn't have "s3:BypassGovernanceRetention" permission
+		// return object is locked
+		debuglogger.Logf("the user is missing 's3:BypassGovernanceRetention' permission")
+		return s3err.GetAPIError(s3err.ErrObjectLocked)
+	}
+
+	return nil
+}
+
 func ParseObjectLockRetentionOutput(input []byte) (*types.ObjectLockRetention, error) {
 	var retention types.ObjectLockRetention
 	if err := json.Unmarshal(input, &retention); err != nil {
 		debuglogger.Logf("parse object lock retention output: %v", err)
 		return nil, fmt.Errorf("parse object lock retention: %w", err)
 	}

@@ -136,7 +210,16 @@ func ParseObjectLegalHoldOutput(status *bool) *s3response.GetObjectLegalHoldResu
 	}
 }

-func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass, isBucketPublic bool, be backend.Backend) error {
+func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass, isBucketPublic bool, be backend.Backend, isOverwrite bool) error {
+	if isOverwrite {
+		// if bucket versioning is enabled, any overwrite request
+		// should be enabled, as it leads to a new object version
+		// creation
+		res, err := be.GetBucketVersioning(ctx, bucket)
+		if err == nil && res.Status != nil && *res.Status == types.BucketVersioningStatusEnabled {
+			return nil
+		}
+	}
 	data, err := be.GetObjectLockConfiguration(ctx, bucket)
 	if err != nil {
 		if errors.Is(err, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)) {

@@ -198,31 +281,35 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
 	}

 	if retention.Mode != "" && retention.RetainUntilDate != nil {
-		if retention.RetainUntilDate.After(time.Now()) {
-			switch retention.Mode {
-			case types.ObjectLockRetentionModeGovernance:
-				if !bypass {
-					return s3err.GetAPIError(s3err.ErrObjectLocked)
-				} else {
-					policy, err := be.GetBucketPolicy(ctx, bucket)
-					if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
-						return s3err.GetAPIError(s3err.ErrObjectLocked)
-					}
-					if err != nil {
-						return err
-					}
-					if isBucketPublic {
-						err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
-					} else {
-						err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
-					}
-					if err != nil {
-						return s3err.GetAPIError(s3err.ErrObjectLocked)
-					}
-				}
-			case types.ObjectLockRetentionModeCompliance:
+		if retention.RetainUntilDate.Before(time.Now()) {
+			// if the object retention is expired, the object
+			// is allowed for write operations(delete, modify)
+			return nil
+		}
+
+		switch retention.Mode {
+		case types.ObjectLockRetentionModeGovernance:
+			if !bypass {
+				return s3err.GetAPIError(s3err.ErrObjectLocked)
+			} else {
+				policy, err := be.GetBucketPolicy(ctx, bucket)
+				if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
+					return s3err.GetAPIError(s3err.ErrObjectLocked)
+				}
+				if err != nil {
+					return err
+				}
+				if isBucketPublic {
+					err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
+				} else {
+					err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
+				}
+				if err != nil {
+					return s3err.GetAPIError(s3err.ErrObjectLocked)
+				}
+			}
+		case types.ObjectLockRetentionModeCompliance:
 			return s3err.GetAPIError(s3err.ErrObjectLocked)
 		}
 	}
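A hedged sketch of the reworked two-step retention flow shown above: ParseObjectLockRetentionInput now returns the parsed struct, and the JSON encoding for storage happens separately. The XML shape follows the standard S3 PutObjectRetention request body; the date is an arbitrary future timestamp:

package main

import (
	"fmt"
	"log"

	"github.com/versity/versitygw/auth"
)

func main() {
	body := []byte(`<Retention>` +
		`<Mode>GOVERNANCE</Mode>` +
		`<RetainUntilDate>2030-01-01T00:00:00Z</RetainUntilDate>` +
		`</Retention>`)

	// Step 1: parse and validate. The mode must be GOVERNANCE or
	// COMPLIANCE, and retain-until must be in the future.
	ret, err := auth.ParseObjectLockRetentionInput(body)
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: encode for storage. Backends now receive this JSON without
	// the bypass flag, since the permission check moved into
	// IsObjectLockRetentionPutAllowed.
	data, err := auth.ParseObjectLockRetentionInputToJSON(ret)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
}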
@@ -177,7 +177,21 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
 		meta[string(keyBucketLock)] = backend.GetPtrFromString(encodeBytes(defaultLockParsed))
 	}

-	_, err := az.client.CreateContainer(ctx, *input.Bucket, &container.CreateOptions{Metadata: meta})
+	tagging, err := backend.ParseCreateBucketTags(input.CreateBucketConfiguration.Tags)
+	if err != nil {
+		return err
+	}
+
+	if tagging != nil {
+		tags, err := json.Marshal(tagging)
+		if err != nil {
+			return fmt.Errorf("marshal tags: %w", err)
+		}
+
+		meta[string(keyTags)] = backend.GetPtrFromString(encodeBytes(tags))
+	}
+
+	_, err = az.client.CreateContainer(ctx, *input.Bucket, &container.CreateOptions{Metadata: meta})
 	if errors.Is(s3err.GetAPIError(s3err.ErrBucketAlreadyExists), azureErrToS3Err(err)) {
 		aclBytes, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyAclCapital))
 		if err != nil {

@@ -364,7 +378,7 @@ func (az *Azure) PutObject(ctx context.Context, po s3response.PutObjectInput) (s
 	if err != nil {
 		return s3response.PutObjectOutput{}, fmt.Errorf("parse object lock retention: %w", err)
 	}
-	err = az.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", true, retParsed)
+	err = az.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", retParsed)
 	if err != nil {
 		return s3response.PutObjectOutput{}, err
 	}

@@ -569,6 +583,11 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
 		}
 	}

+	if resp.TagCount != nil {
+		tagcount := int32(*resp.TagCount)
+		result.TagCount = &tagcount
+	}
+
 	return result, nil
 }

@@ -977,7 +996,7 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
 	if err != nil {
 		return s3response.CopyObjectOutput{}, fmt.Errorf("parse object retention: %w", err)
 	}
-	err = az.PutObjectRetention(ctx, *input.Bucket, *input.Key, "", true, retParsed)
+	err = az.PutObjectRetention(ctx, *input.Bucket, *input.Key, "", retParsed)
 	if err != nil {
 		return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
 	}

@@ -1071,7 +1090,7 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
 	}, nil
 }

-func (az *Azure) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
+func (az *Azure) PutObjectTagging(ctx context.Context, bucket, object, _ string, tags map[string]string) error {
 	client, err := az.getBlobClient(bucket, object)
 	if err != nil {
 		return err

@@ -1085,7 +1104,7 @@ func (az *Azure) PutObjectTagging(ctx context.Context, bucket, object string, ta
 	return nil
 }

-func (az *Azure) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
+func (az *Azure) GetObjectTagging(ctx context.Context, bucket, object, _ string) (map[string]string, error) {
 	client, err := az.getBlobClient(bucket, object)
 	if err != nil {
 		return nil, err

@@ -1099,7 +1118,7 @@ func (az *Azure) GetObjectTagging(ctx context.Context, bucket, object string) (m
 	return parseAzTags(tags.BlobTagSet), nil
 }

-func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
+func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object, _ string) error {
 	client, err := az.getBlobClient(bucket, object)
 	if err != nil {
 		return err

@@ -1644,24 +1663,6 @@ func (az *Azure) DeleteBucketCors(ctx context.Context, bucket string) error {
 }

 func (az *Azure) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
-	cfg, err := az.getContainerMetaData(ctx, bucket, string(keyBucketLock))
-	if err != nil {
-		return err
-	}
-
-	if len(cfg) == 0 {
-		return s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotAllowed)
-	}
-
-	var bucketLockCfg auth.BucketLockConfig
-	if err := json.Unmarshal(cfg, &bucketLockCfg); err != nil {
-		return fmt.Errorf("unmarshal object lock config: %w", err)
-	}
-
-	if !bucketLockCfg.Enabled {
-		return s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotAllowed)
-	}
-
 	return az.setContainerMetaData(ctx, bucket, string(keyBucketLock), config)
 }

@@ -1678,7 +1679,7 @@ func (az *Azure) GetObjectLockConfiguration(ctx context.Context, bucket string)
 	return cfg, nil
 }

-func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
+func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, versionId string, retention []byte) error {
 	err := az.isBucketObjectLockEnabled(ctx, bucket)
 	if err != nil {
 		return err

@@ -1700,28 +1701,7 @@ func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, version
 			string(keyObjRetention): backend.GetPtrFromString(string(retention)),
 		}
 	} else {
-		objLockCfg, ok := meta[string(keyObjRetention)]
-		if !ok {
-			meta[string(keyObjRetention)] = backend.GetPtrFromString(string(retention))
-		} else {
-			var lockCfg types.ObjectLockRetention
-			if err := json.Unmarshal([]byte(*objLockCfg), &lockCfg); err != nil {
-				return fmt.Errorf("unmarshal object lock config: %w", err)
-			}
-
-			switch lockCfg.Mode {
-			// Compliance mode can't be overridden
-			case types.ObjectLockRetentionModeCompliance:
-				return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
-			// To override governance mode user should have "s3:BypassGovernanceRetention" permission
-			case types.ObjectLockRetentionModeGovernance:
-				if !bypass {
-					return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
-				}
-			}
-
-			meta[string(keyObjRetention)] = backend.GetPtrFromString(string(retention))
-		}
+		meta[string(keyObjRetention)] = backend.GetPtrFromString(string(retention))
 	}

 	_, err = blobClient.SetMetadata(ctx, meta, nil)
@@ -83,14 +83,14 @@ type Backend interface {
 	DeleteBucketTagging(_ context.Context, bucket string) error

 	// object tagging operations
-	GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error)
-	PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error
-	DeleteObjectTagging(_ context.Context, bucket, object string) error
+	GetObjectTagging(_ context.Context, bucket, object, versionId string) (map[string]string, error)
+	PutObjectTagging(_ context.Context, bucket, object, versionId string, tags map[string]string) error
+	DeleteObjectTagging(_ context.Context, bucket, object, versionId string) error

 	// object lock operations
 	PutObjectLockConfiguration(_ context.Context, bucket string, config []byte) error
 	GetObjectLockConfiguration(_ context.Context, bucket string) ([]byte, error)
-	PutObjectRetention(_ context.Context, bucket, object, versionId string, bypass bool, retention []byte) error
+	PutObjectRetention(_ context.Context, bucket, object, versionId string, retention []byte) error
 	GetObjectRetention(_ context.Context, bucket, object, versionId string) ([]byte, error)
 	PutObjectLegalHold(_ context.Context, bucket, object, versionId string, status bool) error
 	GetObjectLegalHold(_ context.Context, bucket, object, versionId string) (*bool, error)

@@ -251,13 +251,13 @@ func (BackendUnsupported) DeleteBucketTagging(_ context.Context, bucket string)
 	return s3err.GetAPIError(s3err.ErrNotImplemented)
 }

-func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
+func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object, versionId string) (map[string]string, error) {
 	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
 }
-func (BackendUnsupported) PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error {
+func (BackendUnsupported) PutObjectTagging(_ context.Context, bucket, object, versionId string, tags map[string]string) error {
 	return s3err.GetAPIError(s3err.ErrNotImplemented)
 }
-func (BackendUnsupported) DeleteObjectTagging(_ context.Context, bucket, object string) error {
+func (BackendUnsupported) DeleteObjectTagging(_ context.Context, bucket, object, versionId string) error {
 	return s3err.GetAPIError(s3err.ErrNotImplemented)
 }

@@ -267,7 +267,7 @@ func (BackendUnsupported) PutObjectLockConfiguration(_ context.Context, bucket s
 func (BackendUnsupported) GetObjectLockConfiguration(_ context.Context, bucket string) ([]byte, error) {
 	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
 }
-func (BackendUnsupported) PutObjectRetention(_ context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
+func (BackendUnsupported) PutObjectRetention(_ context.Context, bucket, object, versionId string, retention []byte) error {
 	return s3err.GetAPIError(s3err.ErrNotImplemented)
 }
 func (BackendUnsupported) GetObjectRetention(_ context.Context, bucket, object, versionId string) ([]byte, error) {
@@ -317,14 +317,60 @@ func ParseObjectTags(tagging string) (map[string]string, error) {
	return tagSet, nil
}

var validTagComponent = regexp.MustCompile(`^[a-zA-Z0-9:/_.\-+ ]+$`)

// isValidTagComponent matches strings which contain letters, decimal digits,
// and special chars: '/', '_', '-', '+', '.', ' ' (space)
func isValidTagComponent(str string) bool {
	if str == "" {
		return true
// ParseCreateBucketTags parses and validates the bucket
// tagging from CreateBucket input
func ParseCreateBucketTags(tagging []types.Tag) (map[string]string, error) {
	if len(tagging) == 0 {
		return nil, nil
	}

	tagset := make(map[string]string, len(tagging))

	if len(tagging) > 50 {
		return nil, s3err.GetAPIError(s3err.ErrBucketTaggingLimited)
	}

	for _, tag := range tagging {
		// validate tag key length
		key := GetStringFromPtr(tag.Key)
		if len(key) == 0 || len(key) > 128 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
		}

		// validate tag key string chars
		if !isValidTagComponent(key) {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
		}

		// validate tag value length
		value := GetStringFromPtr(tag.Value)
		if len(value) > 256 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
		}

		// validate tag value string chars
		if !isValidTagComponent(value) {
			return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
		}

		// make sure there are no duplicate keys
		_, ok := tagset[key]
		if ok {
			return nil, s3err.GetAPIError(s3err.ErrDuplicateTagKey)
		}

		tagset[key] = value
	}

	return tagset, nil
}

// tag component (key/value) name rule regexp
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_Tag.html
var validTagComponent = regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`)

// isValidTagComponent validates the tag component (key/value) name
func isValidTagComponent(str string) bool {
	return validTagComponent.Match([]byte(str))
}

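A minimal, self-contained sketch of the new tag-component rule (the regexp is copied from the diff above; everything else is illustrative): Unicode letters, spaces, digits, and the characters _.:/=+-@ are accepted, anything else is rejected.

	package main

	import (
		"fmt"
		"regexp"
	)

	var validTagComponent = regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`)

	func main() {
		for _, s := range []string{"env:prod", "données 2", "bad|pipe"} {
			// prints: true, true, false
			fmt.Println(s, validTagComponent.MatchString(s))
		}
	}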
@@ -570,3 +616,19 @@ func EvaluateObjectDeletePreconditions(etag string, modTime time.Time, size int6

	return nil
}

// IsValidDirectoryName returns true if the string is a valid name
// for a directory
func IsValidDirectoryName(name string) bool {
	// directories may not contain a path separator
	if strings.ContainsRune(name, '/') {
		return false
	}

	// directories may not contain a null character
	if strings.ContainsRune(name, 0) {
		return false
	}

	return true
}

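A quick illustration of the check above, assuming only the two rules shown (no path separator and no NUL byte):

	fmt.Println(backend.IsValidDirectoryName("my-bucket"))  // true
	fmt.Println(backend.IsValidDirectoryName("a/b"))        // false: path separator
	fmt.Println(backend.IsValidDirectoryName("bad\x00name")) // false: null character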
@@ -78,6 +78,10 @@ type Posix struct {
	// if the filesystem supports it. This is needed for cases where
	// there are different filesystems mounted below the bucket level.
	forceNoTmpFile bool

	// enable posix level bucket name validations, not needed if the
	// frontend handlers are already validating bucket names
	validateBucketName bool
}

var _ backend.Backend = &Posix{}
@@ -131,6 +135,10 @@ type PosixOpts struct {
	// ForceNoTmpFile disables the use of O_TMPFILE even if the filesystem
	// supports it
	ForceNoTmpFile bool
	// ValidateBucketNames enables minimal bucket name validation to prevent
	// incorrect access to the filesystem. This is only needed if the
	// frontend is not already validating bucket names.
	ValidateBucketNames bool
}

func New(rootdir string, meta meta.MetadataStorer, opts PosixOpts) (*Posix, error) {
@@ -180,17 +188,18 @@ func New(rootdir string, meta meta.MetadataStorer, opts PosixOpts) (*Posix, erro
	}

	return &Posix{
		meta:           meta,
		rootfd:         f,
		rootdir:        rootdir,
		euid:           os.Geteuid(),
		egid:           os.Getegid(),
		chownuid:       opts.ChownUID,
		chowngid:       opts.ChownGID,
		bucketlinks:    opts.BucketLinks,
		versioningDir:  verioningdirAbs,
		newDirPerm:     opts.NewDirPerm,
		forceNoTmpFile: opts.ForceNoTmpFile,
		meta:               meta,
		rootfd:             f,
		rootdir:            rootdir,
		euid:               os.Geteuid(),
		egid:               os.Getegid(),
		chownuid:           opts.ChownUID,
		chowngid:           opts.ChownGID,
		bucketlinks:        opts.BucketLinks,
		versioningDir:      verioningdirAbs,
		newDirPerm:         opts.NewDirPerm,
		forceNoTmpFile:     opts.ForceNoTmpFile,
		validateBucketName: opts.ValidateBucketNames,
	}, nil
}

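A hypothetical wiring of the new option (New and PosixOpts are from this diff; the metadata store argument and root path are stand-ins):

	p, err := posix.New("/srv/gateway", metaStore, posix.PosixOpts{
		// enable only when the frontend does not validate bucket names itself
		ValidateBucketNames: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = p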
@@ -335,7 +344,18 @@ func (p *Posix) ListBuckets(_ context.Context, input s3response.ListBucketsInput
	}, nil
}

func (p *Posix) isBucketValid(bucket string) bool {
	if !p.validateBucketName {
		return true
	}

	return backend.IsValidDirectoryName(bucket)
}

func (p *Posix) HeadBucket(_ context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
	if !p.isBucketValid(*input.Bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Lstat(*input.Bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -357,7 +377,16 @@ func (p *Posix) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, a

	bucket := *input.Bucket

	err := os.Mkdir(bucket, p.newDirPerm)
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	tagging, err := backend.ParseCreateBucketTags(input.CreateBucketConfiguration.Tags)
	if err != nil {
		return err
	}

	err = os.Mkdir(bucket, p.newDirPerm)
	if err != nil && os.IsExist(err) {
		aclJSON, err := p.meta.RetrieveAttribute(nil, bucket, "", aclkey)
		if err != nil {
@@ -396,6 +425,16 @@ func (p *Posix) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, a
	if err != nil {
		return fmt.Errorf("set ownership: %w", err)
	}
	if tagging != nil {
		tags, err := json.Marshal(tagging)
		if err != nil {
			return fmt.Errorf("marshal tags: %w", err)
		}
		err = p.meta.StoreAttribute(nil, bucket, "", tagHdr, tags)
		if err != nil {
			return fmt.Errorf("set tags: %w", err)
		}
	}

	if input.ObjectLockEnabledForBucket != nil && *input.ObjectLockEnabledForBucket {
		// First enable bucket versioning
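A hypothetical aws-sdk-go-v2 client call that exercises the new CreateBucket tagging path (client, endpoint, and credentials wiring omitted; the Tags field on CreateBucketConfiguration is exactly what ParseCreateBucketTags consumes):

	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String("logs"),
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			Tags: []types.Tag{
				{Key: aws.String("team"), Value: aws.String("storage")},
			},
		},
	})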
@@ -459,6 +498,9 @@ func (p *Posix) isBucketEmpty(bucket string) error {
}

func (p *Posix) DeleteBucket(_ context.Context, bucket string) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	// Check if the bucket is empty
	err := p.isBucketEmpty(bucket)
	if err != nil {
@@ -482,6 +524,9 @@ func (p *Posix) DeleteBucket(_ context.Context, bucket string) error {
}

func (p *Posix) PutBucketOwnershipControls(_ context.Context, bucket string, ownership types.ObjectOwnership) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -499,6 +544,9 @@ func (p *Posix) PutBucketOwnershipControls(_ context.Context, bucket string, own
}
func (p *Posix) GetBucketOwnershipControls(_ context.Context, bucket string) (types.ObjectOwnership, error) {
	var ownship types.ObjectOwnership
	if !p.isBucketValid(bucket) {
		return ownship, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return ownship, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -518,6 +566,9 @@ func (p *Posix) GetBucketOwnershipControls(_ context.Context, bucket string) (ty
	return types.ObjectOwnership(ownership), nil
}
func (p *Posix) DeleteBucketOwnershipControls(_ context.Context, bucket string) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -539,6 +590,9 @@ func (p *Posix) DeleteBucketOwnershipControls(_ context.Context, bucket string)
}

func (p *Posix) PutBucketVersioning(ctx context.Context, bucket string, status types.BucketVersioningStatus) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	if !p.versioningEnabled() {
		return s3err.GetAPIError(s3err.ErrVersioningNotConfigured)
	}
@@ -588,6 +642,10 @@ func (p *Posix) GetBucketVersioning(_ context.Context, bucket string) (s3respons
		return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrVersioningNotConfigured)
	}

	if !p.isBucketValid(bucket) {
		return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -624,7 +682,7 @@ func (p *Posix) getBucketVersioningStatus(ctx context.Context, bucket string) (t
	if errors.Is(err, s3err.GetAPIError(s3err.ErrVersioningNotConfigured)) {
		return "", nil
	}
	if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrVersioningNotConfigured)) {
	if err != nil {
		return "", err
	}

@@ -740,6 +798,10 @@ func (p *Posix) ListObjectVersions(ctx context.Context, input *s3.ListObjectVers
	var prefix, delim, keyMarker, versionIdMarker string
	var max int

	if !p.isBucketValid(bucket) {
		return s3response.ListVersionsResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	if input.Prefix != nil {
		prefix = *input.Prefix
	}
@@ -1186,6 +1248,10 @@ func (p *Posix) CreateMultipartUpload(ctx context.Context, mpu s3response.Create
	bucket := *mpu.Bucket
	object := *mpu.Key

	if !p.isBucketValid(bucket) {
		return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1247,7 +1313,7 @@ func (p *Posix) CreateMultipartUpload(ctx context.Context, mpu s3response.Create

	// set object tagging
	if tags != nil {
		err := p.PutObjectTagging(ctx, bucket, filepath.Join(objdir, uploadID), tags)
		err := p.PutObjectTagging(ctx, bucket, filepath.Join(objdir, uploadID), "", tags)
		if err != nil {
			// cleanup object if returning error
			os.RemoveAll(filepath.Join(tmppath, uploadID))
@@ -1295,7 +1361,7 @@ func (p *Posix) CreateMultipartUpload(ctx context.Context, mpu s3response.Create
		os.Remove(tmppath)
		return s3response.InitiateMultipartUploadResult{}, fmt.Errorf("parse object lock retention: %w", err)
	}
	err = p.PutObjectRetention(ctx, bucket, filepath.Join(objdir, uploadID), "", true, retParsed)
	err = p.PutObjectRetention(ctx, bucket, filepath.Join(objdir, uploadID), "", retParsed)
	if err != nil {
		// cleanup object if returning error
		os.RemoveAll(filepath.Join(tmppath, uploadID))
@@ -1388,6 +1454,10 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
	uploadID := *input.UploadId
	parts := input.MultipartUpload.Parts

	if !p.isBucketValid(bucket) {
		return res, "", s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return res, "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1415,7 +1485,11 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
	if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
		return res, "", fmt.Errorf("get mp checksums: %w", err)
	}
	var checksumAlgorithm types.ChecksumAlgorithm

	// The checksum algorithm defaults to CRC64NVME purely for data
	// integrity. It is not saved in the final object metadata.
	checksumAlgorithm := types.ChecksumAlgorithmCrc64nvme
	if checksums.Algorithm != "" {
		checksumAlgorithm = checksums.Algorithm
	}
@@ -1430,6 +1504,15 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
		return res, "", s3err.GetChecksumTypeMismatchOnMpErr(checksumType)
	}

	// The checksum type defaults to FULL_OBJECT (crc64nvme)
	checksumType := checksums.Type
	if checksums.Type == "" {
		// do not modify checksums.Type, so the checksum is not saved in
		// the final object: if no checksum was specified on multipart
		// creation, the final object should not contain checksum metadata
		checksumType = types.ChecksumTypeFullObject
	}

	// check all parts ok
	last := len(parts) - 1
	var totalsize int64
@@ -1496,7 +1579,7 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C

	var hashRdr *utils.HashReader
	var compositeChecksumRdr *utils.CompositeChecksumReader
	switch checksums.Type {
	switch checksumType {
	case types.ChecksumTypeFullObject:
		if !composableCRC {
			hashRdr, err = utils.NewHashReader(nil, "", utils.HashType(strings.ToLower(string(checksumAlgorithm))))
@@ -1536,7 +1619,7 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
	}

	var rdr io.Reader = pf
	switch checksums.Type {
	switch checksumType {
	case types.ChecksumTypeFullObject:
		if composableCRC {
			if i == 0 {
@@ -1568,7 +1651,8 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
	err = customMove(pf, f.File())
	if err != nil {
		// Fall back to standard copy
		debuglogger.Logf("Custom data block move failed (%w), failing back to io.Copy()", err)
		debuglogger.Logf("custom data block move failed (%q/%q): %v, falling back to io.Copy()",
			bucket, object, err)
		fw := f.File()
		fw.Seek(0, io.SeekEnd)
		_, err = io.Copy(fw, rdr)
@@ -1678,7 +1762,7 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
	var sum string
	switch checksums.Type {
	case types.ChecksumTypeComposite:
		sum = compositeChecksumRdr.Sum()
		sum = fmt.Sprintf("%s-%v", compositeChecksumRdr.Sum(), len(parts))
	case types.ChecksumTypeFullObject:
		if !composableCRC {
			sum = hashRdr.Sum()
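The "-<parts>" suffix added above follows the S3 composite checksum convention: the checksum of the concatenated binary part checksums, with the part count appended. A runnable sketch with CRC32 (illustration only, not the gateway's internal reader):

	package main

	import (
		"encoding/base64"
		"fmt"
		"hash/crc32"
	)

	func main() {
		parts := [][]byte{[]byte("part one"), []byte("part two")}
		var concat []byte
		for _, p := range parts {
			s := crc32.ChecksumIEEE(p)
			// append each part checksum as 4 big-endian bytes
			concat = append(concat, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
		}
		final := crc32.ChecksumIEEE(concat)
		b := []byte{byte(final >> 24), byte(final >> 16), byte(final >> 8), byte(final)}
		// e.g. "wXyZ...==-2" for a two-part upload
		fmt.Printf("%s-%d\n", base64.StdEncoding.EncodeToString(b), len(parts))
	}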
@@ -1687,42 +1771,57 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
		}
	}

	var gotSum *string

	switch checksumAlgorithm {
	case types.ChecksumAlgorithmCrc32:
		if input.ChecksumCRC32 != nil && *input.ChecksumCRC32 != sum {
			return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
		}
		gotSum = input.ChecksumCRC32
		checksum.CRC32 = &sum
		crc32 = &sum
	case types.ChecksumAlgorithmCrc32c:
		if input.ChecksumCRC32C != nil && *input.ChecksumCRC32C != sum {
			return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
		}
		gotSum = input.ChecksumCRC32C
		checksum.CRC32C = &sum
		crc32c = &sum
	case types.ChecksumAlgorithmSha1:
		if input.ChecksumSHA1 != nil && *input.ChecksumSHA1 != sum {
			return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
		}
		gotSum = input.ChecksumSHA1
		checksum.SHA1 = &sum
		sha1 = &sum
	case types.ChecksumAlgorithmSha256:
		if input.ChecksumSHA256 != nil && *input.ChecksumSHA256 != sum {
			return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
		}
		gotSum = input.ChecksumSHA256
		checksum.SHA256 = &sum
		sha256 = &sum
	case types.ChecksumAlgorithmCrc64nvme:
		if input.ChecksumCRC64NVME != nil && *input.ChecksumCRC64NVME != sum {
			return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
		}
		gotSum = input.ChecksumCRC64NVME
		checksum.CRC64NVME = &sum
		crc64nvme = &sum
	}

	// Check if the provided checksum and the calculated one are the same
	if gotSum != nil {
		s := *gotSum
		if checksums.Type == types.ChecksumTypeComposite && !strings.Contains(s, "-") {
			// if the number of parts is not specified in the provided
			// checksum, append it so the final comparison does not fail
			s = fmt.Sprintf("%s-%v", s, len(parts))
		}

		if s != sum {
			return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
		}
	}

	err := p.storeChecksums(f.File(), bucket, object, checksum)
	if err != nil {
		return res, "", fmt.Errorf("store object checksum: %w", err)
	}
	} else {
		// in this case no checksum was specified on multipart creation
		// and the complete request checksum defaults to crc64nvme;
		// simply calculate the sum to return in the response
		if hashRdr != nil {
			sum := hashRdr.Sum()
			crc64nvme = &sum
		}
	}

	// load and set retention

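The comparison above tolerates clients that omit the part-count suffix on a composite checksum. The normalization reduces to the following sketch (function name invented for illustration; assumes fmt and strings):

	func normalizeComposite(got string, parts int) string {
		if !strings.Contains(got, "-") {
			return fmt.Sprintf("%s-%v", got, parts)
		}
		return got
	}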
@@ -1765,7 +1864,7 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
		ChecksumSHA1:      sha1,
		ChecksumSHA256:    sha256,
		ChecksumCRC64NVME: crc64nvme,
		ChecksumType:      &checksums.Type,
		ChecksumType:      &checksumType,
	}, versionID, nil
}

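Clients can cross-check the returned ChecksumCRC64NVME value; a sketch using Go's hash/crc64 with what is believed to be the CRC-64/NVME reversed polynomial (the constant and the encoding are assumptions for illustration, not taken from this repository):

	// assumes: encoding/base64, encoding/binary, fmt, hash/crc64
	tab := crc64.MakeTable(0x9A6C9329AC4BC9B5) // CRC-64/NVME, reversed form
	sum := crc64.Checksum(objectBytes, tab)
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, sum)
	fmt.Println(base64.StdEncoding.EncodeToString(b)) // compare to ChecksumCRC64NVME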
@@ -1994,6 +2093,10 @@ func (p *Posix) AbortMultipartUpload(_ context.Context, mpu *s3.AbortMultipartUp
	object := *mpu.Key
	uploadID := *mpu.UploadId

	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -2030,6 +2133,10 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl

	bucket := *mpu.Bucket
	var delimiter string

	if !p.isBucketValid(bucket) {
		return lmu, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	if mpu.Delimiter != nil {
		delimiter = *mpu.Delimiter
	}
@@ -2190,6 +2297,10 @@ func (p *Posix) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3resp
	bucket := *input.Bucket
	object := *input.Key
	uploadID := *input.UploadId

	if !p.isBucketValid(bucket) {
		return lpr, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	stringMarker := ""
	if input.PartNumberMarker != nil {
		stringMarker = *input.PartNumberMarker
@@ -2328,6 +2439,10 @@ type hashConfig struct {
}

func (p *Posix) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
	return p.UploadPartWithPostFunc(ctx, input, func(*os.File) error { return nil })
}

func (p *Posix) UploadPartWithPostFunc(ctx context.Context, input *s3.UploadPartInput, postprocess func(f *os.File) error) (*s3.UploadPartOutput, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
@@ -2340,6 +2455,10 @@ func (p *Posix) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.
	bucket := *input.Bucket
	object := *input.Key
	uploadID := *input.UploadId

	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	part := input.PartNumber
	length := int64(0)
	if input.ContentLength != nil {
@@ -2503,6 +2622,11 @@ func (p *Posix) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.
		}
	}

	err = postprocess(f.File())
	if err != nil {
		return nil, fmt.Errorf("upload part post process failed: %w", err)
	}

	err = f.link()
	if err != nil {
		return nil, fmt.Errorf("link object in namespace: %w", err)
@@ -2521,6 +2645,10 @@ func (p *Posix) UploadPartCopy(ctx context.Context, upi *s3.UploadPartCopyInput)
		return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}

	if !p.isBucketValid(*upi.Bucket) {
		return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(*upi.Bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
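The new ...WithPostFunc variants run a caller-supplied hook on the part or object file before it is linked into the namespace. A hypothetical use that fsyncs each part (ctx and input assumed in scope):

	out, err := p.UploadPartWithPostFunc(ctx, input, func(f *os.File) error {
		// force the part data to stable storage before it becomes visible
		return f.Sync()
	})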
@@ -2740,6 +2868,10 @@ func (p *Posix) UploadPartCopy(ctx context.Context, upi *s3.UploadPartCopyInput)
}

func (p *Posix) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
	return p.PutObjectWithPostFunc(ctx, po, func(*os.File) error { return nil })
}

func (p *Posix) PutObjectWithPostFunc(ctx context.Context, po s3response.PutObjectInput, postprocess func(f *os.File) error) (s3response.PutObjectOutput, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
@@ -2752,6 +2884,9 @@ func (p *Posix) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3
	if po.ChecksumAlgorithm == "" {
		po.ChecksumAlgorithm = types.ChecksumAlgorithmCrc64nvme
	}
	if !p.isBucketValid(*po.Bucket) {
		return s3response.PutObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(*po.Bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3response.PutObjectOutput{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -3015,6 +3150,13 @@ func (p *Posix) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3
			return s3response.PutObjectOutput{}, fmt.Errorf("set versionId attr: %w", err)
		}
	}

	err = postprocess(f.File())
	if err != nil {
		return s3response.PutObjectOutput{},
			fmt.Errorf("put object post process failed: %w", err)
	}

	err = f.link()
	if errors.Is(err, syscall.EEXIST) {
		return s3response.PutObjectOutput{
@@ -3028,7 +3170,7 @@ func (p *Posix) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3

	// Set object tagging
	if tags != nil {
		err := p.PutObjectTagging(ctx, *po.Bucket, *po.Key, tags)
		err := p.PutObjectTagging(ctx, *po.Bucket, *po.Key, "", tags)
		if errors.Is(err, fs.ErrNotExist) {
			return s3response.PutObjectOutput{
				ETag: etag,
@@ -3058,7 +3200,7 @@ func (p *Posix) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3
	if err != nil {
		return s3response.PutObjectOutput{}, fmt.Errorf("parse object lock retention: %w", err)
	}
	err = p.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", true, retParsed)
	err = p.PutObjectRetention(ctx, *po.Bucket, *po.Key, "", retParsed)
	if err != nil {
		return s3response.PutObjectOutput{}, err
	}
@@ -3086,6 +3228,10 @@ func (p *Posix) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (
	object := *input.Key
	isDir := strings.HasSuffix(object, "/")

	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -3496,6 +3642,9 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
	}

	bucket := *input.Bucket
	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -3528,7 +3677,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
	fid, err := os.Stat(objPath)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		if versionId != "" {
			return nil, s3err.GetAPIError(s3err.ErrInvalidVersionId)
			return nil, s3err.GetAPIError(s3err.ErrNoSuchVersion)
		}
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
@@ -3594,7 +3743,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
	objMeta := p.loadObjectMetaData(nil, bucket, object, &fid, userMetaData)

	var tagCount *int32
	tags, err := p.getAttrTags(bucket, object)
	tags, err := p.getAttrTags(bucket, object, versionId)
	if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)) {
		return nil, err
	}
@@ -3674,7 +3823,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
	objMeta := p.loadObjectMetaData(f, bucket, object, &fi, userMetaData)

	var tagCount *int32
	tags, err := p.getAttrTags(bucket, object)
	tags, err := p.getAttrTags(bucket, object, versionId)
	if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)) {
		return nil, err
	}
@@ -3748,6 +3897,10 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
	bucket := *input.Bucket
	object := *input.Key

	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -3780,7 +3933,7 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
	fi, err := os.Stat(objPath)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		if versionId != "" {
			return nil, s3err.GetAPIError(s3err.ErrInvalidVersionId)
			return nil, s3err.GetAPIError(s3err.ErrNoSuchVersion)
		}
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
@@ -3894,6 +4047,16 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
		}
	}

	var tagCount *int32
	tags, err := p.getAttrTags(bucket, object, versionId)
	if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound)) {
		return nil, err
	}
	if tags != nil {
		tc := int32(len(tags))
		tagCount = &tc
	}

	return &s3.HeadObjectOutput{
		ContentLength: &length,
		AcceptRanges:  backend.GetPtrFromString("bytes"),
@@ -3918,6 +4081,7 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
		ChecksumSHA256:    checksums.SHA256,
		ChecksumCRC64NVME: checksums.CRC64NVME,
		ChecksumType:      cType,
		TagCount:          tagCount,
	}, nil
}

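The new TagCount plumbing is visible to clients on HeadObject. A hypothetical check (client construction omitted; bucket and key are placeholders):

	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("logs"),
		Key:    aws.String("2024/01/app.log"),
	})
	if err == nil && out.TagCount != nil {
		fmt.Printf("object carries %d tags\n", *out.TagCount)
	}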
@@ -3972,9 +4136,16 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
	if err != nil {
		return s3response.CopyObjectOutput{}, err
	}
	if !p.isBucketValid(srcBucket) {
		return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	dstBucket := *input.Bucket
	dstObject := *input.Key

	if !p.isBucketValid(dstBucket) {
		return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err = os.Stat(srcBucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4180,7 +4351,7 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
		return s3response.CopyObjectOutput{}, err
	}

	err = p.PutObjectTagging(ctx, dstBucket, dstObject, tags)
	err = p.PutObjectTagging(ctx, dstBucket, dstObject, "", tags)
	if err != nil {
		return s3response.CopyObjectOutput{}, err
	}
@@ -4308,6 +4479,10 @@ func (p *Posix) ListObjectsParametrized(ctx context.Context, input *s3.ListObjec
		maxkeys = *input.MaxKeys
	}

	if !p.isBucketValid(bucket) {
		return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4337,6 +4512,12 @@ func (p *Posix) ListObjectsParametrized(ctx context.Context, input *s3.ListObjec
}

func (p *Posix) FileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
	if !p.isBucketValid(bucket) {
		return func(string, fs.DirEntry) (s3response.Object, error) {
			return s3response.Object{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
		}
	}

	return func(path string, d fs.DirEntry) (s3response.Object, error) {
		var owner *types.Owner
		// Retrieve the object owner data from bucket ACL, if fetchOwner is true
@@ -4469,6 +4650,10 @@ func (p *Posix) ListObjectsV2Parametrized(ctx context.Context, input *s3.ListObj
		fetchOwner = *input.FetchOwner
	}

	if !p.isBucketValid(bucket) {
		return s3response.ListObjectsV2Result{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3response.ListObjectsV2Result{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4502,6 +4687,9 @@ func (p *Posix) ListObjectsV2Parametrized(ctx context.Context, input *s3.ListObj
}

func (p *Posix) PutBucketAcl(_ context.Context, bucket string, data []byte) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4519,6 +4707,9 @@ func (p *Posix) PutBucketAcl(_ context.Context, bucket string, data []byte) erro
}

func (p *Posix) GetBucketAcl(_ context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
	if !p.isBucketValid(*input.Bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(*input.Bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4538,6 +4729,9 @@ func (p *Posix) GetBucketAcl(_ context.Context, input *s3.GetBucketAclInput) ([]
}

func (p *Posix) PutBucketTagging(_ context.Context, bucket string, tags map[string]string) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4569,6 +4763,9 @@ func (p *Posix) PutBucketTagging(_ context.Context, bucket string, tags map[stri
}

func (p *Posix) GetBucketTagging(_ context.Context, bucket string) (map[string]string, error) {
	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4577,7 +4774,7 @@ func (p *Posix) GetBucketTagging(_ context.Context, bucket string) (map[string]s
		return nil, fmt.Errorf("stat bucket: %w", err)
	}

	tags, err := p.getAttrTags(bucket, "")
	tags, err := p.getAttrTags(bucket, "", "")
	if err != nil {
		return nil, err
	}
@@ -4586,10 +4783,16 @@ func (p *Posix) GetBucketTagging(_ context.Context, bucket string) (map[string]s
}

func (p *Posix) DeleteBucketTagging(ctx context.Context, bucket string) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	return p.PutBucketTagging(ctx, bucket, nil)
}

func (p *Posix) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
func (p *Posix) GetObjectTagging(_ context.Context, bucket, object, versionId string) (map[string]string, error) {
	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4598,13 +4801,35 @@ func (p *Posix) GetObjectTagging(_ context.Context, bucket, object string) (map[
		return nil, fmt.Errorf("stat bucket: %w", err)
	}

	return p.getAttrTags(bucket, object)
	if versionId != "" {
		if !p.versioningEnabled() {
			// TODO: Maybe we need to return our custom error here?
			return nil, s3err.GetAPIError(s3err.ErrInvalidVersionId)
		}
		vId, err := p.meta.RetrieveAttribute(nil, bucket, object, versionIdKey)
		if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
			return nil, fmt.Errorf("get obj versionId: %w", err)
		}

		if string(vId) != versionId {
			bucket = filepath.Join(p.versioningDir, bucket)
			object = filepath.Join(genObjVersionKey(object), versionId)
		}
	}

	return p.getAttrTags(bucket, object, versionId)
}

func (p *Posix) getAttrTags(bucket, object string) (map[string]string, error) {
func (p *Posix) getAttrTags(bucket, object, versionId string) (map[string]string, error) {
	tags := make(map[string]string)
	b, err := p.meta.RetrieveAttribute(nil, bucket, object, tagHdr)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		if versionId != "" {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchVersion)
		}
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if errors.Is(err, meta.ErrNoSuchKey) {
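Version-specific tag reads now resolve through the versioning directory whenever the requested id is not the live version. A hypothetical client call that takes this path (bucket, key, and version id are placeholders):

	tags, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket:    aws.String("logs"),
		Key:       aws.String("app.log"),
		VersionId: aws.String(oldVersionID),
	})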
@@ -4622,7 +4847,10 @@ func (p *Posix) getAttrTags(bucket, object string) (map[string]string, error) {
	return tags, nil
}

func (p *Posix) PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error {
func (p *Posix) PutObjectTagging(_ context.Context, bucket, object, versionId string, tags map[string]string) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4631,9 +4859,31 @@ func (p *Posix) PutObjectTagging(_ context.Context, bucket, object string, tags
		return fmt.Errorf("stat bucket: %w", err)
	}

	if versionId != "" {
		if !p.versioningEnabled() {
			// TODO: Maybe we need to return our custom error here?
			return s3err.GetAPIError(s3err.ErrInvalidVersionId)
		}
		vId, err := p.meta.RetrieveAttribute(nil, bucket, object, versionIdKey)
		if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
			return s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
			return fmt.Errorf("get obj versionId: %w", err)
		}

		if string(vId) != versionId {
			bucket = filepath.Join(p.versioningDir, bucket)
			object = filepath.Join(genObjVersionKey(object), versionId)
		}
	}

	if tags == nil {
		err = p.meta.DeleteAttribute(bucket, object, tagHdr)
		if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
			if versionId != "" {
				return s3err.GetAPIError(s3err.ErrNoSuchVersion)
			}
			return s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if errors.Is(err, meta.ErrNoSuchKey) {
@@ -4652,6 +4902,9 @@ func (p *Posix) PutObjectTagging(_ context.Context, bucket, object string, tags

	err = p.meta.StoreAttribute(nil, bucket, object, tagHdr, b)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		if versionId != "" {
			return s3err.GetAPIError(s3err.ErrNoSuchVersion)
		}
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if err != nil {
@@ -4661,11 +4914,17 @@ func (p *Posix) PutObjectTagging(_ context.Context, bucket, object string, tags
	return nil
}

func (p *Posix) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
	return p.PutObjectTagging(ctx, bucket, object, nil)
func (p *Posix) DeleteObjectTagging(ctx context.Context, bucket, object, versionId string) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	return p.PutObjectTagging(ctx, bucket, object, versionId, nil)
}

func (p *Posix) PutBucketPolicy(ctx context.Context, bucket string, policy []byte) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4696,6 +4955,9 @@ func (p *Posix) PutBucketPolicy(ctx context.Context, bucket string, policy []byt
}

func (p *Posix) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, error) {
	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4719,10 +4981,16 @@ func (p *Posix) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, err
}

func (p *Posix) DeleteBucketPolicy(ctx context.Context, bucket string) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	return p.PutBucketPolicy(ctx, bucket, nil)
}

func (p *Posix) PutBucketCors(_ context.Context, bucket string, cors []byte) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4749,6 +5017,9 @@ func (p *Posix) PutBucketCors(_ context.Context, bucket string, cors []byte) err
}

func (p *Posix) GetBucketCors(_ context.Context, bucket string) ([]byte, error) {
	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4769,6 +5040,9 @@ func (p *Posix) GetBucketCors(_ context.Context, bucket string) ([]byte, error)
}

func (p *Posix) DeleteBucketCors(ctx context.Context, bucket string) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	return p.PutBucketCors(ctx, bucket, nil)
}

@@ -4797,6 +5071,9 @@ func (p *Posix) isBucketObjectLockEnabled(bucket string) error {
}

func (p *Posix) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4805,21 +5082,21 @@ func (p *Posix) PutObjectLockConfiguration(ctx context.Context, bucket string, c
		return fmt.Errorf("stat bucket: %w", err)
	}

	cfg, err := p.meta.RetrieveAttribute(nil, bucket, "", bucketLockKey)
	if errors.Is(err, meta.ErrNoSuchKey) {
		return s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotAllowed)
	}
	if err != nil {
		return fmt.Errorf("get object lock config: %w", err)
	}
	if p.versioningEnabled() {
		// if versioning is enabled on gateway level and bucket versioning
		// status is not `Enabled`, object lock can't be enabled.
		// if object lock was enabled on bucket creation,
		// versioning was enabled alongside object lock
		// and can never be suspended again
		status, err := p.getBucketVersioningStatus(ctx, bucket)
		if err != nil {
			return err
		}

	var bucketLockCfg auth.BucketLockConfig
	if err := json.Unmarshal(cfg, &bucketLockCfg); err != nil {
		return fmt.Errorf("unmarshal object lock config: %w", err)
	}

	if !bucketLockCfg.Enabled {
		return s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotAllowed)
		if status != types.BucketVersioningStatusEnabled {
			// if versioning is enabled on gateway level
			return s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotAllowed)
		}
	}

	err = p.meta.StoreAttribute(nil, bucket, "", bucketLockKey, config)
@@ -4831,6 +5108,9 @@ func (p *Posix) PutObjectLockConfiguration(ctx context.Context, bucket string, c
}

func (p *Posix) GetObjectLockConfiguration(_ context.Context, bucket string) ([]byte, error) {
	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -4851,6 +5131,9 @@ func (p *Posix) GetObjectLockConfiguration(_ context.Context, bucket string) ([]
}

func (p *Posix) PutObjectLegalHold(_ context.Context, bucket, object, versionId string, status bool) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	err := p.doesBucketAndObjectExist(bucket, object)
	if err != nil {
		return err
@@ -4889,7 +5172,7 @@ func (p *Posix) PutObjectLegalHold(_ context.Context, bucket, object, versionId
	err = p.meta.StoreAttribute(nil, bucket, object, objectLegalHoldKey, statusData)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		if versionId != "" {
			return s3err.GetAPIError(s3err.ErrInvalidVersionId)
			return s3err.GetAPIError(s3err.ErrNoSuchVersion)
		}
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
@@ -4901,6 +5184,9 @@ func (p *Posix) PutObjectLegalHold(_ context.Context, bucket, object, versionId
}

func (p *Posix) GetObjectLegalHold(_ context.Context, bucket, object, versionId string) (*bool, error) {
	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	err := p.doesBucketAndObjectExist(bucket, object)
	if err != nil {
		return nil, err
@@ -4932,7 +5218,7 @@ func (p *Posix) GetObjectLegalHold(_ context.Context, bucket, object, versionId
	data, err := p.meta.RetrieveAttribute(nil, bucket, object, objectLegalHoldKey)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		if versionId != "" {
			return nil, s3err.GetAPIError(s3err.ErrInvalidVersionId)
			return nil, s3err.GetAPIError(s3err.ErrNoSuchVersion)
		}
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
@@ -4948,7 +5234,10 @@ func (p *Posix) GetObjectLegalHold(_ context.Context, bucket, object, versionId
	return &result, nil
}

func (p *Posix) PutObjectRetention(_ context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
func (p *Posix) PutObjectRetention(_ context.Context, bucket, object, versionId string, retention []byte) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	err := p.doesBucketAndObjectExist(bucket, object)
	if err != nil {
		return err
@@ -4977,41 +5266,6 @@ func (p *Posix) PutObjectRetention(_ context.Context, bucket, object, versionId
		}
	}

	objectLockCfg, err := p.meta.RetrieveAttribute(nil, bucket, object, objectRetentionKey)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		if versionId != "" {
			return s3err.GetAPIError(s3err.ErrInvalidVersionId)
		}
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if errors.Is(err, meta.ErrNoSuchKey) {
		err := p.meta.StoreAttribute(nil, bucket, object, objectRetentionKey, retention)
		if err != nil {
			return fmt.Errorf("set object lock config: %w", err)
		}

		return nil
	}
	if err != nil {
		return fmt.Errorf("get object lock config: %w", err)
	}

	var lockCfg types.ObjectLockRetention
	if err := json.Unmarshal(objectLockCfg, &lockCfg); err != nil {
		return fmt.Errorf("unmarshal object lock config: %w", err)
	}

	switch lockCfg.Mode {
	// Compliance mode can't be overridden
	case types.ObjectLockRetentionModeCompliance:
		return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
	// To override governance mode user should have "s3:BypassGovernanceRetention" permission
	case types.ObjectLockRetentionModeGovernance:
		if !bypass {
			return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
		}
	}

	err = p.meta.StoreAttribute(nil, bucket, object, objectRetentionKey, retention)
	if err != nil {
		return fmt.Errorf("set object lock config: %w", err)
@@ -5021,6 +5275,9 @@ func (p *Posix) PutObjectRetention(_ context.Context, bucket, object, versionId
}

func (p *Posix) GetObjectRetention(_ context.Context, bucket, object, versionId string) ([]byte, error) {
	if !p.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	err := p.doesBucketAndObjectExist(bucket, object)
	if err != nil {
		return nil, err
@@ -5052,7 +5309,7 @@ func (p *Posix) GetObjectRetention(_ context.Context, bucket, object, versionId
	data, err := p.meta.RetrieveAttribute(nil, bucket, object, objectRetentionKey)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		if versionId != "" {
			return nil, s3err.GetAPIError(s3err.ErrInvalidVersionId)
			return nil, s3err.GetAPIError(s3err.ErrNoSuchVersion)
		}
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
@@ -5067,6 +5324,9 @@ func (p *Posix) GetObjectRetention(_ context.Context, bucket, object, versionId
}

func (p *Posix) ChangeBucketOwner(ctx context.Context, bucket, owner string) error {
	if !p.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}
	return auth.UpdateBucketACLOwner(ctx, p, bucket, owner)
}

@@ -285,11 +285,14 @@ func (s *S3Proxy) GetBucketVersioning(ctx context.Context, bucket string) (s3res
	out, err := s.client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
		Bucket: &bucket,
	})
	if err != nil {
		return s3response.GetBucketVersioningOutput{}, handleError(err)
	}

	return s3response.GetBucketVersioningOutput{
		Status:    &out.Status,
		MFADelete: &out.MFADelete,
	}, handleError(err)
	}, nil
}

func (s *S3Proxy) ListObjectVersions(ctx context.Context, input *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
@@ -1445,7 +1448,7 @@ func (s *S3Proxy) PutBucketAcl(ctx context.Context, bucket string, data []byte)
	return handleError(s.putMetaBucketObj(ctx, bucket, data, metaPrefixAcl))
}

func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object, versionId string, tags map[string]string) error {
	if bucket == s.metaBucket {
		return s3err.GetAPIError(s3err.ErrAccessDenied)
	}
@@ -1460,20 +1463,22 @@ func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, t
	}

	_, err := s.client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
		Bucket:  &bucket,
		Key:     &object,
		Tagging: tagging,
		Bucket:    &bucket,
		Key:       &object,
		VersionId: &versionId,
		Tagging:   tagging,
	})
	return handleError(err)
}

func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object, versionId string) (map[string]string, error) {
	if bucket == s.metaBucket {
		return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
	}
	output, err := s.client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket: &bucket,
		Key:    &object,
		Bucket:    &bucket,
		Key:       &object,
		VersionId: &versionId,
	})
	if err != nil {
		return nil, handleError(err)
@@ -1487,13 +1492,14 @@ func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (
	return tags, nil
}

func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object, versionId string) error {
	if bucket == s.metaBucket {
		return s3err.GetAPIError(s3err.ErrAccessDenied)
	}
	_, err := s.client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
		Bucket: &bucket,
		Key:    &object,
		Bucket:    &bucket,
		Key:       &object,
		VersionId: &versionId,
	})
	return handleError(err)
}
@@ -1558,7 +1564,7 @@ func (s *S3Proxy) GetObjectLockConfiguration(ctx context.Context, bucket string)
	return nil, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)
}

func (s *S3Proxy) PutObjectRetention(ctx context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
func (s *S3Proxy) PutObjectRetention(ctx context.Context, bucket, object, versionId string, retention []byte) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}

@@ -15,24 +15,9 @@
package scoutfs

import (
	"context"
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/pkg/xattr"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/backend/posix"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
)

// ScoutfsOpts are the options for the ScoutFS backend
@@ -41,6 +26,8 @@ type ScoutfsOpts struct {
	ChownUID bool
	// ChownGID sets the GID of the object to the GID of the user on PUT
	ChownGID bool
	// SetProjectID sets the Project ID of the bucket/object to the project ID of the user on PUT
	SetProjectID bool
	// BucketLinks enables symlinks to directories to be treated as buckets
	BucketLinks bool
	// VersioningDir sets the version directory to enable object versioning
@@ -51,322 +38,10 @@ type ScoutfsOpts struct {
	GlacierMode bool
	// DisableNoArchive prevents setting noarchive on temporary files
	DisableNoArchive bool
}

type ScoutFS struct {
	*posix.Posix
	rootfd  *os.File
	rootdir string

	// glaciermode enables the following behavior:
	// GET object: if file offline, return invalid object state
	// HEAD object: if file offline, set obj storage class to GLACIER
	//              if file offline and staging, x-amz-restore: ongoing-request="true"
	//              if file offline and not staging, x-amz-restore: ongoing-request="false"
	//              if file online, x-amz-restore: ongoing-request="false", expiry-date="Fri, 2 Dec 2050 00:00:00 GMT"
	//              note: this expiry-date is not used but provided for client glacier compatibility
	// ListObjects: if file offline, set obj storage class to GLACIER
	// RestoreObject: add batch stage request to file
	glaciermode bool

	// disableNoArchive is used to disable setting the scoutam noarchive flag
	// on multipart parts. This is enabled by default to prevent archive
	// copies of temporary multipart parts.
	disableNoArchive bool
	// ValidateBucketNames enables minimal bucket name validation to prevent
	// incorrect access to the filesystem. This is only needed if the
	// frontend is not already validating bucket names.
	ValidateBucketNames bool
}

var _ backend.Backend = &ScoutFS{}

const (
	stageComplete      = "ongoing-request=\"false\", expiry-date=\"Fri, 2 Dec 2050 00:00:00 GMT\""
	stageInProgress    = "true"
	stageNotInProgress = "false"
)

const (
	// ScoutFS special xattr types
	systemPrefix = "scoutfs.hide."
	onameAttr    = systemPrefix + "objname"
	flagskey     = systemPrefix + "sam_flags"
	stagecopykey = systemPrefix + "sam_stagereq"
)

const (
	// ScoutAM Flags

	// Staging - file requested stage
	Staging uint64 = 1 << iota
	// StageFail - all copies failed to stage
	StageFail
	// NoArchive - no archive copies of file should be made
	NoArchive
	// ExtCacheRequested means file policy requests Ext Cache
	ExtCacheRequested
	// ExtCacheDone means this file ext cache copy has been
	// created already (and possibly pruned, so may not exist)
	ExtCacheDone
)

func (s *ScoutFS) Shutdown() {
	s.Posix.Shutdown()
	s.rootfd.Close()
	_ = s.rootdir
}

func (*ScoutFS) String() string {
	return "ScoutFS Gateway"
}

func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
	out, err := s.Posix.UploadPart(ctx, input)
	if err != nil {
		return nil, err
	}

	if !s.disableNoArchive {
		sum := sha256.Sum256([]byte(*input.Key))
		partPath := filepath.Join(
			*input.Bucket,                        // bucket
			posix.MetaTmpMultipartDir,            // temp multipart dir
			fmt.Sprintf("%x", sum),               // hashed objname
			*input.UploadId,                      // upload id
			fmt.Sprintf("%v", *input.PartNumber), // part number
		)

		err = setNoArchive(partPath)
		if err != nil {
			return nil, fmt.Errorf("set noarchive: %w", err)
		}
	}

	return out, err
}

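For reference, the temporary part location that UploadPart marks noarchive above can be reproduced as follows (the layout is taken from the code; key, bucket, uploadID, and partNum are stand-in variables):

	sum := sha256.Sum256([]byte(key))
	partPath := filepath.Join(
		bucket,
		posix.MetaTmpMultipartDir,
		fmt.Sprintf("%x", sum),
		uploadID,
		fmt.Sprintf("%v", partNum),
	)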
// CompleteMultipartUpload scoutfs complete upload uses scoutfs move blocks
// ioctl to not have to read and copy the part data to the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
	return s.Posix.CompleteMultipartUploadWithCopy(ctx, input, moveData)
}

func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
	res, err := s.Posix.HeadObject(ctx, input)
	if err != nil {
		return nil, err
	}

	if s.glaciermode {
		objPath := filepath.Join(*input.Bucket, *input.Key)

		stclass := types.StorageClassStandard
		requestOngoing := ""

		requestOngoing = stageComplete

		// Check if there are any offline extents associated with this file.
		// If so, we will set storage class to glacier.
		st, err := statMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			stclass = types.StorageClassGlacier
			requestOngoing = stageNotInProgress

			ok, err := isStaging(objPath)
			if errors.Is(err, fs.ErrNotExist) {
				return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
			}
			if err != nil {
				return nil, fmt.Errorf("check stage status: %w", err)
			}
			if ok {
				requestOngoing = stageInProgress
			}
		}

		res.Restore = &requestOngoing
		res.StorageClass = stclass
	}

	return res, nil
}

func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
	bucket := *input.Bucket
	object := *input.Key

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return nil, fmt.Errorf("stat bucket: %w", err)
	}

	objPath := filepath.Join(bucket, object)

	fi, err := os.Stat(objPath)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if errors.Is(err, syscall.ENAMETOOLONG) {
		return nil, s3err.GetAPIError(s3err.ErrKeyTooLong)
	}
	if err != nil {
		return nil, fmt.Errorf("stat object: %w", err)
	}

	if strings.HasSuffix(object, "/") && !fi.IsDir() {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}

	if s.glaciermode {
		// Check if there are any offline extents associated with this file.
		// If so, we will return the InvalidObjectState error.
		st, err := statMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidObjectState)
		}
	}

	return s.Posix.GetObject(ctx, input)
}

func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
	if s.glaciermode {
		return s.Posix.ListObjectsParametrized(ctx, input, s.glacierFileToObj)
	} else {
		return s.Posix.ListObjects(ctx, input)
	}
}

func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
	if s.glaciermode {
		return s.Posix.ListObjectsV2Parametrized(ctx, input, s.glacierFileToObj)
	} else {
		return s.Posix.ListObjectsV2(ctx, input)
	}
}

// FileToObj function for ListObject calls that adds a Glacier storage class if the file is offline
func (s *ScoutFS) glacierFileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
	posixFileToObj := s.Posix.FileToObj(bucket, fetchOwner)

	return func(path string, d fs.DirEntry) (s3response.Object, error) {
		res, err := posixFileToObj(path, d)
		if err != nil || d.IsDir() {
			return res, err
		}
		objPath := filepath.Join(bucket, path)
		// Check if there are any offline extents associated with this file.
		// If so, we will return the Glacier storage class
		st, err := statMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return s3response.Object{}, backend.ErrSkipObj
		}
		if err != nil {
			return s3response.Object{}, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			res.StorageClass = types.ObjectStorageClassGlacier
		}
		return res, nil
	}
}

// RestoreObject will set stage request on file if offline and do nothing if
// file is online
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
err = setStaging(filepath.Join(bucket, object))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("stage object: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func isStaging(objname string) (bool, error) {
|
||||
b, err := xattr.Get(objname, flagskey)
|
||||
if err != nil && !isNoAttr(err) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
var flags uint64
|
||||
if !isNoAttr(err) {
|
||||
err = json.Unmarshal(b, &flags)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
return flags&Staging == Staging, nil
|
||||
}
|
||||
|
||||
func setFlag(objname string, flag uint64) error {
|
||||
b, err := xattr.Get(objname, flagskey)
|
||||
if err != nil && !isNoAttr(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
var oldflags uint64
|
||||
if !isNoAttr(err) {
|
||||
err = json.Unmarshal(b, &oldflags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
newflags := oldflags | flag
|
||||
|
||||
if newflags == oldflags {
|
||||
// no flags change, just return
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err = json.Marshal(&newflags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return xattr.Set(objname, flagskey, b)
|
||||
}
|
||||
|
||||
func setStaging(objname string) error {
|
||||
return setFlag(objname, Staging)
|
||||
}
|
||||
|
||||
func setNoArchive(objname string) error {
|
||||
return setFlag(objname, NoArchive)
|
||||
}
|
||||
|
||||
func isNoAttr(err error) bool {
|
||||
xerr, ok := err.(*xattr.Error)
|
||||
if ok && xerr.Err == xattr.ENOATTR {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -17,24 +17,70 @@
package scoutfs

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/pkg/xattr"
	"github.com/versity/scoutfs-go"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/backend/meta"
	"github.com/versity/versitygw/backend/posix"
	"github.com/versity/versitygw/debuglogger"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
)

type ScoutFS struct {
	*posix.Posix
	rootfd  *os.File
	rootdir string

	// glaciermode enables the following behavior:
	// GET object:  if file offline, return invalid object state
	// HEAD object: if file offline, set obj storage class to GLACIER
	//              if file offline and staging, x-amz-restore: ongoing-request="true"
	//              if file offline and not staging, x-amz-restore: ongoing-request="false"
	//              if file online, x-amz-restore: ongoing-request="false", expiry-date="Fri, 2 Dec 2050 00:00:00 GMT"
	//              note: this expiry-date is not used but provided for client glacier compatibility
	// ListObjects: if file offline, set obj storage class to GLACIER
	// RestoreObject: add batch stage request to file
	glaciermode bool

	// disableNoArchive is used to disable setting scoutam noarchive flag
	// on multipart parts. This is enabled by default to prevent archive
	// copies of temporary multipart parts.
	disableNoArchive bool

	// enable posix level bucket name validations, not needed if the
	// frontend handlers are already validating bucket names
	validateBucketName bool

	// projectIDEnabled enables setting projectid of new buckets and objects
	// to the account project id when non-0
	projectIDEnabled bool
}
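
// The glacier mode mapping above can be observed from any S3 client. What
// follows is a minimal client-side sketch, assuming a gateway running with
// glaciermode enabled; the bucket and key names are placeholders.
// HeadObjectOutput's Restore and StorageClass fields carry the values
// described in the comment:
//
//	cfg, _ := config.LoadDefaultConfig(context.TODO())
//	client := s3.NewFromConfig(cfg)
//	out, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
//		Bucket: aws.String("mybucket"),        // placeholder
//		Key:    aws.String("archived/object"), // placeholder
//	})
//	if err == nil {
//		fmt.Println(out.StorageClass)          // GLACIER when the file is offline
//		fmt.Println(aws.ToString(out.Restore)) // ongoing-request="true" while staging
//	}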

func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
	metastore := meta.XattrMeta{}

	p, err := posix.New(rootdir, metastore, posix.PosixOpts{
		ChownUID:            opts.ChownUID,
		ChownGID:            opts.ChownGID,
		BucketLinks:         opts.BucketLinks,
		NewDirPerm:          opts.NewDirPerm,
		VersioningDir:       opts.VersioningDir,
		ValidateBucketNames: opts.ValidateBucketNames,
	})
	if err != nil {
		return nil, err
@@ -45,50 +91,491 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
		return nil, fmt.Errorf("open %v: %w", rootdir, err)
	}

	setProjectID := opts.SetProjectID
	if opts.SetProjectID {
		setProjectID = fGetFormatVersion(f).AtLeast(versionScoutFsV2)
		if !setProjectID {
			fmt.Println("WARNING:")
			fmt.Println("Disabling ProjectIDs for unsupported FS format version")
			fmt.Println("See documentation for format version upgrades")
		}
	}

	return &ScoutFS{
		Posix:            p,
		rootfd:           f,
		rootdir:          rootdir,
		glaciermode:      opts.GlacierMode,
		disableNoArchive: opts.DisableNoArchive,
		projectIDEnabled: setProjectID,
	}, nil
}

func moveData(from *os.File, to *os.File) error {
	// May fail if the files are not 4K aligned; check for alignment
	ffi, err := from.Stat()
	if err != nil {
		return fmt.Errorf("stat from: %v", err)
	}
	tfi, err := to.Stat()
	if err != nil {
		return fmt.Errorf("stat to: %v", err)
	}
	if ffi.Size()%4096 != 0 || tfi.Size()%4096 != 0 {
		return os.ErrInvalid
	}

	err = scoutfs.MoveData(from, to)
	if err != nil {
		debuglogger.Logf("ScoutFs MoveData failed: %v", err)
	}
	return err
}

const (
	stageComplete      = "ongoing-request=\"false\", expiry-date=\"Fri, 2 Dec 2050 00:00:00 GMT\""
	stageInProgress    = "true"
	stageNotInProgress = "false"
)

const (
	// ScoutFS special xattr types
	systemPrefix = "scoutfs.hide."
	flagskey     = systemPrefix + "sam_flags"
)

const (
	// ScoutAM Flags

	// Staging - file requested stage
	Staging uint64 = 1 << iota
	// StageFail - all copies failed to stage
	StageFail
	// NoArchive - no archive copies of file should be made
	NoArchive
	// ExtCacheRequested means file policy requests Ext Cache
	ExtCacheRequested
	// ExtCacheDone means this file ext cache copy has been
	// created already (and possibly pruned, so may not exist)
	ExtCacheDone
)

func (s *ScoutFS) Shutdown() {
	s.Posix.Shutdown()
	s.rootfd.Close()
}

func statMore(path string) (stat, error) {
	st, err := scoutfs.StatMore(path)
	if err != nil {
		return stat{}, err
	}
	var s stat

	s.Meta_seq = st.Meta_seq
	s.Data_seq = st.Data_seq
	s.Data_version = st.Data_version
	s.Online_blocks = st.Online_blocks
	s.Offline_blocks = st.Offline_blocks
	s.Crtime_sec = st.Crtime_sec
	s.Crtime_nsec = st.Crtime_nsec

	return s, nil
}

func (*ScoutFS) String() string {
	return "ScoutFS Gateway"
}

func (s *ScoutFS) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, acl []byte) error {
	err := s.Posix.CreateBucket(ctx, input, acl)
	if err != nil {
		return err
	}

	if s.projectIDEnabled {
		acct, ok := ctx.Value("account").(auth.Account)
		if !ok {
			acct = auth.Account{}
		}

		if !isValidProjectID(acct.ProjectID) {
			// early return to avoid the open if we don't have a valid
			// project id
			return nil
		}

		f, err := os.Open(*input.Bucket)
		if err != nil {
			debuglogger.InernalError(fmt.Errorf("create bucket %q set project id - open: %v",
				*input.Bucket, err))
			return nil
		}

		err = s.setProjectID(f, acct.ProjectID)
		f.Close()
		if err != nil {
			debuglogger.InernalError(fmt.Errorf("create bucket %q set project id: %v",
				*input.Bucket, err))
		}
	}

	return nil
}

func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
	res, err := s.Posix.HeadObject(ctx, input)
	if err != nil {
		return nil, err
	}

	if s.glaciermode {
		objPath := filepath.Join(*input.Bucket, *input.Key)

		stclass := types.StorageClassStandard
		requestOngoing := ""

		requestOngoing = stageComplete

		// Check if there are any offline extents associated with this file.
		// If so, we will set storage class to glacier.
		st, err := scoutfs.StatMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			stclass = types.StorageClassGlacier
			requestOngoing = stageNotInProgress

			ok, err := isStaging(objPath)
			if errors.Is(err, fs.ErrNotExist) {
				return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
			}
			if err != nil {
				return nil, fmt.Errorf("check stage status: %w", err)
			}
			if ok {
				requestOngoing = stageInProgress
			}
		}

		res.Restore = &requestOngoing
		res.StorageClass = stclass
	}

	return res, nil
}

func (s *ScoutFS) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
	}

	return s.Posix.PutObjectWithPostFunc(ctx, po, func(f *os.File) error {
		err := s.setProjectID(f, acct.ProjectID)
		if err != nil {
			debuglogger.InernalError(fmt.Errorf("put object %v/%v set project id: %v",
				filepath.Join(*po.Bucket, *po.Key), acct.ProjectID, err))
		}

		return nil
	})
}

func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
	}

	return s.Posix.UploadPartWithPostFunc(ctx, input,
		func(f *os.File) error {
			if !s.disableNoArchive {
				err := setNoArchive(f)
				if err != nil {
					return fmt.Errorf("set noarchive: %w", err)
				}
			}

			err := s.setProjectID(f, acct.ProjectID)
			if err != nil {
				return fmt.Errorf("set project id %v: %w", acct.ProjectID, err)
			}

			return nil
		})
}

// CompleteMultipartUpload scoutfs complete upload uses the scoutfs move blocks
// ioctl to avoid reading and copying the part data to the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		acct = auth.Account{}
	}

	return s.Posix.CompleteMultipartUploadWithCopy(ctx, input,
		func(from *os.File, to *os.File) error {
			// May fail if the files are not 4K aligned; check for alignment
			ffi, err := from.Stat()
			if err != nil {
				return fmt.Errorf("complete-mpu stat from: %w", err)
			}
			tfi, err := to.Stat()
			if err != nil {
				return fmt.Errorf("complete-mpu stat to: %w", err)
			}
			if ffi.Size()%4096 != 0 || tfi.Size()%4096 != 0 {
				return os.ErrInvalid
			}

			err = s.setProjectID(to, acct.ProjectID)
			if err != nil {
				debuglogger.InernalError(fmt.Errorf("complete-mpu %q/%q set project id %v: %v",
					*input.Bucket, *input.Key, acct.ProjectID, err))
			}

			err = scoutfs.MoveData(from, to)
			if err != nil {
				return fmt.Errorf("complete-mpu movedata: %w", err)
			}

			return nil
		})
}

func (s *ScoutFS) isBucketValid(bucket string) bool {
	if !s.validateBucketName {
		return true
	}

	return backend.IsValidDirectoryName(bucket)
}

func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
	bucket := *input.Bucket
	object := *input.Key

	if !s.isBucketValid(bucket) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return nil, fmt.Errorf("stat bucket: %w", err)
	}

	objPath := filepath.Join(bucket, object)

	fi, err := os.Stat(objPath)
	if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if errors.Is(err, syscall.ENAMETOOLONG) {
		return nil, s3err.GetAPIError(s3err.ErrKeyTooLong)
	}
	if err != nil {
		return nil, fmt.Errorf("stat object: %w", err)
	}

	if strings.HasSuffix(object, "/") && !fi.IsDir() {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
	}

	if s.glaciermode {
		// Check if there are any offline extents associated with this file.
		// If so, we will return the InvalidObjectState error.
		st, err := scoutfs.StatMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
		}
		if err != nil {
			return nil, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			return nil, s3err.GetAPIError(s3err.ErrInvalidObjectState)
		}
	}

	return s.Posix.GetObject(ctx, input)
}

func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
	if s.glaciermode {
		return s.Posix.ListObjectsParametrized(ctx, input, s.glacierFileToObj)
	} else {
		return s.Posix.ListObjects(ctx, input)
	}
}

func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
	if s.glaciermode {
		return s.Posix.ListObjectsV2Parametrized(ctx, input, s.glacierFileToObj)
	} else {
		return s.Posix.ListObjectsV2(ctx, input)
	}
}

// glacierFileToObj is a FileToObj function for ListObject calls that adds a
// Glacier storage class if the file is offline
func (s *ScoutFS) glacierFileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
	posixFileToObj := s.Posix.FileToObj(bucket, fetchOwner)

	return func(path string, d fs.DirEntry) (s3response.Object, error) {
		res, err := posixFileToObj(path, d)
		if err != nil || d.IsDir() {
			return res, err
		}
		objPath := filepath.Join(bucket, path)
		// Check if there are any offline extents associated with this file.
		// If so, we will return the Glacier storage class
		st, err := scoutfs.StatMore(objPath)
		if errors.Is(err, fs.ErrNotExist) {
			return s3response.Object{}, backend.ErrSkipObj
		}
		if err != nil {
			return s3response.Object{}, fmt.Errorf("stat more: %w", err)
		}
		if st.Offline_blocks != 0 {
			res.StorageClass = types.ObjectStorageClassGlacier
		}
		return res, nil
	}
}

// RestoreObject will set stage request on file if offline and do nothing if
// file is online
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
	bucket := *input.Bucket
	object := *input.Key

	if !s.isBucketValid(bucket) {
		return s3err.GetAPIError(s3err.ErrInvalidBucketName)
	}

	_, err := os.Stat(bucket)
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	if err != nil {
		return fmt.Errorf("stat bucket: %w", err)
	}

	err = setStaging(filepath.Join(bucket, object))
	if errors.Is(err, fs.ErrNotExist) {
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	}
	if err != nil {
		return fmt.Errorf("stage object: %w", err)
	}

	return nil
}

func isStaging(objname string) (bool, error) {
	b, err := xattr.Get(objname, flagskey)
	if err != nil && !isNoAttr(err) {
		return false, err
	}

	var flags uint64
	if !isNoAttr(err) {
		err = json.Unmarshal(b, &flags)
		if err != nil {
			return false, err
		}
	}

	return flags&Staging == Staging, nil
}

func setFlag(objname string, flag uint64) error {
	f, err := os.Open(objname)
	if err != nil {
		return err
	}
	defer f.Close()

	return fsetFlag(f, flag)
}

func fsetFlag(f *os.File, flag uint64) error {
	b, err := xattr.FGet(f, flagskey)
	if err != nil && !isNoAttr(err) {
		return err
	}

	var oldflags uint64
	if !isNoAttr(err) {
		err = json.Unmarshal(b, &oldflags)
		if err != nil {
			return err
		}
	}

	newflags := oldflags | flag

	if newflags == oldflags {
		// no flags change, just return
		return nil
	}

	b, err = json.Marshal(&newflags)
	if err != nil {
		return err
	}

	return xattr.FSet(f, flagskey, b)
}

func setStaging(objname string) error {
	return setFlag(objname, Staging)
}

func setNoArchive(f *os.File) error {
	return fsetFlag(f, NoArchive)
}

func isNoAttr(err error) bool {
	xerr, ok := err.(*xattr.Error)
	if ok && xerr.Err == xattr.ENOATTR {
		return true
	}
	return false
}
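
// The helpers above persist the ScoutAM flag bits as a JSON-encoded uint64 in
// the sam_flags xattr. A minimal standalone sketch of reading that encoding
// back, assuming a placeholder path on a ScoutFS mount:
//
//	b, err := xattr.Get("/mnt/scoutfs/bucket/obj", "scoutfs.hide.sam_flags")
//	if err == nil {
//		var flags uint64
//		if json.Unmarshal(b, &flags) == nil {
//			fmt.Println("staging:", flags&Staging != 0)
//			fmt.Println("noarchive:", flags&NoArchive != 0)
//		}
//	}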

func (s *ScoutFS) setProjectID(f *os.File, proj int) error {
	if s.projectIDEnabled && isValidProjectID(proj) {
		err := scoutfs.SetProjectID(f, uint64(proj))
		if err != nil {
			return fmt.Errorf("set project id: %w", err)
		}
	}
	return nil
}

func isValidProjectID(proj int) bool {
	return proj > 0
}

const (
	sysscoutfs    = "/sys/fs/scoutfs/"
	formatversion = "format_version"
)

// fGetFormatVersion returns the ScoutFS format version reported by sysfs
func fGetFormatVersion(f *os.File) scoutFsVersion {
	fsid, err := scoutfs.GetIDs(f)
	if err != nil {
		return versionScoutFsNotScoutFS
	}

	path := filepath.Join(sysscoutfs, fsid.ShortID, formatversion)
	buf, err := os.ReadFile(path)
	if err != nil {
		return versionScoutFsUnknown
	}

	str := strings.TrimSpace(string(buf))
	vers, err := strconv.Atoi(str)
	if err != nil {
		return versionScoutFsUnknown
	}

	return scoutFsVersion(vers)
}

const (
	// versionScoutFsUnknown is unknown version
	versionScoutFsUnknown scoutFsVersion = iota
	// versionScoutFsV1 is version 1
	versionScoutFsV1
	// versionScoutFsV2 is version 2
	versionScoutFsV2
	// versionScoutFsMin is minimum scoutfs version
	versionScoutFsMin = versionScoutFsV1
	// versionScoutFsMax is maximum scoutfs version
	versionScoutFsMax = versionScoutFsV2
	// versionScoutFsNotScoutFS means the target FS is not scoutfs
	versionScoutFsNotScoutFS = versionScoutFsMax + 1
)

// scoutFsVersion is a ScoutFS format version
type scoutFsVersion int

// AtLeast returns true if version is valid and at least b
func (a scoutFsVersion) AtLeast(b scoutFsVersion) bool {
	return a.IsValid() && a >= b
}

func (a scoutFsVersion) IsValid() bool {
	return a >= versionScoutFsMin && a <= versionScoutFsMax
}

@@ -17,23 +17,15 @@
package scoutfs

import (
	"errors"
	"fmt"
	"os"

	"github.com/versity/versitygw/backend"
)

type ScoutFS struct {
	backend.BackendUnsupported
}

func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
	return nil, fmt.Errorf("scoutfs only available on linux")
}

var (
	errNotSupported = errors.New("not supported")
)

func moveData(_, _ *os.File) error {
	return errNotSupported
}

func statMore(_ string) (stat, error) {
	return stat{}, errNotSupported
}

@@ -82,6 +82,11 @@ func adminCommand() *cli.Command {
				Usage:   "groupID for the new user",
				Aliases: []string{"gi"},
			},
			&cli.IntFlag{
				Name:    "project-id",
				Usage:   "projectID for the new user",
				Aliases: []string{"pi"},
			},
		},
	},
	{
@@ -115,6 +120,11 @@ func adminCommand() *cli.Command {
				Usage:   "groupID for the new user",
				Aliases: []string{"gi"},
			},
			&cli.IntFlag{
				Name:    "project-id",
				Usage:   "projectID for the new user",
				Aliases: []string{"pi"},
			},
		},
	},
	{
@@ -214,7 +224,7 @@ func initHTTPClient() *http.Client {

func createUser(ctx *cli.Context) error {
	access, secret, role := ctx.String("access"), ctx.String("secret"), ctx.String("role")
	userID, groupID, projectID := ctx.Int("user-id"), ctx.Int("group-id"), ctx.Int("project-id")
	if access == "" || secret == "" {
		return fmt.Errorf("invalid input parameters for the new user access/secret keys")
	}
@@ -223,11 +233,12 @@ func createUser(ctx *cli.Context) error {
	}

	acc := auth.Account{
		Access:    access,
		Secret:    secret,
		Role:      auth.Role(role),
		UserID:    userID,
		GroupID:   groupID,
		ProjectID: projectID,
	}

	accxml, err := xml.Marshal(acc)
@@ -316,7 +327,14 @@ func deleteUser(ctx *cli.Context) error {
}

func updateUser(ctx *cli.Context) error {
	access, secret, userId, groupId, projectID, role :=
		ctx.String("access"),
		ctx.String("secret"),
		ctx.Int("user-id"),
		ctx.Int("group-id"),
		ctx.Int("project-id"),
		auth.Role(ctx.String("role"))

	props := auth.MutableProps{}
	if ctx.IsSet("role") {
		if !role.IsValid() {
@@ -333,6 +351,9 @@ func updateUser(ctx *cli.Context) error {
	if ctx.IsSet("group-id") {
		props.GroupID = &groupId
	}
	if ctx.IsSet("project-id") {
		props.ProjectID = &projectID
	}

	propsxml, err := xml.Marshal(props)
	if err != nil {
@@ -433,10 +454,10 @@ const (
func printAcctTable(accs []auth.Account) {
	w := new(tabwriter.Writer)
	w.Init(os.Stdout, minwidth, tabwidth, padding, padchar, flags)
	fmt.Fprintln(w, "Account\tRole\tUserID\tGroupID\tProjectID")
	fmt.Fprintln(w, "-------\t----\t------\t-------\t---------")
	for _, acc := range accs {
		fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\n", acc.Access, acc.Role, acc.UserID, acc.GroupID, acc.ProjectID)
	}
	fmt.Fprintln(w)
	w.Flush()

@@ -25,7 +25,6 @@ import (
	"os"
	"strings"

	"github.com/gofiber/fiber/v2"
	"github.com/urfave/cli/v2"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
@@ -33,56 +32,63 @@ import (
	"github.com/versity/versitygw/metrics"
	"github.com/versity/versitygw/s3api"
	"github.com/versity/versitygw/s3api/middlewares"
	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3event"
	"github.com/versity/versitygw/s3log"
)

var (
	port, admPort                          string
	rootUserAccess                         string
	rootUserSecret                         string
	region                                 string
	admCertFile, admKeyFile                string
	certFile, keyFile                      string
	kafkaURL, kafkaTopic, kafkaKey         string
	natsURL, natsTopic                     string
	rabbitmqURL, rabbitmqExchange          string
	rabbitmqRoutingKey                     string
	eventWebhookURL                        string
	eventConfigFilePath                    string
	logWebhookURL, accessLog               string
	adminLogFile                           string
	healthPath                             string
	virtualDomain                          string
	debug                                  bool
	keepAlive                              bool
	pprof                                  string
	quiet                                  bool
	readonly                               bool
	disableStrictBucketNames               bool
	iamDir                                 string
	ldapURL, ldapBindDN, ldapPassword      string
	ldapQueryBase, ldapObjClasses          string
	ldapAccessAtr, ldapSecAtr, ldapRoleAtr string
	ldapUserIdAtr, ldapGroupIdAtr          string
	ldapProjectIdAtr                       string
	ldapTLSSkipVerify                      bool
	vaultEndpointURL, vaultNamespace       string
	vaultSecretStoragePath                 string
	vaultSecretStorageNamespace            string
	vaultAuthMethod, vaultAuthNamespace    string
	vaultMountPath                         string
	vaultRootToken, vaultRoleId            string
	vaultRoleSecret, vaultServerCert       string
	vaultClientCert, vaultClientCertKey    string
	s3IamAccess, s3IamSecret               string
	s3IamRegion, s3IamBucket               string
	s3IamEndpoint                          string
	s3IamSslNoVerify                       bool
	iamCacheDisable                        bool
	iamCacheTTL                            int
	iamCachePrune                          int
	metricsService                         string
	statsdServers                          string
	dogstatsServers                        string
	ipaHost, ipaVaultName                  string
	ipaUser, ipaPassword                   string
	ipaInsecure                            bool
	iamDebug                               bool
)

var (
@@ -400,24 +406,54 @@ func initFlags() []cli.Flag {
			EnvVars:     []string{"VGW_IAM_LDAP_GROUP_ID_ATR"},
			Destination: &ldapGroupIdAtr,
		},
		&cli.StringFlag{
			Name:        "iam-ldap-project-id-atr",
			Usage:       "ldap server user project id attribute name",
			EnvVars:     []string{"VGW_IAM_LDAP_PROJECT_ID_ATR"},
			Destination: &ldapProjectIdAtr,
		},
		&cli.BoolFlag{
			Name:        "iam-ldap-tls-skip-verify",
			Usage:       "disable TLS certificate verification for LDAP connections (insecure, for self-signed certificates)",
			EnvVars:     []string{"VGW_IAM_LDAP_TLS_SKIP_VERIFY"},
			Destination: &ldapTLSSkipVerify,
		},
		&cli.StringFlag{
			Name:        "iam-vault-endpoint-url",
			Usage:       "vault server url",
			EnvVars:     []string{"VGW_IAM_VAULT_ENDPOINT_URL"},
			Destination: &vaultEndpointURL,
		},
		&cli.StringFlag{
			Name:        "iam-vault-namespace",
			Usage:       "vault server namespace",
			EnvVars:     []string{"VGW_IAM_VAULT_NAMESPACE"},
			Destination: &vaultNamespace,
		},
		&cli.StringFlag{
			Name:        "iam-vault-secret-storage-path",
			Usage:       "vault server secret storage path",
			EnvVars:     []string{"VGW_IAM_VAULT_SECRET_STORAGE_PATH"},
			Destination: &vaultSecretStoragePath,
		},
		&cli.StringFlag{
			Name:        "iam-vault-secret-storage-namespace",
			Usage:       "vault server secret storage namespace",
			EnvVars:     []string{"VGW_IAM_VAULT_SECRET_STORAGE_NAMESPACE"},
			Destination: &vaultSecretStorageNamespace,
		},
		&cli.StringFlag{
			Name:        "iam-vault-auth-method",
			Usage:       "vault server auth method",
			EnvVars:     []string{"VGW_IAM_VAULT_AUTH_METHOD"},
			Destination: &vaultAuthMethod,
		},
		&cli.StringFlag{
			Name:        "iam-vault-auth-namespace",
			Usage:       "vault server auth namespace",
			EnvVars:     []string{"VGW_IAM_VAULT_AUTH_NAMESPACE"},
			Destination: &vaultAuthNamespace,
		},
		&cli.StringFlag{
			Name:  "iam-vault-mount-path",
			Usage: "vault server mount path",
@@ -537,6 +573,12 @@ func initFlags() []cli.Flag {
			EnvVars:     []string{"VGW_READ_ONLY"},
			Destination: &readonly,
		},
		&cli.BoolFlag{
			Name:        "disable-strict-bucket-names",
			Usage:       "allow relaxed bucket naming (disables strict validation checks)",
			EnvVars:     []string{"VGW_DISABLE_STRICT_BUCKET_NAMES"},
			Destination: &disableStrictBucketNames,
		},
		&cli.StringFlag{
			Name:  "metrics-service-name",
			Usage: "service name tag for metrics, hostname if blank",
@@ -596,6 +638,8 @@ func runGateway(ctx context.Context, be backend.Backend) error {
		return fmt.Errorf("root user access and secret key must be provided")
	}

	utils.SetBucketNameValidationStrict(!disableStrictBucketNames)

	if pprof != "" {
		// listen on specified port for pprof debug
		// point browser to http://<ip:port>/debug/pprof/
@@ -604,15 +648,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
		}()
	}

	app := fiber.New(fiber.Config{
		AppName:               "versitygw",
		ServerHeader:          "VERSITYGW",
		StreamRequestBody:     true,
		DisableKeepalive:      !keepAlive,
		Network:               fiber.NetworkTCP,
		DisableStartupMessage: true,
	})

	var opts []s3api.Option

	if certFile != "" || keyFile != "" {
@@ -644,11 +679,12 @@ func runGateway(ctx context.Context, be backend.Backend) error {
	if virtualDomain != "" {
		opts = append(opts, s3api.WithHostStyle(virtualDomain))
	}

	if keepAlive {
		opts = append(opts, s3api.WithKeepAlive())
	}
	if debug {
		debuglogger.SetDebugEnabled()
	}

	if iamDebug {
		debuglogger.SetIAMDebugEnabled()
	}
@@ -659,41 +695,46 @@ func runGateway(ctx context.Context, be backend.Backend) error {
			Secret: rootUserSecret,
			Role:   auth.RoleAdmin,
		},
		Dir:                         iamDir,
		LDAPServerURL:               ldapURL,
		LDAPBindDN:                  ldapBindDN,
		LDAPPassword:                ldapPassword,
		LDAPQueryBase:               ldapQueryBase,
		LDAPObjClasses:              ldapObjClasses,
		LDAPAccessAtr:               ldapAccessAtr,
		LDAPSecretAtr:               ldapSecAtr,
		LDAPRoleAtr:                 ldapRoleAtr,
		LDAPUserIdAtr:               ldapUserIdAtr,
		LDAPGroupIdAtr:              ldapGroupIdAtr,
		LDAPProjectIdAtr:            ldapProjectIdAtr,
		LDAPTLSSkipVerify:           ldapTLSSkipVerify,
		VaultEndpointURL:            vaultEndpointURL,
		VaultNamespace:              vaultNamespace,
		VaultSecretStoragePath:      vaultSecretStoragePath,
		VaultSecretStorageNamespace: vaultSecretStorageNamespace,
		VaultAuthMethod:             vaultAuthMethod,
		VaultAuthNamespace:          vaultAuthNamespace,
		VaultMountPath:              vaultMountPath,
		VaultRootToken:              vaultRootToken,
		VaultRoleId:                 vaultRoleId,
		VaultRoleSecret:             vaultRoleSecret,
		VaultServerCert:             vaultServerCert,
		VaultClientCert:             vaultClientCert,
		VaultClientCertKey:          vaultClientCertKey,
		S3Access:                    s3IamAccess,
		S3Secret:                    s3IamSecret,
		S3Region:                    s3IamRegion,
		S3Bucket:                    s3IamBucket,
		S3Endpoint:                  s3IamEndpoint,
		S3DisableSSlVerfiy:          s3IamSslNoVerify,
		CacheDisable:                iamCacheDisable,
		CacheTTL:                    iamCacheTTL,
		CachePrune:                  iamCachePrune,
		IpaHost:                     ipaHost,
		IpaVaultName:                ipaVaultName,
		IpaUser:                     ipaUser,
		IpaPassword:                 ipaPassword,
		IpaInsecure:                 ipaInsecure,
	})
	if err != nil {
		return fmt.Errorf("setup iam: %w", err)
@@ -733,7 +774,7 @@ func runGateway(ctx context.Context, be backend.Backend) error {
		return fmt.Errorf("init bucket event notifications: %w", err)
	}

	srv, err := s3api.New(be, middlewares.RootUserConfig{
		Access: rootUserAccess,
		Secret: rootUserSecret,
	}, port, region, iam, loggers.S3Logger, loggers.AdminLogger, evSender, metricsManager, opts...)
@@ -744,13 +785,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
	var admSrv *s3api.S3AdminServer

	if admPort != "" {
		admApp := fiber.New(fiber.Config{
			AppName:               "versitygw",
			ServerHeader:          "VERSITYGW",
			Network:               fiber.NetworkTCP,
			DisableStartupMessage: true,
		})

		var opts []s3api.AdminOpt

		if admCertFile != "" || admKeyFile != "" {
@@ -774,7 +808,7 @@ func runGateway(ctx context.Context, be backend.Backend) error {
			opts = append(opts, s3api.WithAdminDebug())
		}

		admSrv = s3api.NewAdminServer(be, middlewares.RootUserConfig{Access: rootUserAccess, Secret: rootUserSecret}, admPort, region, iam, loggers.AdminLogger, opts...)
	}

	if !quiet {
@@ -814,31 +848,36 @@ Loop:
	}
	saveErr := err

	// first shut down the s3api and admin servers
	// as they have dependencies on other modules
	err = srv.ShutDown()
	if err != nil {
		fmt.Fprintf(os.Stderr, "shutdown api server: %v\n", err)
	}

	if admSrv != nil {
		err := admSrv.Shutdown()
		if err != nil {
			fmt.Fprintf(os.Stderr, "shutdown admin server: %v\n", err)
		}
	}

	be.Shutdown()

	err = iam.Shutdown()
	if err != nil {
		if saveErr == nil {
			saveErr = err
		}
		fmt.Fprintf(os.Stderr, "shutdown iam: %v\n", err)
	}

	if loggers.S3Logger != nil {
		err := loggers.S3Logger.Shutdown()
		if err != nil {
			if saveErr == nil {
				saveErr = err
			}
			fmt.Fprintf(os.Stderr, "shutdown s3 logger: %v\n", err)
		}
	}
	if loggers.AdminLogger != nil {
		err := loggers.AdminLogger.Shutdown()
		if err != nil {
			if saveErr == nil {
				saveErr = err
			}
			fmt.Fprintf(os.Stderr, "shutdown admin logger: %v\n", err)
		}
	}
@@ -846,9 +885,6 @@ Loop:
	if evSender != nil {
		err := evSender.Close()
		if err != nil {
			if saveErr == nil {
				saveErr = err
			}
			fmt.Fprintf(os.Stderr, "close event sender: %v\n", err)
		}
	}

@@ -32,8 +32,9 @@ func pluginCommand() *cli.Command {
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name:    "config",
				Usage:   "location of the plugin config file",
				Aliases: []string{"c"},
				EnvVars: []string{"VGW_PLUGIN_CONFIG"},
			},
		},
	}

@@ -120,12 +120,13 @@ func runPosix(ctx *cli.Context) error {
	}

	opts := posix.PosixOpts{
		ChownUID:            chownuid,
		ChownGID:            chowngid,
		BucketLinks:         bucketlinks,
		VersioningDir:       versioningDir,
		NewDirPerm:          fs.FileMode(dirPerms),
		ForceNoTmpFile:      forceNoTmpFile,
		ValidateBucketNames: disableStrictBucketNames,
	}

	var ms meta.MetadataStorer

@@ -26,6 +26,7 @@ import (
var (
	glacier          bool
	disableNoArchive bool
	setProjectID     bool
)

func scoutfsCommand() *cli.Command {
@@ -66,6 +67,12 @@ move interfaces as well as support for tiered filesystems.`,
			EnvVars:     []string{"VGW_CHOWN_GID"},
			Destination: &chowngid,
		},
		&cli.BoolFlag{
			Name:        "projectid",
			Usage:       "set project id on newly created buckets, files, and directories to client account ProjectID",
			EnvVars:     []string{"VGW_SET_PROJECT_ID"},
			Destination: &setProjectID,
		},
		&cli.BoolFlag{
			Name:  "bucketlinks",
			Usage: "allow symlinked directories at bucket level to be treated as buckets",
@@ -113,6 +120,8 @@ func runScoutfs(ctx *cli.Context) error {
	opts.NewDirPerm = fs.FileMode(dirPerms)
	opts.DisableNoArchive = disableNoArchive
	opts.VersioningDir = versioningDir
	opts.ValidateBucketNames = disableStrictBucketNames
	opts.SetProjectID = setProjectID

	be, err := scoutfs.New(ctx.Args().Get(0), opts)
	if err != nil {
@@ -39,6 +39,7 @@ var (
	versioningEnabled bool
	azureTests        bool
	tlsStatus         bool
	parallel          bool
)

func testCommand() *cli.Command {
@@ -115,6 +116,12 @@ func initTestCommands() []*cli.Command {
			Destination: &azureTests,
			Aliases:     []string{"azure"},
		},
		&cli.BoolFlag{
			Name:        "parallel",
			Usage:       "executes the tests concurrently",
			Destination: &parallel,
			Aliases:     []string{"p"},
		},
	},
},
{
@@ -304,9 +311,9 @@ func initTestCommands() []*cli.Command {
	}, extractIntTests()...)
}

type testFunc func(*integration.TestState)

func getAction(tf testFunc) func(ctx *cli.Context) error {
	return func(ctx *cli.Context) error {
		opts := []integration.Option{
			integration.WithAccess(awsID),
@@ -329,12 +336,14 @@ func getAction(tf testFunc) func(*cli.Context) error {
		}

		s := integration.NewS3Conf(opts...)
		ts := integration.NewTestState(ctx.Context, s, parallel)
		tf(ts)
		ts.Wait()

		fmt.Println()
		fmt.Println("RAN:", integration.RunCount.Load(), "PASS:", integration.PassCount.Load(), "FAIL:", integration.FailCount.Load())
		if integration.FailCount.Load() > 0 {
			return fmt.Errorf("test failed with %v errors", integration.FailCount.Load())
		}
		return nil
	}

@@ -18,6 +18,7 @@ import (
	"fmt"
	"log"
	"net/http"
	"os"
	"strings"
	"sync/atomic"

@@ -25,18 +26,39 @@ import (
)

type Color string
type prefix string

const (
	green  Color = "\033[32m"
	yellow Color = "\033[33m"
	blue   Color = "\033[34m"
	red    Color = "\033[31m"
	Purple Color = "\033[0;35m"

	prefixPanic        prefix = "[PANIC]: "
	prefixInernalError prefix = "[INTERNAL ERROR]: "
	prefixInfo         prefix = "[INFO]: "
	prefixDebug        prefix = "[DEBUG]: "

	reset      = "\033[0m"
	borderChar = "─"
	boxWidth   = 120
)

// Panic prints the panics out in the console
func Panic(er error) {
	printError(prefixPanic, er)
}

// InernalError prints the internal error out in the console
func InernalError(er error) {
	printError(prefixInernalError, er)
}

func printError(prefix prefix, er error) {
	fmt.Fprintf(os.Stderr, string(red)+string(prefix)+"%v"+reset+"\n", er)
}

// Logs http request details: headers, body, params, query args
func LogFiberRequestDetails(ctx *fiber.Ctx) {
	// Log the full request url
@@ -102,8 +124,8 @@ func Logf(format string, v ...any) {
	if !debugEnabled.Load() {
		return
	}

	fmt.Printf(string(yellow)+string(prefixDebug)+format+reset+"\n", v...)
}

// Infof prints out green info block with [INFO]: prefix
@@ -111,8 +133,8 @@ func Infof(format string, v ...any) {
	if !debugEnabled.Load() {
		return
	}

	fmt.Printf(string(green)+string(prefixInfo)+format+reset+"\n", v...)
}

var debugIAMEnabled atomic.Bool
@@ -133,8 +155,8 @@ func IAMLogf(format string, v ...any) {
	if !debugIAMEnabled.Load() {
		return
	}

	fmt.Printf(string(yellow)+string(prefixDebug)+format+reset+"\n", v...)
}

// PrintInsideHorizontalBorders prints the text inside horizontal

docker-entrypoint.sh (new file, 51 lines)
@@ -0,0 +1,51 @@
#!/bin/sh
set -e

BIN="${VGW_BINARY:-/usr/local/bin/versitygw}"

if [ ! -x "$BIN" ]; then
	echo "Entrypoint error: versitygw binary not found at $BIN" >&2
	exit 1
fi

# If arguments were provided, run them directly for backward compatibility.
if [ "$#" -gt 0 ]; then
	exec "$BIN" "$@"
fi

backend="${VGW_BACKEND:-}"
if [ -z "$backend" ]; then
	cat >&2 <<'EOF'
No command arguments were provided and VGW_BACKEND is unset.
Set VGW_BACKEND to one of: posix, scoutfs, s3, azure, plugin
or pass explicit arguments to the container to run the versitygw command directly.
EOF
	exit 1
fi

case "$backend" in
	posix|scoutfs|s3|azure|plugin)
		;;
	*)
		echo "VGW_BACKEND invalid backend (was '$backend')." >&2
		exit 1
		;;
esac

set -- "$backend"

if [ -n "${VGW_BACKEND_ARG:-}" ]; then
	set -- "$@" "$VGW_BACKEND_ARG"
fi

if [ -n "${VGW_BACKEND_ARGS:-}" ]; then
	# shellcheck disable=SC2086
	set -- "$@" ${VGW_BACKEND_ARGS}
fi

if [ -n "${VGW_ARGS:-}" ]; then
	# shellcheck disable=SC2086
	set -- "$@" ${VGW_ARGS}
fi

exec "$BIN" "$@"
@@ -23,7 +23,8 @@
# VersityGW Required Options #
##############################

# VGW_BACKEND must be defined, and must be one of: posix, scoutfs, s3, azure,
# or plugin
# This defines the backend that the VGW will use for data access.
VGW_BACKEND=posix

@@ -119,6 +120,12 @@ ROOT_SECRET_ACCESS_KEY=
# https://<VGW_ENDPOINT>/<bucket>
#VGW_VIRTUAL_DOMAIN=

# By default, versitygw will enforce similar bucket naming rules as described
# in https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
# Set to true to allow legacy or non-DNS-compliant bucket names by skipping
# strict validation checks.
#VGW_DISABLE_STRICT_BUCKET_NAMES=false

###############
# Access Logs #
###############
@@ -272,6 +279,11 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_IAM_LDAP_ROLE_ATR=
#VGW_IAM_LDAP_USER_ID_ATR=
#VGW_IAM_LDAP_GROUP_ID_ATR=
# Disable TLS certificate verification for LDAP connections (insecure, allows
# self-signed certificates). This should only be used in testing environments
# or when using self-signed certificates. The default is false (verification
# enabled).
#VGW_IAM_LDAP_TLS_SKIP_VERIFY=false

# The FreeIPA options will enable the FreeIPA IAM service with accounts stored
# in an external FreeIPA service. Currently the FreeIPA IAM service only
@@ -433,6 +445,11 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_CHOWN_UID=false
#VGW_CHOWN_GID=false

# The VGW_SET_PROJECT_ID option will enable setting account defined ProjectID
# for newly created buckets, files, and directories if the account ProjectID
# is greater than 0 and the filesystem format version supports project IDs.
#VGW_SET_PROJECT_ID=false

# The VGW_BUCKET_LINKS option will enable the gateway to treat symbolic links
# to directories at the top level gateway directory as buckets.
#VGW_BUCKET_LINKS=false
@@ -480,3 +497,48 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_S3_DISABLE_CHECKSUM=false
#VGW_S3_SSL_SKIP_VERIFY=false
#VGW_S3_DEBUG=false

#########
# azure #
#########

# The azure backend allows the gateway to store objects in Azure Blob Storage.
# Buckets created through the gateway map to blob containers within the
# configured storage account. This backend is useful when existing workflows
# expect an S3-compatible interface while data resides in Azure.

# When the azure backend is selected, configure credentials with one of the
# following approaches:
# - Shared key: Define AZ_ACCOUNT_NAME with the storage account name and
#   AZ_ACCESS_KEY with the corresponding account key.
# - SAS token: Set AZ_SAS_TOKEN to an account or container scoped SAS token.
#   Provide AZ_ENDPOINT if the token does not implicitly define the endpoint.
# - Default Azure credentials: Leave AZ_ACCOUNT_NAME and AZ_ACCESS_KEY blank
#   and configure the standard Azure identity environment variables supported
#   by the DefaultAzureCredential chain (e.g. AZURE_CLIENT_ID, AZURE_TENANT_ID,
#   AZURE_CLIENT_SECRET, managed identity, etc.).
# Use AZ_ENDPOINT to override the service URL (for example when targeting
# Azurite or a sovereign cloud). If unset, it defaults to
# https://<account>.blob.core.windows.net/ when an account name is provided.
#AZ_ACCOUNT_NAME=
#AZ_ACCESS_KEY=
#AZ_SAS_TOKEN=
#AZ_ENDPOINT=

##########
# plugin #
##########

# The plugin backend loads a Go plugin shared object that exposes a variable
# named "Backend" of type *plugins.BackendPlugin. The gateway uses the
# exported constructor to create the backend implementation at runtime.

# Set VGW_BACKEND_ARG to the absolute path of the compiled plugin (.so) file.
# The path must be readable by the gateway service account and remain stable
# across restarts.
#VGW_BACKEND_ARG=/usr/lib/versitygw/plugins/example.so

# Provide the plugin-specific configuration file path via VGW_PLUGIN_CONFIG.
# The gateway automatically forwards this value to the plugin backend when it
# starts up.
#VGW_PLUGIN_CONFIG=/etc/versitygw.d/example-plugin.conf
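
# As a rough illustration, a plugin source could follow the Go sketch below.
# This is a hedged skeleton: the "New" constructor field and its signature are
# assumptions and must match the actual versitygw/plugins package definitions.
#
#   package main
#
#   import (
#       "github.com/versity/versitygw/backend"
#       "github.com/versity/versitygw/plugins"
#   )
#
#   type myBackend struct {
#       backend.BackendUnsupported // stubs out all unimplemented calls
#   }
#
#   // Backend is the exported symbol the gateway looks up in the .so file.
#   var Backend = &plugins.BackendPlugin{
#       New: func(config string) (backend.Backend, error) { return &myBackend{}, nil },
#   }
#
# Build it with: go build -buildmode=plugin -o example.so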

@@ -17,7 +17,7 @@ Group=root

EnvironmentFile=/etc/versitygw.d/%i.conf

ExecStart=/bin/bash -c 'if [[ ! ("${VGW_BACKEND}" == "posix" || "${VGW_BACKEND}" == "scoutfs" || "${VGW_BACKEND}" == "s3" || "${VGW_BACKEND}" == "azure" || "${VGW_BACKEND}" == "plugin") ]]; then echo "VGW_BACKEND environment variable ${VGW_BACKEND} not set to valid backend type"; exit 1; fi && exec /usr/bin/versitygw "$VGW_BACKEND" "$VGW_BACKEND_ARG"'

# Let systemd restart this service always
Restart=always

79
go.mod
@@ -5,20 +5,21 @@ go 1.24.0

toolchain go1.24.1

require (
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
	github.com/DataDog/datadog-go/v5 v5.7.1
	github.com/aws/aws-sdk-go-v2 v1.39.0
	github.com/aws/aws-sdk-go-v2/service/s3 v1.88.1
	github.com/aws/smithy-go v1.23.0
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3
	github.com/DataDog/datadog-go/v5 v5.8.1
	github.com/aws/aws-sdk-go-v2 v1.39.6
	github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2
	github.com/aws/smithy-go v1.23.2
	github.com/davecgh/go-spew v1.1.1
	github.com/go-ldap/ldap/v3 v3.4.11
	github.com/go-ldap/ldap/v3 v3.4.12
	github.com/gofiber/fiber/v2 v2.52.9
	github.com/google/go-cmp v0.7.0
	github.com/google/uuid v1.6.0
	github.com/hashicorp/vault-client-go v0.4.3
	github.com/nats-io/nats.go v1.45.0
	github.com/minio/crc64nvme v1.1.1
	github.com/nats-io/nats.go v1.47.0
	github.com/oklog/ulid/v2 v2.1.1
	github.com/pkg/xattr v0.4.12
	github.com/rabbitmq/amqp091-go v1.10.0
@@ -26,28 +27,31 @@ require (
	github.com/smira/go-statsd v1.3.4
	github.com/stretchr/testify v1.11.1
	github.com/urfave/cli/v2 v2.27.7
	github.com/valyala/fasthttp v1.66.0
	github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
	golang.org/x/sync v0.17.0
	golang.org/x/sys v0.36.0
	github.com/valyala/fasthttp v1.68.0
	github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b
	golang.org/x/sync v0.18.0
	golang.org/x/sys v0.38.0
)

require (
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
	github.com/Azure/go-ntlmssp v0.1.0 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.30.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 // indirect
	github.com/clipperhouse/stringish v0.1.1 // indirect
	github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
	github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
	github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
	github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/nats-io/nkeys v0.4.11 // indirect
@@ -56,32 +60,31 @@ require (
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/ryanuber/go-glob v1.0.0 // indirect
	golang.org/x/crypto v0.42.0 // indirect
	golang.org/x/net v0.44.0 // indirect
	golang.org/x/text v0.29.0 // indirect
	golang.org/x/time v0.13.0 // indirect
	golang.org/x/crypto v0.45.0 // indirect
	golang.org/x/net v0.47.0 // indirect
	golang.org/x/text v0.31.0 // indirect
	golang.org/x/time v0.14.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)

require (
	github.com/andybalholm/brotli v1.2.0 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect
	github.com/aws/aws-sdk-go-v2/config v1.31.8
	github.com/aws/aws-sdk-go-v2/credentials v1.18.12
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.6
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 // indirect
	github.com/aws/aws-sdk-go-v2/config v1.31.20
	github.com/aws/aws-sdk-go-v2/credentials v1.18.24
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.7
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/klauspost/compress v1.18.1 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/mattn/go-runewidth v0.0.19 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/valyala/bytebufferpool v1.0.0 // indirect
	github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
163
go.sum
@@ -1,68 +1,72 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 h1:ZJJNFaQ86GVKQ9ehwqyAFE6pIfyicpuJ8IkVaPBc6/4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3/go.mod h1:URuDvhmATVKqHBH9/0nOiNKk0+YcwfQ3WkK5PqHKxc8=
github.com/Azure/go-ntlmssp v0.1.0 h1:DjFo6YtWzNqNvQdrwEyr/e4nhU3vRiwenz5QX7sFz+A=
github.com/Azure/go-ntlmssp v0.1.0/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/DataDog/datadog-go/v5 v5.7.1 h1:dNhEwKaO3LJhGYKajl2DjobArfa5R9YF72z3Dy+PH3k=
github.com/DataDog/datadog-go/v5 v5.7.1/go.mod h1:CA9Ih6tb3jtxk+ps1xvTnxmhjr7ldE8TiwrZyrm31ss=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/DataDog/datadog-go/v5 v5.8.1 h1:+GOES5W9zpKlhwHptZVW2C0NLVf7ilr7pHkDcbNvpIc=
github.com/DataDog/datadog-go/v5 v5.8.1/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/aws/aws-sdk-go-v2 v1.39.0 h1:xm5WV/2L4emMRmMjHFykqiA4M/ra0DJVSWUkDyBjbg4=
github.com/aws/aws-sdk-go-v2 v1.39.0/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00=
github.com/aws/aws-sdk-go-v2/config v1.31.8 h1:kQjtOLlTU4m4A64TsRcqwNChhGCwaPBt+zCQt/oWsHU=
github.com/aws/aws-sdk-go-v2/config v1.31.8/go.mod h1:QPpc7IgljrKwH0+E6/KolCgr4WPLerURiU592AYzfSY=
github.com/aws/aws-sdk-go-v2/credentials v1.18.12 h1:zmc9e1q90wMn8wQbjryy8IwA6Q4XlaL9Bx2zIqdNNbk=
github.com/aws/aws-sdk-go-v2/credentials v1.18.12/go.mod h1:3VzdRDR5u3sSJRI4kYcOSIBbeYsgtVk7dG5R/U6qLWY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 h1:Is2tPmieqGS2edBnmOJIbdvOA6Op+rRpaYR60iBAwXM=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7/go.mod h1:F1i5V5421EGci570yABvpIXgRIBPb5JM+lSkHF6Dq5w=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.6 h1:bByPm7VcaAgeT2+z5m0Lj5HDzm+g9AwbA3WFx2hPby0=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.6/go.mod h1:PhTe8fR8aFW0wDc6IV9BHeIzXhpv3q6AaVHnqiv5Pyc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 h1:UCxq0X9O3xrlENdKf1r9eRJoKz/b0AfGkpp3a7FPlhg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7/go.mod h1:rHRoJUNUASj5Z/0eqI4w32vKvC7atoWR0jC+IkmVH8k=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 h1:Y6DTZUn7ZUC4th9FMBbo8LVE+1fyq3ofw+tRwkUd3PY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7/go.mod h1:x3XE6vMnU9QvHN/Wrx2s44kwzV2o2g5x/siw4ZUJ9g8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7 h1:BszAktdUo2xlzmYHjWMq70DqJ7cROM8iBd3f6hrpuMQ=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7/go.mod h1:XJ1yHki/P7ZPuG4fd3f0Pg/dSGA2cTQBCLw82MH2H48=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7 h1:zmZ8qvtE9chfhBPuKB2aQFxW5F/rpwXUgmcVCgQzqRw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7/go.mod h1:vVYfbpd2l+pKqlSIDIOgouxNsGu5il9uDp0ooWb0jys=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 h1:mLgc5QIgOy26qyh5bvW+nDoAppxgn3J2WV3m9ewq7+8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7/go.mod h1:wXb/eQnqt8mDQIQTTmcw58B5mYGxzLGZGK8PWNFZ0BA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7 h1:u3VbDKUCWarWiU+aIUK4gjTr/wQFXV17y3hgNno9fcA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7/go.mod h1:/OuMQwhSyRapYxq6ZNpPer8juGNrB4P5Oz8bZ2cgjQE=
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.1 h1:+RpGuaQ72qnU83qBKVwxkznewEdAGhIWo/PQCmkhhog=
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.1/go.mod h1:xajPTguLoeQMAOE44AAP2RQoUhF8ey1g5IFHARv71po=
github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 h1:7PKX3VYsZ8LUWceVRuv0+PU+E7OtQb1lgmi5vmUE9CM=
github.com/aws/aws-sdk-go-v2/service/sso v1.29.3/go.mod h1:Ql6jE9kyyWI5JHn+61UT/Y5Z0oyVJGmgmJbZD5g4unY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4 h1:e0XBRn3AptQotkyBFrHAxFB8mDhAIOfsG+7KyJ0dg98=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4/go.mod h1:XclEty74bsGBCr1s0VSaA11hQ4ZidK4viWK7rRfO88I=
github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 h1:PR00NXRYgY4FWHqOGx3fC3lhVKjsp1GdloDv2ynMSd8=
github.com/aws/aws-sdk-go-v2/service/sts v1.38.4/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8=
github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk=
github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 h1:DHctwEM8P8iTXFxC/QK0MRjwEpWQeM9yzidCRjldUz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3/go.mod h1:xdCzcZEtnSTKVDOmUZs4l/j3pSV6rpo1WXl5ugNsL8Y=
github.com/aws/aws-sdk-go-v2/config v1.31.20 h1:/jWF4Wu90EhKCgjTdy1DGxcbcbNrjfBHvksEL79tfQc=
github.com/aws/aws-sdk-go-v2/config v1.31.20/go.mod h1:95Hh1Tc5VYKL9NJ7tAkDcqeKt+MCXQB1hQZaRdJIZE0=
github.com/aws/aws-sdk-go-v2/credentials v1.18.24 h1:iJ2FmPT35EaIB0+kMa6TnQ+PwG5A1prEdAw+PsMzfHg=
github.com/aws/aws-sdk-go-v2/credentials v1.18.24/go.mod h1:U91+DrfjAiXPDEGYhh/x29o4p0qHX5HDqG7y5VViv64=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.7 h1:u8danF+A2Zv//pFZvj5V23v/6XG4AxuSVup5s6nxSnI=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.7/go.mod h1:uvLIvU8iJPEU5so7b6lLDNArWpOX6sRBfL5wBABmlfc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 h1:eg/WYAa12vqTphzIdWMzqYRVKKnCboVPRlvaybNCqPA=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13/go.mod h1:/FDdxWhz1486obGrKKC1HONd7krpk38LBt+dutLcN9k=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 h1:NvMjwvv8hpGUILarKw7Z4Q0w1H9anXKsesMxtw++MA4=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4/go.mod h1:455WPHSwaGj2waRSpQp7TsnpOnBfw8iDfPfbwl7KPJE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 h1:zhBJXdhWIFZ1acfDYIhu4+LCzdUS2Vbcum7D01dXlHQ=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13/go.mod h1:JaaOeCE368qn2Hzi3sEzY6FgAZVCIYcC2nwbro2QCh8=
github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2 h1:DhdbtDl4FdNlj31+xiRXANxEE+eC7n8JQz+/ilwQ8Uc=
github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2/go.mod h1:+wArOOrcHUevqdto9k1tKOF5++YTe9JEcPSc9Tx2ZSw=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.3 h1:NjShtS1t8r5LUfFVtFeI8xLAHQNTa7UI0VawXlrBMFQ=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.3/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 h1:gTsnx0xXNQ6SBbymoDvcoRHL+q4l/dAFsQuKfDWSaGc=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 h1:HK5ON3KmQV2HcAunnx4sKLB9aPf3gKGwVAf7xnx0QT0=
github.com/aws/aws-sdk-go-v2/service/sts v1.40.2/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4=
github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -72,8 +76,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU=
github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM=
github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4=
github.com/go-ldap/ldap/v3 v3.4.12/go.mod h1:+SPAGcTtOfmGsCb3h1RFiq4xpp4N636G75OEace8lNo=
github.com/gofiber/fiber/v2 v2.52.9 h1:YjKl5DOiyP3j0mO61u3NTmK7or8GzzWzCFzkboyP5cw=
github.com/gofiber/fiber/v2 v2.52.9/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
@@ -111,8 +115,10 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -123,12 +129,14 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.45.0 h1:/wGPbnYXDM0pLKFjZTX+2JOw9TQPoIgTFrUaH97giwA=
github.com/nats-io/nats.go v1.45.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nats.go v1.47.0 h1:YQdADw6J/UfGUd2Oy6tn4Hq6YHxCaJrVKayxxFqYrgM=
github.com/nats-io/nats.go v1.47.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
@@ -147,9 +155,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -176,10 +181,10 @@ github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.66.0 h1:M87A0Z7EayeyNaV6pfO3tUTUiYO0dZfEJnRGXTVNuyU=
github.com/valyala/fasthttp v1.66.0/go.mod h1:Y4eC+zwoocmXSVCB1JmhNbYtS7tZPRI2ztPB72EVObs=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44 h1:Wx1o3pNrCzsHIIDyZ2MLRr6tF/1FhAr7HNDn80QqDWE=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/valyala/fasthttp v1.68.0 h1:v12Nx16iepr8r9ySOwqI+5RBJ/DqTxhOy1HrHoDFnok=
github.com/valyala/fasthttp v1.68.0/go.mod h1:5EXiRfYQAoiO/khu4oU9VISC/eVY6JqmSpPJoHCKsz4=
github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b h1:kuqsuYRMG1c6YXBAQvWO7CiurlpYtjDJWI6oZ2K/ZZE=
github.com/versity/scoutfs-go v0.0.0-20240625221833-95fd765b760b/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
@@ -195,18 +200,18 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -217,15 +222,15 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
11
runtests.sh
@@ -16,7 +16,6 @@ ECHO "Generating TLS certificate and key in the cert.pem and key.pem files"
openssl genpkey -algorithm RSA -out key.pem -pkeyopt rsa_keygen_bits:2048
openssl req -new -x509 -key key.pem -out cert.pem -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"

ECHO "Running the sdk test over http"
# run server in background not versioning-enabled
# port: 7070(default)
@@ -33,7 +32,7 @@ fi

# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow --parallel; then
echo "full flow tests failed"
kill $GW_PID
exit 1
@@ -70,7 +69,7 @@ fi

# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 full-flow; then
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 full-flow --parallel; then
echo "full flow tests failed"
kill $GW_HTTPS_PID
exit 1
@@ -90,7 +89,6 @@ fi

kill $GW_HTTPS_PID

ECHO "Running the sdk test over http against the versioning-enabled gateway"
# run server in background versioning-enabled
# port: 7072
@@ -108,7 +106,7 @@ fi

# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 full-flow -vs; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 full-flow -vs --parallel; then
echo "versioning-enabled full-flow tests failed"
kill $GW_VS_PID
exit 1
@@ -140,7 +138,7 @@ fi

# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 full-flow -vs; then
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 full-flow -vs --parallel; then
echo "versioning-enabled full-flow tests failed"
kill $GW_VS_HTTPS_PID
exit 1
@@ -162,4 +160,3 @@ exit 0

# go tool covdata percent -i=/tmp/covdata
# go tool covdata textfmt -i=/tmp/covdata -o profile.txt
# go tool cover -html=profile.txt
@@ -35,42 +35,42 @@ func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMSe
	// CreateUser admin api
	app.Patch("/create-user",
		controllers.ProcessHandlers(ctrl.CreateUser, metrics.ActionAdminCreateUser, services,
			middlewares.VerifyV4Signature(root, iam, region),
			middlewares.VerifyV4Signature(root, iam, region, false, true),
			middlewares.IsAdmin(metrics.ActionAdminCreateUser),
		))

	// DeleteUsers admin api
	app.Patch("/delete-user",
		controllers.ProcessHandlers(ctrl.DeleteUser, metrics.ActionAdminDeleteUser, services,
			middlewares.VerifyV4Signature(root, iam, region),
			middlewares.VerifyV4Signature(root, iam, region, false, true),
			middlewares.IsAdmin(metrics.ActionAdminDeleteUser),
		))

	// UpdateUser admin api
	app.Patch("/update-user",
		controllers.ProcessHandlers(ctrl.UpdateUser, metrics.ActionAdminUpdateUser, services,
			middlewares.VerifyV4Signature(root, iam, region),
			middlewares.VerifyV4Signature(root, iam, region, false, true),
			middlewares.IsAdmin(metrics.ActionAdminUpdateUser),
		))

	// ListUsers admin api
	app.Patch("/list-users",
		controllers.ProcessHandlers(ctrl.ListUsers, metrics.ActionAdminListUsers, services,
			middlewares.VerifyV4Signature(root, iam, region),
			middlewares.VerifyV4Signature(root, iam, region, false, true),
			middlewares.IsAdmin(metrics.ActionAdminListUsers),
		))

	// ChangeBucketOwner admin api
	app.Patch("/change-bucket-owner",
		controllers.ProcessHandlers(ctrl.ChangeBucketOwner, metrics.ActionAdminChangeBucketOwner, services,
			middlewares.VerifyV4Signature(root, iam, region),
			middlewares.VerifyV4Signature(root, iam, region, false, true),
			middlewares.IsAdmin(metrics.ActionAdminChangeBucketOwner),
		))

	// ListBucketsAndOwners admin api
	app.Patch("/list-buckets",
		controllers.ProcessHandlers(ctrl.ListBuckets, metrics.ActionAdminListBuckets, services,
			middlewares.VerifyV4Signature(root, iam, region),
			middlewares.VerifyV4Signature(root, iam, region, false, true),
			middlewares.IsAdmin(metrics.ActionAdminListBuckets),
		))
}
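Every admin endpoint above is registered with the same three-layer shape: the handler, signature verification (now with the two new trailing booleans), and the admin check. A sketch of how a further endpoint would be wired inside Init; the DescribeUser handler and its metrics constant are invented for illustration and do not exist in the codebase:

	// Hypothetical DescribeUser admin api, following the pattern above.
	app.Patch("/describe-user",
		controllers.ProcessHandlers(ctrl.DescribeUser, metrics.ActionAdminDescribeUser, services,
			middlewares.VerifyV4Signature(root, iam, region, false, true),
			middlewares.IsAdmin(metrics.ActionAdminDescribeUser),
		))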
@@ -19,6 +19,7 @@ import (

	"github.com/gofiber/fiber/v2"
	"github.com/gofiber/fiber/v2/middleware/logger"
	"github.com/gofiber/fiber/v2/middleware/recover"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3api/controllers"
@@ -36,9 +37,8 @@ type S3AdminServer struct {
	debug bool
}

func NewAdminServer(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, l s3log.AuditLogger, opts ...AdminOpt) *S3AdminServer {
func NewAdminServer(be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, l s3log.AuditLogger, opts ...AdminOpt) *S3AdminServer {
	server := &S3AdminServer{
		app: app,
		backend: be,
		router: new(S3AdminRouter),
		port: port,
@@ -48,6 +48,22 @@ func NewAdminServer(app *fiber.App, be backend.Backend, root middlewares.RootUse
		opt(server)
	}

	app := fiber.New(fiber.Config{
		AppName: "versitygw",
		ServerHeader: "VERSITYGW",
		Network: fiber.NetworkTCP,
		DisableStartupMessage: true,
		ErrorHandler: globalErrorHandler,
	})

	server.app = app

	app.Use(recover.New(
		recover.Config{
			EnableStackTrace: true,
			StackTraceHandler: stackTraceHandler,
		}))

	// Logging middlewares
	if !server.quiet {
		app.Use(logger.New(logger.Config{
@@ -84,3 +100,8 @@ func (sa *S3AdminServer) Serve() (err error) {
	}
	return sa.app.Listen(sa.port)
}

// ShutDown gracefully shuts down the server with a context timeout
func (sa S3AdminServer) Shutdown() error {
	return sa.app.ShutdownWithTimeout(shutDownDuration)
}
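Since the constructor now builds and owns its fiber.App internally, callers no longer supply one. A sketch of the post-refactor call shape; the package name s3api, the port, and the region are illustrative assumptions:

package main

import (
	"log"

	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3api" // package location assumed
	"github.com/versity/versitygw/s3api/middlewares"
	"github.com/versity/versitygw/s3log"
)

// runAdmin shows the new signature: no *fiber.App argument.
func runAdmin(be backend.Backend, root middlewares.RootUserConfig, iam auth.IAMService, l s3log.AuditLogger) error {
	srv := s3api.NewAdminServer(be, root, ":7071", "us-east-1", iam, l)
	defer func() {
		// graceful shutdown with the server's built-in timeout
		if err := srv.Shutdown(); err != nil {
			log.Printf("admin shutdown: %v", err)
		}
	}()
	return srv.Serve()
}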
@@ -59,7 +59,7 @@ var _ backend.Backend = &BackendMock{}
// DeleteObjectFunc: func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
// 	panic("mock out the DeleteObject method")
// },
// DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string) error {
// DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) error {
// 	panic("mock out the DeleteObjectTagging method")
// },
// DeleteObjectsFunc: func(contextMoqParam context.Context, deleteObjectsInput *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
@@ -101,7 +101,7 @@ var _ backend.Backend = &BackendMock{}
// GetObjectRetentionFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) ([]byte, error) {
// 	panic("mock out the GetObjectRetention method")
// },
// GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string) (map[string]string, error) {
// GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) (map[string]string, error) {
// 	panic("mock out the GetObjectTagging method")
// },
// HeadBucketFunc: func(contextMoqParam context.Context, headBucketInput *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
@@ -161,10 +161,10 @@ var _ backend.Backend = &BackendMock{}
// PutObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string, config []byte) error {
// 	panic("mock out the PutObjectLockConfiguration method")
// },
// PutObjectRetentionFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string, bypass bool, retention []byte) error {
// PutObjectRetentionFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string, retention []byte) error {
// 	panic("mock out the PutObjectRetention method")
// },
// PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error {
// PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string, tags map[string]string) error {
// 	panic("mock out the PutObjectTagging method")
// },
// RestoreObjectFunc: func(contextMoqParam context.Context, restoreObjectInput *s3.RestoreObjectInput) error {
@@ -229,7 +229,7 @@ type BackendMock struct {
	DeleteObjectFunc func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)

	// DeleteObjectTaggingFunc mocks the DeleteObjectTagging method.
	DeleteObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string) error
	DeleteObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) error

	// DeleteObjectsFunc mocks the DeleteObjects method.
	DeleteObjectsFunc func(contextMoqParam context.Context, deleteObjectsInput *s3.DeleteObjectsInput) (s3response.DeleteResult, error)
@@ -271,7 +271,7 @@ type BackendMock struct {
	GetObjectRetentionFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) ([]byte, error)

	// GetObjectTaggingFunc mocks the GetObjectTagging method.
	GetObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string) (map[string]string, error)
	GetObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) (map[string]string, error)

	// HeadBucketFunc mocks the HeadBucket method.
	HeadBucketFunc func(contextMoqParam context.Context, headBucketInput *s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
@@ -331,10 +331,10 @@ type BackendMock struct {
	PutObjectLockConfigurationFunc func(contextMoqParam context.Context, bucket string, config []byte) error

	// PutObjectRetentionFunc mocks the PutObjectRetention method.
	PutObjectRetentionFunc func(contextMoqParam context.Context, bucket string, object string, versionId string, bypass bool, retention []byte) error
	PutObjectRetentionFunc func(contextMoqParam context.Context, bucket string, object string, versionId string, retention []byte) error

	// PutObjectTaggingFunc mocks the PutObjectTagging method.
	PutObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error
	PutObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, versionId string, tags map[string]string) error

	// RestoreObjectFunc mocks the RestoreObject method.
	RestoreObjectFunc func(contextMoqParam context.Context, restoreObjectInput *s3.RestoreObjectInput) error
@@ -452,6 +452,8 @@ type BackendMock struct {
	Bucket string
	// Object is the object argument value.
	Object string
	// VersionId is the versionId argument value.
	VersionId string
	}
	// DeleteObjects holds details about calls to the DeleteObjects method.
	DeleteObjects []struct {
@@ -560,6 +562,8 @@ type BackendMock struct {
	Bucket string
	// Object is the object argument value.
	Object string
	// VersionId is the versionId argument value.
	VersionId string
	}
	// HeadBucket holds details about calls to the HeadBucket method.
	HeadBucket []struct {
@@ -722,8 +726,6 @@ type BackendMock struct {
	Object string
	// VersionId is the versionId argument value.
	VersionId string
	// Bypass is the bypass argument value.
	Bypass bool
	// Retention is the retention argument value.
	Retention []byte
	}
@@ -735,6 +737,8 @@ type BackendMock struct {
	Bucket string
	// Object is the object argument value.
	Object string
	// VersionId is the versionId argument value.
	VersionId string
	// Tags is the tags argument value.
	Tags map[string]string
	}
@@ -1270,7 +1274,7 @@ func (mock *BackendMock) DeleteObjectCalls() []struct {
}

// DeleteObjectTagging calls DeleteObjectTaggingFunc.
func (mock *BackendMock) DeleteObjectTagging(contextMoqParam context.Context, bucket string, object string) error {
func (mock *BackendMock) DeleteObjectTagging(contextMoqParam context.Context, bucket string, object string, versionId string) error {
	if mock.DeleteObjectTaggingFunc == nil {
		panic("BackendMock.DeleteObjectTaggingFunc: method is nil but Backend.DeleteObjectTagging was just called")
	}
@@ -1278,15 +1282,17 @@ func (mock *BackendMock) DeleteObjectTagging(contextMoqParam context.Context, bu
		ContextMoqParam context.Context
		Bucket string
		Object string
		VersionId string
	}{
		ContextMoqParam: contextMoqParam,
		Bucket: bucket,
		Object: object,
		VersionId: versionId,
	}
	mock.lockDeleteObjectTagging.Lock()
	mock.calls.DeleteObjectTagging = append(mock.calls.DeleteObjectTagging, callInfo)
	mock.lockDeleteObjectTagging.Unlock()
	return mock.DeleteObjectTaggingFunc(contextMoqParam, bucket, object)
	return mock.DeleteObjectTaggingFunc(contextMoqParam, bucket, object, versionId)
}

// DeleteObjectTaggingCalls gets all the calls that were made to DeleteObjectTagging.
@@ -1297,11 +1303,13 @@ func (mock *BackendMock) DeleteObjectTaggingCalls() []struct {
	ContextMoqParam context.Context
	Bucket string
	Object string
	VersionId string
} {
	var calls []struct {
		ContextMoqParam context.Context
		Bucket string
		Object string
		VersionId string
	}
	mock.lockDeleteObjectTagging.RLock()
	calls = mock.calls.DeleteObjectTagging
@@ -1794,7 +1802,7 @@ func (mock *BackendMock) GetObjectRetentionCalls() []struct {
}

// GetObjectTagging calls GetObjectTaggingFunc.
func (mock *BackendMock) GetObjectTagging(contextMoqParam context.Context, bucket string, object string) (map[string]string, error) {
func (mock *BackendMock) GetObjectTagging(contextMoqParam context.Context, bucket string, object string, versionId string) (map[string]string, error) {
	if mock.GetObjectTaggingFunc == nil {
		panic("BackendMock.GetObjectTaggingFunc: method is nil but Backend.GetObjectTagging was just called")
	}
@@ -1802,15 +1810,17 @@ func (mock *BackendMock) GetObjectTagging(contextMoqParam context.Context, bucke
		ContextMoqParam context.Context
		Bucket string
		Object string
		VersionId string
	}{
		ContextMoqParam: contextMoqParam,
		Bucket: bucket,
		Object: object,
		VersionId: versionId,
	}
	mock.lockGetObjectTagging.Lock()
	mock.calls.GetObjectTagging = append(mock.calls.GetObjectTagging, callInfo)
	mock.lockGetObjectTagging.Unlock()
	return mock.GetObjectTaggingFunc(contextMoqParam, bucket, object)
	return mock.GetObjectTaggingFunc(contextMoqParam, bucket, object, versionId)
}

// GetObjectTaggingCalls gets all the calls that were made to GetObjectTagging.
@@ -1821,11 +1831,13 @@ func (mock *BackendMock) GetObjectTaggingCalls() []struct {
	ContextMoqParam context.Context
	Bucket string
	Object string
	VersionId string
} {
	var calls []struct {
		ContextMoqParam context.Context
		Bucket string
		Object string
		VersionId string
	}
	mock.lockGetObjectTagging.RLock()
	calls = mock.calls.GetObjectTagging
@@ -2554,7 +2566,7 @@ func (mock *BackendMock) PutObjectLockConfigurationCalls() []struct {
}

// PutObjectRetention calls PutObjectRetentionFunc.
func (mock *BackendMock) PutObjectRetention(contextMoqParam context.Context, bucket string, object string, versionId string, bypass bool, retention []byte) error {
func (mock *BackendMock) PutObjectRetention(contextMoqParam context.Context, bucket string, object string, versionId string, retention []byte) error {
	if mock.PutObjectRetentionFunc == nil {
		panic("BackendMock.PutObjectRetentionFunc: method is nil but Backend.PutObjectRetention was just called")
	}
@@ -2563,20 +2575,18 @@ func (mock *BackendMock) PutObjectRetention(contextMoqParam context.Context, buc
		Bucket string
		Object string
		VersionId string
		Bypass bool
		Retention []byte
	}{
		ContextMoqParam: contextMoqParam,
		Bucket: bucket,
		Object: object,
		VersionId: versionId,
		Bypass: bypass,
		Retention: retention,
	}
	mock.lockPutObjectRetention.Lock()
	mock.calls.PutObjectRetention = append(mock.calls.PutObjectRetention, callInfo)
	mock.lockPutObjectRetention.Unlock()
	return mock.PutObjectRetentionFunc(contextMoqParam, bucket, object, versionId, bypass, retention)
	return mock.PutObjectRetentionFunc(contextMoqParam, bucket, object, versionId, retention)
}

// PutObjectRetentionCalls gets all the calls that were made to PutObjectRetention.
@@ -2588,7 +2598,6 @@ func (mock *BackendMock) PutObjectRetentionCalls() []struct {
	Bucket string
	Object string
	VersionId string
	Bypass bool
	Retention []byte
} {
	var calls []struct {
@@ -2596,7 +2605,6 @@ func (mock *BackendMock) PutObjectRetentionCalls() []struct {
		Bucket string
		Object string
		VersionId string
		Bypass bool
		Retention []byte
	}
	mock.lockPutObjectRetention.RLock()
@@ -2606,7 +2614,7 @@ func (mock *BackendMock) PutObjectRetentionCalls() []struct {
}

// PutObjectTagging calls PutObjectTaggingFunc.
func (mock *BackendMock) PutObjectTagging(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error {
func (mock *BackendMock) PutObjectTagging(contextMoqParam context.Context, bucket string, object string, versionId string, tags map[string]string) error {
	if mock.PutObjectTaggingFunc == nil {
		panic("BackendMock.PutObjectTaggingFunc: method is nil but Backend.PutObjectTagging was just called")
	}
@@ -2614,17 +2622,19 @@ func (mock *BackendMock) PutObjectTagging(contextMoqParam context.Context, bucke
		ContextMoqParam context.Context
		Bucket string
		Object string
		VersionId string
		Tags map[string]string
	}{
		ContextMoqParam: contextMoqParam,
		Bucket: bucket,
		Object: object,
		VersionId: versionId,
		Tags: tags,
	}
	mock.lockPutObjectTagging.Lock()
	mock.calls.PutObjectTagging = append(mock.calls.PutObjectTagging, callInfo)
	mock.lockPutObjectTagging.Unlock()
	return mock.PutObjectTaggingFunc(contextMoqParam, bucket, object, tags)
	return mock.PutObjectTaggingFunc(contextMoqParam, bucket, object, versionId, tags)
}

// PutObjectTaggingCalls gets all the calls that were made to PutObjectTagging.
@@ -2635,12 +2645,14 @@ func (mock *BackendMock) PutObjectTaggingCalls() []struct {
	ContextMoqParam context.Context
	Bucket string
	Object string
	VersionId string
	Tags map[string]string
} {
	var calls []struct {
		ContextMoqParam context.Context
		Bucket string
		Object string
		VersionId string
		Tags map[string]string
	}
	mock.lockPutObjectTagging.RLock()
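The practical effect of the regenerated mock is that test stubs must accept the new versionId argument. A short test-style sketch of stubbing the updated GetObjectTagging signature, placed alongside the mock in the same package (the test body itself is illustrative):

func TestVersionedTagging(t *testing.T) {
	// Stub the version-aware signature; the extra versionId parameter is
	// now part of the mocked method.
	mock := &BackendMock{
		GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object, versionId string) (map[string]string, error) {
			return map[string]string{"requested-version": versionId}, nil
		},
	}
	tags, err := mock.GetObjectTagging(context.Background(), "my-bucket", "my-key", "v1")
	if err != nil || tags["requested-version"] != "v1" {
		t.Fatalf("unexpected result: %v %v", tags, err)
	}
	// the recorded call info also carries the version id
	if calls := mock.GetObjectTaggingCalls(); calls[0].VersionId != "v1" {
		t.Fatal("versionId not recorded")
	}
}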
@@ -18,7 +18,6 @@ import (
	"encoding/xml"
	"fmt"
	"net/http"
	"os"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
@@ -42,7 +41,6 @@ type S3ApiController struct {

const (
	// time constants
	iso8601Format = "20060102T150405Z"
	iso8601TimeFormatExtended = "Mon Jan _2 15:04:05 2006"
	timefmt = "Mon, 02 Jan 2006 15:04:05 GMT"

@@ -201,7 +199,7 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
		return ctx.Send(s3err.GetAPIErrorResponse(serr, "", "", ""))
	}

	fmt.Fprintf(os.Stderr, "Internal Error, %v\n", err)
	debuglogger.InernalError(err)
	ctx.Status(http.StatusInternalServerError)

	// If the error is not 's3err.APIError' return 'InternalError'
@@ -209,12 +207,32 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
		s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
}

// At this point, the S3 action has succeeded in the backend and
// the event has already occurred. This means the S3 event must be sent,
// even if unexpected issues arise while further parsing the response payload.
if svc.EventSender != nil && opts.EventName != "" {
	svc.EventSender.SendEvent(ctx, s3event.EventMeta{
		BucketOwner: opts.BucketOwner,
		ObjectSize: opts.ObjectSize,
		ObjectETag: opts.ObjectETag,
		VersionId: opts.VersionId,
		EventName: opts.EventName,
	})
}

if opts.Status == 0 {
	opts.Status = http.StatusOK
}

// if no data payload is provided, send the response status
if response.Data == nil {
	if svc.Logger != nil {
		svc.Logger.Log(ctx, nil, []byte{}, s3log.LogMeta{
			Action: s3action,
			BucketOwner: opts.BucketOwner,
			ObjectSize: opts.ObjectSize,
		})
	}
	ctx.Status(opts.Status)
	return nil
}
@@ -228,6 +246,13 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
} else {
	if responseBytes, err = xml.Marshal(response.Data); err != nil {
		debuglogger.Logf("Internal Error, %v", err)
		if svc.Logger != nil {
			svc.Logger.Log(ctx, err, nil, s3log.LogMeta{
				Action: s3action,
				BucketOwner: opts.BucketOwner,
				ObjectSize: opts.ObjectSize,
			})
		}
		return ctx.Status(http.StatusInternalServerError).Send(s3err.GetAPIErrorResponse(
			s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
	}
@@ -237,29 +262,19 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
	}
}

if svc.Logger != nil {
	svc.Logger.Log(ctx, nil, responseBytes, s3log.LogMeta{
		Action: s3action,
		BucketOwner: opts.BucketOwner,
		ObjectSize: opts.ObjectSize,
	})
}

if svc.EventSender != nil {
	svc.EventSender.SendEvent(ctx, s3event.EventMeta{
		BucketOwner: opts.BucketOwner,
		ObjectSize: opts.ObjectSize,
		ObjectETag: opts.ObjectETag,
		VersionId: opts.VersionId,
		EventName: opts.EventName,
	})
}

if ok {
	if len(responseBytes) > 0 {
		ctx.Response().Header.Set("Content-Length", fmt.Sprint(len(responseBytes)))
	}

	if svc.Logger != nil {
		svc.Logger.Log(ctx, nil, responseBytes, s3log.LogMeta{
			Action: s3action,
			BucketOwner: opts.BucketOwner,
			ObjectSize: opts.ObjectSize,
		})
	}

	return ctx.Send(responseBytes)
}

@@ -267,6 +282,13 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
if msglen > maxXMLBodyLen {
	debuglogger.Logf("XML encoded body len %v exceeds max len %v",
		msglen, maxXMLBodyLen)
	if svc.Logger != nil {
		svc.Logger.Log(ctx, err, []byte{}, s3log.LogMeta{
			Action: s3action,
			BucketOwner: opts.BucketOwner,
			ObjectSize: opts.ObjectSize,
		})
	}
	ctx.Status(http.StatusInternalServerError)

	return ctx.Send(s3err.GetAPIErrorResponse(
@@ -279,6 +301,14 @@ func ProcessController(ctx *fiber.Ctx, controller Controller, s3action string, s
// Set the Content-Length header
ctx.Response().Header.SetContentLength(msglen)

if svc.Logger != nil {
	svc.Logger.Log(ctx, nil, responseBytes, s3log.LogMeta{
		Action: s3action,
		BucketOwner: opts.BucketOwner,
		ObjectSize: opts.ObjectSize,
	})
}

return ctx.Send(res)
}

@@ -287,6 +317,8 @@ func SetResponseHeaders(ctx *fiber.Ctx, headers map[string]*string) {
	if headers == nil {
		return
	}

	ctx.Response().Header.DisableNormalizing()
	for key, val := range headers {
		if val == nil || *val == "" {
			continue
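The reordering above guarantees the event fires as soon as the backend action has succeeded, before response marshalling can fail. A sketch of a sender satisfying that call site; the interface is assumed to consist of just the SendEvent method used above, since its definition is not shown in this hunk:

// nopEventSender acknowledges and discards events; a real sender would
// publish to NATS, RabbitMQ, a webhook, etc. Only the SendEvent shape is
// taken from the call sites above.
type nopEventSender struct{}

func (nopEventSender) SendEvent(ctx *fiber.Ctx, meta s3event.EventMeta) {
	// intentionally empty: the event is dropped
}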
@@ -15,10 +15,13 @@
package controllers

import (
    "errors"

    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/auth"
    "github.com/versity/versitygw/s3api/utils"
    "github.com/versity/versitygw/s3err"
)

func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {
@@ -42,6 +45,9 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {
    })
    if err != nil {
        return &Response{
            Headers: map[string]*string{
                "x-amz-bucket-region": utils.GetStringPtr(region),
            },
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
@@ -54,6 +60,17 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {
    })

    if err != nil {
        if errors.Is(err, s3err.GetAPIError(s3err.ErrAccessDenied)) {
            return &Response{
                // access denied for head object still returns region header
                Headers: map[string]*string{
                    "x-amz-bucket-region": utils.GetStringPtr(region),
                },
                MetaOpts: &MetaOptions{
                    BucketOwner: parsedAcl.Owner,
                },
            }, err
        }
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
@@ -63,8 +80,8 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) (*Response, error) {

    return &Response{
        Headers: map[string]*string{
            "X-Amz-Access-Point-Alias": utils.GetStringPtr("false"),
            "X-Amz-Bucket-Region":      utils.GetStringPtr(region),
            "x-amz-access-point-alias": utils.GetStringPtr("false"),
            "x-amz-bucket-region":      utils.GetStringPtr(region),
        },
        MetaOpts: &MetaOptions{
            BucketOwner: parsedAcl.Owner,
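The header keys here change from canonical-cased to lowercase; that only survives on the wire because SetResponseHeaders (earlier in this section) calls DisableNormalizing before writing the map. A small self-contained fiber sketch of that behavior — the route and values are illustrative:

```go
package main

import "github.com/gofiber/fiber/v2"

func main() {
	app := fiber.New()
	app.Get("/probe", func(c *fiber.Ctx) error {
		// Without DisableNormalizing, fasthttp canonicalizes the key to
		// "X-Amz-Bucket-Region"; with it, the lowercase form used by
		// HeadBucket above is emitted verbatim.
		c.Response().Header.DisableNormalizing()
		c.Response().Header.Set("x-amz-bucket-region", "us-east-1")
		return c.SendStatus(fiber.StatusOK)
	})
	_ = app.Listen(":8080")
}
```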
@@ -48,6 +48,9 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
            },
            output: testOutput{
                response: &Response{
                    Headers: map[string]*string{
                        "x-amz-bucket-region": utils.GetStringPtr(region),
                    },
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
@@ -98,8 +101,8 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
            output: testOutput{
                response: &Response{
                    Headers: map[string]*string{
                        "X-Amz-Access-Point-Alias": utils.GetStringPtr("false"),
                        "X-Amz-Bucket-Region":      utils.GetStringPtr(region),
                        "x-amz-access-point-alias": utils.GetStringPtr("false"),
                        "x-amz-bucket-region":      utils.GetStringPtr(region),
                    },
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",

@@ -67,7 +67,7 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) (*Response, error) {
        }, s3err.GetAPIError(s3err.ErrInvalidRequest)
    }

    err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, dObj.Objects, bypass, IsBucketPublic, c.be)
    err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, dObj.Objects, bypass, IsBucketPublic, c.be, false)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -271,37 +270,6 @@ func (c S3ApiController) PutBucketCors(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
algo, checksusms, err := utils.ParseChecksumHeadersAndSdkAlgo(ctx)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
if algo != "" {
|
||||
rdr, err := utils.NewHashReader(bytes.NewReader(body), checksusms[algo], utils.HashType(strings.ToLower(string(algo))))
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
// Pass the same body to avoid data duplication
|
||||
_, err = rdr.Read(body)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to read hash calculation data: %v", err)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
}
|
||||
|
||||
err = c.be.PutBucketCors(ctx.Context(), bucket, body)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
@@ -384,6 +352,15 @@ func (c S3ApiController) PutBucketAcl(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, err
|
||||
}
|
||||
|
||||
err = auth.ValidateCannedACL(acl)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
ownership, err := c.be.GetBucketOwnershipControls(ctx.Context(), bucket)
|
||||
if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrOwnershipControlsNotFound)) {
|
||||
return &Response{
|
||||
@@ -451,14 +428,6 @@ func (c S3ApiController) PutBucketAcl(ctx *fiber.Ctx) (*Response, error) {
|
||||
AccessControlPolicy: &accessControlPolicy,
|
||||
}
|
||||
} else if acl != "" {
|
||||
if acl != "private" && acl != "public-read" && acl != "public-read-write" {
|
||||
debuglogger.Logf("invalid acl: %q", acl)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: parsedAcl.Owner,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
}
|
||||
if grants != "" {
|
||||
debuglogger.Logf("invalid request: %q (grants) %q (acl)",
|
||||
grants, acl)
|
||||
@@ -532,14 +501,28 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
// validate the bucket name
|
||||
if ok := utils.IsValidBucketName(bucket); !ok {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
|
||||
}
|
||||
|
||||
// validate bucket canned acl
|
||||
err := auth.ValidateCannedACL(acl)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
// validate the object ownership value
|
||||
if ok := utils.IsValidOwnership(objectOwnership); !ok {
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{},
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
},
|
||||
}, s3err.APIError{
|
||||
Code: "InvalidArgument",
|
||||
Description: fmt.Sprintf("Invalid x-amz-object-ownership header: %v", objectOwnership),
|
||||
@@ -565,6 +548,32 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
}, s3err.GetAPIError(s3err.ErrBothCannedAndHeaderGrants)
|
||||
}
|
||||
|
||||
var body s3response.CreateBucketConfiguration
|
||||
if len(ctx.Body()) != 0 {
|
||||
// request body is optional for CreateBucket
|
||||
err := xml.Unmarshal(ctx.Body(), &body)
|
||||
if err != nil {
|
||||
debuglogger.Logf("failed to parse the request body: %v", err)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrMalformedXML)
|
||||
}
|
||||
|
||||
if body.LocationConstraint != "" {
|
||||
region := utils.ContextKeyRegion.Get(ctx).(string)
|
||||
if body.LocationConstraint != region {
|
||||
debuglogger.Logf("invalid location constraint: %s", body.LocationConstraint)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
BucketOwner: acct.Access,
|
||||
},
|
||||
}, s3err.GetAPIError(s3err.ErrInvalidLocationConstraint)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defACL := auth.ACL{
|
||||
Owner: acct.Access,
|
||||
}
|
||||
@@ -594,6 +603,9 @@ func (c S3ApiController) CreateBucket(ctx *fiber.Ctx) (*Response, error) {
|
||||
Bucket: &bucket,
|
||||
ObjectOwnership: objectOwnership,
|
||||
ObjectLockEnabledForBucket: &lockEnabled,
|
||||
CreateBucketConfiguration: &types.CreateBucketConfiguration{
|
||||
Tags: body.TagSet,
|
||||
},
|
||||
}, updAcl)
|
||||
return &Response{
|
||||
MetaOpts: &MetaOptions{
|
||||
|
||||
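CreateBucket now accepts an optional XML body and rejects a LocationConstraint that differs from the gateway's configured region. A sketch of that round-trip with a local stand-in for s3response.CreateBucketConfiguration (the real type also carries at least a TagSet field, per the hunk above):

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// createBucketConfiguration is a local stand-in for the gateway's
// s3response.CreateBucketConfiguration; only the field exercised by the
// hunk above is modeled here.
type createBucketConfiguration struct {
	XMLName            xml.Name `xml:"CreateBucketConfiguration"`
	LocationConstraint string   `xml:"LocationConstraint"`
}

func main() {
	body := []byte(`<CreateBucketConfiguration>
  <LocationConstraint>us-west-1</LocationConstraint>
</CreateBucketConfiguration>`)

	var cfg createBucketConfiguration
	if err := xml.Unmarshal(body, &cfg); err != nil {
		panic(err)
	}

	// The handler compares this value against the request's region and
	// answers ErrInvalidLocationConstraint on a mismatch.
	region := "us-east-1"
	fmt.Println(cfg.LocationConstraint != region) // true -> rejected
}
```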
@@ -528,22 +528,6 @@ func TestS3ApiController_PutBucketCors(t *testing.T) {
                err: s3err.GetUnsopportedCORSMethodErr("invalid_method"),
            },
        },
        {
            name: "invalid checksum algo",
            input: testInput{
                locals: defaultLocals,
                body:   validBody,
                headers: map[string]string{
                    "X-Amz-Sdk-Checksum-Algorithm": "invalid_algo",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{BucketOwner: "root"},
                },
                err: s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm),
            },
        },
        {
            name: "backend error",
            input: testInput{
@@ -711,6 +695,11 @@ func TestS3ApiController_CreateBucket(t *testing.T) {
        Role:   auth.RoleUser,
    }

    invLocConstBody, err := xml.Marshal(s3response.CreateBucketConfiguration{
        LocationConstraint: "us-west-1",
    })
    assert.NoError(t, err)

    tests := []struct {
        name   string
        input  testInput
@@ -740,11 +729,62 @@ func TestS3ApiController_CreateBucket(t *testing.T) {
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{},
                    MetaOpts: &MetaOptions{
                        BucketOwner: adminAcc.Access,
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidBucketName),
            },
        },
        {
            name: "malformed body",
            input: testInput{
                locals: map[utils.ContextKey]any{
                    utils.ContextKeyAccount: adminAcc,
                },
                body: []byte("invalid_body"),
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{BucketOwner: adminAcc.Access},
                },
                err: s3err.GetAPIError(s3err.ErrMalformedXML),
            },
        },

        {
            name: "invalid canned acl",
            input: testInput{
                locals: map[utils.ContextKey]any{
                    utils.ContextKeyAccount: adminAcc,
                },
                headers: map[string]string{
                    "x-amz-acl": "invalid_acl",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{BucketOwner: adminAcc.Access},
                },
                err: s3err.GetAPIError(s3err.ErrInvalidArgument),
            },
        },
        {
            name: "invalid location constraint",
            input: testInput{
                locals: map[utils.ContextKey]any{
                    utils.ContextKeyAccount: adminAcc,
                    utils.ContextKeyRegion:  "us-east-1",
                },
                body: invLocConstBody,
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{BucketOwner: adminAcc.Access},
                },
                err: s3err.GetAPIError(s3err.ErrInvalidLocationConstraint),
            },
        },
        {
            name: "invalid ownership",
            input: testInput{
@@ -757,7 +797,9 @@ func TestS3ApiController_CreateBucket(t *testing.T) {
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{},
                    MetaOpts: &MetaOptions{
                        BucketOwner: adminAcc.Access,
                    },
                },
                err: s3err.APIError{
                    Code: "InvalidArgument",
@@ -1059,7 +1101,7 @@ func TestS3ApiController_PutBucketAcl(t *testing.T) {
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidRequest),
                err: s3err.GetAPIError(s3err.ErrInvalidArgument),
            },
        },
        {
@@ -30,11 +30,17 @@ import (
func (c S3ApiController) DeleteObjectTagging(ctx *fiber.Ctx) (*Response, error) {
    bucket := ctx.Params("bucket")
    key := strings.TrimPrefix(ctx.Path(), fmt.Sprintf("/%s/", bucket))
    versionId := ctx.Query("versionId")
    acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
    isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
    isBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
    parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)

    action := auth.DeleteObjectTaggingAction
    if versionId != "" {
        action = auth.DeleteObjectVersionTaggingAction
    }

    err := auth.VerifyAccess(ctx.Context(), c.be,
        auth.AccessOptions{
            Readonly: c.readonly,
@@ -44,7 +50,7 @@ func (c S3ApiController) DeleteObjectTagging(ctx *fiber.Ctx) (*Response, error)
            Acc:    acct,
            Bucket: bucket,
            Object: key,
            Action: auth.DeleteObjectTaggingAction,
            Action: action,
            IsPublicRequest: isBucketPublic,
        })
    if err != nil {
@@ -55,7 +61,16 @@ func (c S3ApiController) DeleteObjectTagging(ctx *fiber.Ctx) (*Response, error)
        }, err
    }

    err = c.be.DeleteObjectTagging(ctx.Context(), bucket, key)
    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    err = c.be.DeleteObjectTagging(ctx.Context(), bucket, key, versionId)
    return &Response{
        MetaOpts: &MetaOptions{
            Status: http.StatusNoContent,
@@ -124,7 +139,10 @@ func (c S3ApiController) DeleteObject(ctx *fiber.Ctx) (*Response, error) {
    isBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
    parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)

    //TODO: check s3:DeleteObjectVersion policy in case a use tries to delete a version of an object
    action := auth.DeleteObjectAction
    if versionId != "" {
        action = auth.DeleteObjectVersionAction
    }

    err := auth.VerifyAccess(ctx.Context(), c.be,
        auth.AccessOptions{
@@ -135,7 +153,7 @@ func (c S3ApiController) DeleteObject(ctx *fiber.Ctx) (*Response, error) {
            Acc:    acct,
            Bucket: bucket,
            Object: key,
            Action: auth.DeleteObjectAction,
            Action: action,
            IsPublicRequest: isBucketPublic,
        })
    if err != nil {
@@ -146,6 +164,15 @@ func (c S3ApiController) DeleteObject(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    err = auth.CheckObjectAccess(
        ctx.Context(),
        bucket,
@@ -159,6 +186,7 @@ func (c S3ApiController) DeleteObject(ctx *fiber.Ctx) (*Response, error) {
        bypass,
        isBucketPublic,
        c.be,
        false,
    )
    if err != nil {
        return &Response{
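Both handlers now run utils.ValidateVersionId before touching the backend. Its body is not part of this diff; the test fixtures (`invalid_versionId` rejected, `01BX5ZZKBKACTAV9WEVGEMMVRZ` accepted) suggest a 26-character ULID check, so the following is only a guess at its shape, not the gateway's actual code:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// crockford is the base32 alphabet ULIDs use (no I, L, O, U).
const crockford = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"

// validateVersionID is hypothetical: the fixtures in the tests below only
// tell us that an empty id passes, a ULID-looking id passes, and
// "invalid_versionId" fails.
func validateVersionID(v string) error {
	if v == "" { // no versionId query means "latest version"
		return nil
	}
	if len(v) != 26 {
		return errors.New("invalid version id specified")
	}
	for _, r := range strings.ToUpper(v) {
		if !strings.ContainsRune(crockford, r) {
			return errors.New("invalid version id specified")
		}
	}
	return nil
}

func main() {
	fmt.Println(validateVersionID("01BX5ZZKBKACTAV9WEVGEMMVRZ")) // <nil>
	fmt.Println(validateVersionID("invalid_versionId"))          // error
}
```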
@@ -45,6 +45,23 @@ func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid versionId",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "backend returns error",
            input: testInput{
@@ -81,7 +98,7 @@ func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            be := &BackendMock{
                DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object string) error {
                DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object, versionId string) error {
                    return tt.input.beErr
                },
                GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
@@ -99,7 +116,8 @@ func TestS3ApiController_DeleteObjectTagging(t *testing.T) {
                tt.output.response,
                tt.output.err,
                ctxInputs{
                    locals: tt.input.locals,
                    locals:  tt.input.locals,
                    queries: tt.input.queries,
                })
        })
    }
@@ -206,6 +224,23 @@ func TestS3ApiController_DeleteObject(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid versionId",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "object locked",
            input: testInput{
@@ -289,7 +324,8 @@ func TestS3ApiController_DeleteObject(t *testing.T) {
                tt.output.response,
                tt.output.err,
                ctxInputs{
                    locals: tt.input.locals,
                    locals:  tt.input.locals,
                    queries: tt.input.queries,
                })
        })
    }
@@ -35,11 +35,17 @@ import (
func (c S3ApiController) GetObjectTagging(ctx *fiber.Ctx) (*Response, error) {
    bucket := ctx.Params("bucket")
    key := strings.TrimPrefix(ctx.Path(), fmt.Sprintf("/%s/", bucket))
    versionId := ctx.Query("versionId")
    acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
    isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
    parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
    isPublicBucket := utils.ContextKeyPublicBucket.IsSet(ctx)

    action := auth.GetObjectTaggingAction
    if versionId != "" {
        action = auth.GetObjectVersionTaggingAction
    }

    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl:      parsedAcl,
@@ -48,7 +54,7 @@ func (c S3ApiController) GetObjectTagging(ctx *fiber.Ctx) (*Response, error) {
        Acc:    acct,
        Bucket: bucket,
        Object: key,
        Action: auth.GetObjectTaggingAction,
        Action: action,
        IsPublicRequest: isPublicBucket,
    })
    if err != nil {
@@ -59,7 +65,16 @@ func (c S3ApiController) GetObjectTagging(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    data, err := c.be.GetObjectTagging(ctx.Context(), bucket, key)
    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    data, err := c.be.GetObjectTagging(ctx.Context(), bucket, key, versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
@@ -81,7 +96,7 @@ func (c S3ApiController) GetObjectTagging(ctx *fiber.Ctx) (*Response, error) {
        MetaOpts: &MetaOptions{
            BucketOwner: parsedAcl.Owner,
        },
    }, err
    }, nil
}

func (c S3ApiController) GetObjectRetention(ctx *fiber.Ctx) (*Response, error) {
@@ -113,6 +128,15 @@ func (c S3ApiController) GetObjectRetention(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    data, err := c.be.GetObjectRetention(ctx.Context(), bucket, key, versionId)
    if err != nil {
        return &Response{
@@ -160,6 +184,15 @@ func (c S3ApiController) GetObjectLegalHold(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    data, err := c.be.GetObjectLegalHold(ctx.Context(), bucket, key, versionId)
    return &Response{
        Data: auth.ParseObjectLegalHoldOutput(data),
@@ -293,6 +326,11 @@ func (c S3ApiController) GetObjectAttributes(ctx *fiber.Ctx) (*Response, error)
    parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)
    isPublicBucket := utils.ContextKeyPublicBucket.IsSet(ctx)

    action := auth.GetObjectAttributesAction
    if versionId != "" {
        action = auth.GetObjectVersionAttributesAction
    }

    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl:      parsedAcl,
@@ -301,7 +339,7 @@ func (c S3ApiController) GetObjectAttributes(ctx *fiber.Ctx) (*Response, error)
        Acc:    acct,
        Bucket: bucket,
        Object: key,
        Action: auth.GetObjectAttributesAction,
        Action: action,
        IsPublicRequest: isPublicBucket,
    })
    if err != nil {
@@ -312,6 +350,15 @@ func (c S3ApiController) GetObjectAttributes(ctx *fiber.Ctx) (*Response, error)
        }, err
    }

    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    // parse max parts
    maxParts, err := utils.ParseUint(maxPartsStr)
    if err != nil {
@@ -455,6 +502,15 @@ func (c S3ApiController) GetObject(ctx *fiber.Ctx) (*Response, error) {
        partNumber = &partNumberQuery
    }

    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    // validate the checksum mode
    if checksumMode != "" && checksumMode != types.ChecksumModeEnabled {
        debuglogger.Logf("invalid x-amz-checksum-mode header value: %v", checksumMode)
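Every versioned read handler in this file repeats the same two-line action switch. A self-contained restatement of the pattern with local stand-ins for the auth package's Action constants (the helper is illustrative; the gateway inlines the if):

```go
package main

import "fmt"

// Action stands in for the gateway's auth.Action; the two constants use
// the real S3 action names that the diff references.
type Action string

const (
	GetObjectTaggingAction        Action = "s3:GetObjectTagging"
	GetObjectVersionTaggingAction Action = "s3:GetObjectVersionTagging"
)

// versionedAction restates the recurring two-line switch: a request that
// names a specific version must be authorized against the *Version*
// variant of the action.
func versionedAction(base, versioned Action, versionId string) Action {
	if versionId != "" {
		return versioned
	}
	return base
}

func main() {
	fmt.Println(versionedAction(GetObjectTaggingAction, GetObjectVersionTaggingAction, ""))
	fmt.Println(versionedAction(GetObjectTaggingAction, GetObjectVersionTaggingAction, "01BX5ZZKBKACTAV9WEVGEMMVRZ"))
}
```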
@@ -52,6 +52,23 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid versionId",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "backend returns error",
            input: testInput{
@@ -95,7 +112,7 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            be := &BackendMock{
                GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object string) (map[string]string, error) {
                GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object, versionId string) (map[string]string, error) {
                    return tt.input.beRes.(map[string]string), tt.input.beErr
                },
                GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
@@ -113,8 +130,9 @@ func TestS3ApiController_GetObjectTagging(t *testing.T) {
                tt.output.response,
                tt.output.err,
                ctxInputs{
                    locals: tt.input.locals,
                    body:   tt.input.body,
                    locals:  tt.input.locals,
                    body:    tt.input.body,
                    queries: tt.input.queries,
                })
        })
    }
@@ -147,6 +165,23 @@ func TestS3ApiController_GetObjectRetention(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid versionId",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "backend returns error",
            input: testInput{
@@ -218,8 +253,9 @@ func TestS3ApiController_GetObjectRetention(t *testing.T) {
                tt.output.response,
                tt.output.err,
                ctxInputs{
                    locals: tt.input.locals,
                    body:   tt.input.body,
                    locals:  tt.input.locals,
                    body:    tt.input.body,
                    queries: tt.input.queries,
                })
        })
    }
@@ -249,6 +285,23 @@ func TestS3ApiController_GetObjectLegalHold(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid versionId",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "backend returns error",
            input: testInput{
@@ -305,8 +358,9 @@ func TestS3ApiController_GetObjectLegalHold(t *testing.T) {
                tt.output.response,
                tt.output.err,
                ctxInputs{
                    locals: tt.input.locals,
                    body:   tt.input.body,
                    locals:  tt.input.locals,
                    body:    tt.input.body,
                    queries: tt.input.queries,
                })
        })
    }
@@ -555,6 +609,23 @@ func TestS3ApiController_GetObjectAttributes(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid versionId",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "invalid max parts",
            input: testInput{
@@ -663,6 +734,7 @@ func TestS3ApiController_GetObjectAttributes(t *testing.T) {
                    locals:  tt.input.locals,
                    body:    tt.input.body,
                    headers: tt.input.headers,
                    queries: tt.input.queries,
                })
        })
    }
@@ -693,6 +765,23 @@ func TestS3ApiController_GetObject(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid versionId",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "invalid checksum mode",
            input: testInput{
@@ -757,7 +846,7 @@ func TestS3ApiController_GetObject(t *testing.T) {
                "Range": "100-200",
            },
            queries: map[string]string{
                "versionId": "versionId",
                "versionId": "01BX5ZZKBKACTAV9WEVGEMMVRZ",
            },
            locals: defaultLocals,
            beRes: &s3.GetObjectOutput{
@@ -80,6 +80,15 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) (*Response, error) {
    partNumber = &partNumberQuery
    }

    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    checksumMode := types.ChecksumMode(strings.ToUpper(ctx.Get("x-amz-checksum-mode")))
    if checksumMode != "" && checksumMode != types.ChecksumModeEnabled {
        debuglogger.Logf("invalid x-amz-checksum-mode header value: %v", checksumMode)
@@ -126,30 +135,31 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) (*Response, error) {

    return &Response{
        Headers: map[string]*string{
            "ETag":                res.ETag,
            "x-amz-restore":       res.Restore,
            "accept-ranges":       res.AcceptRanges,
            "Content-Range":       res.ContentRange,
            "Content-Disposition": res.ContentDisposition,
            "Content-Encoding":    res.ContentEncoding,
            "Content-Language":    res.ContentLanguage,
            "Cache-Control":       res.CacheControl,
            "Content-Length":      utils.ConvertPtrToStringPtr(res.ContentLength),
            "Content-Type":        res.ContentType,
            "Expires":             res.ExpiresString,
            "ETag":                res.ETag,
            "Last-Modified":       utils.FormatDatePtrToString(res.LastModified, timefmt),
            "x-amz-restore":       res.Restore,
            "accept-ranges":       res.AcceptRanges,
            "x-amz-checksum-crc32":     res.ChecksumCRC32,
            "x-amz-checksum-crc64nvme": res.ChecksumCRC64NVME,
            "x-amz-checksum-crc32c":    res.ChecksumCRC32C,
            "x-amz-checksum-sha1":      res.ChecksumSHA1,
            "x-amz-checksum-sha256":    res.ChecksumSHA256,
            "Content-Type":             res.ContentType,
            "x-amz-version-id":         res.VersionId,
            "Content-Length":           utils.ConvertPtrToStringPtr(res.ContentLength),
            "x-amz-mp-parts-count":     utils.ConvertPtrToStringPtr(res.PartsCount),
            "x-amz-object-lock-mode":   utils.ConvertToStringPtr(res.ObjectLockMode),
            "x-amz-object-lock-legal-hold":        utils.ConvertToStringPtr(res.ObjectLockLegalHoldStatus),
            "x-amz-storage-class":                 utils.ConvertToStringPtr(res.StorageClass),
            "x-amz-checksum-type":                 utils.ConvertToStringPtr(res.ChecksumType),
            "x-amz-object-lock-retain-until-date": utils.FormatDatePtrToString(res.ObjectLockRetainUntilDate, time.RFC3339),
            "Last-Modified":       utils.FormatDatePtrToString(res.LastModified, timefmt),
            "x-amz-tagging-count": utils.ConvertPtrToStringPtr(res.TagCount),
        },
        MetaOpts: &MetaOptions{
            BucketOwner: parsedAcl.Owner,

@@ -51,13 +51,30 @@ func TestS3ApiController_HeadObject(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid versionId",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "invalid part number",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "partNumber": "-4",
                    "versionId":  "id",
                    "versionId":  "01BX5ZZKBKACTAV9WEVGEMMVRZ",
                },
            },
            output: testOutput{
@@ -147,6 +164,7 @@ func TestS3ApiController_HeadObject(t *testing.T) {
                "x-amz-checksum-type":                 nil,
                "x-amz-object-lock-retain-until-date": nil,
                "Last-Modified":                       nil,
                "x-amz-tagging-count":                 nil,
                "Content-Type":   utils.GetStringPtr("application/xml"),
                "Content-Length": utils.GetStringPtr("100"),
            },
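HeadObject's response map now mixes always-present headers with many nullable ones; SetResponseHeaders (first hunk of this section) silently drops nil or empty entries, which is what the test above relies on when it asserts nil values. A runnable restatement of that skip rule (helper name and sample values are mine):

```go
package main

import "fmt"

// strPtr mirrors utils.GetStringPtr from the hunks above.
func strPtr(s string) *string { return &s }

func main() {
	// HeadObject builds a map with many nil-able values; absent metadata
	// simply produces no header on the wire.
	headers := map[string]*string{
		"ETag":                strPtr(`"abc123"`),
		"x-amz-version-id":    nil,        // skipped: nil
		"x-amz-tagging-count": strPtr(""), // skipped: empty
	}
	for key, val := range headers {
		if val == nil || *val == "" { // the exact guard from SetResponseHeaders
			continue
		}
		fmt.Printf("%s: %s\n", key, *val)
	}
}
```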
@@ -278,7 +278,7 @@ func (c S3ApiController) CompleteMultipartUpload(ctx *fiber.Ctx) (*Response, err
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, s3err.GetAPIError(s3err.ErrEmptyParts)
        }, s3err.GetAPIError(s3err.ErrMalformedXML)
    }

    var mpuObjectSize *int64
@@ -305,7 +305,7 @@ func (c S3ApiController) CompleteMultipartUpload(ctx *fiber.Ctx) (*Response, err
        mpuObjectSize = &val
    }

    checksums, err := utils.ParseChecksumHeaders(ctx)
    checksums, err := utils.ParseCompleteMpChecksumHeaders(ctx)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
@@ -325,6 +325,15 @@ func (c S3ApiController) CompleteMultipartUpload(ctx *fiber.Ctx) (*Response, err

    ifMatch, ifNoneMatch := utils.ParsePreconditionMatchHeaders(ctx)

    err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, isBucketPublic, c.be, true)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    res, versid, err := c.be.CompleteMultipartUpload(ctx.Context(),
        &s3.CompleteMultipartUploadInput{
            Bucket: &bucket,

@@ -404,7 +404,7 @@ func TestS3ApiController_CompleteMultipartUpload(t *testing.T) {
                    BucketOwner: "root",
                },
            },
            err: s3err.GetAPIError(s3err.ErrEmptyParts),
            err: s3err.GetAPIError(s3err.ErrMalformedXML),
        },
    },
    {
@@ -479,13 +479,30 @@ func TestS3ApiController_CompleteMultipartUpload(t *testing.T) {
            err: s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type"),
        },
    },
    {
        name: "object is locked",
        input: testInput{
            locals:       defaultLocals,
            body:         validMpBody,
            extraMockErr: s3err.GetAPIError(s3err.ErrObjectLocked),
        },
        output: testOutput{
            response: &Response{
                MetaOpts: &MetaOptions{
                    BucketOwner: "root",
                },
            },
            err: s3err.GetAPIError(s3err.ErrObjectLocked),
        },
    },
    {
        name: "backend returns error",
        input: testInput{
            locals: defaultLocals,
            body:   validMpBody,
            beErr:  s3err.GetAPIError(s3err.ErrNoSuchBucket),
            beRes:  s3response.CompleteMultipartUploadResult{},
            locals:       defaultLocals,
            body:         validMpBody,
            beErr:        s3err.GetAPIError(s3err.ErrNoSuchBucket),
            beRes:        s3response.CompleteMultipartUploadResult{},
            extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
        },
        output: testOutput{
            response: &Response{
@@ -514,6 +531,7 @@ func TestS3ApiController_CompleteMultipartUpload(t *testing.T) {
            headers: map[string]string{
                "X-Amz-Mp-Object-Size": "3",
            },
            extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
        },
        output: testOutput{
            response: &Response{
@@ -542,6 +560,12 @@ func TestS3ApiController_CompleteMultipartUpload(t *testing.T) {
            GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
                return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
            },
            GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
                return nil, tt.input.extraMockErr
            },
            GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
                return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
            },
        }

        ctrl := S3ApiController{
@@ -36,11 +36,17 @@ import (
func (c S3ApiController) PutObjectTagging(ctx *fiber.Ctx) (*Response, error) {
    bucket := ctx.Params("bucket")
    key := strings.TrimPrefix(ctx.Path(), fmt.Sprintf("/%s/", bucket))
    versionId := ctx.Query("versionId")
    acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
    isRoot := utils.ContextKeyIsRoot.Get(ctx).(bool)
    IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
    parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)

    action := auth.PutObjectTaggingAction
    if versionId != "" {
        action = auth.PutObjectVersionTaggingAction
    }

    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl:      parsedAcl,
@@ -49,7 +55,7 @@ func (c S3ApiController) PutObjectTagging(ctx *fiber.Ctx) (*Response, error) {
        Acc:    acct,
        Bucket: bucket,
        Object: key,
        Action: auth.PutObjectTaggingAction,
        Action: action,
        IsPublicRequest: IsBucketPublic,
    })
    if err != nil {
@@ -60,6 +66,15 @@ func (c S3ApiController) PutObjectTagging(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    tagging, err := utils.ParseTagging(ctx.Body(), utils.TagLimitObject)
    if err != nil {
        return &Response{
@@ -69,7 +84,7 @@ func (c S3ApiController) PutObjectTagging(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    err = c.be.PutObjectTagging(ctx.Context(), bucket, key, tagging)
    err = c.be.PutObjectTagging(ctx.Context(), bucket, key, versionId, tagging)
    return &Response{
        MetaOpts: &MetaOptions{
            BucketOwner: parsedAcl.Owner,
@@ -88,7 +103,7 @@ func (c S3ApiController) PutObjectRetention(ctx *fiber.Ctx) (*Response, error) {
    IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
    parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)

    if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly:      c.readonly,
        Acl:           parsedAcl,
        AclPermission: auth.PermissionWrite,
@@ -98,7 +113,8 @@ func (c S3ApiController) PutObjectRetention(ctx *fiber.Ctx) (*Response, error) {
        Object: key,
        Action: auth.PutObjectRetentionAction,
        IsPublicRequest: IsBucketPublic,
    }); err != nil {
    })
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
@@ -106,20 +122,18 @@ func (c S3ApiController) PutObjectRetention(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    if bypass {
        policy, err := c.be.GetBucketPolicy(ctx.Context(), bucket)
        if err != nil {
            bypass = false
        } else {
            if err := auth.VerifyBucketPolicy(policy, acct.Access, bucket, key, auth.BypassGovernanceRetentionAction); err != nil {
                bypass = false
            }
        }
    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    // parse the request body bytes into a go struct and validate
    retention, err := auth.ParseObjectLockRetentionInput(ctx.Body())
    if err != nil {
        debuglogger.Logf("failed to parse object lock configuration input: %v", err)
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
@@ -127,7 +141,27 @@ func (c S3ApiController) PutObjectRetention(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    err = c.be.PutObjectRetention(ctx.Context(), bucket, key, versionId, bypass, retention)
    // check if the operation is allowed
    err = auth.IsObjectLockRetentionPutAllowed(ctx.Context(), c.be, bucket, key, versionId, acct.Access, retention, bypass)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    // parse the retention to JSON
    data, err := auth.ParseObjectLockRetentionInputToJSON(retention)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    err = c.be.PutObjectRetention(ctx.Context(), bucket, key, versionId, data)
    return &Response{
        MetaOpts: &MetaOptions{
            BucketOwner: parsedAcl.Owner,
@@ -144,7 +178,7 @@ func (c S3ApiController) PutObjectLegalHold(ctx *fiber.Ctx) (*Response, error) {
    IsBucketPublic := utils.ContextKeyPublicBucket.IsSet(ctx)
    parsedAcl := utils.ContextKeyParsedAcl.Get(ctx).(auth.ACL)

    if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly:      c.readonly,
        Acl:           parsedAcl,
        AclPermission: auth.PermissionWrite,
@@ -154,7 +188,17 @@ func (c S3ApiController) PutObjectLegalHold(ctx *fiber.Ctx) (*Response, error) {
        Object: key,
        Action: auth.PutObjectLegalHoldAction,
        IsPublicRequest: IsBucketPublic,
    }); err != nil {
    })
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    err = utils.ValidateVersionId(versionId)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
@@ -181,7 +225,7 @@ func (c S3ApiController) PutObjectLegalHold(ctx *fiber.Ctx) (*Response, error) {
        }, s3err.GetAPIError(s3err.ErrMalformedXML)
    }

    err := c.be.PutObjectLegalHold(ctx.Context(), bucket, key, versionId, legalHold.Status == types.ObjectLockLegalHoldStatusOn)
    err = c.be.PutObjectLegalHold(ctx.Context(), bucket, key, versionId, legalHold.Status == types.ObjectLockLegalHoldStatusOn)
    return &Response{
        MetaOpts: &MetaOptions{
            BucketOwner: parsedAcl.Owner,
@@ -345,6 +389,15 @@ func (c S3ApiController) UploadPartCopy(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    if len(ctx.Request().Body()) != 0 {
        debuglogger.Logf("expected empty request body")
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, s3err.GetAPIError(s3err.ErrNonEmptyRequestBody)
    }

    if partNumber < minPartNumber || partNumber > maxPartNumber {
        debuglogger.Logf("invalid part number: %d", partNumber)
        return &Response{
@@ -481,6 +534,15 @@ func (c S3ApiController) CopyObject(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    if len(ctx.Request().Body()) != 0 {
        debuglogger.Logf("expected empty request body")
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, s3err.GetAPIError(s3err.ErrNonEmptyRequestBody)
    }

    metadata := utils.GetUserMetaData(&ctx.Request().Header)

    if metaDirective != "" && metaDirective != types.MetadataDirectiveCopy && metaDirective != types.MetadataDirectiveReplace {
@@ -522,6 +584,15 @@ func (c S3ApiController) CopyObject(ctx *fiber.Ctx) (*Response, error) {

    preconditionHdrs := utils.ParsePreconditionHeaders(ctx, utils.WithCopySource())

    err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, false, c.be, true)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
                BucketOwner: parsedAcl.Owner,
            },
        }, err
    }

    res, err := c.be.CopyObject(ctx.Context(),
        s3response.CopyObjectInput{
            Bucket: &bucket,
@@ -620,7 +691,7 @@ func (c S3ApiController) PutObject(ctx *fiber.Ctx) (*Response, error) {
        }, err
    }

    err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, IsBucketPublic, c.be)
    err = auth.CheckObjectAccess(ctx.Context(), bucket, acct.Access, []types.ObjectIdentifier{{Key: &key}}, true, IsBucketPublic, c.be, true)
    if err != nil {
        return &Response{
            MetaOpts: &MetaOptions{
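The inlined GetBucketPolicy/VerifyBucketPolicy bypass dance is gone; auth.IsObjectLockRetentionPutAllowed now owns the decision. Its body is not shown in this diff, so the following models only the S3 rule it presumably enforces (shortening a GOVERNANCE retention needs the bypass permission); names and logic are a sketch, not the gateway's implementation:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// retention is a local stand-in; the gateway parses the real value with
// auth.ParseObjectLockRetentionInput.
type retention struct {
	Mode            string // "GOVERNANCE" or "COMPLIANCE"
	RetainUntilDate time.Time
}

// retentionPutAllowed sketches the assumed rule: extending retention is
// always fine; shortening it is only allowed for GOVERNANCE mode when the
// caller may bypass governance retention; COMPLIANCE is never shortened.
func retentionPutAllowed(existing, requested retention, bypass bool) error {
	if requested.RetainUntilDate.After(existing.RetainUntilDate) {
		return nil
	}
	if existing.Mode == "GOVERNANCE" && bypass {
		return nil
	}
	return errors.New("AccessDenied")
}

func main() {
	now := time.Now()
	existing := retention{Mode: "GOVERNANCE", RetainUntilDate: now.Add(48 * time.Hour)}
	shorter := retention{Mode: "GOVERNANCE", RetainUntilDate: now.Add(24 * time.Hour)}
	fmt.Println(retentionPutAllowed(existing, shorter, false)) // AccessDenied
	fmt.Println(retentionPutAllowed(existing, shorter, true))  // <nil>
}
```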
@@ -64,6 +64,23 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid versionId",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "invalid request body",
            input: testInput{
@@ -115,7 +132,7 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            be := &BackendMock{
                PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object string, tags map[string]string) error {
                PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket, object, versionId string, tags map[string]string) error {
                    return tt.input.beErr
                },
                GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
@@ -133,8 +150,9 @@ func TestS3ApiController_PutObjectTagging(t *testing.T) {
                tt.output.response,
                tt.output.err,
                ctxInputs{
                    locals: tt.input.locals,
                    body:   tt.input.body,
                    locals:  tt.input.locals,
                    body:    tt.input.body,
                    queries: tt.input.queries,
                })
        })
    }
@@ -171,6 +189,23 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid versionId",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "invalid request body",
            input: testInput{
@@ -186,12 +221,29 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrMalformedXML),
            },
        },
        {
            name: "retention put not allowed",
            input: testInput{
                locals:       defaultLocals,
                body:         validRetentionBody,
                extraMockErr: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "backend returns error",
            input: testInput{
                locals: defaultLocals,
                beErr:  s3err.GetAPIError(s3err.ErrNoSuchBucket),
                body:   validRetentionBody,
                locals:       defaultLocals,
                beErr:        s3err.GetAPIError(s3err.ErrNoSuchBucket),
                body:         validRetentionBody,
                extraMockErr: s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration),
            },
            output: testOutput{
                response: &Response{
@@ -203,46 +255,11 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
            },
        },
        {
            name: "success bypass GetBucketPolicy fails",
            name: "successful response",
            input: testInput{
                locals:       defaultLocals,
                body:         validRetentionBody,
                extraMockErr: s3err.GetAPIError(s3err.ErrAccessDenied),
                headers: map[string]string{
                    "X-Amz-Bypass-Governance-Retention": "true",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
            },
        },
        {
            name: "success bypass VerifyBucketPolicy fails",
            input: testInput{
                locals:        defaultLocals,
                body:          validRetentionBody,
                extraMockResp: []byte("invalid_policy"),
                headers: map[string]string{
                    "X-Amz-Bypass-Governance-Retention": "true",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
            },
        },
        {
            name: "successful response",
            input: testInput{
                locals:       defaultLocals,
                body:         validRetentionBody,
                extraMockErr: s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration),
            },
            output: testOutput{
                response: &Response{
@@ -256,15 +273,14 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            be := &BackendMock{
                PutObjectRetentionFunc: func(contextMoqParam context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
                PutObjectRetentionFunc: func(contextMoqParam context.Context, bucket, object, versionId string, retention []byte) error {
                    return tt.input.beErr
                },
                GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
                    if tt.input.extraMockResp == nil {
                        return nil, tt.input.extraMockErr
                    } else {
                        return tt.input.extraMockResp.([]byte), tt.input.extraMockErr
                    }
                    return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
                },
                GetObjectRetentionFunc: func(contextMoqParam context.Context, bucket, object, versionId string) ([]byte, error) {
                    return nil, tt.input.extraMockErr
                },
            }

@@ -281,6 +297,7 @@ func TestS3ApiController_PutObjectRetention(t *testing.T) {
                    locals:  tt.input.locals,
                    body:    tt.input.body,
                    headers: tt.input.headers,
                    queries: tt.input.queries,
                })
        })
    }
@@ -317,6 +334,23 @@ func TestS3ApiController_PutObjectLegalHold(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid request body",
            input: testInput{
                locals: defaultLocals,
                queries: map[string]string{
                    "versionId": "invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "invalid request body",
            input: testInput{
@@ -399,8 +433,9 @@ func TestS3ApiController_PutObjectLegalHold(t *testing.T) {
                tt.output.response,
                tt.output.err,
                ctxInputs{
                    locals: tt.input.locals,
                    body:   tt.input.body,
                    locals:  tt.input.locals,
                    body:    tt.input.body,
                    queries: tt.input.queries,
                })
        })
    }
@@ -598,6 +633,26 @@ func TestS3ApiController_UploadPartCopy(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrAccessDenied),
            },
        },
        {
            name: "invalid copy source: invalid versionId",
            input: testInput{
                locals: defaultLocals,
                headers: map[string]string{
                    "X-Amz-Copy-Source": "bucket/object?versionId=invalid_versionId",
                },
                queries: map[string]string{
                    "partNumber": "2",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "invalid copy source",
            input: testInput{
@@ -618,6 +673,27 @@ func TestS3ApiController_UploadPartCopy(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrInvalidCopySourceEncoding),
            },
        },
        {
            name: "non empty request body",
            input: testInput{
                locals: defaultLocals,
                headers: map[string]string{
                    "X-Amz-Copy-Source": "bucket/object",
                },
                queries: map[string]string{
                    "partNumber": "2",
                },
                body: []byte("body"),
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrNonEmptyRequestBody),
            },
        },
        {
            name: "invalid part number",
            input: testInput{
@@ -715,6 +791,7 @@ func TestS3ApiController_UploadPartCopy(t *testing.T) {
                    locals:  tt.input.locals,
                    headers: tt.input.headers,
                    queries: tt.input.queries,
                    body:    tt.input.body,
                })
        })
    }
@@ -836,6 +913,41 @@ func TestS3ApiController_CopyObject(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrInvalidCopySourceBucket),
            },
        },
        {
            name: "invalid copy source: versionId",
            input: testInput{
                locals: defaultLocals,
                headers: map[string]string{
                    "X-Amz-Copy-Source": "bucket/object?versionId=invalid_versionId",
                },
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrInvalidVersionId),
            },
        },
        {
            name: "non empty request body",
            input: testInput{
                locals: defaultLocals,
                headers: map[string]string{
                    "X-Amz-Copy-Source": "bucket/object",
                },
                body: []byte("body"),
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrNonEmptyRequestBody),
            },
        },
        {
            name: "invalid metadata directive",
            input: testInput{
@@ -910,6 +1022,24 @@ func TestS3ApiController_CopyObject(t *testing.T) {
                err: s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders),
            },
        },
        {
            name: "object is locked",
            input: testInput{
                locals: defaultLocals,
                headers: map[string]string{
                    "X-Amz-Copy-Source": "bucket/object",
                },
                extraMockErr: s3err.GetAPIError(s3err.ErrObjectLocked),
            },
            output: testOutput{
                response: &Response{
                    MetaOpts: &MetaOptions{
                        BucketOwner: "root",
                    },
                },
                err: s3err.GetAPIError(s3err.ErrObjectLocked),
            },
        },
        {
            name: "backend returns error",
            input: testInput{
@@ -919,6 +1049,7 @@ func TestS3ApiController_CopyObject(t *testing.T) {
                headers: map[string]string{
                    "X-Amz-Copy-Source": "bucket/object",
                },
                extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
            },
            output: testOutput{
                response: &Response{
@@ -949,6 +1080,7 @@ func TestS3ApiController_CopyObject(t *testing.T) {
                        ETag: utils.GetStringPtr("ETag"),
                    },
                },
                extraMockErr: s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound),
            },
            output: testOutput{
                response: &Response{
@@ -978,6 +1110,12 @@ func TestS3ApiController_CopyObject(t *testing.T) {
                GetBucketPolicyFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
                    return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
                },
                GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
                    return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
                },
                GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
                    return nil, tt.input.extraMockErr
                },
            }

            ctrl := S3ApiController{
@@ -992,6 +1130,7 @@ func TestS3ApiController_CopyObject(t *testing.T) {
                ctxInputs{
                    locals:  tt.input.locals,
                    headers: tt.input.headers,
                    body:    tt.input.body,
                })
        })
    }
@@ -1193,6 +1332,9 @@ func TestS3ApiController_PutObject(t *testing.T) {
                GetObjectLockConfigurationFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
                    return nil, tt.input.extraMockErr
                },
                GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
                    return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
                },
            }

            ctrl := S3ApiController{
@@ -20,6 +20,7 @@ import (
    "github.com/versity/versitygw/auth"
    "github.com/versity/versitygw/backend"
    "github.com/versity/versitygw/s3api/utils"
    "github.com/versity/versitygw/s3err"
)

// ParseAcl retreives the bucket acl and stores in the context locals
@@ -42,6 +43,16 @@ func ParseAcl(be backend.Backend) fiber.Handler {
        parsedAcl.Owner = utils.ContextKeyRootAccessKey.Get(ctx).(string)
    }

    // if expected bucket owner doesn't match the bucket owner
    // the gateway should return AccessDenied.
    // This header appears in all actions except 'CreateBucket' and 'ListBuckets'.
    // 'ParseACL' is also applied to all actions except for 'CreateBucket' and 'ListBuckets',
    // so it's a perfect place to check the expected bucket owner
    bucketOwner := ctx.Get("X-Amz-Expected-Bucket-Owner")
    if bucketOwner != "" && bucketOwner != parsedAcl.Owner {
        return s3err.GetAPIError(s3err.ErrAccessDenied)
    }

    utils.ContextKeyParsedAcl.Set(ctx, parsedAcl)
    return nil
}
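The new check is small enough to restate in full; this runnable sketch mirrors the middleware's comparison exactly (only the helper name is mine):

```go
package main

import (
	"errors"
	"fmt"
)

// checkExpectedBucketOwner mirrors the ParseAcl hunk above: the header
// names the account the client believes owns the bucket, and a mismatch
// is answered with AccessDenied before the handler ever runs.
func checkExpectedBucketOwner(header, actualOwner string) error {
	if header != "" && header != actualOwner {
		return errors.New("AccessDenied")
	}
	return nil
}

func main() {
	fmt.Println(checkExpectedBucketOwner("", "alice"))      // <nil>: header absent
	fmt.Println(checkExpectedBucketOwner("alice", "alice")) // <nil>: match
	fmt.Println(checkExpectedBucketOwner("bob", "alice"))   // AccessDenied
}
```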
@@ -17,9 +17,7 @@ package middlewares
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"net/http"
"strconv"
"time"

@@ -39,7 +37,7 @@ type RootUserConfig struct {
Secret string
}

func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string) fiber.Handler {
func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string, streamBody bool, requireContentSha256 bool) fiber.Handler {
acct := accounts{root: root, iam: iam}

return func(ctx *fiber.Ctx) error {
@@ -52,9 +50,27 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
return nil
}

// Check X-Amz-Date header
date := ctx.Get("X-Amz-Date")
if date == "" {
return s3err.GetAPIError(s3err.ErrMissingDateHeader)
}

// Parse the date and check the date validity
tdate, err := time.Parse(iso8601Format, date)
if err != nil {
return s3err.GetAPIError(s3err.ErrMissingDateHeader)
}

// Validate the dates difference
err = utils.ValidateDate(tdate)
if err != nil {
return err
}

authorization := ctx.Get("Authorization")
if authorization == "" {
return s3err.GetAPIError(s3err.ErrAuthHeaderEmpty)
return s3err.GetAPIError(s3err.ErrInvalidAuthHeader)
}

authData, err := utils.ParseAuthorization(authorization)
@@ -63,11 +79,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
}

if authData.Region != region {
return s3err.APIError{
Code: "SignatureDoesNotMatch",
Description: fmt.Sprintf("Credential should be scoped to a valid Region, not %v", authData.Region),
HTTPStatusCode: http.StatusForbidden,
}
return s3err.MalformedAuth.IncorrectRegion(region, authData.Region)
}

utils.ContextKeyIsRoot.Set(ctx, authData.Access == root.Access)
@@ -80,29 +92,11 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
return err
}

utils.ContextKeyAccount.Set(ctx, account)

// Check X-Amz-Date header
date := ctx.Get("X-Amz-Date")
if date == "" {
return s3err.GetAPIError(s3err.ErrMissingDateHeader)
}

// Parse the date and check the date validity
tdate, err := time.Parse(iso8601Format, date)
if err != nil {
return s3err.GetAPIError(s3err.ErrMalformedDate)
}

if date[:8] != authData.Date {
return s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
return s3err.MalformedAuth.DateMismatch()
}

// Validate the dates difference
err = utils.ValidateDate(tdate)
if err != nil {
return err
}
utils.ContextKeyAccount.Set(ctx, account)

var contentLength int64
contentLengthStr := ctx.Get("Content-Length")
@@ -115,10 +109,13 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
}

hashPayload := ctx.Get("X-Amz-Content-Sha256")
if requireContentSha256 && hashPayload == "" {
return s3err.GetAPIError(s3err.ErrMissingContentSha256)
}
if !utils.IsValidSh256PayloadHeader(hashPayload) {
return s3err.GetAPIError(s3err.ErrInvalidSHA256Paylod)
}
if utils.IsBigDataAction(ctx) {
if streamBody {
// for streaming PUT actions, authorization is deferred
// until the end of the stream, due to the need to get the length
// and checksum of the stream to validate authorization
@@ -166,7 +163,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string)
}
}

err = utils.CheckValidSignature(ctx, authData, account.Secret, hashPayload, tdate, contentLength)
err = utils.CheckValidSignature(ctx, authData, account.Secret, hashPayload, tdate, contentLength, false)
if err != nil {
return err
}

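For illustration, a small sketch of the date handling that now runs before the Authorization header is even read; it assumes iso8601Format is the compact ISO-8601 layout that X-Amz-Date uses (yyyyMMdd'T'HHmmss'Z'):

package main

import (
	"fmt"
	"time"
)

// assumed layout for X-Amz-Date; the real constant lives in the middlewares package
const iso8601Format = "20060102T150405Z"

func main() {
	// a well-formed header value parses cleanly
	tdate, err := time.Parse(iso8601Format, "20240101T120000Z")
	fmt.Println(tdate.UTC(), err) // 2024-01-01 12:00:00 +0000 UTC <nil>

	// a date-only value fails to parse and is mapped to an API error upstream
	_, err = time.Parse(iso8601Format, "20240101")
	fmt.Println(err != nil) // true
}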
121 s3api/middlewares/checksum.go Normal file
@@ -0,0 +1,121 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
"bytes"
"encoding/base64"
"io"
"strings"

"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
)

// VerifyChecksums parses, validates, and calculates the
// Content-MD5 and x-amz-checksum-* headers.
// Additionally, it ensures that the request body is not empty
// for actions that require a non-empty body. For large data actions (PutObject, UploadPart),
// it wraps the body reader to handle Content-MD5:
// the x-amz-checksum-* headers are explicitly processed by the backend.
func VerifyChecksums(streamBody bool, requireBody bool, requireChecksum bool) fiber.Handler {
return func(ctx *fiber.Ctx) error {
md5sum := ctx.Get("Content-Md5")

if streamBody {
// for large data actions (PutObject, UploadPart)
// only stack the md5 reader, as x-amz-checksum-*
// calculation is explicitly handled in the backend
if md5sum == "" {
return nil
}

if !isValidMD5(md5sum) {
return s3err.GetAPIError(s3err.ErrInvalidDigest)
}

var err error
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
r, err = utils.NewHashReader(r, md5sum, utils.HashTypeMd5)
return r
})
if err != nil {
return err
}
return nil
}

body := ctx.Body()
if requireBody && len(body) == 0 {
return s3err.GetAPIError(s3err.ErrMissingRequestBody)
}

var rdr io.Reader
var err error
if md5sum != "" {
if !isValidMD5(md5sum) {
return s3err.GetAPIError(s3err.ErrInvalidDigest)
}

rdr, err = utils.NewHashReader(bytes.NewReader(body), md5sum, utils.HashTypeMd5)
if err != nil {
return err
}
}

// parse and validate checksum headers
algo, checksums, err := utils.ParseChecksumHeadersAndSdkAlgo(ctx)
if err != nil {
return err
}

if algo != "" {
r, err := utils.NewHashReader(bytes.NewReader(body), checksums[algo], utils.HashType(strings.ToLower(string(algo))))
if err != nil {
return err
}

if rdr != nil {
// combine both md5 and the checksum readers
rdr = io.MultiReader(rdr, r)
} else {
rdr = r
}
}

if rdr == nil && requireChecksum {
return s3err.GetAPIError(s3err.ErrChecksumRequired)
}

if rdr != nil {
_, err = io.Copy(io.Discard, rdr)
if err != nil {
return err
}
}

return nil
}
}

func isValidMD5(s string) bool {
decoded, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return false
}

return len(decoded) == 16
}
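For reference, a client computes the Content-MD5 header as the base64 encoding of the raw 16-byte digest, which is exactly what isValidMD5 checks for after decoding; a minimal sketch:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	body := []byte("hello world")
	sum := md5.Sum(body) // 16 raw digest bytes

	// base64 of the raw digest, not of its hex form
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
	// XrY7u+Ae7tCTyyK7j1rNww==
}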
@@ -32,6 +32,10 @@ func HostStyleParser(virtualDomain string) fiber.Handler {
return ctx.Next()
}
path := ctx.Path()
if path == "/" {
// omit the trailing / for bucket operations
path = ""
}
pathStyleUrl := fmt.Sprintf("/%v%v", bucket, path)
ctx.Path(pathStyleUrl)

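A rough sketch of the rewrite this hunk performs, with hypothetical names; the real handler also derives the bucket from the Host header:

package main

import "fmt"

// rewrite mirrors the host-style to path-style conversion above
func rewrite(bucket, path string) string {
	if path == "/" {
		path = "" // omit the trailing / for bucket-level operations
	}
	return fmt.Sprintf("/%v%v", bucket, path)
}

func main() {
	fmt.Println(rewrite("my-bucket", "/"))        // /my-bucket
	fmt.Println(rewrite("my-bucket", "/key.txt")) // /my-bucket/key.txt
}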
@@ -1,54 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
"crypto/md5"
"io"

"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
)

func VerifyMD5Body() fiber.Handler {
return func(ctx *fiber.Ctx) error {
incomingSum := ctx.Get("Content-Md5")
if incomingSum == "" {
return nil
}

if utils.IsBigDataAction(ctx) {
var err error
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
r, err = utils.NewHashReader(r, incomingSum, utils.HashTypeMd5)
return r
})
if err != nil {
return err
}
return nil
}

sum := md5.Sum(ctx.Body())
calculatedSum := utils.Base64SumString(sum[:])

if incomingSum != calculatedSum {
return s3err.GetAPIError(s3err.ErrInvalidDigest)
}

return nil
}
}
41 s3api/middlewares/md5_test.go Normal file
@@ -0,0 +1,41 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
"testing"

"github.com/stretchr/testify/assert"
)

func Test_isValidMD5(t *testing.T) {
tests := []struct {
name string
s string
want bool
}{
{"invalid", "hello world", false},
{"valid base64", "aGVsbCBzLGRham5mamFuc2Zhc2RmZHNhZmRzYWY=", false},
{"valid 1", "CY9rzUYh03PK3k6DJie09g==", true},
{"valid 2", "uU0nuZNNPgilLlLX2n2r+s==", true},
{"valid 3", "7Qdih1MuhjZehB6Sv8UNjA==", true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := isValidMD5(tt.s)
assert.Equal(t, tt.want, got)
})
}
}
@@ -24,7 +24,7 @@ import (
"github.com/versity/versitygw/s3err"
)

func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, region string) fiber.Handler {
func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, region string, streamBody bool) fiber.Handler {
acct := accounts{root: root, iam: iam}

return func(ctx *fiber.Ctx) error {
@@ -32,10 +32,15 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, region
if utils.ContextKeyPublicBucket.IsSet(ctx) {
return nil
}
if ctx.Query("X-Amz-Signature") == "" {
if !utils.IsPresignedURLAuth(ctx) {
return nil
}

if ctx.Request().URI().QueryArgs().Has("X-Amz-Security-Token") {
// OIDC Authorization with X-Amz-Security-Token is not supported
return s3err.QueryAuthErrors.SecurityTokenNotSupported()
}

// Set the "authenticated" key in the context in case the authentication succeeds;
// otherwise the middleware will return the caught error
utils.ContextKeyAuthenticated.Set(ctx, true)
@@ -66,7 +71,7 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, region
}
}

if utils.IsBigDataAction(ctx) {
if streamBody {
// Content-Length has to be set for data uploads: PutObject, UploadPart
if contentLengthStr == "" {
return s3err.GetAPIError(s3err.ErrMissingContentLength)
@@ -83,7 +88,7 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, region
return nil
}

err = utils.CheckPresignedSignature(ctx, authData, account.Secret)
err = utils.CheckPresignedSignature(ctx, authData, account.Secret, streamBody)
if err != nil {
return err
}

@@ -15,6 +15,8 @@
package middlewares

import (
"crypto/sha256"
"encoding/hex"
"io"
"strings"

@@ -28,10 +30,10 @@ import (

// AuthorizePublicBucketAccess checks if the bucket grants public
// access to anonymous requesters
func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPermission auth.Action, permission auth.Permission) fiber.Handler {
func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPermission auth.Action, permission auth.Permission, region string, streamBody bool) fiber.Handler {
return func(ctx *fiber.Ctx) error {
// skip for authenticated requests
if ctx.Query("X-Amz-Algorithm") != "" || ctx.Get("Authorization") != "" {
if utils.IsPresignedURLAuth(ctx) || ctx.Get("Authorization") != "" {
return nil
}

@@ -57,12 +59,27 @@ func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPerm
bucket, object := parsePath(ctx.Path())
err := auth.VerifyPublicAccess(ctx.Context(), be, policyPermission, permission, bucket, object)
if err != nil {
if s3action == metrics.ActionHeadBucket {
// add the bucket region header for HeadBucket
// if anonymous access is denied
ctx.Response().Header.Add("x-amz-bucket-region", region)
}
return err
}

if utils.IsBigDataAction(ctx) {
payloadType := ctx.Get("X-Amz-Content-Sha256")
if utils.IsUnsignedStreamingPayload(payloadType) {
// at this point the bucket is considered public,
// as public access is granted
utils.ContextKeyPublicBucket.Set(ctx, true)

payloadHash := ctx.Get("X-Amz-Content-Sha256")
err = utils.IsAnonymousPayloadHashSupported(payloadHash)
if err != nil {
return err
}

if streamBody {
if utils.IsUnsignedStreamingPayload(payloadHash) {
// stack an unsigned streaming payload reader
checksumType, err := utils.ExtractChecksumType(ctx)
if err != nil {
return err
@@ -73,16 +90,34 @@ func AuthorizePublicBucketAccess(be backend.Backend, s3action string, policyPerm
cr, err = utils.NewUnsignedChunkReader(r, checksumType)
return cr
})
if err != nil {
return err
}
} else {
utils.ContextKeyBodyReader.Set(ctx, ctx.Request().BodyStream())
}

return err
} else if utils.IsUnsignedPaylod(payloadHash) {
// for UNSIGNED-PAYLOAD, simply store the body reader in context locals
utils.ContextKeyBodyReader.Set(ctx, ctx.Request().BodyStream())
return nil
} else {
// stack a hash reader to calculate the payload sha256 hash
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr io.Reader
cr, err = utils.NewHashReader(r, payloadHash, utils.HashTypeSha256Hex)
return cr
})

return err
}
}

utils.ContextKeyPublicBucket.Set(ctx, true)
if payloadHash != "" {
// Calculate the hash of the request payload
hashedPayload := sha256.Sum256(ctx.Body())
hexPayload := hex.EncodeToString(hashedPayload[:])

// Compare the calculated hash with the hash provided
if payloadHash != hexPayload {
return s3err.GetAPIError(s3err.ErrContentSHA256Mismatch)
}
}

return nil
}

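For the anonymous non-streaming path above, the client-side counterpart is straightforward; a minimal sketch of producing the X-Amz-Content-Sha256 value that the middleware recomputes and compares:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	body := []byte("hello world")
	sum := sha256.Sum256(body)

	// lowercase hex digest of the payload, as carried in X-Amz-Content-Sha256
	fmt.Println(hex.EncodeToString(sum[:]))
	// b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9
}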
732 s3api/router.go (file diff suppressed because it is too large)
@@ -16,20 +16,30 @@ package s3api

import (
"crypto/tls"
"errors"
"net/http"
"strings"
"time"

"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/logger"
"github.com/gofiber/fiber/v2/middleware/recover"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/debuglogger"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3api/middlewares"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3event"
"github.com/versity/versitygw/s3log"
)

const (
shutDownDuration = time.Second * 10
)

type S3ApiServer struct {
app *fiber.App
backend backend.Backend
@@ -38,12 +48,12 @@ type S3ApiServer struct {
cert *tls.Certificate
quiet bool
readonly bool
keepAlive bool
health string
virtualDomain string
}

func New(
app *fiber.App,
be backend.Backend,
root middlewares.RootUserConfig,
port, region string,
@@ -55,7 +65,6 @@ func New(
opts ...Option,
) (*S3ApiServer, error) {
server := &S3ApiServer{
app: app,
backend: be,
router: new(S3ApiRouter),
port: port,
@@ -65,6 +74,25 @@ func New(
opt(server)
}

app := fiber.New(fiber.Config{
AppName: "versitygw",
ServerHeader: "VERSITYGW",
StreamRequestBody: true,
DisableKeepalive: !server.keepAlive,
Network: fiber.NetworkTCP,
DisableStartupMessage: true,
ErrorHandler: globalErrorHandler,
})

server.app = app

// initialize the panic recovery middleware
app.Use(recover.New(
recover.Config{
EnableStackTrace: true,
StackTraceHandler: stackTraceHandler,
}))

// Logging middlewares
if !server.quiet {
app.Use(logger.New(logger.Config{
@@ -132,9 +160,60 @@ func WithHostStyle(virtualDomain string) Option {
return func(s *S3ApiServer) { s.virtualDomain = virtualDomain }
}

// WithKeepAlive enables the server keep alive
func WithKeepAlive() Option {
return func(s *S3ApiServer) { s.keepAlive = true }
}

func (sa *S3ApiServer) Serve() (err error) {
if sa.cert != nil {
return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)
}
return sa.app.Listen(sa.port)
}

// ShutDown gracefully shuts down the server with a context timeout
func (sa *S3ApiServer) ShutDown() error {
return sa.app.ShutdownWithTimeout(shutDownDuration)
}

// stackTraceHandler stores the system panics
// in the context locals
func stackTraceHandler(ctx *fiber.Ctx, e any) {
utils.ContextKeyStack.Set(ctx, e)
}

// globalErrorHandler catches errors before they reach
// the handlers, as well as any system panics
func globalErrorHandler(ctx *fiber.Ctx, er error) error {
if utils.ContextKeyStack.IsSet(ctx) {
// if stack is set, it means the stack trace
// has caught a panic
// log it as a panic log
debuglogger.Panic(er)
} else {
// handle the fiber specific errors
var fiberErr *fiber.Error
if errors.As(er, &fiberErr) {
if strings.Contains(fiberErr.Message, "cannot parse Content-Length") {
ctx.Status(http.StatusBadRequest)
return nil
}
if strings.Contains(fiberErr.Message, "error when reading request headers") {
// This error means fiber failed to parse the incoming request,
// which is a malformed one. Return a BadRequest in this case
err := s3err.GetAPIError(s3err.ErrCannotParseHTTPRequest)
ctx.Status(err.HTTPStatusCode)
return ctx.Send(s3err.GetAPIErrorResponse(err, "", "", ""))
}
}

// additionally log the internal error
debuglogger.InernalError(er)
}

ctx.Status(http.StatusInternalServerError)

return ctx.Send(s3err.GetAPIErrorResponse(
s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
}

@@ -16,66 +16,12 @@ package s3api

import (
"crypto/tls"
"reflect"
"testing"

"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/middlewares"
)

func TestNew(t *testing.T) {
type args struct {
app *fiber.App
be backend.Backend
port string
root middlewares.RootUserConfig
}

app := fiber.New()
be := backend.BackendUnsupported{}
router := S3ApiRouter{}
port := ":7070"

tests := []struct {
name string
args args
wantS3ApiServer *S3ApiServer
wantErr bool
}{
{
name: "Create S3 api server",
args: args{
app: app,
be: be,
port: port,
root: middlewares.RootUserConfig{},
},
wantS3ApiServer: &S3ApiServer{
app: app,
port: port,
router: &router,
backend: be,
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotS3ApiServer, err := New(tt.args.app, tt.args.be, tt.args.root,
tt.args.port, "us-east-1", &auth.IAMServiceInternal{}, nil, nil, nil, nil)
if (err != nil) != tt.wantErr {
t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotS3ApiServer, tt.wantS3ApiServer) {
t.Errorf("New() = %v, want %v", gotS3ApiServer, tt.wantS3ApiServer)
}
})
}
}

func TestS3ApiServer_Serve(t *testing.T) {
tests := []struct {
name string

@@ -103,10 +103,10 @@ func (ar *AuthReader) validateSignature() error {
// Parse the date and check the date validity
tdate, err := time.Parse(iso8601Format, date)
if err != nil {
return s3err.GetAPIError(s3err.ErrMalformedDate)
return s3err.GetAPIError(s3err.ErrMissingDateHeader)
}

return CheckValidSignature(ar.ctx, ar.auth, ar.secret, hashPayload, tdate, int64(ar.size))
return CheckValidSignature(ar.ctx, ar.auth, ar.secret, hashPayload, tdate, int64(ar.size), true)
}

const (
@@ -114,11 +114,11 @@ const (
)

// CheckValidSignature validates the ctx v4 auth signature
func CheckValidSignature(ctx *fiber.Ctx, auth AuthData, secret, checksum string, tdate time.Time, contentLen int64) error {
func CheckValidSignature(ctx *fiber.Ctx, auth AuthData, secret, checksum string, tdate time.Time, contentLen int64, streamBody bool) error {
signedHdrs := strings.Split(auth.SignedHeaders, ";")

// Create a new http request instance from fasthttp request
req, err := createHttpRequestFromCtx(ctx, signedHdrs, contentLen)
req, err := createHttpRequestFromCtx(ctx, signedHdrs, contentLen, streamBody)
if err != nil {
return fmt.Errorf("create http request from context: %w", err)
}
@@ -184,54 +184,61 @@ func ParseAuthorization(authorization string) (AuthData, error) {
}

if len(authParts) < 2 {
return a, s3err.GetAPIError(s3err.ErrMissingFields)
return a, s3err.GetAPIError(s3err.ErrInvalidAuthHeader)
}

algo := authParts[0]

if algo != "AWS4-HMAC-SHA256" {
return a, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported)
return a, s3err.GetAPIError(s3err.ErrUnsupportedAuthorizationType)
}

kvData := authParts[1]
kvPairs := strings.Split(kvData, ",")
// we are expecting at least Credential, SignedHeaders, and Signature
// key value pairs here
if len(kvPairs) < 3 {
return a, s3err.GetAPIError(s3err.ErrMissingFields)
if len(kvPairs) != 3 {
return a, s3err.MalformedAuth.MissingComponents()
}

var access, region, signedHeaders, signature, date string

for _, kv := range kvPairs {
for i, kv := range kvPairs {
keyValue := strings.Split(kv, "=")
if len(keyValue) != 2 {
switch {
case strings.HasPrefix(kv, "Credential"):
return a, s3err.GetAPIError(s3err.ErrCredMalformed)
case strings.HasPrefix(kv, "SignedHeaders"):
return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
}
return a, s3err.GetAPIError(s3err.ErrMissingFields)
return a, s3err.MalformedAuth.MalformedComponent(kv)
}
key, value := keyValue[0], keyValue[1]
switch i {
case 0:
if key != "Credential" {
return a, s3err.MalformedAuth.MissingCredential()
}
case 1:
if key != "SignedHeaders" {
return a, s3err.MalformedAuth.MissingSignedHeaders()
}
case 2:
if key != "Signature" {
return a, s3err.MalformedAuth.MissingSignature()
}
}
key := strings.TrimSpace(keyValue[0])
value := strings.TrimSpace(keyValue[1])

switch key {
case "Credential":
creds := strings.Split(value, "/")
if len(creds) != 5 {
return a, s3err.GetAPIError(s3err.ErrCredMalformed)
return a, s3err.MalformedAuth.MalformedCredential()
}
if creds[3] != "s3" {
return a, s3err.GetAPIError(s3err.ErrSignatureIncorrService)
return a, s3err.MalformedAuth.IncorrectService(creds[3])
}
if creds[4] != "aws4_request" {
return a, s3err.GetAPIError(s3err.ErrSignatureTerminationStr)
return a, s3err.MalformedAuth.InvalidTerminal(creds[4])
}
_, err := time.Parse(yyyymmdd, creds[1])
if err != nil {
return a, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
return a, s3err.MalformedAuth.InvalidDateFormat(creds[1])
}
access = creds[0]
date = creds[1]

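To make the stricter parsing concrete, here is a sketch of a well-formed SigV4 header with exactly the three components the loop now enforces in order; the credential and signature values are placeholders:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// hypothetical, well-formed SigV4 Authorization header
	authorization := "AWS4-HMAC-SHA256 " +
		"Credential=AKIAEXAMPLE/20240101/us-east-1/s3/aws4_request," +
		"SignedHeaders=host;x-amz-content-sha256;x-amz-date," +
		"Signature=deadbeef"

	algo, kvData, _ := strings.Cut(authorization, " ")
	fmt.Println(algo) // AWS4-HMAC-SHA256
	for i, kv := range strings.Split(kvData, ",") {
		key, value, _ := strings.Cut(strings.TrimSpace(kv), "=")
		fmt.Printf("%d: %s=%s\n", i, key, value)
	}
}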
@@ -92,7 +92,7 @@ func Test_Client_UserAgent(t *testing.T) {
}

app.Get("/", func(c *fiber.Ctx) error {
req, err := createHttpRequestFromCtx(c, signedHdrs, int64(c.Request().Header.ContentLength()))
req, err := createHttpRequestFromCtx(c, signedHdrs, int64(c.Request().Header.ContentLength()), true)
if err != nil {
t.Fatal(err)
}

@@ -133,6 +133,25 @@ func IsUnsignedStreamingPayload(str string) bool {
return payloadType(str) == payloadTypeStreamingUnsignedTrailer
}

// IsAnonymousPayloadHashSupported returns an error if the payload hash
// is streaming signed.
// e.g.
// "STREAMING-AWS4-HMAC-SHA256-PAYLOAD", "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD" ...
func IsAnonymousPayloadHashSupported(hash string) error {
switch payloadType(hash) {
case payloadTypeStreamingEcdsa, payloadTypeStreamingEcdsaTrailer, payloadTypeStreamingSigned, payloadTypeStreamingSignedTrailer:
return s3err.GetAPIError(s3err.ErrUnsupportedAnonymousSignedStreaming)
}

return nil
}

// IsUnsignedPaylod checks if the provided payload hash type
// is "UNSIGNED-PAYLOAD"
func IsUnsignedPaylod(hash string) bool {
return hash == string(payloadTypeUnsigned)
}

// IsStreamingPayload checks for streaming/unsigned authorization types
func IsStreamingPayload(str string) bool {
pt := payloadType(str)
@@ -155,7 +174,7 @@ func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secr
}

if decContLength > maxObjSizeLimit {
debuglogger.Logf("the object size exceeds the allowed limit: (size): %v, (limit): %v", decContLength, maxObjSizeLimit)
debuglogger.Logf("the object size exceeds the allowed limit: (size): %v, (limit): %v", decContLength, int64(maxObjSizeLimit))
return nil, s3err.GetAPIError(s3err.ErrEntityTooLarge)
}

@@ -19,7 +19,7 @@ import (
)

// Region, StartTime, IsRoot, Account, AccessKey context locals
// are set to defualut values in middlewares.SetDefaultValues
// are set to default values in middlewares.SetDefaultValues
// to avoid the nil interface conversions
type ContextKey string

@@ -35,6 +35,7 @@ const (
ContextKeySkipResBodyLog ContextKey = "skip-res-body-log"
ContextKeyBodyReader ContextKey = "body-reader"
ContextKeySkip ContextKey = "__skip"
ContextKeyStack ContextKey = "stack"
)

func (ck ContextKey) Values() []ContextKey {

@@ -115,7 +115,7 @@ func (hr *HashReader) Read(p []byte) (int, error) {
case HashTypeMd5:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetAPIError(s3err.ErrInvalidDigest)
return n, s3err.GetAPIError(s3err.ErrBadDigest)
}
case HashTypeSha256Hex:
sum := hr.Sum()

@@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strconv"
@@ -35,6 +34,9 @@ import (

const (
unsignedPayload string = "UNSIGNED-PAYLOAD"

algoHMAC string = "AWS4-HMAC-SHA256"
algoECDSA string = "AWS4-ECDSA-P256-SHA256"
)

// PresignedAuthReader is an io.Reader that validates presigned request authorization
@@ -62,7 +64,7 @@ func (pr *PresignedAuthReader) Read(p []byte) (int, error) {
n, err := pr.r.Read(p)

if errors.Is(err, io.EOF) {
cerr := CheckPresignedSignature(pr.ctx, pr.auth, pr.secret)
cerr := CheckPresignedSignature(pr.ctx, pr.auth, pr.secret, true)
if cerr != nil {
return n, cerr
}
@@ -72,7 +74,7 @@ func (pr *PresignedAuthReader) Read(p []byte) (int, error) {
}

// CheckPresignedSignature validates presigned request signature
func CheckPresignedSignature(ctx *fiber.Ctx, auth AuthData, secret string) error {
func CheckPresignedSignature(ctx *fiber.Ctx, auth AuthData, secret string, streamBody bool) error {
signedHdrs := strings.Split(auth.SignedHeaders, ";")

var contentLength int64
@@ -86,7 +88,7 @@ func CheckPresignedSignature(ctx *fiber.Ctx, auth AuthData, secret string) error
}

// Create a new http request instance from fasthttp request
req, err := createPresignedHttpRequestFromCtx(ctx, signedHdrs, contentLength)
req, err := createPresignedHttpRequestFromCtx(ctx, signedHdrs, contentLength, streamBody)
if err != nil {
return fmt.Errorf("create http request from context: %w", err)
}
@@ -136,65 +138,70 @@ func ParsePresignedURIParts(ctx *fiber.Ctx) (AuthData, error) {

// Get and verify algorithm query parameter
algo := ctx.Query("X-Amz-Algorithm")
if algo == "" {
return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
}
if algo != "AWS4-HMAC-SHA256" {
return a, s3err.GetAPIError(s3err.ErrInvalidQuerySignatureAlgo)
err := validateAlgorithm(algo)
if err != nil {
return a, err
}

// Parse and validate credentials query parameter
credsQuery := ctx.Query("X-Amz-Credential")
if credsQuery == "" {
return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
return a, s3err.QueryAuthErrors.MissingRequiredParams()
}

creds := strings.Split(credsQuery, "/")
if len(creds) != 5 {
return a, s3err.GetAPIError(s3err.ErrCredMalformed)
return a, s3err.QueryAuthErrors.MalformedCredential()
}

// validate the service
if creds[3] != "s3" {
return a, s3err.GetAPIError(s3err.ErrSignatureIncorrService)
return a, s3err.QueryAuthErrors.IncorrectService(creds[3])
}

// validate the terminal
if creds[4] != "aws4_request" {
return a, s3err.GetAPIError(s3err.ErrSignatureTerminationStr)
return a, s3err.QueryAuthErrors.IncorrectTerminal(creds[4])
}
_, err := time.Parse(yyyymmdd, creds[1])

// validate the date
_, err = time.Parse(yyyymmdd, creds[1])
if err != nil {
return a, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
return a, s3err.QueryAuthErrors.InvalidDateFormat(creds[1])
}

region, ok := ContextKeyRegion.Get(ctx).(string)
if !ok {
region = ""
}
// validate the region
if creds[2] != region {
return a, s3err.QueryAuthErrors.IncorrectRegion(region, creds[2])
}

// Parse and validate Date query param
date := ctx.Query("X-Amz-Date")
if date == "" {
return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
return a, s3err.QueryAuthErrors.MissingRequiredParams()
}

tdate, err := time.Parse(iso8601Format, date)
if err != nil {
return a, s3err.GetAPIError(s3err.ErrMalformedDate)
return a, s3err.QueryAuthErrors.InvalidXAmzDateFormat()
}

if date[:8] != creds[1] {
return a, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
}

if ContextKeyRegion.Get(ctx) != creds[2] {
return a, s3err.APIError{
Code: "SignatureDoesNotMatch",
Description: fmt.Sprintf("Credential should be scoped to a valid Region, not %v", creds[2]),
HTTPStatusCode: http.StatusForbidden,
}
return a, s3err.QueryAuthErrors.DateMismatch(creds[1], date[:8])
}

signature := ctx.Query("X-Amz-Signature")
if signature == "" {
return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
return a, s3err.QueryAuthErrors.MissingRequiredParams()
}

signedHdrs := ctx.Query("X-Amz-SignedHeaders")
if signedHdrs == "" {
return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
return a, s3err.QueryAuthErrors.MissingRequiredParams()
}

// Validate X-Amz-Expires query param and check if request is expired
@@ -215,20 +222,20 @@ func ParsePresignedURIParts(ctx *fiber.Ctx) (AuthData, error) {

func validateExpiration(str string, date time.Time) error {
if str == "" {
return s3err.GetAPIError(s3err.ErrInvalidQueryParams)
return s3err.QueryAuthErrors.MissingRequiredParams()
}

exp, err := strconv.Atoi(str)
if err != nil {
return s3err.GetAPIError(s3err.ErrMalformedExpires)
return s3err.QueryAuthErrors.ExpiresNumber()
}

if exp < 0 {
return s3err.GetAPIError(s3err.ErrNegativeExpires)
return s3err.QueryAuthErrors.ExpiresNegative()
}

if exp > 604800 {
return s3err.GetAPIError(s3err.ErrMaximumExpires)
return s3err.QueryAuthErrors.ExpiresTooLarge()
}

now := time.Now()
@@ -240,3 +247,43 @@ func validateExpiration(str string, date time.Time) error {

return nil
}

// validateAlgorithm validates the algorithm;
// for AWS4-ECDSA-P256-SHA256 it returns a custom non-AWS error;
// currently only the AWS4-HMAC-SHA256 algorithm is supported
func validateAlgorithm(algo string) error {
switch algo {
case "":
return s3err.QueryAuthErrors.MissingRequiredParams()
case algoHMAC:
return nil
case algoECDSA:
return s3err.QueryAuthErrors.OnlyHMACSupported()
default:
// all other algorithms are considered invalid
return s3err.QueryAuthErrors.UnsupportedAlgorithm()
}
}

// IsPresignedURLAuth determines if the request is presigned:
// that is, authorization with query params
func IsPresignedURLAuth(ctx *fiber.Ctx) bool {
algo := ctx.Query("X-Amz-Algorithm")
creds := ctx.Query("X-Amz-Credential")
signature := ctx.Query("X-Amz-Signature")
signedHeaders := ctx.Query("X-Amz-SignedHeaders")
expires := ctx.Query("X-Amz-Expires")

return !isEmpty(algo, creds, signature, signedHeaders, expires)
}

// isEmpty checks if all the given strings are empty
func isEmpty(args ...string) bool {
for _, a := range args {
if a != "" {
return false
}
}

return true
}

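For orientation, a hypothetical presigned GET URL with placeholder values; per IsPresignedURLAuth above, the request is treated as presigned as soon as any one of these five query parameters is present:

    https://gw.example.com/my-bucket/key.txt
        ?X-Amz-Algorithm=AWS4-HMAC-SHA256
        &X-Amz-Credential=AKIAEXAMPLE%2F20240101%2Fus-east-1%2Fs3%2Faws4_request
        &X-Amz-Date=20240101T000000Z
        &X-Amz-Expires=3600
        &X-Amz-SignedHeaders=host
        &X-Amz-Signature=deadbeef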
@@ -18,6 +18,7 @@ import (
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/versity/versitygw/s3err"
)

@@ -37,7 +38,7 @@ func Test_validateExpiration(t *testing.T) {
str: "",
date: time.Now(),
},
err: s3err.GetAPIError(s3err.ErrInvalidQueryParams),
err: s3err.QueryAuthErrors.MissingRequiredParams(),
},
{
name: "invalid-expiration",
@@ -45,7 +46,7 @@ func Test_validateExpiration(t *testing.T) {
str: "invalid_expiration",
date: time.Now(),
},
err: s3err.GetAPIError(s3err.ErrMalformedExpires),
err: s3err.QueryAuthErrors.ExpiresNumber(),
},
{
name: "negative-expiration",
@@ -53,7 +54,7 @@ func Test_validateExpiration(t *testing.T) {
str: "-320",
date: time.Now(),
},
err: s3err.GetAPIError(s3err.ErrNegativeExpires),
err: s3err.QueryAuthErrors.ExpiresNegative(),
},
{
name: "exceeding-expiration",
@@ -61,7 +62,7 @@ func Test_validateExpiration(t *testing.T) {
str: "6048000",
date: time.Now(),
},
err: s3err.GetAPIError(s3err.ErrMaximumExpires),
err: s3err.QueryAuthErrors.ExpiresTooLarge(),
},
{
name: "expired value",
@@ -98,3 +99,22 @@ func Test_validateExpiration(t *testing.T) {
})
}
}

func Test_validateAlgorithm(t *testing.T) {
tests := []struct {
name string
algo string
err error
}{
{"empty", "", s3err.QueryAuthErrors.MissingRequiredParams()},
{"AWS4-HMAC-SHA256", "AWS4-HMAC-SHA256", nil},
{"AWS4-ECDSA-P256-SHA256", "AWS4-ECDSA-P256-SHA256", s3err.QueryAuthErrors.OnlyHMACSupported()},
{"invalid", "invalid algo", s3err.QueryAuthErrors.UnsupportedAlgorithm()},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := validateAlgorithm(tt.algo)
assert.EqualValues(t, tt.err, err)
})
}
}

@@ -39,7 +39,6 @@ import (
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html

const (
chunkHdrDelim = "\r\n"
zeroLenSig = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
awsV4 = "AWS4"
awsS3Service = "s3"

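zeroLenSig above is simply the SHA-256 digest of the empty string, which can be verified directly:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256(nil) // digest of zero bytes
	fmt.Println(hex.EncodeToString(sum[:]))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}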
@@ -26,10 +26,12 @@ import (
"regexp"
"strconv"
"strings"
"sync/atomic"
"time"

"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/oklog/ulid/v2"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/debuglogger"
"github.com/versity/versitygw/s3err"
@@ -41,13 +43,23 @@ var (
bucketNameIpRegexp = regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`)
)

var strictBucketNameValidation atomic.Bool

func init() {
strictBucketNameValidation.Store(true)
}

func SetBucketNameValidationStrict(strict bool) {
strictBucketNameValidation.Store(strict)
}

func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
metadata = make(map[string]string)
headers.DisableNormalizing()
for key, value := range headers.AllInOrder() {
hKey := string(key)
if strings.HasPrefix(strings.ToLower(hKey), "x-amz-meta-") {
trimmedKey := hKey[11:]
trimmedKey := strings.ToLower(hKey[11:])
headerValue := string(value)
metadata[trimmedKey] = headerValue
}
@@ -57,10 +69,10 @@ func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]strin
return
}

func createHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength int64) (*http.Request, error) {
func createHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength int64, streamBody bool) (*http.Request, error) {
req := ctx.Request()
var body io.Reader
if IsBigDataAction(ctx) {
if streamBody {
body = req.BodyStream()
} else {
body = bytes.NewReader(req.Body())
@@ -112,10 +124,10 @@ var (
}
)

func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength int64) (*http.Request, error) {
func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength int64, streamBody bool) (*http.Request, error) {
req := ctx.Request()
var body io.Reader
if IsBigDataAction(ctx) {
if streamBody {
body = req.BodyStream()
} else {
body = bytes.NewReader(req.Body())
@@ -166,7 +178,7 @@ func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, cont
func SetMetaHeaders(ctx *fiber.Ctx, meta map[string]string) {
ctx.Response().Header.DisableNormalizing()
for key, val := range meta {
ctx.Response().Header.Set(fmt.Sprintf("X-Amz-Meta-%s", key), val)
ctx.Response().Header.Set(fmt.Sprintf("x-amz-meta-%s", key), val)
}
ctx.Response().Header.EnableNormalizing()
}
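The net effect of the two lowercase changes is that user metadata keys are normalized on both the request and response paths; a small sketch of the key trimming (len("x-amz-meta-") is 11):

package main

import (
	"fmt"
	"strings"
)

func main() {
	hKey := "X-Amz-Meta-My-Key"
	if strings.HasPrefix(strings.ToLower(hKey), "x-amz-meta-") {
		// stored under the lowercase key
		fmt.Println(strings.ToLower(hKey[11:])) // my-key
	}
}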
@@ -209,6 +221,10 @@ func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser, bodysize int) {
}

func IsValidBucketName(bucket string) bool {
if !strictBucketNameValidation.Load() {
return true
}

if len(bucket) < 3 || len(bucket) > 63 {
debuglogger.Logf("bucket name length should be in 3-63 range, got: %v\n", len(bucket))
return false
@@ -236,15 +252,6 @@ func includeHeader(hdr string, signedHdrs []string) bool {
return false
}

func IsBigDataAction(ctx *fiber.Ctx) bool {
if ctx.Method() == http.MethodPut && len(strings.Split(ctx.Path(), "/")) >= 3 {
if !ctx.Request().URI().QueryArgs().Has("tagging") && ctx.Get("X-Amz-Copy-Source") == "" && !ctx.Request().URI().QueryArgs().Has("acl") {
return true
}
}
return false
}

// expiration time window
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationTimeStamp
const timeExpirationSec = 15 * 60
@@ -455,18 +462,35 @@ func ParseCalculatedChecksumHeaders(ctx *fiber.Ctx) (ChecksumValues, error) {
return checksums, nil
}

// ParseChecksumHeaders parses/validates x-amz-checksum-x headers key/values
func ParseChecksumHeaders(ctx *fiber.Ctx) (ChecksumValues, error) {
// ParseCompleteMpChecksumHeaders parses and validates
// the 'CompleteMultipartUpload' x-amz-checksum-x headers
// by supporting both 'checksum' and 'checksum-<part_length>' formats
func ParseCompleteMpChecksumHeaders(ctx *fiber.Ctx) (ChecksumValues, error) {
// first parse/validate 'x-amz-checksum-x' headers
checksums, err := ParseCalculatedChecksumHeaders(ctx)
if err != nil {
return checksums, err
}

// check if the values are valid
for al, val := range checksums {
algo := strings.ToLower(string(al))
if al != types.ChecksumAlgorithmCrc64nvme {
chParts := strings.Split(val, "-")
if len(chParts) > 2 {
debuglogger.Logf("invalid checksum header: x-amz-checksum-%s: %s", algo, val)
return checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", algo))
}
if len(chParts) == 2 {
_, err := strconv.ParseInt(chParts[1], 10, 32)
if err != nil {
debuglogger.Logf("invalid checksum header: x-amz-checksum-%s: %s", algo, val)
return checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", algo))
}
val = chParts[0]
}
}
if !IsValidChecksum(val, al) {
return checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(al))))
return checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", algo))
}
}

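A sketch of the composite-checksum split for a hypothetical header value; the suffix after '-' must parse as an integer and is stripped before the base value is validated:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// hypothetical x-amz-checksum-crc32 value for a multi-part object
	val := "AAAAAA==-3"
	chParts := strings.Split(val, "-")
	if len(chParts) == 2 {
		n, err := strconv.ParseInt(chParts[1], 10, 32)
		fmt.Println(chParts[0], n, err) // AAAAAA== 3 <nil>
	}
}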
@@ -488,14 +512,25 @@ func ParseChecksumHeadersAndSdkAlgo(ctx *fiber.Ctx) (types.ChecksumAlgorithm, Ch
return sdkAlgorithm, checksums, err
}

if len(checksums) == 0 && sdkAlgorithm != "" {
if ctx.Get("X-Amz-Trailer") == "" {
// A special case: when x-amz-trailer is present,
// the upload is done with chunked encoding,
// where the checksum verification is handled in the chunk reader
debuglogger.Logf("'x-amz-sdk-checksum-algorithm : %s' is used without corresponding x-amz-checksum-* header", sdkAlgorithm)
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrChecksumSDKAlgoMismatch)
}
}

for al, val := range checksums {
if !IsValidChecksum(val, al) {
return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(al))))
}

// If any other checksum value is provided,
// rather than x-amz-sdk-checksum-algorithm
if sdkAlgorithm != "" && sdkAlgorithm != al {
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr("x-amz-sdk-checksum-algorithm")
}
sdkAlgorithm = al
}
@@ -652,6 +687,11 @@ const (
TagLimitObject TagLimit = 10
)

// The tag key/value validation pattern comes from
// the AWS S3 docs:
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_Tag.html
var tagRule = regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`)

// Parses and validates tagging
func ParseTagging(data []byte, limit TagLimit) (map[string]string, error) {
var tagging s3response.TaggingInput
@@ -676,18 +716,30 @@ func ParseTagging(data []byte, limit TagLimit) (map[string]string, error) {
tagSet := make(map[string]string, tLen)

for _, tag := range tagging.TagSet.Tags {
// validate tag key
// validate tag key length
if len(tag.Key) == 0 || len(tag.Key) > 128 {
debuglogger.Logf("tag key should 0 < tag.Key <= 128, key: %v", tag.Key)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}

// validate tag value
// validate tag key string chars
if !tagRule.MatchString(tag.Key) {
debuglogger.Logf("invalid tag key: %s", tag.Key)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}

// validate tag value length
if len(tag.Value) > 256 {
debuglogger.Logf("invalid long tag value: (length): %v, (value): %v", len(tag.Value), tag.Value)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}

// validate tag value string chars
if !tagRule.MatchString(tag.Value) {
debuglogger.Logf("invalid tag value: %s", tag.Value)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}

// make sure there are no duplicate keys
_, ok := tagSet[tag.Key]
if ok {
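The tagRule pattern in action; the allowed characters are Unicode letters, spaces, digits, and the punctuation set _.:/=+-@:

package main

import (
	"fmt"
	"regexp"
)

var tagRule = regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`)

func main() {
	fmt.Println(tagRule.MatchString("env:prod"))  // true
	fmt.Println(tagRule.MatchString("owner=me"))  // true
	fmt.Println(tagRule.MatchString("bad#value")) // false: '#' is not allowed
}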
@@ -767,7 +819,7 @@ func ValidateCopySource(copysource string) error {

// cut till the versionId as it's the only query param
// that is recognized in copy source
object, _, _ := strings.Cut(rest, "?versionId=")
object, versionId, _ := strings.Cut(rest, "?versionId=")

// objects containing '../', '...../' ... are considered valid in AWS
// but for security purposes these should be considered invalid
@@ -777,6 +829,12 @@ func ValidateCopySource(copysource string) error {
return s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)
}

// validate the versionId
err = ValidateVersionId(versionId)
if err != nil {
return err
}

return nil
}

@@ -796,3 +854,17 @@ func ApplyOverride(original, override *string) *string {
}
return original
}

// ValidateVersionId checks if the input versionId is 'ulid' compatible
func ValidateVersionId(versionId string) error {
if versionId == "" || versionId == "null" {
return nil
}
_, err := ulid.Parse(versionId)
if err != nil {
debuglogger.Logf("invalid versionId: %s", versionId)
return s3err.GetAPIError(s3err.ErrInvalidVersionId)
}

return nil
}

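Since version IDs are ULIDs, ValidateVersionId reduces to a ulid.Parse round trip; a quick sketch using the same values as the tests below:

package main

import (
	"fmt"

	"github.com/oklog/ulid/v2"
)

func main() {
	// a valid ULID: 26 characters of Crockford base32
	_, err := ulid.Parse("01BX5ZZKBKACTAV9WEVGEMMVRZ")
	fmt.Println(err) // <nil>

	// one character short, as in the "invalid versionId 2" test case
	_, err = ulid.Parse("01BX5ZZKBKACTAV9WEVGEMMV")
	fmt.Println(err != nil) // true
}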
@@ -81,7 +81,7 @@ func TestCreateHttpRequestFromCtx(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := createHttpRequestFromCtx(tt.args.ctx, tt.hdrs, 0)
got, err := createHttpRequestFromCtx(tt.args.ctx, tt.hdrs, 0, true)
if (err != nil) != tt.wantErr {
t.Errorf("CreateHttpRequestFromCtx() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -231,6 +231,28 @@ func TestIsValidBucketName(t *testing.T) {
}
}

func TestSetBucketNameValidationStrict(t *testing.T) {
SetBucketNameValidationStrict(true)
t.Cleanup(func() {
SetBucketNameValidationStrict(true)
})

invalidBucket := "Invalid_Bucket"
if IsValidBucketName(invalidBucket) {
t.Fatalf("expected %q to be invalid with strict validation", invalidBucket)
}

SetBucketNameValidationStrict(false)
if !IsValidBucketName(invalidBucket) {
t.Fatalf("expected %q to be accepted when strict validation disabled", invalidBucket)
}

SetBucketNameValidationStrict(true)
if IsValidBucketName(invalidBucket) {
t.Fatalf("expected %q to be invalid after re-enabling strict validation", invalidBucket)
}
}

func TestParseUint(t *testing.T) {
type args struct {
str string
@@ -933,12 +955,15 @@ func TestValidateCopySource(t *testing.T) {
{"invalid object name 3", "bucket", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
{"invalid object name 4", "bucket/../foo/dir/../../../", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
{"invalid object name 5", "bucket/.?versionId=smth", s3err.GetAPIError(s3err.ErrInvalidCopySourceObject)},
// invalid versionId
{"invalid versionId 1", "bucket/object?versionId=invalid", s3err.GetAPIError(s3err.ErrInvalidVersionId)},
{"invalid versionId 2", "bucket/object?versionId=01BX5ZZKBKACTAV9WEVGEMMV", s3err.GetAPIError(s3err.ErrInvalidVersionId)},
// success
{"no error 1", "bucket/object", nil},
{"no error 2", "bucket/object/key", nil},
{"no error 3", "bucket/4*&(*&(89765))", nil},
{"no error 4", "bucket/foo/../bar", nil},
{"no error 5", "bucket/foo/bar/baz?versionId=id", nil},
{"no error 5", "bucket/foo/bar/baz?versionId=01BX5ZZKBKACTAV9WEVGEMMVRZ", nil},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

95 s3err/presigned-urls.go Normal file
@@ -0,0 +1,95 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package s3err

import (
	"fmt"
	"net/http"
)

// Factory for building AuthorizationQueryParametersError errors.
func authQueryParamError(format string, args ...any) APIError {
	return APIError{
		Code:           "AuthorizationQueryParametersError",
		Description:    fmt.Sprintf(format, args...),
		HTTPStatusCode: http.StatusBadRequest,
	}
}

var QueryAuthErrors = struct {
	UnsupportedAlgorithm      func() APIError
	MalformedCredential       func() APIError
	IncorrectService          func(string) APIError
	IncorrectRegion           func(expected, actual string) APIError
	IncorrectTerminal         func(string) APIError
	InvalidDateFormat         func(string) APIError
	DateMismatch              func(expected, actual string) APIError
	ExpiresTooLarge           func() APIError
	ExpiresNegative           func() APIError
	ExpiresNumber             func() APIError
	MissingRequiredParams     func() APIError
	InvalidXAmzDateFormat     func() APIError
	RequestNotYetValid        func() APIError
	RequestExpired            func() APIError
	InvalidAccessKeyId        func() APIError
	// a custom non-AWS error
	OnlyHMACSupported         func() APIError
	SecurityTokenNotSupported func() APIError
}{
	UnsupportedAlgorithm: func() APIError {
		return authQueryParamError(`X-Amz-Algorithm only supports "AWS4-HMAC-SHA256 and AWS4-ECDSA-P256-SHA256"`)
	},
	MalformedCredential: func() APIError {
		return authQueryParamError(`Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting "<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request".`)
	},
	IncorrectService: func(s string) APIError {
		return authQueryParamError(`Error parsing the X-Amz-Credential parameter; incorrect service %q. This endpoint belongs to "s3".`, s)
	},
	IncorrectRegion: func(expected, actual string) APIError {
		return authQueryParamError(`Error parsing the X-Amz-Credential parameter; the region %q is wrong; expecting %q`, actual, expected)
	},
	IncorrectTerminal: func(s string) APIError {
		return authQueryParamError(`Error parsing the X-Amz-Credential parameter; incorrect terminal %q. This endpoint uses "aws4_request".`, s)
	},
	InvalidDateFormat: func(s string) APIError {
		return authQueryParamError(`Error parsing the X-Amz-Credential parameter; incorrect date format %q. This date in the credential must be in the format "yyyyMMdd".`, s)
	},
	DateMismatch: func(expected, actual string) APIError {
		return authQueryParamError(`Invalid credential date %q. This date is not the same as X-Amz-Date: %q.`, expected, actual)
	},
	ExpiresTooLarge: func() APIError {
		return authQueryParamError("X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds")
	},
	ExpiresNegative: func() APIError {
		return authQueryParamError("X-Amz-Expires must be non-negative")
	},
	ExpiresNumber: func() APIError {
		return authQueryParamError("X-Amz-Expires should be a number")
	},
	MissingRequiredParams: func() APIError {
		return authQueryParamError("Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.")
	},
	InvalidXAmzDateFormat: func() APIError {
		return authQueryParamError(`X-Amz-Date must be in the ISO8601 Long Format "yyyyMMdd'T'HHmmss'Z'"`)
	},
	// a custom non-AWS error
	OnlyHMACSupported: func() APIError {
		return authQueryParamError("X-Amz-Algorithm only supports \"AWS4-HMAC-SHA256\"")
	},
	SecurityTokenNotSupported: func() APIError {
		return authQueryParamError("Authorization with X-Amz-Security-Token is not supported")
	},
}
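For orientation, a minimal sketch of how these factories might be called from gateway code. The import path follows the repository layout, and the printed values follow the definitions above, but the snippet itself is not part of the diff:

package main

import (
	"fmt"

	"github.com/versity/versitygw/s3err"
)

func main() {
	// Build the presign-validation error for a credential scoped to the wrong region.
	apiErr := s3err.QueryAuthErrors.IncorrectRegion("us-east-1", "eu-west-1")

	fmt.Println(apiErr.Code)           // AuthorizationQueryParametersError
	fmt.Println(apiErr.HTTPStatusCode) // 400
	fmt.Println(apiErr.Description)    // ... the region "eu-west-1" is wrong; expecting "us-east-1"
}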
s3err/s3err.go (168 lines changed)
@@ -75,6 +75,7 @@ const (
	ErrNoSuchUpload
	ErrInvalidBucketName
	ErrInvalidDigest
	ErrBadDigest
	ErrInvalidMaxKeys
	ErrInvalidMaxBuckets
	ErrInvalidMaxUploads
@@ -82,11 +83,11 @@ const (
	ErrInvalidPartNumberMarker
	ErrInvalidObjectAttributes
	ErrInvalidPart
	ErrEmptyParts
	ErrInvalidPartNumber
	ErrInvalidPartOrder
	ErrInvalidCompleteMpPartNumber
	ErrInternalError
	ErrNonEmptyRequestBody
	ErrInvalidCopyDest
	ErrInvalidCopySourceRange
	ErrInvalidCopySourceBucket
@@ -97,9 +98,10 @@ const (
	ErrDuplicateTagKey
	ErrBucketTaggingLimited
	ErrObjectTaggingLimited
	ErrCannotParseHTTPRequest
	ErrInvalidURLEncodedTagging
	ErrAuthHeaderEmpty
	ErrSignatureVersionNotSupported
	ErrInvalidAuthHeader
	ErrUnsupportedAuthorizationType
	ErrMalformedPOSTRequest
	ErrPOSTFileRequired
	ErrPostPolicyConditionInvalidFormat
@@ -107,30 +109,22 @@ const (
	ErrEntityTooLarge
	ErrMissingFields
	ErrMissingCredTag
	ErrCredMalformed
	ErrMalformedXML
	ErrMalformedDate
	ErrMalformedPresignedDate
	ErrMalformedCredentialDate
	ErrMissingSignHeadersTag
	ErrMissingSignTag
	ErrUnsignedHeaders
	ErrInvalidQueryParams
	ErrInvalidQuerySignatureAlgo
	ErrExpiredPresignRequest
	ErrMalformedExpires
	ErrNegativeExpires
	ErrMaximumExpires
	ErrSignatureDoesNotMatch
	ErrSignatureDateDoesNotMatch
	ErrSignatureTerminationStr
	ErrSignatureIncorrService
	ErrContentSHA256Mismatch
	ErrInvalidSHA256Paylod
	ErrUnsupportedAnonymousSignedStreaming
	ErrMissingContentLength
	ErrInvalidAccessKeyID
	ErrRequestNotReadyYet
	ErrMissingDateHeader
	ErrGetUploadsWithKey
	ErrCopySourceNotAllowed
	ErrInvalidRequest
	ErrAuthNotSetup
	ErrNotImplemented
@@ -165,7 +159,11 @@ const (
	ErrInvalidVersionId
	ErrNoSuchVersion
	ErrSuspendedVersioningNotAllowed
	ErrMissingRequestBody
	ErrMultipleChecksumHeaders
	ErrChecksumSDKAlgoMismatch
	ErrChecksumRequired
	ErrMissingContentSha256
	ErrInvalidChecksumAlgorithm
	ErrInvalidChecksumPart
	ErrChecksumTypeWithAlgo
@@ -178,6 +176,8 @@ const (
	ErrMissingCORSOrigin
	ErrCORSIsNotEnabled
	ErrNotModified
	ErrInvalidLocationConstraint
	ErrInvalidArgument

	// Non-AWS errors
	ErrExistingObjectIsDirectory
@@ -267,6 +267,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "The Content-Md5 you specified is not valid.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBadDigest: {
		Code:           "BadDigest",
		Description:    "The Content-MD5 you specified did not match what we received.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidMaxBuckets: {
		Code:           "InvalidArgument",
		Description:    "Argument max-buckets must be an integer between 1 and 10000.",
@@ -317,16 +322,16 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "We encountered an internal error, please try again.",
		HTTPStatusCode: http.StatusInternalServerError,
	},
	ErrNonEmptyRequestBody: {
		Code:           "InvalidRequest",
		Description:    "The request included a body. Requests of this type must not include a non-empty body.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidPart: {
		Code:           "InvalidPart",
		Description:    "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEmptyParts: {
		Code:           "InvalidRequest",
		Description:    "You must specify at least one part",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidPartNumber: {
		Code:           "InvalidArgument",
		Description:    "Part number must be an integer between 1 and 10000, inclusive.",
@@ -392,6 +397,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "Object tags cannot be greater than 10",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrCannotParseHTTPRequest: {
		Code:           "BadRequest",
		Description:    "An error occurred when parsing the HTTP request.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidURLEncodedTagging: {
		Code:           "InvalidArgument",
		Description:    "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates.",
@@ -402,14 +412,14 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "The XML you provided was not well-formed or did not validate against our published schema.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAuthHeaderEmpty: {
	ErrInvalidAuthHeader: {
		Code:           "InvalidArgument",
		Description:    "Authorization header is invalid -- one and only one ' ' (space) required.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrSignatureVersionNotSupported: {
		Code:           "InvalidRequest",
		Description:    "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.",
	ErrUnsupportedAuthorizationType: {
		Code:           "InvalidArgument",
		Description:    "Unsupported Authorization Type",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMalformedPOSTRequest: {
@@ -447,21 +457,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "Missing Credential field for this request.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrCredMalformed: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMalformedDate: {
		Code:           "MalformedDate",
		Description:    "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMalformedPresignedDate: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\".",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMissingSignHeadersTag: {
		Code:           "InvalidArgument",
		Description:    "Signature header missing SignedHeaders field.",
@@ -477,36 +472,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "There were headers present in the request which were not signed.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidQueryParams: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidQuerySignatureAlgo: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "X-Amz-Algorithm only supports \"AWS4-HMAC-SHA256\".",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrExpiredPresignRequest: {
		Code:           "AccessDenied",
		Description:    "Request has expired.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrMalformedExpires: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "X-Amz-Expires should be a number.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNegativeExpires: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "X-Amz-Expires must be non-negative.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMaximumExpires: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidAccessKeyID: {
		Code:           "InvalidAccessKeyId",
		Description:    "The AWS Access Key Id you provided does not exist in our records.",
@@ -522,21 +492,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrSignatureDateDoesNotMatch: {
		Code:           "SignatureDoesNotMatch",
		Description:    "Date in Credential scope does not match YYYYMMDD from ISO-8601 version of date from HTTP.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrSignatureTerminationStr: {
		Code:           "SignatureDoesNotMatch",
		Description:    "Credential should be scoped with a valid terminator: 'aws4_request'.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrSignatureIncorrService: {
		Code:           "SignatureDoesNotMatch",
		Description:    "Credential should be scoped to correct service: s3.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrContentSHA256Mismatch: {
		Code:           "XAmzContentSHA256Mismatch",
		Description:    "The provided 'x-amz-content-sha256' header does not match what was computed.",
@@ -547,6 +502,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "x-amz-content-sha256 must be UNSIGNED-PAYLOAD, STREAMING-UNSIGNED-PAYLOAD-TRAILER, STREAMING-AWS4-HMAC-SHA256-PAYLOAD, STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER, STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD, STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER or a valid sha256 value.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedAnonymousSignedStreaming: {
		Code:           "InvalidRequest",
		Description:    "Anonymous requests don't support this x-amz-content-sha256 value. Please use UNSIGNED-PAYLOAD or STREAMING-UNSIGNED-PAYLOAD-TRAILER.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMissingContentLength: {
		Code:           "MissingContentLength",
		Description:    "You must provide the Content-Length HTTP header.",
@@ -557,6 +517,16 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "AWS authentication requires a valid Date or x-amz-date header.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrGetUploadsWithKey: {
		Code:           "InvalidRequest",
		Description:    "Key is not expected for the GET method ?uploads subresource",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrCopySourceNotAllowed: {
		Code:           "InvalidArgument",
		Description:    "You can only specify a copy source header for copy requests.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidRequest: {
		Code:           "InvalidRequest",
		Description:    "Invalid Request.",
@@ -609,13 +579,13 @@ var errorCodeResponse = map[ErrorCode]APIError{
	},
	ErrObjectLockConfigurationNotAllowed: {
		Code:           "InvalidBucketState",
		Description:    "Object Lock configuration cannot be enabled on existing buckets.",
		Description:    "Versioning must be 'Enabled' on the bucket to apply a Object Lock configuration",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrObjectLocked: {
		Code:           "InvalidRequest",
		Description:    "Object is WORM protected and cannot be overwritten.",
		HTTPStatusCode: http.StatusBadRequest,
		Code:           "AccessDenied",
		Description:    "Access Denied because object protected by object lock.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrPastObjectLockRetainDate: {
		Code:           "InvalidRequest",
@@ -659,7 +629,7 @@ var errorCodeResponse = map[ErrorCode]APIError{
	},
	ErrRequestTimeTooSkewed: {
		Code:           "RequestTimeTooSkewed",
		Description:    "The difference between the request time and the server's time is too large.",
		Description:    "The difference between the request time and the current time is too large.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrInvalidBucketAclWithObjectOwnership: {
@@ -727,6 +697,26 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "An Object Lock configuration is present on this bucket, so the versioning state cannot be changed.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMissingRequestBody: {
		Code:           "MissingRequestBodyError",
		Description:    "Request Body is empty",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrChecksumSDKAlgoMismatch: {
		Code:           "InvalidRequest",
		Description:    "x-amz-sdk-checksum-algorithm specified, but no corresponding x-amz-checksum-* or x-amz-trailer headers were found.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrChecksumRequired: {
		Code:           "InvalidRequest",
		Description:    "Missing required header for this request: Content-MD5 OR x-amz-checksum-*",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMissingContentSha256: {
		Code:           "InvalidRequest",
		Description:    "Missing required header for this request: x-amz-content-sha256",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMultipleChecksumHeaders: {
		Code:           "InvalidRequest",
		Description:    "Expecting a single x-amz-checksum- header. Multiple checksum Types are not allowed.",
@@ -792,6 +782,16 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "Not Modified",
		HTTPStatusCode: http.StatusNotModified,
	},
	ErrInvalidLocationConstraint: {
		Code:           "InvalidLocationConstraint",
		Description:    "The specified location-constraint is not valid",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidArgument: {
		Code:           "InvalidArgument",
		Description:    "",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// non aws errors
	ErrExistingObjectIsDirectory: {
@@ -926,7 +926,7 @@ func GetChecksumBadDigestErr(algo types.ChecksumAlgorithm) APIError {
func GetChecksumSchemaMismatchErr(algo types.ChecksumAlgorithm, t types.ChecksumType) APIError {
	return APIError{
		Code:           "InvalidRequest",
		Description:    fmt.Sprintf("The %v checksum type cannot be used with the %v checksum algorithm.", algo, strings.ToLower(string(t))),
		Description:    fmt.Sprintf("The %v checksum type cannot be used with the %v checksum algorithm.", strings.ToUpper(string(t)), strings.ToLower(string(algo))),
		HTTPStatusCode: http.StatusBadRequest,
	}
}
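The last hunk swaps the two format arguments so the checksum type is printed upper-cased and the algorithm lower-cased, instead of the reverse. A standalone sketch of the corrected formatting; the literal values are illustrative stand-ins for the SDK enum values, not taken from the diff:

package main

import (
	"fmt"
	"strings"
)

func main() {
	algo, typ := "CRC32", "full_object" // stand-ins for types.ChecksumAlgorithm / types.ChecksumType values
	msg := fmt.Sprintf("The %v checksum type cannot be used with the %v checksum algorithm.",
		strings.ToUpper(typ), strings.ToLower(algo))
	fmt.Println(msg) // The FULL_OBJECT checksum type cannot be used with the crc32 checksum algorithm.
}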
s3err/sigv4.go (new file, 77 lines)
@@ -0,0 +1,77 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package s3err

import (
	"fmt"
	"net/http"
)

// Factory for building AuthorizationHeaderMalformed errors.
func malformedAuthError(format string, args ...any) APIError {
	return APIError{
		Code:           "AuthorizationHeaderMalformed",
		Description:    fmt.Sprintf("The authorization header is malformed; %s", fmt.Sprintf(format, args...)),
		HTTPStatusCode: http.StatusForbidden,
	}
}

var MalformedAuth = struct {
	InvalidDateFormat    func(string) APIError
	MalformedCredential  func() APIError
	MissingCredential    func() APIError
	MissingSignature     func() APIError
	MissingSignedHeaders func() APIError
	InvalidTerminal      func(string) APIError
	IncorrectRegion      func(expected, actual string) APIError
	IncorrectService     func(string) APIError
	MalformedComponent   func(string) APIError
	MissingComponents    func() APIError
	DateMismatch         func() APIError
}{
	InvalidDateFormat: func(s string) APIError {
		return malformedAuthError("incorrect date format %q. This date in the credential must be in the format \"yyyyMMdd\".", s)
	},
	MalformedCredential: func() APIError {
		return malformedAuthError("the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".")
	},
	MissingCredential: func() APIError {
		return malformedAuthError("missing Credential.")
	},
	MissingSignature: func() APIError {
		return malformedAuthError("missing Signature.")
	},
	MissingSignedHeaders: func() APIError {
		return malformedAuthError("missing SignedHeaders.")
	},
	InvalidTerminal: func(s string) APIError {
		return malformedAuthError("incorrect terminal %q. This endpoint uses \"aws4_request\".", s)
	},
	IncorrectRegion: func(expected, actual string) APIError {
		return malformedAuthError("the region %q is wrong; expecting %q", actual, expected)
	},
	IncorrectService: func(s string) APIError {
		return malformedAuthError("incorrect service %q. This endpoint belongs to \"s3\".", s)
	},
	MalformedComponent: func(s string) APIError {
		return malformedAuthError("the authorization component %q is malformed.", s)
	},
	MissingComponents: func() APIError {
		return malformedAuthError("the authorization header requires three components: Credential, SignedHeaders, and Signature.")
	},
	DateMismatch: func() APIError {
		return malformedAuthError("The authorization header is malformed; Invalid credential date. Date is not the same as X-Amz-Date.")
	},
}
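A small test-style sketch (hypothetical test name, not part of the new file) showing how the factory composes the final message and status from the format arguments:

package s3err

import (
	"net/http"
	"testing"
)

func TestMalformedAuthIncorrectRegion(t *testing.T) {
	// IncorrectRegion(expected, actual) interpolates actual first, then expected.
	err := MalformedAuth.IncorrectRegion("us-east-1", "eu-west-1")

	want := `The authorization header is malformed; the region "eu-west-1" is wrong; expecting "us-east-1"`
	if err.Description != want {
		t.Fatalf("description: got %q, want %q", err.Description, want)
	}
	if err.HTTPStatusCode != http.StatusForbidden {
		t.Fatalf("status: got %d, want %d", err.HTTPStatusCode, http.StatusForbidden)
	}
}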
@@ -29,7 +29,7 @@ import (
)

const (
	logFileMode = 0600
	logFileMode = 0644
	timeFormat  = "02/January/2006:15:04:05 -0700"
)

@@ -45,12 +45,12 @@ var _ AuditLogger = &FileLogger{}

// InitFileLogger initializes audit logs to local file
func InitFileLogger(logname string) (AuditLogger, error) {
	f, err := os.OpenFile(logname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	f, err := os.OpenFile(logname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, logFileMode)
	if err != nil {
		return nil, fmt.Errorf("open log: %w", err)
	}

	f.WriteString(fmt.Sprintf("log starts %v\n", time.Now()))
	fmt.Fprintf(f, "log starts %v\n", time.Now())

	return &FileLogger{logfile: logname, f: f}, nil
}

@@ -728,3 +728,8 @@ type LocationConstraint struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint"`
	Value   string   `xml:",chardata"`
}

type CreateBucketConfiguration struct {
	LocationConstraint string
	TagSet             []types.Tag `xml:"Tags>Tag"`
}

@@ -22,6 +22,9 @@ RUN apt-get update && \
    bc \
    libxml2-utils \
    xmlstarlet \
    python3-pip \
    python3-venv \
    xxd \
    ca-certificates && \
    update-ca-certificates && \
    rm -rf /var/lib/apt/lists/*
@@ -63,6 +66,15 @@ RUN git clone https://github.com/bats-core/bats-core.git && \
    cd bats-core && \
    ./install.sh /home/tester

# Create a shared venv & install Python deps there
ENV VENV=/opt/venv
RUN python3 -m venv "$VENV" \
    && "$VENV/bin/python" -m pip install --upgrade pip \
    && "$VENV/bin/pip" install --no-cache-dir awscrt

# Make the venv the default Python for all subsequent RUN/CMD/ENTRYPOINT
ENV PATH="$VENV/bin:${PATH}"

USER tester
COPY --chown=tester:tester . /home/tester

@@ -92,6 +92,7 @@ complete_multipart_upload_rest_invalid_checksum() {
  if ! check_param_count_v2 "bucket, key, upload ID, parts payload, type, algorithm, correct hash" 7 $#; then
    return 1
  fi
  log 5 "bucket name: $1"
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$3" PARTS="$4" CHECKSUM_TYPE="$5" CHECKSUM_ALGORITHM="$6" CHECKSUM_HASH="$7" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/complete_multipart_upload.sh 2>&1); then
    log 2 "error completing multipart upload: $result"
    return 1
@@ -41,7 +41,7 @@ get_bucket_ownership_controls_rest() {
  if ! check_param_count "get_bucket_ownership_controls_rest" "bucket" 1 $#; then
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$BUCKET_ONE_NAME" OUTPUT_FILE="$TEST_FILE_FOLDER/ownershipControls.txt" ./tests/rest_scripts/get_bucket_ownership_controls.sh); then
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/ownershipControls.txt" ./tests/rest_scripts/get_bucket_ownership_controls.sh); then
    log 2 "error getting bucket ownership controls: $result"
    return 1
  fi

@@ -66,10 +66,10 @@ get_object_legal_hold_rest_version_id() {
    log 2 "error getting object legal hold: $result"
    return 1
  fi
  legal_hold=$(cat "$TEST_FILE_FOLDER/legal_hold.txt")
  if [ "$result" != "200" ]; then
    log 2 "get-object-legal-hold returned code $result: $(cat "$TEST_FILE_FOLDER/legal_hold.txt")"
    log 2 "get-object-legal-hold returned code $result: $legal_hold)"
    return 1
  fi
  legal_hold=$(cat "$TEST_FILE_FOLDER/legal_hold.txt")
  return 0
}

@@ -53,5 +53,6 @@ get_object_lock_configuration_rest() {
    return 1
  fi
  lock_config="$(cat "$TEST_FILE_FOLDER/object-lock-config.txt")"
  log 5 "lock config: $lock_config"
  return 0
}
@@ -69,7 +69,7 @@ reset_bucket_acl() {
    log 2 "error resetting direct ACL"
    return 1
  fi
  if ! put_bucket_acl_rest "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$acl_file"; then
  if ! put_bucket_acl_rest "$1" "$TEST_FILE_FOLDER/$acl_file"; then
    log 2 "error putting bucket acl (s3api)"
    return 1
  fi

@@ -60,15 +60,17 @@ put_object_tagging_rest() {
  current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
  aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
  header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
  content_md5=$(echo -n "$tagging" | openssl dgst -binary -md5 | openssl base64)
  # shellcheck disable=SC2154
  canonical_request="PUT
/$1/$2
tagging=
content-md5:$content_md5
host:$aws_endpoint_url_address
x-amz-content-sha256:$payload_hash
x-amz-date:$current_date_time

host;x-amz-content-sha256;x-amz-date
content-md5;host;x-amz-content-sha256;x-amz-date
$payload_hash"

  if ! generate_sts_string "$current_date_time" "$canonical_request"; then
@@ -78,7 +80,8 @@ $payload_hash"
  get_signature
  # shellcheck disable=SC2154
  reply=$(send_command curl -ks -w "%{http_code}" -X PUT "$header://$aws_endpoint_url_address/$1/$2?tagging" \
    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
    -H "Content-MD5: $content_md5" \
    -H "x-amz-content-sha256: $payload_hash" \
    -H "x-amz-date: $current_date_time" \
    -d "$tagging" -o "$TEST_FILE_FOLDER"/put_tagging_error.txt 2>&1)

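This hunk adds content-md5 to both the canonical request and the SignedHeaders list, so the value that is signed must match the Content-MD5 header byte for byte. A minimal Go sketch of the same Content-MD5 derivation the script performs with openssl; the sample tagging body is illustrative only:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// contentMD5 mirrors `echo -n "$tagging" | openssl dgst -binary -md5 | openssl base64`:
// raw MD5 digest of the body, then standard base64 encoding.
func contentMD5(body []byte) string {
	sum := md5.Sum(body)
	return base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	tagging := `<Tagging><TagSet><Tag><Key>k</Key><Value>v</Value></Tag></TagSet></Tagging>`
	fmt.Println(contentMD5([]byte(tagging)))
}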
@@ -96,7 +96,7 @@ upload_part_rest_without_upload_id() {
    log 2 "error uploading part $i: $result"
    return 1
  fi
  if ! check_rest_expected_error "$result" "$TEST_FILE_FOLDER/response.txt" "405" "MethodNotAllowed" "method is not allowed"; then
  if ! check_rest_expected_error "$result" "$TEST_FILE_FOLDER/response.txt" "400" "InvalidArgument" "does not accept partNumber without uploadId"; then
    log 2 "error checking error"
    return 1
  fi

@@ -110,7 +110,7 @@ test_multipart_upload_with_checksum() {
    log 2 "error calculating multipart checksum"
    return 1
  fi
  if ! complete_multipart_upload_with_checksum "$1" "$2" "$TEST_FILE_FOLDER/$2" "$upload_id" 2 "$3" "$4"; then
  if ! complete_multipart_upload_with_checksum "$bucket_name" "$2" "$TEST_FILE_FOLDER/$2" "$upload_id" 2 "$3" "$4"; then
    log 2 "error completing multipart upload"
    return 1
  fi
@@ -125,7 +125,7 @@ test_complete_multipart_upload_unneeded_algorithm_parameter() {
    log 2 "error performing multipart upload with checksum before completion"
    return 1
  fi
  if ! complete_multipart_upload_rest_nonexistent_param "$1" "$2" "$upload_id" "$parts_payload"; then
  if ! complete_multipart_upload_rest_nonexistent_param "$bucket_name" "$2" "$upload_id" "$parts_payload"; then
    log 2 "error completing multipart upload with nonexistent param"
    return 1
  fi
@@ -144,7 +144,7 @@ test_complete_multipart_upload_incorrect_checksum() {
    log 2 "error calculating multipart checksum"
    return 1
  fi
  if ! complete_multipart_upload_rest_incorrect_checksum "$1" "$2" "$upload_id" "$parts_payload" "$3" "$4" "$checksum"; then
  if ! complete_multipart_upload_rest_incorrect_checksum "$bucket_name" "$2" "$upload_id" "$parts_payload" "$3" "$4" "$checksum"; then
    log 2 "error completing multipart upload with nonexistent param"
    return 1
  fi
@@ -159,7 +159,7 @@ test_complete_multipart_upload_invalid_checksum() {
    log 2 "error performing multipart upload with checksum before completion"
    return 1
  fi
  if ! complete_multipart_upload_rest_invalid_checksum "$1" "$2" "$upload_id" "$parts_payload" "$3" "$4" "wrong"; then
  if ! complete_multipart_upload_rest_invalid_checksum "$bucket_name" "$2" "$upload_id" "$parts_payload" "$3" "$4" "wrong"; then
    log 2 "error completing multipart upload with nonexistent param"
    return 1
  fi

@@ -39,7 +39,7 @@ copy_object_invalid_copy_source() {
    log 2 "'copy_object_invalid_copy_source' requires bucket name"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$BUCKET_ONE_NAME" OBJECT_KEY="dummy-copy" COPY_SOURCE="dummy" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/copy_object.sh); then
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="dummy-copy" COPY_SOURCE="dummy" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/copy_object.sh); then
    log 2 "error copying object: $result"
    return 1
  fi

@@ -14,6 +14,7 @@
# specific language governing permissions and limitations
# under the License.

source ./tests/drivers/delete_bucket/delete_bucket_rest.sh
source ./tests/drivers/get_bucket_acl/get_bucket_acl_rest.sh
source ./tests/drivers/get_object/get_object_rest.sh
source ./tests/drivers/put_bucket_acl/put_bucket_acl_rest.sh
@@ -47,8 +48,14 @@ setup_and_create_bucket_and_check_acl() {
  log 5 "username=$username, password=$password"
  envs="$1=$id OBJECT_OWNERSHIP=BucketOwnerPreferred"
  log 5 "envs: $envs"

  if ! bucket_name=$(get_bucket_name "$BUCKET_ONE_NAME" 2>&1); then
    log 2 "error retrieving bucket name: $bucket_name"
    return 1
  fi

  # shellcheck disable=SC2154
  if ! create_bucket_and_check_acl "$BUCKET_ONE_NAME" "$envs" "$username" "$password" "$user_canonical_id" "$owner_canonical_id"; then
  if ! create_bucket_and_check_acl "$bucket_name" "$envs" "$username" "$password" "$user_canonical_id" "$owner_canonical_id"; then
    log 2 "error creating bucket and checking ACL"
    return 1
  fi
@@ -111,4 +118,120 @@ create_bucket_and_check_acl() {
    return 1
  fi
  return 0
}
}

get_bucket_prefix() {
  if ! check_param_count_v2 "bucket prefix or name" 1 $#; then
    return 1
  fi
  if [ "$RECREATE_BUCKETS" == "true" ]; then
    # remove date/time suffix
    prefix="$(echo "$1" | awk '{print substr($0, 1, length($0)-15)}')"
  else
    prefix="$1"
  fi
  echo "$prefix"
}

setup_bucket_v2() {
  log 6 "setup_bucket_v2 '$1'"
  if ! check_param_count_v2 "bucket prefix or name" 1 $#; then
    return 1
  fi
  if ! prefix=$(get_bucket_prefix "$1" 2>&1); then
    log 2 "error getting prefix: $prefix"
    return 1
  fi
  log 5 "bucket prefix: $prefix"
  if ! bucket_cleanup_if_bucket_exists_v2 "$prefix"; then
    log 2 "error cleaning up bucket(s), if it/they exist(s)"
    return 1
  fi
  if [ "$RECREATE_BUCKETS" == "false" ]; then
    return 0
  fi
  if ! create_bucket_rest_expect_success "$1" ""; then
    log 2 "error creating bucket '$1'"
    return 1
  fi
  return 0
}

# params: client, bucket name(s)
# return 0 for success, 1 for failure
setup_buckets() {
  if ! check_param_count_gt "minimum of 1 bucket name" 1 $#; then
    return 1
  fi
  for name in "$@"; do
    if ! setup_bucket "$name"; then
      log 2 "error setting up bucket $name"
      return 1
    fi
  done
  return 0
}

setup_buckets_v2() {
  if ! check_param_count_gt "minimum of 1 bucket name" 1 $#; then
    return 1
  fi
  for name in "$@"; do
    if ! setup_bucket_v2 "$name"; then
      log 2 "error setting up bucket $name"
      return 1
    fi
  done
  return 0
}

get_bucket_name() {
  if ! check_param_count_v2 "bucket" 1 $#; then
    return 1
  fi
  if [ "$RECREATE_BUCKETS" == "false" ]; then
    echo "$1"
    return 0
  fi
  echo "$1-$(date +%Y%m%d%H%M%S)"
}

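get_bucket_name appends a 15-character "-YYYYMMDDHHMMSS" suffix when RECREATE_BUCKETS is true, and get_bucket_prefix strips exactly that many characters to recover the prefix. A Go sketch of the same round trip, with hypothetical helper names that are not part of the diff:

package main

import (
	"fmt"
	"time"
)

// bucketName mirrors get_bucket_name: suffix only when buckets are recreated per test run.
func bucketName(prefix string, recreate bool) string {
	if !recreate {
		return prefix
	}
	return prefix + "-" + time.Now().Format("20060102150405") // "-YYYYMMDDHHMMSS"
}

// bucketPrefix mirrors get_bucket_prefix: drop the 15-character date/time suffix.
func bucketPrefix(name string, recreate bool) string {
	if recreate && len(name) > 15 {
		return name[:len(name)-15]
	}
	return name
}

func main() {
	n := bucketName("versity-gwtest-bucket-one", true)
	fmt.Println(n, "->", bucketPrefix(n, true))
}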
setup_bucket_object_lock_enabled_v2() {
  if ! check_param_count_v2 "bucket" 1 $#; then
    return 1
  fi
  if ! prefix=$(get_bucket_prefix "$1" 2>&1); then
    log 2 "error getting prefix: $prefix"
    return 1
  fi
  if ! bucket_cleanup_if_bucket_exists_v2 "$prefix"; then
    log 2 "error cleaning up bucket"
    return 1
  fi
  if [ "$RECREATE_BUCKETS" == "true" ]; then
    if ! create_bucket_object_lock_enabled "$1"; then
      log 2 "error creating bucket '$1' with object lock enabled"
      return 1
    fi
  fi
  return 0
}

setup_bucket_object_lock_enabled() {
  if ! check_param_count "setup_bucket_object_lock_enabled" "bucket" 1 $#; then
    return 1
  fi
  if ! bucket_cleanup_if_bucket_exists "$1"; then
    log 2 "error cleaning up bucket"
    return 1
  fi

  # in static bucket config, bucket will still exist
  if ! bucket_exists "$1"; then
    if ! create_bucket_object_lock_enabled "$1"; then
      log 2 "error creating bucket with object lock enabled"
      return 1
    fi
  fi
  return 0
}

tests/drivers/delete_bucket/delete_bucket_rest.sh (new file, 142 lines)
@@ -0,0 +1,142 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

source ./tests/commands/list_buckets.sh
source ./tests/drivers/list_buckets/list_buckets_rest.sh

delete_buckets_with_prefix() {
  if ! check_param_count_v2 "bucket prefix" 1 $#; then
    return 1
  fi
  if [ "$1" == "" ]; then
    log 2 "delete_buckets_with_prefix requires non-empty prefix"
    return 1
  fi
  if ! list_buckets_rest "PREFIX=$1" "parse_bucket_list"; then
    log 2 "error listing buckets with prefix"
    return 1
  fi
  # shellcheck disable=SC2154
  log 5 "buckets: ${bucket_array[*]}"
  for bucket in "${bucket_array[@]}"; do
    if ! delete_bucket_recursive "$bucket"; then
      log 2 "error with recursive bucket delete of bucket '$bucket'"
      return 1
    fi
  done
  return 0
}

cleanup_buckets() {
  if ! bucket_cleanup_if_bucket_exists_v2 "$BUCKET_ONE_NAME"; then
    log 3 "error deleting bucket $BUCKET_ONE_NAME or contents"
  fi
  if ! bucket_cleanup_if_bucket_exists_v2 "$BUCKET_TWO_NAME"; then
    log 3 "error deleting bucket $BUCKET_TWO_NAME or contents"
  fi
}

# params: client, bucket name
# return 0 for success, 1 for error
bucket_cleanup() {
  log 6 "bucket_cleanup"
  if ! check_param_count "bucket_cleanup" "bucket name" 1 $#; then
    return 1
  fi
  if [[ $RECREATE_BUCKETS == "false" ]]; then
    if ! reset_bucket "$1"; then
      log 2 "error deleting bucket contents"
      return 1
    fi

    log 5 "bucket contents, policy, ACL deletion success"
    return 0
  fi
  if ! delete_bucket_recursive "$1"; then
    log 2 "error with recursive bucket delete"
    return 1
  fi
  log 5 "bucket deletion success"
  return 0
}

# params: client, bucket name
# return 0 for success, 1 for error
bucket_cleanup_if_bucket_exists() {
  log 6 "bucket_cleanup_if_bucket_exists"
  if ! check_param_count_gt "bucket name, bucket known to exist (optional)" 1 $#; then
    return 1
  fi

  if [ "$2" == "false" ]; then
    log 5 "skipping cleanup, since bucket doesn't exist"
    return 0
  fi

  if [ "$2" == "true" ] || bucket_exists "$1"; then
    if ! bucket_cleanup "$1"; then
      log 2 "error deleting bucket and/or contents"
      return 1
    fi
    log 5 "bucket and/or bucket data deletion success"
    return 0
  fi
  return 0
}

bucket_cleanup_if_bucket_exists_v2() {
  log 6 "bucket_cleanup_if_bucket_exists_v2"
  if ! check_param_count_gt "bucket name or prefix" 1 $#; then
    return 1
  fi
  if [[ "$RECREATE_BUCKETS" == "false" ]]; then
    if ! bucket_exists "$1"; then
      log 2 "When RECREATE_BUCKETS isn't set to \"true\", bucket with full env name should be pre-created by user"
      return 1
    fi
    if ! reset_bucket "$1"; then
      log 2 "error resetting bucket before tests"
      return 1
    fi
    return 0
  else
    if ! delete_buckets_with_prefix "$1"; then
      log 2 "error deleting buckets with prefix '$1'"
      return 1
    fi
  fi
  return 0
}

# params: bucket name
# return 0 if able to delete recursively, 1 if not
delete_bucket_recursive() {
  log 6 "delete_bucket_recursive '$1'"
  if ! check_param_count "delete_bucket_recursive_s3api" "bucket" 1 $#; then
    return 1
  fi

  if ! reset_bucket "$1"; then
    log 2 "error clearing bucket (s3api)"
    return 1
  fi

  if ! delete_bucket_rest "$1"; then
    log 2 "error deleting bucket"
    return 1
  fi
  return 0
}
@@ -0,0 +1,31 @@
#!/usr/bin/env bats

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

delete_tags_and_verify_deletion() {
  if ! check_param_count_v2 "bucket name" 1 $#; then
    return 1
  fi
  if ! send_rest_go_command "204" \
    "-method" "DELETE" "-bucketName" "$1" "-query" "tagging="; then
    log 2 "error sending tag deletion command"
    return 1
  fi
  if ! verify_no_bucket_tags_rest "$1"; then
    log 2 "error verifying no bucket tags"
    return 1
  fi
  return 0
}
@@ -36,14 +36,14 @@ list_and_delete_objects() {
    fi
  done

  if ! delete_old_versions "$1"; then
  if ! delete_old_versions_base64 "$1"; then
    log 2 "error deleting old version"
    return 1
  fi
  return 0
}

delete_old_versions() {
delete_old_versions_base64() {
  if ! check_param_count "delete_old_versions" "bucket" 1 $#; then
    return 1
  fi
@@ -53,45 +53,52 @@
  fi
  # shellcheck disable=SC2154
  log 5 "versions: $versions"
  version_keys=()
  version_ids=()

  if ! parse_version_data_by_type "rest" "$2"; then
  if ! parse_base64_versions_rest; then
    log 2 "error parsing version data"
    return 1
  fi

  log 5 "version keys: ${version_keys[*]}"
  log 5 "version IDs: ${version_ids[*]}"
  for idx in "${!version_keys[@]}"; do
    if ! delete_object_version_with_or_without_retention "$1"; then
  log 5 "base64 versions: ${base64_pairs[*]}"
  for pair in "${base64_pairs[@]}"; do
    log 5 "pair: $pair"
    if ! delete_object_version_with_or_without_retention_base64 "$1" "$pair"; then
      log 2 "error deleting version with or without retention"
      return 1
    fi
  done
}

delete_object_version_with_or_without_retention() {
  if ! check_param_count "delete_object_version_with_or_without_retention" "bucket" 1 $#; then
delete_object_version_with_or_without_retention_base64() {
  if ! check_param_count_v2 "bucket, key/value pair" 2 $#; then
    return 1
  fi
  IFS=":" read -ra key_and_id <<< "$2"
  log 5 "key and ID: ${key_and_id[*]}"
  if ! key=$(printf '%s' "${key_and_id[0]}" | base64 --decode 2>&1); then
    log 2 "error decoding key: $key"
    return 1
  fi
  if ! id=$(printf '%s' "${key_and_id[1]}" | base64 --decode 2>&1); then
    log 2 "error decoding ID: $id"
    return 1
  fi
  log 5 "idx: $idx"
  log 5 "version ID: ${version_ids[$idx]}"
  # shellcheck disable=SC2154
  if [ "$lock_config_exists" == "true" ]; then
    if ! check_remove_legal_hold_versions "$1" "${version_keys[$idx]}" "${version_ids[$idx]}"; then
    if ! check_remove_legal_hold_versions "$1" "$key" "$id"; then
      log 2 "error checking, removing legal hold versions"
      return 1
    fi
    if ! delete_object_version_rest_bypass_retention "$1" "${version_keys[$idx]}" "${version_ids[$idx]}"; then
    if ! delete_object_version_rest_bypass_retention "$1" "$key" "$id"; then
      log 2 "error deleting object version, bypassing retention"
      return 1
    fi
  else
    if ! delete_object_version_rest "$1" "${version_keys[$idx]}" "${version_ids[$idx]}"; then
    if ! delete_object_version_rest "$1" "$key" "$id"; then
      log 2 "error deleting object version"
      return 1
    fi
  fi
  log 5 "successfully deleted version with key '${version_keys[$idx]}', id '${version_ids[$idx]}'"
  log 5 "successfully deleted version with key '$key', id '$id'"
  return 0
}

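The rewritten function takes each version as a single "base64(key):base64(versionId)" pair, splits on the colon, and decodes the two halves, which keeps arbitrary key characters safe in transit. A minimal Go sketch of the same decode step, with a hypothetical helper name that is not part of the diff:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodePair splits a "base64(key):base64(versionId)" pair and decodes both halves,
// mirroring the `IFS=":" read -ra key_and_id` and `base64 --decode` steps above.
func decodePair(pair string) (key, id string, err error) {
	parts := strings.SplitN(pair, ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("malformed pair: %q", pair)
	}
	k, err := base64.StdEncoding.DecodeString(parts[0])
	if err != nil {
		return "", "", fmt.Errorf("decode key: %w", err)
	}
	v, err := base64.StdEncoding.DecodeString(parts[1])
	if err != nil {
		return "", "", fmt.Errorf("decode version id: %w", err)
	}
	return string(k), string(v), nil
}

func main() {
	key, id, err := decodePair("bXkta2V5:dmVyc2lvbi0x") // "my-key", "version-1"
	if err != nil {
		panic(err)
	}
	fmt.Println(key, id)
}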
tests/drivers/file.sh (new file, 124 lines)
@@ -0,0 +1,124 @@
#!/usr/bin/env bats

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

source ./tests/drivers/params.sh

setup_bucket_and_file() {
  if ! check_param_count_v2 "bucket, file name" 2 $#; then
    return 1
  fi
  if ! setup_bucket_and_file_base "$1" "setup_bucket_and_files" "$2"; then
    log 2 "error setting up bucket and file"
    return 1
  fi
  return 0
}

setup_bucket_and_file_v2() {
  if ! check_param_count_v2 "bucket, file name" 2 $#; then
    return 1
  fi
  if ! setup_bucket_and_file_base "$1" "setup_bucket_and_files_v2" "$2"; then
    log 2 "error setting up bucket and files"
    return 1
  fi
  return 0
}

setup_bucket_and_file_base() {
  if ! check_param_count_v2 "bucket, function, file name" 3 $#; then
    return 1
  fi
  if ! "$2" "$1" "$3"; then
    log 2 "error setting up bucket and file"
    return 1
  fi
  return 0
}

setup_bucket_and_files() {
  if ! check_param_count_gt "bucket, file name" 2 $#; then
    return 1
  fi
  if ! setup_bucket_and_files_base "$1" "setup_bucket" "${@:2}"; then
    log 2 "error setting up bucket and files"
    return 1
  fi
  return 0
}

setup_bucket_and_files_v2() {
  if ! check_param_count_gt "bucket, file name" 2 $#; then
    return 1
  fi
  if ! setup_bucket_and_files_base "$1" "setup_bucket_v2" "${@:2}"; then
    log 2 "error setting up bucket and files"
    return 1
  fi
  return 0
}

setup_bucket_and_files_base() {
  if ! check_param_count_gt "bucket, setup bucket function, file name" 3 $#; then
    return 1
  fi
  if ! "$2" "$1"; then
    log 2 "error setting up bucket"
    return 1
  fi
  if ! create_test_files "${@:3}"; then
    log 2 "error creating test files"
    return 1
  fi
  return 0
}

setup_bucket_and_large_file_base() {
  if ! check_param_count_v2 "bucket, file name, function" 3 $#; then
    return 1
  fi
  if ! "$3" "$1"; then
    log 2 "error setting up bucket"
    return 1
  fi
  if ! create_large_file "$2"; then
    log 2 "error creating large file"
    return 1
  fi
  return 0
}

setup_bucket_and_large_file() {
  if ! check_param_count_v2 "bucket, file name" 2 $#; then
    return 1
  fi
  if ! setup_bucket_and_large_file_base "$1" "$2" "setup_bucket"; then
    log 2 "error setting up bucket and large file"
    return 1
  fi
  return 0
}

setup_bucket_and_large_file_v2() {
  if ! check_param_count_v2 "bucket, file name" 2 $#; then
    return 1
  fi
  if ! setup_bucket_and_large_file_base "$1" "$2" "setup_bucket_v2"; then
    log 2 "error setting up bucket and large file"
    return 1
  fi
  return 0
}
@@ -49,7 +49,7 @@ get_check_acl_after_first_put() {
    log 2 "'get_check_acl_after_first_put' requires client, bucket"
    return 1
  fi
  if ! get_bucket_acl "$1" "$BUCKET_ONE_NAME"; then
  if ! get_bucket_acl "$1" "$2"; then
    log 2 "error retrieving second ACL"
    return 1
  fi
@@ -74,7 +74,7 @@ get_check_acl_after_second_put() {
    log 2 "'get_check_acl_after_second_put' requires client, bucket"
    return 1
  fi
  if ! get_bucket_acl "$1" "$BUCKET_ONE_NAME"; then
  if ! get_bucket_acl "$1" "$2"; then
    log 2 "error retrieving third ACL"
    return 1
  fi
@@ -100,14 +100,3 @@ get_check_acl_after_second_put() {
  fi
  return 0
}

check_direct_display_name() {
  if ! display_name=$(echo "$owner" | xmllint --xpath '//*[local-name()="DisplayName"]/text()' - 2>&1); then
    log 2 "error getting display name: $display_name"
    return 1
  fi
  if [ "$display_name" != "$DIRECT_DISPLAY_NAME" ]; then
    log 2 "display name mismatch (expected '$DIRECT_DISPLAY_NAME', actual '$display_name')"
    return 1
  fi
}

@@ -116,3 +116,14 @@ get_and_check_acl_rest() {
  fi
  return 0
}

check_direct_display_name() {
  if ! display_name=$(echo "$owner" | xmllint --xpath '//*[local-name()="DisplayName"]/text()' - 2>&1); then
    log 2 "error getting display name: $display_name"
    return 1
  fi
  if [ "$display_name" != "$DIRECT_DISPLAY_NAME" ]; then
    log 2 "display name mismatch (expected '$DIRECT_DISPLAY_NAME', actual '$display_name')"
    return 1
  fi
}

tests/drivers/get_bucket_tagging/get_bucket_tagging.sh (new file, 44 lines)
@@ -0,0 +1,44 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

check_bucket_tags_empty() {
  if ! check_param_count_v2 "command type, bucket" 2 $#; then
    return 2
  fi
  if ! get_bucket_tagging "$1" "$2"; then
    log 2 "failed to get tags"
    return 2
  fi
  check_tags_empty "$1" || local check_result=$?
  # shellcheck disable=SC2086
  return $check_result
}

verify_no_bucket_tags() {
  if ! check_param_count_v2 "command type, bucket" 2 $#; then
    return 1
  fi
  if ! get_bucket_tagging "$1" "$2"; then
    log 2 "error retrieving bucket tagging"
    return 1
  fi
  # shellcheck disable=SC2154
  if [[ "$tags" != "" ]]; then
    log 2 "tags should be empty, but are: $tags"
    return 1
  fi
  return 0
}
Some files were not shown because too many files have changed in this diff.