Compare commits

..

1 Commit

Author SHA1 Message Date
Ben McClelland
db9cefa27c feat: add plugin backend
This new plugin backend allows loading a shared object to
implement the backend storage logic. See the following for
caveats with Go plugins: https://pkg.go.dev/plugin#hdr-Warnings.

This also requires cgo to be enabled in the builds, which is
currently disabled for GitHub releases, so using the plugin backend
requires building the gateway from source for now.
2025-04-11 11:02:14 -07:00
206 changed files with 5253 additions and 13749 deletions
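For context, Go's plugin package loads a shared object built with `go build -buildmode=plugin` and resolves exported symbols by name, which is also why cgo must be enabled. A minimal sketch of what loading such a backend could look like follows; the symbol name `Backend` and the wiring are illustrative assumptions, not the gateway's actual plugin contract.

```go
// Hedged sketch: load a backend plugin with Go's standard plugin
// package. The exported symbol name "Backend" is a hypothetical
// placeholder.
package main

import (
	"fmt"
	"log"
	"plugin"
)

func loadBackend(path string) (plugin.Symbol, error) {
	// plugin.Open requires the host and the plugin to be built with
	// cgo enabled and the same Go toolchain version (see the plugin
	// package warnings linked in the commit message).
	p, err := plugin.Open(path)
	if err != nil {
		return nil, fmt.Errorf("open plugin %q: %w", path, err)
	}
	sym, err := p.Lookup("Backend") // hypothetical exported symbol
	if err != nil {
		return nil, fmt.Errorf("lookup Backend: %w", err)
	}
	return sym, nil
}

func main() {
	if _, err := loadBackend("./backend.so"); err != nil {
		log.Fatal(err)
	}
}
```

The plugin itself would be compiled with something like `CGO_ENABLED=1 go build -buildmode=plugin`, which is why prebuilt releases without cgo cannot use this feature.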

.github/SECURITY.md
View File

@@ -1,25 +0,0 @@
# Security Policy
## Reporting a Vulnerability
If you discover a security vulnerability in `versitygw`, we strongly encourage you to report it privately and responsibly.
Please do **not** create public issues or pull requests that contain details about the vulnerability.
Instead, report the issue using GitHub's private **Security Advisories** feature:
- Go to [versitygw's Security Advisories page](https://github.com/versity/versitygw/security/advisories)
- Click on **"Report a vulnerability"**
We aim to respond within **2 business days** and work with you to quickly resolve the issue.
## Supported Versions
| Version | Supported |
| --------------- | --------- |
| Latest (v1.x.x) | ✅ |
| Older versions | ❌ |
## Responsible Disclosure
We appreciate responsible disclosures and are committed to fixing vulnerabilities in a timely manner. Thank you for helping keep `versitygw` secure.

View File

@@ -1,5 +1,5 @@
name: azurite functional tests
permissions: {}
on: pull_request
jobs:

View File

@@ -1,5 +1,5 @@
name: docker bats tests
permissions: {}
on: pull_request
jobs:

View File

@@ -1,4 +1,5 @@
name: Publish Docker image
on:
release:
types: [published]

View File

@@ -1,5 +1,5 @@
name: functional tests
permissions: {}
on: pull_request
jobs:

View File

@@ -1,10 +1,9 @@
name: general
permissions: {}
on: pull_request
jobs:
build:
name: Go Basic Checks
name: Build
runs-on: ubuntu-latest
steps:
@@ -24,6 +23,9 @@ jobs:
run: |
go get -v -t -d ./...
- name: Build
run: make
- name: Test
run: go test -coverprofile profile.txt -race -v -timeout 30s -tags=github ./...
@@ -33,26 +35,4 @@ jobs:
- name: Run govulncheck
run: govulncheck ./...
shell: bash
verify-build:
name: Verify Build Targets
needs: build
runs-on: ubuntu-latest
strategy:
matrix:
os: [darwin, freebsd, linux]
arch: [amd64, arm64]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: Build for ${{ matrix.os }}/${{ matrix.arch }}
run: |
GOOS=${{ matrix.os }} GOARCH=${{ matrix.arch }} go build -o versitygw-${{ matrix.os }}-${{ matrix.arch }} cmd/versitygw/*.go
shell: bash

View File

@@ -1,12 +1,16 @@
name: goreleaser
permissions:
contents: write
on:
push:
# run only against tags
tags:
- '*'
permissions:
contents: write
# packages: write
# issues: write
jobs:
goreleaser:
runs-on: ubuntu-latest
@@ -25,10 +29,10 @@ jobs:
go-version: stable
- name: Run Releaser
uses: goreleaser/goreleaser-action@v6
uses: goreleaser/goreleaser-action@v5
with:
distribution: goreleaser
version: '~> v2'
version: latest
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.TOKEN }}

View File

@@ -1,13 +0,0 @@
name: host style tests
permissions: {}
on: pull_request
jobs:
build-and-run:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: run host-style tests
run: make test-host-style

View File

@@ -1,5 +1,4 @@
name: shellcheck
permissions: {}
on: pull_request
jobs:

View File

@@ -1,5 +1,4 @@
name: staticcheck
permissions: {}
on: pull_request
jobs:

View File

@@ -1,5 +1,4 @@
name: system tests
permissions: {}
on: pull_request
jobs:
build:
@@ -13,85 +12,66 @@ jobs:
IAM_TYPE: folder
RUN_SET: "mc-non-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "mc, posix, file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "mc-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "REST, posix, non-static, base|acl|multipart, folder IAM"
- set: "REST, posix, non-static, all, folder IAM"
IAM_TYPE: folder
RUN_SET: "rest-base,rest-acl,rest-multipart"
RUN_SET: "rest"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "REST, posix, non-static, chunked|checksum|versioning|bucket, folder IAM"
IAM_TYPE: folder
RUN_SET: "rest-chunked,rest-checksum,rest-versioning,rest-bucket"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3, posix, non-file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3-non-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3, posix, file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, bucket|object|multipart, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, policy, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-policy"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, user, non-static, s3 IAM"
IAM_TYPE: s3
RUN_SET: "s3api-user"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, bucket, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-bucket"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
- set: "s3api, posix, multipart, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-multipart"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
- set: "s3api, posix, object, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-object"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
- set: "s3api, posix, policy, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-policy"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
- set: "s3api, posix, user, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-user"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
# TODO fix/debug s3 gateway
#- set: "s3api, s3, multipart|object, non-static, folder IAM"
@@ -108,19 +88,16 @@ jobs:
IAM_TYPE: folder
RUN_SET: "s3cmd-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3cmd, posix, non-user, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-non-user"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3cmd, posix, user, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-user"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
steps:
- name: Check out code into the Go module directory
@@ -129,7 +106,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "stable"
go-version: 'stable'
id: go
- name: Get Dependencies
@@ -145,7 +122,6 @@ jobs:
- name: Install s3cmd
run: |
sudo apt-get update
sudo apt-get install s3cmd
- name: Install mc
@@ -153,10 +129,9 @@ jobs:
curl https://dl.min.io/client/mc/release/linux-amd64/mc --create-dirs -o /usr/local/bin/mc
chmod 755 /usr/local/bin/mc
- name: Install xml libraries (for rest)
- name: Install xmllint (for rest)
run: |
sudo apt-get update
sudo apt-get install libxml2-utils xmlstarlet
sudo apt-get install libxml2-utils
# see https://github.com/versity/versitygw/issues/1034
- name: Install AWS cli
@@ -175,7 +150,6 @@ jobs:
RUN_VERSITYGW: true
BACKEND: ${{ matrix.BACKEND }}
RECREATE_BUCKETS: ${{ matrix.RECREATE_BUCKETS }}
DELETE_BUCKETS_AFTER_TEST: ${{ matrix.DELETE_BUCKETS_AFTER_TEST }}
CERT: ${{ github.workspace }}/cert.pem
KEY: ${{ github.workspace }}/versitygw.pem
LOCAL_FOLDER: /tmp/gw
@@ -199,8 +173,6 @@ jobs:
COMMAND_LOG: command.log
TIME_LOG: time.log
PYTHON_ENV_FOLDER: ${{ github.workspace }}/env
AUTOGENERATE_USERS: true
USER_AUTOGENERATION_PREFIX: github-actions-test-
run: |
make testbin
export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST

View File

@@ -1,5 +1,3 @@
version: 2
before:
hooks:
- go mod tidy
@@ -25,7 +23,7 @@ builds:
- -X=main.Build={{.Commit}} -X=main.BuildTime={{.Date}} -X=main.Version={{.Version}}
archives:
- formats: [ 'tar.gz' ]
- format: tar.gz
# this name template makes the OS and Arch compatible with the results of uname.
name_template: >-
{{ .ProjectName }}_v{{ .Version }}_
@@ -45,7 +43,7 @@ archives:
# use zip for windows archives
format_overrides:
- goos: windows
formats: [ 'zip' ]
format: zip
# Additional files/globs you want to add to the archive.
#
@@ -60,7 +58,7 @@ checksum:
name_template: 'checksums.txt'
snapshot:
version_template: "{{ incpatch .Version }}-{{.ShortCommit}}"
name_template: "{{ incpatch .Version }}-next"
changelog:
sort: asc
@@ -88,7 +86,7 @@ nfpms:
license: Apache 2.0
ids:
builds:
- versitygw
formats:

View File

@@ -72,11 +72,6 @@ dist:
rm -f VERSION
gzip -f $(TARFILE)
.PHONY: snapshot
snapshot:
# brew install goreleaser/tap/goreleaser
goreleaser release --snapshot --skip publish --clean
# Creates and runs S3 gateway instance in a docker container
.PHONY: up-posix
up-posix:
@@ -96,9 +91,3 @@ up-azurite:
.PHONY: up-app
up-app:
$(DOCKERCOMPOSE) up
# Run the host-style tests in docker containers
.PHONY: test-host-style
test-host-style:
docker compose -f tests/host-style-tests/docker-compose.yml up --build --abort-on-container-exit --exit-code-from test

View File

@@ -1,201 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auth
import (
"context"
"encoding/json"
"errors"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
)
func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource string, opts AccessOptions) error {
if opts.IsRoot {
return nil
}
if opts.Acc.Role == RoleAdmin {
return nil
}
// Verify destination bucket access
if err := VerifyAccess(ctx, be, opts); err != nil {
return err
}
// Verify source bucket access
srcBucket, srcObject, found := strings.Cut(copySource, "/")
if !found {
return s3err.GetAPIError(s3err.ErrInvalidCopySource)
}
// Get source bucket ACL
srcBucketACLBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: &srcBucket})
if err != nil {
return err
}
var srcBucketAcl ACL
if err := json.Unmarshal(srcBucketACLBytes, &srcBucketAcl); err != nil {
return err
}
if err := VerifyAccess(ctx, be, AccessOptions{
Acl: srcBucketAcl,
AclPermission: PermissionRead,
IsRoot: opts.IsRoot,
Acc: opts.Acc,
Bucket: srcBucket,
Object: srcObject,
Action: GetObjectAction,
}); err != nil {
return err
}
return nil
}
type AccessOptions struct {
Acl ACL
AclPermission Permission
IsRoot bool
Acc Account
Bucket string
Object string
Action Action
Readonly bool
IsBucketPublic bool
}
func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
// Skip the access check for public buckets
if opts.IsBucketPublic {
return nil
}
if opts.Readonly {
if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
}
if opts.IsRoot {
return nil
}
if opts.Acc.Role == RoleAdmin {
return nil
}
policy, policyErr := be.GetBucketPolicy(ctx, opts.Bucket)
if policyErr != nil {
if !errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
return policyErr
}
} else {
return VerifyBucketPolicy(policy, opts.Acc.Access, opts.Bucket, opts.Object, opts.Action)
}
if err := verifyACL(opts.Acl, opts.Acc.Access, opts.AclPermission); err != nil {
return err
}
return nil
}
// Detects if the action is policy related
// e.g.
// 'GetBucketPolicy', 'PutBucketPolicy'
func isPolicyAction(action Action) bool {
return action == GetBucketPolicyAction || action == PutBucketPolicyAction
}
// VerifyPublicAccess checks if the bucket is publicly accessible by ACL or Policy
func VerifyPublicAccess(ctx context.Context, be backend.Backend, action Action, permission Permission, bucket, object string) error {
// ACL disabled
policy, err := be.GetBucketPolicy(ctx, bucket)
if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
return err
}
if err == nil {
err = VerifyPublicBucketPolicy(policy, bucket, object, action)
if err == nil {
// if ACLs are disabled, and the bucket grants public access,
// policy actions should return 'MethodNotAllowed'
if isPolicyAction(action) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil
}
}
// if the action is not in the ACL whitelist, access is denied
_, ok := publicACLAllowedActions[action]
if !ok {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
err = VerifyPublicBucketACL(ctx, be, bucket, action, permission)
if err != nil {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
return nil
}
func MayCreateBucket(acct Account, isRoot bool) error {
if isRoot {
return nil
}
if acct.Role == RoleUser {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
return nil
}
func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {
// Owner check
if acct.Access == acl.Owner {
return nil
}
// Root user has access over almost everything
if isRoot {
return nil
}
// Admin user case
if acct.Role == RoleAdmin {
return nil
}
// Return access denied in all other cases
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
type PublicACLAllowedActions map[Action]struct{}
var publicACLAllowedActions PublicACLAllowedActions = PublicACLAllowedActions{
ListBucketAction: struct{}{},
PutObjectAction: struct{}{},
ListBucketMultipartUploadsAction: struct{}{},
DeleteObjectAction: struct{}{},
ListBucketVersionsAction: struct{}{},
GetObjectAction: struct{}{},
GetObjectAttributesAction: struct{}{},
GetObjectAclAction: struct{}{},
}

View File

@@ -33,17 +33,6 @@ type ACL struct {
Grantees []Grantee
}
// IsPublic specifies if the acl grants public read access
func (acl *ACL) IsPublic(permission Permission) bool {
for _, grt := range acl.Grantees {
if grt.Permission == permission && grt.Type == types.TypeGroup && grt.Access == "all-users" {
return true
}
}
return false
}
type Grantee struct {
Permission Permission
Access string
@@ -446,50 +435,118 @@ func verifyACL(acl ACL, access string, permission Permission) error {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
// Verifies if the bucket acl grants public access
func VerifyPublicBucketACL(ctx context.Context, be backend.Backend, bucket string, action Action, permission Permission) error {
aclBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{
Bucket: &bucket,
})
if err != nil {
return err
func MayCreateBucket(acct Account, isRoot bool) error {
if isRoot {
return nil
}
acl, err := ParseACL(aclBytes)
if err != nil {
return err
}
if !acl.IsPublic(permission) {
return ErrAccessDenied
if acct.Role == RoleUser {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
return nil
}
// UpdateBucketACLOwner sets default ACL with new owner and removes
// any previous bucket policy that was in place
func UpdateBucketACLOwner(ctx context.Context, be backend.Backend, bucket, newOwner string) error {
acl := ACL{
Owner: newOwner,
Grantees: []Grantee{
{
Permission: PermissionFullControl,
Access: newOwner,
Type: types.TypeCanonicalUser,
},
},
func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {
// Owner check
if acct.Access == acl.Owner {
return nil
}
result, err := json.Marshal(acl)
if err != nil {
return fmt.Errorf("marshal ACL: %w", err)
// Root user has access over almost everything
if isRoot {
return nil
}
err = be.PutBucketAcl(ctx, bucket, result)
// Admin user case
if acct.Role == RoleAdmin {
return nil
}
// Return access denied in all other cases
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
type AccessOptions struct {
Acl ACL
AclPermission Permission
IsRoot bool
Acc Account
Bucket string
Object string
Action Action
Readonly bool
}
func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
if opts.Readonly {
if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
}
if opts.IsRoot {
return nil
}
if opts.Acc.Role == RoleAdmin {
return nil
}
policy, policyErr := be.GetBucketPolicy(ctx, opts.Bucket)
if policyErr != nil {
if !errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
return policyErr
}
} else {
return VerifyBucketPolicy(policy, opts.Acc.Access, opts.Bucket, opts.Object, opts.Action)
}
if err := verifyACL(opts.Acl, opts.Acc.Access, opts.AclPermission); err != nil {
return err
}
return nil
}
func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource string, opts AccessOptions) error {
if opts.IsRoot {
return nil
}
if opts.Acc.Role == RoleAdmin {
return nil
}
// Verify destination bucket access
if err := VerifyAccess(ctx, be, opts); err != nil {
return err
}
// Verify source bucket access
srcBucket, srcObject, found := strings.Cut(copySource, "/")
if !found {
return s3err.GetAPIError(s3err.ErrInvalidCopySource)
}
// Get source bucket ACL
srcBucketACLBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: &srcBucket})
if err != nil {
return err
}
return be.DeleteBucketPolicy(ctx, bucket)
var srcBucketAcl ACL
if err := json.Unmarshal(srcBucketACLBytes, &srcBucketAcl); err != nil {
return err
}
if err := VerifyAccess(ctx, be, AccessOptions{
Acl: srcBucketAcl,
AclPermission: PermissionRead,
IsRoot: opts.IsRoot,
Acc: opts.Acc,
Bucket: srcBucket,
Object: srcObject,
Action: GetObjectAction,
}); err != nil {
return err
}
return nil
}

View File

@@ -22,8 +22,6 @@ import (
"github.com/versity/versitygw/s3err"
)
var ErrAccessDenied = errors.New("access denied")
type policyErr string
func (p policyErr) Error() string {
@@ -91,24 +89,6 @@ func (bp *BucketPolicy) isAllowed(principal string, action Action, resource stri
return isAllowed
}
// isPublic checks if the bucket policy statements contain
// an entity granting public access
func (bp *BucketPolicy) isPublic(resource string, action Action) bool {
var isAllowed bool
for _, statement := range bp.Statement {
if statement.isPublic(resource, action) {
switch statement.Effect {
case BucketPolicyAccessTypeAllow:
isAllowed = true
case BucketPolicyAccessTypeDeny:
return false
}
}
}
return isAllowed
}
type BucketPolicyItem struct {
Effect BucketPolicyAccessType `json:"Effect"`
Principals Principals `json:"Principal"`
@@ -154,11 +134,6 @@ func (bpi *BucketPolicyItem) findMatch(principal string, action Action, resource
return false
}
// isPublic checks if the bucket policy statement grants public access
func (bpi *BucketPolicyItem) isPublic(resource string, action Action) bool {
return bpi.Principals.IsPublic() && bpi.Actions.FindMatch(action) && bpi.Resources.FindMatch(resource)
}
func getMalformedPolicyError(err error) error {
return s3err.APIError{
Code: "MalformedPolicy",
@@ -208,22 +183,3 @@ func VerifyBucketPolicy(policy []byte, access, bucket, object string, action Act
return nil
}
// Checks if the bucket policy grants public access
func VerifyPublicBucketPolicy(policy []byte, bucket, object string, action Action) error {
var bucketPolicy BucketPolicy
if err := json.Unmarshal(policy, &bucketPolicy); err != nil {
return err
}
resource := bucket
if object != "" {
resource += "/" + object
}
if !bucketPolicy.isPublic(resource, action) {
return ErrAccessDenied
}
return nil
}

View File

@@ -91,7 +91,6 @@ var supportedActionList = map[Action]struct{}{
DeleteObjectTaggingAction: {},
ListBucketVersionsAction: {},
ListBucketAction: {},
GetBucketObjectLockConfigurationAction: {},
PutBucketObjectLockConfigurationAction: {},
GetObjectLegalHoldAction: {},
PutObjectLegalHoldAction: {},

View File

@@ -121,10 +121,3 @@ func (p Principals) Contains(userAccess string) bool {
_, found := p[userAccess]
return found
}
// Bucket policy grants public access if it contains
// a wildcard match for all users
func (p Principals) IsPublic() bool {
_, ok := p["*"]
return ok
}

View File

@@ -18,8 +18,6 @@ import (
"errors"
"fmt"
"time"
"github.com/versity/versitygw/s3err"
)
type Role string
@@ -59,19 +57,10 @@ type ListUserAccountsResult struct {
// Mutable props, which could be changed when updating an IAM account
type MutableProps struct {
Secret *string `json:"secret"`
Role Role `json:"role"`
UserID *int `json:"userID"`
GroupID *int `json:"groupID"`
}
func (m MutableProps) Validate() error {
if m.Role != "" && !m.Role.IsValid() {
return s3err.GetAPIError(s3err.ErrAdminInvalidUserRole)
}
return nil
}
func updateAcc(acc *Account, props MutableProps) {
if props.Secret != nil {
acc.Secret = *props.Secret
@@ -82,9 +71,6 @@ func updateAcc(acc *Account, props MutableProps) {
if props.UserID != nil {
acc.UserID = *props.UserID
}
if props.Role != "" {
acc.Role = props.Role
}
}
// IAMService is the interface for all IAM service implementations
@@ -121,7 +107,6 @@ type Opts struct {
LDAPGroupIdAtr string
VaultEndpointURL string
VaultSecretStoragePath string
VaultAuthMethod string
VaultMountPath string
VaultRootToken string
VaultRoleId string
@@ -167,7 +152,7 @@ func New(o *Opts) (IAMService, error) {
o.S3Endpoint, o.S3Bucket)
case o.VaultEndpointURL != "":
svc, err = NewVaultIAMService(o.RootAccount, o.VaultEndpointURL, o.VaultSecretStoragePath,
o.VaultAuthMethod, o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
case o.IpaHost != "":

View File

@@ -290,49 +290,93 @@ func (s *IAMServiceInternal) readIAMData() ([]byte, error) {
func (s *IAMServiceInternal) storeIAM(update UpdateAcctFunc) error {
// We are going to be racing with other running gateways without any
// coordination. So the strategy here is to read the current file data,
// update the data, write back out to a temp file, then rename the
// temp file to the original file. This rename will replace the
// original file with the new file. This is atomic and should always
// allow for a consistent view of the data. There is a small
// window where the file could be read and then updated by
// another process. In this case any updates the other process did
// will be lost. This is a limitation of the internal IAM service.
// This should be rare, and even when it does happen should result
// in a valid IAM file, just without the other process's updates.
// coordination. So the strategy here is to read the current file data.
// If the file doesn't exist, then we assume someone else is currently
// updating the file. So we just need to keep retrying. We also need
// to make sure the data is consistent within a single update. So racing
// writes to a file would possibly leave this in some invalid state.
// We can get atomic updates with rename: read the data, update
// the data, write it to a temp file, then rename the temp file back to
// the data file. This should always result in a complete data image.
iamFname := filepath.Join(s.dir, iamFile)
backupFname := filepath.Join(s.dir, iamBackupFile)
// There is at least one unsolved failure mode here.
// If a gateway removes the data file and then crashes, all other
// gateways will retry forever thinking that the original will eventually
// write the file.
b, err := os.ReadFile(iamFname)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("read iam file: %w", err)
}
retries := 0
fname := filepath.Join(s.dir, iamFile)
// save copy of data
datacopy := make([]byte, len(b))
copy(datacopy, b)
for {
b, err := os.ReadFile(fname)
if errors.Is(err, fs.ErrNotExist) {
// racing with someone else updating
// keep retrying after backoff
retries++
if retries < maxretry {
time.Sleep(backoff)
continue
}
// make a backup copy in case something happens
err = s.writeUsingTempFile(b, backupFname)
if err != nil {
return fmt.Errorf("write backup iam file: %w", err)
}
// we have been unsuccessful trying to read the iam file
// so this must be the case where something happened and
// the file did not get updated successfully, and probably
// isn't going to be. The recovery procedure would be to
// copy the backup file into place of the original.
return fmt.Errorf("no iam file, needs backup recovery")
}
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("read iam file: %w", err)
}
b, err = update(b)
if err != nil {
return fmt.Errorf("update iam data: %w", err)
}
// reset retries on successful read
retries = 0
err = s.writeUsingTempFile(b, iamFname)
if err != nil {
return fmt.Errorf("write iam file: %w", err)
err = os.Remove(fname)
if errors.Is(err, fs.ErrNotExist) {
// racing with someone else updating
// keep retrying after backoff
time.Sleep(backoff)
continue
}
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove old iam file: %w", err)
}
// save copy of data
datacopy := make([]byte, len(b))
copy(datacopy, b)
// make a backup copy in case we crash before the update
// this is after the remove, so there is a small window where something
// can go wrong, but the remove acts as a barrier that keeps other
// gateways from trying to write the backup at the same time. Only one
// gateway will successfully remove the file.
os.WriteFile(filepath.Join(s.dir, iamBackupFile), b, iamMode)
b, err = update(b)
if err != nil {
// update failed, try to write old data back out
os.WriteFile(fname, datacopy, iamMode)
return fmt.Errorf("update iam data: %w", err)
}
err = s.writeTempFile(b)
if err != nil {
// update failed, try to write old data back out
os.WriteFile(fname, datacopy, iamMode)
return err
}
break
}
return nil
}
func (s *IAMServiceInternal) writeUsingTempFile(b []byte, fname string) error {
func (s *IAMServiceInternal) writeTempFile(b []byte) error {
fname := filepath.Join(s.dir, iamFile)
f, err := os.CreateTemp(s.dir, iamFile)
if err != nil {
return fmt.Errorf("create temp file: %w", err)
@@ -340,7 +384,6 @@ func (s *IAMServiceInternal) writeUsingTempFile(b []byte, fname string) error {
defer os.Remove(f.Name())
_, err = f.Write(b)
f.Close()
if err != nil {
return fmt.Errorf("write temp file: %w", err)
}
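As a standalone illustration of the strategy described in the comments above, a minimal write-temp-then-rename sketch (not the gateway's actual code) could look like:

```go
// Minimal sketch of the write-temp-then-rename pattern described in
// the comments above. Rename within the same directory is atomic on
// POSIX filesystems, so readers see either the old file or the new
// file, never a partial write.
package example

import (
	"fmt"
	"os"
	"path/filepath"
)

func atomicWrite(dir, name string, data []byte) error {
	f, err := os.CreateTemp(dir, name+".tmp")
	if err != nil {
		return fmt.Errorf("create temp file: %w", err)
	}
	tmp := f.Name()
	defer os.Remove(tmp) // no-op once the rename has succeeded

	if _, err := f.Write(data); err != nil {
		f.Close()
		return fmt.Errorf("write temp file: %w", err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("close temp file: %w", err)
	}
	// Atomically replace the target with the fully written temp file.
	return os.Rename(tmp, filepath.Join(dir, name))
}
```

The trade-off the comments call out still applies: two writers that read, modify, and rename concurrently will not corrupt the file, but the slower writer silently discards the faster one's update.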

View File

@@ -27,15 +27,11 @@ import (
"fmt"
"io"
"log"
"net"
"net/http"
"net/http/cookiejar"
"net/url"
"slices"
"strconv"
"strings"
"syscall"
"time"
)
const IpaVersion = "2.254"
@@ -56,6 +52,7 @@ type IpaIAMService struct {
var _ IAMService = &IpaIAMService{}
func NewIpaIAMService(rootAcc Account, host, vaultName, username, password string, isInsecure, debug bool) (*IpaIAMService, error) {
ipa := IpaIAMService{
id: 0,
version: IpaVersion,
@@ -75,7 +72,6 @@ func NewIpaIAMService(rootAcc Account, host, vaultName, username, password strin
mTLSConfig := &tls.Config{InsecureSkipVerify: isInsecure}
tr := &http.Transport{
TLSClientConfig: mTLSConfig,
Proxy: http.ProxyFromEnvironment,
}
ipa.client = http.Client{Jar: jar, Transport: tr}
@@ -106,7 +102,13 @@ func NewIpaIAMService(rootAcc Account, host, vaultName, username, password strin
ipa.kraTransportKey = cert.PublicKey.(*rsa.PublicKey)
isSupported := slices.Contains(vaultConfig.Wrapping_supported_algorithms, "aes-128-cbc")
isSupported := false
for _, algo := range vaultConfig.Wrapping_supported_algorithms {
if algo == "aes-128-cbc" {
isSupported = true
break
}
}
if !isSupported {
return nil,
@@ -224,8 +226,6 @@ func (ipa *IpaIAMService) Shutdown() error {
// Implementation
const requestRetries = 3
func (ipa *IpaIAMService) login() error {
form := url.Values{}
form.Set("user", ipa.username)
@@ -242,33 +242,17 @@ func (ipa *IpaIAMService) login() error {
req.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
var resp *http.Response
for i := range requestRetries {
resp, err = ipa.client.Do(req)
if err == nil {
break
}
// Check for transient network errors
if isRetryable(err) {
time.Sleep(time.Second * time.Duration(i+1))
continue
}
return fmt.Errorf("login POST to %s failed: %w", req.URL, err)
}
resp, err := ipa.client.Do(req)
if err != nil {
return fmt.Errorf("login POST to %s failed after retries: %w",
req.URL, err)
return err
}
defer resp.Body.Close()
if resp.StatusCode == 401 {
return errors.New("cannot login to FreeIPA: invalid credentials")
}
if resp.StatusCode != 200 {
return fmt.Errorf("cannot login to FreeIPA: status code %d",
resp.StatusCode)
return fmt.Errorf("cannot login to FreeIPA: status code %d", resp.StatusCode)
}
return nil
@@ -315,28 +299,11 @@ func (ipa *IpaIAMService) rpcInternal(req rpcRequest) (rpcResponse, error) {
httpReq.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
httpReq.Header.Set("Content-Type", "application/json")
var httpResp *http.Response
for i := range requestRetries {
httpResp, err = ipa.client.Do(httpReq)
if err == nil {
break
}
// Check for transient network errors
if isRetryable(err) {
time.Sleep(time.Second * time.Duration(i+1))
continue
}
return rpcResponse{}, fmt.Errorf("ipa request to %s failed: %w",
httpReq.URL, err)
}
httpResp, err := ipa.client.Do(httpReq)
if err != nil {
return rpcResponse{},
fmt.Errorf("ipa request to %s failed after retries: %w",
httpReq.URL, err)
return rpcResponse{}, err
}
defer httpResp.Body.Close()
bytes, err := io.ReadAll(httpResp.Body)
ipa.log(string(bytes))
if err != nil {
@@ -371,30 +338,6 @@ func (ipa *IpaIAMService) rpcInternal(req rpcRequest) (rpcResponse, error) {
}, nil
}
func isRetryable(err error) bool {
if err == nil {
return false
}
if errors.Is(err, io.EOF) {
return true
}
if err, ok := err.(net.Error); ok && err.Timeout() {
return true
}
if opErr, ok := err.(*net.OpError); ok {
if sysErr, ok := opErr.Err.(*syscall.Errno); ok {
if *sysErr == syscall.ECONNRESET {
return true
}
}
}
return false
}
func (ipa *IpaIAMService) newRequest(method string, args []string, dict map[string]any) (rpcRequest, error) {
id := ipa.id

View File

@@ -139,9 +139,6 @@ func (ld *LdapIAMService) UpdateUserAccount(access string, props MutableProps) e
if props.UserID != nil {
req.Replace(ld.userIdAtr, []string{fmt.Sprint(*props.UserID)})
}
if props.Role != "" {
req.Replace(ld.roleAtr, []string{string(props.Role)})
}
err := ld.conn.Modify(req)
//TODO: Handle non existing user case

View File

@@ -19,7 +19,6 @@ import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
@@ -27,25 +26,20 @@ import (
"github.com/hashicorp/vault-client-go/schema"
)
const requestTimeout = 10 * time.Second
type VaultIAMService struct {
client *vault.Client
authReqOpts []vault.RequestOption
kvReqOpts []vault.RequestOption
reqOpts []vault.RequestOption
secretStoragePath string
rootAcc Account
creds schema.AppRoleLoginRequest
}
var _ IAMService = &VaultIAMService{}
func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
authMethod, mountPath, rootToken, roleID, roleSecret, serverCert,
clientCert, clientCertKey string) (IAMService, error) {
func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath, mountPath, rootToken, roleID, roleSecret, serverCert, clientCert, clientCertKey string) (IAMService, error) {
opts := []vault.ClientOption{
vault.WithAddress(endpoint),
vault.WithRequestTimeout(requestTimeout),
// set request timeout to 10 secs
vault.WithRequestTimeout(10 * time.Second),
}
if serverCert != "" {
tls := vault.TLSConfiguration{}
@@ -68,21 +62,10 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
return nil, fmt.Errorf("init vault client: %w", err)
}
authReqOpts := []vault.RequestOption{}
// if auth method path is not specified, it defaults to "approle"
if authMethod != "" {
authReqOpts = append(authReqOpts, vault.WithMountPath(authMethod))
}
kvReqOpts := []vault.RequestOption{}
// if mount path is not specified, it defaults to "kv-v2"
reqOpts := []vault.RequestOption{}
// if mount path is not specified, it defaults to "approle"
if mountPath != "" {
kvReqOpts = append(kvReqOpts, vault.WithMountPath(mountPath))
}
creds := schema.AppRoleLoginRequest{
RoleId: roleID,
SecretId: roleSecret,
reqOpts = append(reqOpts, vault.WithMountPath(mountPath))
}
// Authentication
@@ -97,8 +80,12 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
return nil, fmt.Errorf("role id and role secret must both be specified")
}
resp, err := client.Auth.AppRoleLogin(context.Background(),
creds, authReqOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := client.Auth.AppRoleLogin(ctx, schema.AppRoleLoginRequest{
RoleId: roleID,
SecretId: roleSecret,
}, reqOpts...)
cancel()
if err != nil {
return nil, fmt.Errorf("approle authentication failure: %w", err)
}
@@ -112,77 +99,33 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
return &VaultIAMService{
client: client,
authReqOpts: authReqOpts,
kvReqOpts: kvReqOpts,
reqOpts: reqOpts,
secretStoragePath: secretStoragePath,
rootAcc: rootAcc,
creds: creds,
}, nil
}
func (vt *VaultIAMService) reAuthIfNeeded(err error) error {
if err == nil {
return nil
}
// Vault returns 403 for expired/revoked tokens
// pass all other errors back unchanged
if !vault.IsErrorStatus(err, http.StatusForbidden) {
return err
}
resp, authErr := vt.client.Auth.AppRoleLogin(context.Background(),
vt.creds, vt.authReqOpts...)
if authErr != nil {
return fmt.Errorf("vault re-authentication failure: %w", authErr)
}
if err := vt.client.SetToken(resp.Auth.ClientToken); err != nil {
return fmt.Errorf("vault re-authentication set token failure: %w", err)
}
return nil
}
func (vt *VaultIAMService) CreateAccount(account Account) error {
if vt.rootAcc.Access == account.Access {
return ErrUserExists
}
_, err := vt.client.Secrets.KvV2Write(context.Background(),
vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
Data: map[string]any{
account.Access: account,
},
Options: map[string]any{
"cas": 0,
},
}, vt.kvReqOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
_, err := vt.client.Secrets.KvV2Write(ctx, vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
Data: map[string]any{
account.Access: account,
},
Options: map[string]interface{}{
"cas": 0,
},
}, vt.reqOpts...)
cancel()
if err != nil {
if strings.Contains(err.Error(), "check-and-set") {
return ErrUserExists
}
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
return reauthErr
}
// retry once after re-auth
_, err = vt.client.Secrets.KvV2Write(context.Background(),
vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
Data: map[string]any{
account.Access: account,
},
Options: map[string]any{
"cas": 0,
},
}, vt.kvReqOpts...)
if err != nil {
if strings.Contains(err.Error(), "check-and-set") {
return ErrUserExists
}
return err
}
return nil
return err
}
return nil
}
@@ -190,84 +133,66 @@ func (vt *VaultIAMService) GetUserAccount(access string) (Account, error) {
if vt.rootAcc.Access == access {
return vt.rootAcc, nil
}
resp, err := vt.client.Secrets.KvV2Read(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := vt.client.Secrets.KvV2Read(ctx, vt.secretStoragePath+"/"+access, vt.reqOpts...)
cancel()
if err != nil {
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
return Account{}, reauthErr
}
// retry once after re-auth
resp, err = vt.client.Secrets.KvV2Read(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
if err != nil {
return Account{}, err
}
return Account{}, err
}
acc, err := parseVaultUserAccount(resp.Data.Data, access)
if err != nil {
return Account{}, err
}
return acc, nil
}
func (vt *VaultIAMService) UpdateUserAccount(access string, props MutableProps) error {
//TODO: We need something like a transaction here?
acc, err := vt.GetUserAccount(access)
if err != nil {
return err
}
updateAcc(&acc, props)
err = vt.DeleteUserAccount(access)
if err != nil {
return err
}
err = vt.CreateAccount(acc)
if err != nil {
return err
}
return nil
}
func (vt *VaultIAMService) DeleteUserAccount(access string) error {
_, err := vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
_, err := vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(ctx, vt.secretStoragePath+"/"+access, vt.reqOpts...)
cancel()
if err != nil {
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
return reauthErr
}
// retry once after re-auth
_, err = vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
if err != nil {
return err
}
return err
}
return nil
}
func (vt *VaultIAMService) ListUserAccounts() ([]Account, error) {
resp, err := vt.client.Secrets.KvV2List(context.Background(),
vt.secretStoragePath, vt.kvReqOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := vt.client.Secrets.KvV2List(ctx, vt.secretStoragePath, vt.reqOpts...)
cancel()
if err != nil {
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
if vault.IsErrorStatus(err, http.StatusNotFound) {
return []Account{}, nil
}
return nil, reauthErr
}
// retry once after re-auth
resp, err = vt.client.Secrets.KvV2List(context.Background(),
vt.secretStoragePath, vt.kvReqOpts...)
if err != nil {
if vault.IsErrorStatus(err, http.StatusNotFound) {
return []Account{}, nil
}
return nil, err
if vault.IsErrorStatus(err, 404) {
return []Account{}, nil
}
return nil, err
}
accs := []Account{}
for _, acss := range resp.Data.Keys {
acc, err := vt.GetUserAccount(acss)
if err != nil {
@@ -275,6 +200,7 @@ func (vt *VaultIAMService) ListUserAccounts() ([]Account, error) {
}
accs = append(accs, acc)
}
return accs, nil
}
@@ -285,8 +211,8 @@ func (vt *VaultIAMService) Shutdown() error {
var errInvalidUser error = errors.New("invalid user account entry in secrets engine")
func parseVaultUserAccount(data map[string]any, access string) (acc Account, err error) {
usrAcc, ok := data[access].(map[string]any)
func parseVaultUserAccount(data map[string]interface{}, access string) (acc Account, err error) {
usrAcc, ok := data[access].(map[string]interface{})
if !ok {
return acc, errInvalidUser
}
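The `reAuthIfNeeded` helper and the retry blocks shown in this file's diff follow a common shape: re-authenticate on a 403 and retry the call once. A generic, hedged sketch of that pattern:

```go
// Minimal sketch of the re-auth-then-retry-once pattern used around
// the Vault KV calls: Vault returns 403 for expired or revoked
// tokens, so on an auth failure the service re-authenticates and
// retries the operation exactly once. The function parameters stand
// in for the real Vault client calls.
package example

import "fmt"

func withReauth(op func() error, isAuthErr func(error) bool, reauth func() error) error {
	err := op()
	if err == nil || !isAuthErr(err) {
		return err // success, or an error re-auth cannot fix
	}
	if rerr := reauth(); rerr != nil {
		return fmt.Errorf("re-authentication failed: %w", rerr)
	}
	return op() // retry once with the fresh token
}
```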

View File

@@ -136,7 +136,7 @@ func ParseObjectLegalHoldOutput(status *bool) *s3response.GetObjectLegalHoldResu
}
}
func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass, isBucketPublic bool, be backend.Backend) error {
func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass bool, be backend.Backend) error {
data, err := be.GetObjectLockConfiguration(ctx, bucket)
if err != nil {
if errors.Is(err, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)) {
@@ -211,11 +211,7 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
if err != nil {
return err
}
if isBucketPublic {
err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
} else {
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
}
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
if err != nil {
return s3err.GetAPIError(s3err.ErrObjectLocked)
}
@@ -258,11 +254,7 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
if err != nil {
return err
}
if isBucketPublic {
err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
} else {
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
}
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
if err != nil {
return s3err.GetAPIError(s3err.ErrObjectLocked)
}

View File

@@ -181,9 +181,11 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
return err
}
acl, err := auth.ParseACL(aclBytes)
if err != nil {
return err
var acl auth.ACL
if len(aclBytes) > 0 {
if err := json.Unmarshal(aclBytes, &acl); err != nil {
return fmt.Errorf("unmarshal acl: %w", err)
}
}
if acl.Owner == acct.Access {
@@ -293,7 +295,7 @@ func (az *Azure) DeleteBucketOwnershipControls(ctx context.Context, bucket strin
}
func (az *Azure) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
tags, err := backend.ParseObjectTags(getString(po.Tagging))
tags, err := parseTags(po.Tagging)
if err != nil {
return s3response.PutObjectOutput{}, err
}
@@ -416,7 +418,7 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.G
var opts *azblob.DownloadStreamOptions
if *input.Range != "" {
offset, count, isValid, err := backend.ParseObjectRange(*resp.ContentLength, *input.Range)
offset, count, isValid, err := backend.ParseGetObjectRange(*resp.ContentLength, *input.Range)
if err != nil {
return nil, err
}
@@ -505,26 +507,10 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
if err != nil {
return nil, azureErrToS3Err(err)
}
var size int64
if resp.ContentLength != nil {
size = *resp.ContentLength
}
startOffset, length, isValid, err := backend.ParseObjectRange(size, getString(input.Range))
if err != nil {
return nil, err
}
var contentRange string
if isValid {
contentRange = fmt.Sprintf("bytes %v-%v/%v",
startOffset, startOffset+length-1, size)
}
result := &s3.HeadObjectOutput{
ContentRange: &contentRange,
AcceptRanges: backend.GetPtrFromString("bytes"),
ContentLength: &length,
AcceptRanges: resp.AcceptRanges,
ContentLength: resp.ContentLength,
ContentType: resp.ContentType,
ContentEncoding: resp.ContentEncoding,
ContentLanguage: resp.ContentLanguage,
@@ -605,9 +591,9 @@ func (az *Azure) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s
return s3response.ListObjectsResult{}, azureErrToS3Err(err)
}
acl, err := auth.ParseACL(aclBytes)
if err != nil {
return s3response.ListObjectsResult{}, err
var acl auth.ACL
if err := json.Unmarshal(aclBytes, &acl); err != nil {
return s3response.ListObjectsResult{}, fmt.Errorf("unmarshal acl: %w", err)
}
Pager:
@@ -708,9 +694,8 @@ func (az *Azure) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input
return s3response.ListObjectsV2Result{}, azureErrToS3Err(err)
}
acl, err = auth.ParseACL(aclBytes)
if err != nil {
return s3response.ListObjectsV2Result{}, err
if err := json.Unmarshal(aclBytes, &acl); err != nil {
return s3response.ListObjectsV2Result{}, fmt.Errorf("unmarshal acl: %w", err)
}
}
@@ -822,14 +807,14 @@ func (az *Azure) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput
}, nil
}
func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
dstClient, err := az.getBlobClient(*input.Bucket, *input.Key)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource {
if input.MetadataDirective != types.MetadataDirectiveReplace {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
}
// Set object meta http headers
@@ -841,7 +826,7 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
BlobContentType: input.ContentType,
}, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
meta := input.Metadata
@@ -856,14 +841,14 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
// Set object metadata
_, err = dstClient.SetMetadata(ctx, parseMetadata(meta), nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
// Set object legal hold
if input.ObjectLockLegalHoldStatus != "" {
err = az.PutObjectLegalHold(ctx, *input.Bucket, *input.Key, "", input.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
}
// Set object retention
@@ -877,28 +862,28 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
retParsed, err := json.Marshal(retention)
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("parse object retention: %w", err)
return nil, fmt.Errorf("parse object retention: %w", err)
}
err = az.PutObjectRetention(ctx, *input.Bucket, *input.Key, "", true, retParsed)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
}
// Set object Tagging, if tagging directive is "REPLACE"
if input.TaggingDirective == types.TaggingDirectiveReplace {
tags, err := backend.ParseObjectTags(getString(input.Tagging))
tags, err := parseTags(input.Tagging)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
_, err = dstClient.SetTags(ctx, tags, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
}
return s3response.CopyObjectOutput{
CopyObjectResult: &s3response.CopyObjectResult{
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{
LastModified: res.LastModified,
ETag: (*string)(res.ETag),
},
@@ -907,13 +892,13 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
srcBucket, srcObj, _, err := backend.ParseCopySource(*input.CopySource)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
// Get the source object
downloadResp, err := az.client.DownloadStream(ctx, srcBucket, srcObj, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
pInput := s3response.PutObjectInput{
@@ -951,28 +936,28 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
// Create the destination object
resp, err := az.PutObject(ctx, pInput)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
// Copy the object tagging, if tagging directive is "COPY"
if input.TaggingDirective == types.TaggingDirectiveCopy {
srcClient, err := az.getBlobClient(srcBucket, srcObj)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
res, err := srcClient.GetTags(ctx, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
_, err = dstClient.SetTags(ctx, parseAzTags(res.BlobTagSet), nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
}
return s3response.CopyObjectOutput{
CopyObjectResult: &s3response.CopyObjectResult{
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{
ETag: &resp.ETag,
},
}, nil
@@ -1049,9 +1034,20 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
}
// parse object tags
tags, err := backend.ParseObjectTags(getString(input.Tagging))
if err != nil {
return s3response.InitiateMultipartUploadResult{}, err
tagsStr := getString(input.Tagging)
tags := map[string]string{}
if tagsStr != "" {
tagParts := strings.Split(tagsStr, "&")
for _, prt := range tagParts {
p := strings.Split(prt, "=")
if len(p) != 2 {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidTag)
}
if len(p[0]) > 128 || len(p[1]) > 256 {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidTag)
}
tags[p[0]] = p[1]
}
}
// set blob legal hold status in metadata
@@ -1091,7 +1087,7 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
// Create an empty blob in .sgwtmp/multipart/<uploadId>/<object hash>
// The blob indicates multipart upload initialization and holds the mp metadata
// e.g. tagging, content-type, metadata, object lock status ...
_, err = az.client.UploadBuffer(ctx, *input.Bucket, tmpPath, []byte{}, opts)
_, err := az.client.UploadBuffer(ctx, *input.Bucket, tmpPath, []byte{}, opts)
if err != nil {
return s3response.InitiateMultipartUploadResult{}, azureErrToS3Err(err)
}
@@ -1365,44 +1361,42 @@ func (az *Azure) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultip
// Copies the multipart metadata from .sgwtmp namespace into the newly created blob
// Deletes the multipart upload 'blob' from .sgwtmp namespace
// It indicates the end of the multipart upload
func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
var res s3response.CompleteMultipartUploadResult
func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
tmpPath := createMetaTmpPath(*input.Key, *input.UploadId)
blobClient, err := az.getBlobClient(*input.Bucket, tmpPath)
if err != nil {
return res, "", err
return nil, err
}
props, err := blobClient.GetProperties(ctx, nil)
if err != nil {
return res, "", parseMpError(err)
return nil, parseMpError(err)
}
tags, err := blobClient.GetTags(ctx, nil)
if err != nil {
return res, "", parseMpError(err)
return nil, parseMpError(err)
}
client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
if err != nil {
return res, "", err
return nil, err
}
blockIds := []string{}
blockList, err := client.GetBlockList(ctx, blockblob.BlockListTypeUncommitted, nil)
if err != nil {
return res, "", azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
if len(blockList.UncommittedBlocks) != len(input.MultipartUpload.Parts) {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
uncommittedBlocks := map[int32]*blockblob.Block{}
for _, el := range blockList.UncommittedBlocks {
ptNumber, err := decodeBlockId(backend.GetStringFromPtr(el.Name))
if err != nil {
return res, "", fmt.Errorf("invalid block name: %w", err)
return nil, fmt.Errorf("invalid block name: %w", err)
}
uncommittedBlocks[int32(ptNumber)] = el
@@ -1414,35 +1408,35 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete
last := len(blockList.UncommittedBlocks) - 1
for i, part := range input.MultipartUpload.Parts {
if part.PartNumber == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.PartNumber < 1 {
return res, "", s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
return nil, s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
}
if *part.PartNumber <= partNumber {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPartOrder)
return nil, s3err.GetAPIError(s3err.ErrInvalidPartOrder)
}
partNumber = *part.PartNumber
block, ok := uncommittedBlocks[*part.PartNumber]
if !ok {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.ETag != *block.Name {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
// all parts except the last need to be greater than
// the minimum allowed size (5 MiB)
if i < last && *block.Size < backend.MinPartSize {
return res, "", s3err.GetAPIError(s3err.ErrEntityTooSmall)
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
}
totalSize += *block.Size
blockIds = append(blockIds, *block.Name)
}
if input.MpuObjectSize != nil && totalSize != *input.MpuObjectSize {
return res, "", s3err.GetIncorrectMpObjectSizeErr(totalSize, *input.MpuObjectSize)
return nil, s3err.GetIncorrectMpObjectSizeErr(totalSize, *input.MpuObjectSize)
}
opts := &blockblob.CommitBlockListOptions{
@@ -1459,20 +1453,20 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete
resp, err := client.CommitBlockList(ctx, blockIds, opts)
if err != nil {
return res, "", parseMpError(err)
return nil, parseMpError(err)
}
// cleanup the multipart upload
_, err = blobClient.Delete(ctx, nil)
if err != nil {
return res, "", parseMpError(err)
return nil, parseMpError(err)
}
return s3response.CompleteMultipartUploadResult{
return &s3.CompleteMultipartUploadOutput{
Bucket: input.Bucket,
Key: input.Key,
ETag: (*string)(resp.ETag),
}, "", nil
}, nil
}
func (az *Azure) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
@@ -1683,8 +1677,8 @@ func (az *Azure) GetObjectLegalHold(ctx context.Context, bucket, object, version
return &status, nil
}
func (az *Azure) ChangeBucketOwner(ctx context.Context, bucket, owner string) error {
return auth.UpdateBucketACLOwner(ctx, az, bucket, owner)
func (az *Azure) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
return az.PutBucketAcl(ctx, bucket, acl)
}
// The action actually returns the containers owned by the user who initialized the gateway
@@ -1824,6 +1818,24 @@ func parseAzMetadata(m map[string]*string) map[string]string {
return meta
}
func parseTags(tagstr *string) (map[string]string, error) {
tagsStr := getString(tagstr)
tags := make(map[string]string)
if tagsStr != "" {
tagParts := strings.Split(tagsStr, "&")
for _, prt := range tagParts {
p := strings.Split(prt, "=")
if len(p) != 2 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
}
tags[p[0]] = p[1]
}
}
return tags, nil
}
func parseAzTags(tagSet []*blob.Tags) map[string]string {
tags := map[string]string{}
for _, tag := range tagSet {
@@ -1964,9 +1976,11 @@ func (az *Azure) deleteContainerMetaData(ctx context.Context, bucket, key string
}
func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
var acl auth.ACL
data, ok := meta[string(key)]
if !ok {
return &auth.ACL{}, nil
return &acl, nil
}
value, err := decodeString(*data)
@@ -1974,9 +1988,13 @@ func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
return nil, err
}
acl, err := auth.ParseACL(value)
if len(value) == 0 {
return &acl, nil
}
err = json.Unmarshal(value, &acl)
if err != nil {
return nil, err
return nil, fmt.Errorf("unmarshal acl: %w", err)
}
return &acl, nil

View File

@@ -40,7 +40,7 @@ func azErrToS3err(azErr *azcore.ResponseError) s3err.APIError {
case "BlobNotFound":
return s3err.GetAPIError(s3err.ErrNoSuchKey)
case "TagsTooLarge":
return s3err.GetAPIError(s3err.ErrInvalidTagValue)
return s3err.GetAPIError(s3err.ErrInvalidTag)
case "Requested Range Not Satisfiable":
return s3err.GetAPIError(s3err.ErrInvalidRange)
}

View File

@@ -52,7 +52,7 @@ type Backend interface {
// multipart operations
CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (_ s3response.CompleteMultipartUploadResult, versionid string, _ error)
CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error
ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)
ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error)
@@ -65,7 +65,7 @@ type Backend interface {
GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error)
GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error)
CopyObject(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error)
CopyObject(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error)
ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error)
ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error)
DeleteObject(context.Context, *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
@@ -96,10 +96,14 @@ type Backend interface {
GetObjectLegalHold(_ context.Context, bucket, object, versionId string) (*bool, error)
// non AWS actions
ChangeBucketOwner(_ context.Context, bucket, owner string) error
ChangeBucketOwner(_ context.Context, bucket string, acl []byte) error
ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error)
}
// InterfaceVersion tracks changes to the Backend interface for plugins.
// Increment this when the Backend interface changes.
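// Plugins report this value from InitPlugin so the gateway can refuse to
// load a plugin built against a mismatched interface version.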
const InterfaceVersion = 1
type BackendUnsupported struct{}
var _ Backend = &BackendUnsupported{}
@@ -166,8 +170,8 @@ func (BackendUnsupported) DeleteBucketCors(_ context.Context, bucket string) err
func (BackendUnsupported) CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
return s3response.CompleteMultipartUploadResult{}, "", s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -200,8 +204,8 @@ func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
return s3response.GetObjectAttributesResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) CopyObject(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -280,7 +284,7 @@ func (BackendUnsupported) GetObjectLegalHold(_ context.Context, bucket, object,
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket, owner string) error {
func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket string, acl []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error) {

View File

@@ -17,17 +17,11 @@ package backend
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"io/fs"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
@@ -89,11 +83,11 @@ var (
errInvalidCopySourceRange = s3err.GetAPIError(s3err.ErrInvalidCopySourceRange)
)
// ParseObjectRange parses input range header and returns startoffset, length, isValid
// ParseGetObjectRange parses input range header and returns startoffset, length, isValid
// and error. If no endoffset is specified, the length extends to the end of the object.
// For invalid inputs it returns no error, but isValid=false.
// An `InvalidRange` error is returned only if startoffset is greater than or equal to the object size.
func ParseObjectRange(size int64, acceptRange string) (int64, int64, bool, error) {
func ParseGetObjectRange(size int64, acceptRange string) (int64, int64, bool, error) {
if acceptRange == "" {
return 0, size, false, nil
}
@@ -114,17 +108,15 @@ func ParseObjectRange(size int64, acceptRange string) (int64, int64, bool, error
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil && bRange[0] != "" {
if err != nil {
return 0, size, false, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
if bRange[1] == "" {
if bRange[0] == "" {
return 0, size, false, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
return startOffset, size - startOffset, true, nil
}
@@ -133,22 +125,12 @@ func ParseObjectRange(size int64, acceptRange string) (int64, int64, bool, error
return 0, size, false, nil
}
if startOffset > endOffset {
if endOffset < startOffset {
return 0, size, false, nil
}
// For ranges like 'bytes=-100', return the last bytes specified by 'endOffset'
if bRange[0] == "" {
endOffset = min(endOffset, size)
return size - endOffset, endOffset, true, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
if endOffset >= size {
endOffset = size - 1
return startOffset, size - startOffset, true, nil
}
return startOffset, endOffset - startOffset + 1, true, nil
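// Illustrative cases (both variants above agree on these), assuming an
// object of size 10:
//
//	"bytes=0-4" -> offset 0, length 5, isValid=true
//	"bytes=2-"  -> offset 2, length 8, isValid=true
//	"bytes=20-" -> errInvalidRange (start beyond object size)
//	"bytes=x-4" -> offset 0, length 10, isValid=false, no error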
@@ -230,81 +212,27 @@ func ParseCopySource(copySourceHeader string) (string, string, string, error) {
}
// ParseObjectTags parses the url encoded input string into
// map[string]string with unescaped key/value pair
func ParseObjectTags(tagging string) (map[string]string, error) {
if tagging == "" {
// map[string]string key-value tag set
func ParseObjectTags(t string) (map[string]string, error) {
if t == "" {
return nil, nil
}
tagSet := make(map[string]string)
tagging := make(map[string]string)
for tagging != "" {
var tag string
tag, tagging, _ = strings.Cut(tagging, "&")
// if 'tag' before the first appearance of '&' is empty, continue
if tag == "" {
continue
tagParts := strings.Split(t, "&")
for _, prt := range tagParts {
p := strings.Split(prt, "=")
if len(p) != 2 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
}
key, value, found := strings.Cut(tag, "=")
// if key is empty, but "=" is present, return invalid URL encoding err
if found && key == "" {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
if len(p[0]) > 128 || len(p[1]) > 256 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
}
// return invalid tag key, if the key is longer than 128
if len(key) > 128 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}
// return invalid tag value, if tag value is longer than 256
if len(value) > 256 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}
// query unescape tag key
key, err := url.QueryUnescape(key)
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
}
// query unescape tag value
value, err = url.QueryUnescape(value)
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
}
// check tag key to be valid
if !isValidTagComponent(key) {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}
// check tag value to be valid
if !isValidTagComponent(value) {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}
// duplicate keys are not allowed: return invalid url encoding err
_, ok := tagSet[key]
if ok {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
}
tagSet[key] = value
tagging[p[0]] = p[1]
}
return tagSet, nil
}
var validTagComponent = regexp.MustCompile(`^[a-zA-Z0-9:/_.\-+ ]+$`)
// isValidTagComponent matches strings which contain letters, decimal digits,
// and special chars: ':', '/', '_', '-', '+', '.', ' ' (space)
func isValidTagComponent(str string) bool {
if str == "" {
return true
}
return validTagComponent.Match([]byte(str))
return tagging, nil
}
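// For illustration, both variants map "dept=eng&tier=hot" to
// map[string]string{"dept": "eng", "tier": "hot"}; keys longer than 128
// characters or values longer than 256 characters are rejected.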
func GetMultipartMD5(parts []types.CompletedPart) string {
@@ -341,65 +269,3 @@ func (f *FileSectionReadCloser) Read(p []byte) (int, error) {
func (f *FileSectionReadCloser) Close() error {
return f.F.Close()
}
// MoveFile moves a file from source to destination.
func MoveFile(source, destination string, perm os.FileMode) error {
// We use Rename as the atomic operation for object puts. The upload is
// written to a temp file to not conflict with any other simultaneous
// uploads. The final operation is to move the temp file into place for
// the object. This ensures the object semantics of last upload completed
// wins and is not some combination of writes from simultaneous uploads.
err := os.Rename(source, destination)
if err == nil || !errors.Is(err, syscall.EXDEV) {
return err
}
// Rename can fail if the source and destination are not on the same
// filesystem. The fallback is to copy the file and then remove the source.
// We need to be careful that the destination does not exist before copying
// to prevent any other simultaneous writes to the file.
sourceFile, err := os.Open(source)
if err != nil {
return fmt.Errorf("open source: %w", err)
}
defer sourceFile.Close()
var destFile *os.File
for {
destFile, err = os.OpenFile(destination, os.O_CREATE|os.O_EXCL|os.O_WRONLY, perm)
if err != nil {
if errors.Is(err, fs.ErrExist) {
if removeErr := os.Remove(destination); removeErr != nil {
return fmt.Errorf("remove existing destination: %w", removeErr)
}
continue
}
return fmt.Errorf("create destination: %w", err)
}
break
}
defer destFile.Close()
_, err = io.Copy(destFile, sourceFile)
if err != nil {
return fmt.Errorf("copy data: %w", err)
}
err = os.Remove(source)
if err != nil {
return fmt.Errorf("remove source: %w", err)
}
return nil
}
// GenerateEtag generates a new quoted etag from the provided hash.Hash
func GenerateEtag(h hash.Hash) string {
dataSum := h.Sum(nil)
return fmt.Sprintf("\"%s\"", hex.EncodeToString(dataSum[:]))
}
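// For example, GenerateEtag over a fresh md5.New() (no data written)
// returns "\"d41d8cd98f00b204e9800998ecf8427e\"", the quoted MD5 of
// empty input.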
// AreEtagsSame compares 2 etags by ignoring quotes
func AreEtagsSame(e1, e2 string) bool {
return strings.Trim(e1, `"`) == strings.Trim(e2, `"`)
}

516
backend/plugin/plugin.go Normal file
View File

@@ -0,0 +1,516 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package vgwplugin
import (
"bufio"
"context"
"fmt"
"plugin"
"reflect"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
// The plugin backend dynamically loads a Go plugin at runtime.
// It opens the plugin and calls its InitPlugin function to initialize it.
// A config string option is passed to InitPlugin; the plugin is expected
// to handle its own configuration and initialization from this.
// If the plugin cannot be loaded or initialized, an error is returned.
// The InitPlugin function must be defined in the plugin and have
// the signature func(configfile string) (version int, err error).
// The plugin should also implement the backend.Backend interface functions,
// but it does not need to implement all of them; it can implement only the
// functions it needs. Any non-implemented function returns an error
// indicating that the function is not implemented.
// The plugin must be compiled with the same Go version as the application
// loading it, and built with the -buildmode=plugin flag.
// Example: go build -buildmode=plugin -o myplugin.so myplugin.go
// See the following for caveats and details:
// https://pkg.go.dev/plugin#hdr-Warnings
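// A minimal sketch of a loadable plugin (illustrative only; the file name,
// package contents, and choice of implemented methods are hypothetical).
// Exported package-level functions are looked up by name and called via
// reflection, so their signatures must match the backend.Backend methods
// exactly:
//
//	// myplugin.go
//	package main
//
//	import (
//		"context"
//
//		"github.com/versity/versitygw/backend"
//		"github.com/versity/versitygw/s3response"
//	)
//
//	// InitPlugin is required. Returning backend.InterfaceVersion lets the
//	// gateway verify compatibility before using the plugin.
//	func InitPlugin(configfile string) (int, error) {
//		// parse configfile and set up backing storage here as needed
//		return backend.InterfaceVersion, nil
//	}
//
//	// ListBuckets is optional; any method the plugin omits is reported
//	// back to the S3 client as not implemented.
//	func ListBuckets(ctx context.Context, input s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
//		return s3response.ListAllMyBucketsResult{}, nil
//	}
//
// Built with: go build -buildmode=plugin -o myplugin.so myplugin.go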
// PluginBackend implements the backend.Backend interface using Go plugins.
type PluginBackend struct {
p *plugin.Plugin
}
// NewPluginBackend creates a new PluginBackend. The path parameter should
// point to the compiled plugin file (e.g., .so file).
func NewPluginBackend(path, config string) (*PluginBackend, error) {
p, err := plugin.Open(path)
if err != nil {
return nil, fmt.Errorf("failed to open plugin: %w", err)
}
initSymbol, err := p.Lookup("InitPlugin")
if err != nil {
return nil, fmt.Errorf("failed to lookup InitPlugin symbol: %w", err)
}
initFunc, ok := initSymbol.(func(string) (int, error))
if !ok {
return nil, fmt.Errorf("InitPlugin symbol is not a func() (int, error)")
}
version, err := initFunc(config)
if err != nil {
return nil, fmt.Errorf("InitPlugin failed: %w", err)
}
if version != backend.InterfaceVersion {
return nil, fmt.Errorf("plugin interface version mismatch: gateway %v, plugin %v",
backend.InterfaceVersion, version)
}
return &PluginBackend{p: p}, nil
}
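// callPluginFunc looks up the named symbol in the plugin, verifies it is a
// function with the expected number of arguments, and invokes it via
// reflection. A missing symbol maps to ErrNotImplemented, which is what
// allows plugins to implement only a subset of the Backend interface.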
func (p *PluginBackend) callPluginFunc(name string, args []any) ([]reflect.Value, error) {
symbol, err := p.p.Lookup(name)
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
symbolValue := reflect.ValueOf(symbol)
if symbolValue.Kind() != reflect.Func {
return nil, fmt.Errorf("symbol %s is not a function", name)
}
numIn := symbolValue.Type().NumIn()
if len(args) != numIn {
return nil, fmt.Errorf("incorrect number of arguments for function %s, expected %d, got %d", name, numIn, len(args))
}
in := make([]reflect.Value, len(args))
for i := range args {
in[i] = reflect.ValueOf(args[i])
}
return symbolValue.Call(in), nil
}
func (p *PluginBackend) String() string { return "Plugin Gateway" }
func (p *PluginBackend) Shutdown() {}
func (p *PluginBackend) ListBuckets(ctx context.Context, input s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
results, err := p.callPluginFunc("ListBuckets", []any{ctx, input})
if err != nil {
return s3response.ListAllMyBucketsResult{}, err
}
return results[0].Interface().(s3response.ListAllMyBucketsResult), convertError(results[1])
}
func (p *PluginBackend) HeadBucket(ctx context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
results, err := p.callPluginFunc("HeadBucket", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.HeadBucketOutput), convertError(results[1])
}
func (p *PluginBackend) GetBucketAcl(ctx context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
results, err := p.callPluginFunc("GetBucketAcl", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, defaultACL []byte) error {
_, err := p.callPluginFunc("CreateBucket", []any{ctx, input, defaultACL})
return err
}
func (p *PluginBackend) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
_, err := p.callPluginFunc("PutBucketAcl", []any{ctx, bucket, data})
return err
}
func (p *PluginBackend) DeleteBucket(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucket", []any{ctx, bucket})
return err
}
func (p *PluginBackend) PutBucketVersioning(ctx context.Context, bucket string, status types.BucketVersioningStatus) error {
_, err := p.callPluginFunc("PutBucketVersioning", []any{ctx, bucket, status})
return err
}
func (p *PluginBackend) GetBucketVersioning(ctx context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
results, err := p.callPluginFunc("GetBucketVersioning", []any{ctx, bucket})
if err != nil {
return s3response.GetBucketVersioningOutput{}, err
}
return results[0].Interface().(s3response.GetBucketVersioningOutput), convertError(results[1])
}
func (p *PluginBackend) PutBucketPolicy(ctx context.Context, bucket string, policy []byte) error {
_, err := p.callPluginFunc("PutBucketPolicy", []any{ctx, bucket, policy})
return err
}
func (p *PluginBackend) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, error) {
results, err := p.callPluginFunc("GetBucketPolicy", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) DeleteBucketPolicy(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketPolicy", []any{ctx, bucket})
return err
}
func (p *PluginBackend) PutBucketOwnershipControls(ctx context.Context, bucket string, ownership types.ObjectOwnership) error {
_, err := p.callPluginFunc("PutBucketOwnershipControls", []any{ctx, bucket, ownership})
return err
}
func (p *PluginBackend) GetBucketOwnershipControls(ctx context.Context, bucket string) (types.ObjectOwnership, error) {
results, err := p.callPluginFunc("GetBucketOwnershipControls", []any{ctx, bucket})
if err != nil {
return "", err
}
return results[0].Interface().(types.ObjectOwnership), convertError(results[1])
}
func (p *PluginBackend) DeleteBucketOwnershipControls(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketOwnershipControls", []any{ctx, bucket})
return err
}
func (p *PluginBackend) PutBucketCors(ctx context.Context, data []byte) error {
_, err := p.callPluginFunc("PutBucketCors", []any{ctx, data})
return err
}
func (p *PluginBackend) GetBucketCors(ctx context.Context, bucket string) ([]byte, error) {
results, err := p.callPluginFunc("GetBucketCors", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) DeleteBucketCors(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketCors", []any{ctx, bucket})
return err
}
func (p *PluginBackend) CreateMultipartUpload(ctx context.Context, input s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
results, err := p.callPluginFunc("CreateMultipartUpload", []any{ctx, input})
if err != nil {
return s3response.InitiateMultipartUploadResult{}, err
}
return results[0].Interface().(s3response.InitiateMultipartUploadResult), convertError(results[1])
}
func (p *PluginBackend) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
results, err := p.callPluginFunc("CompleteMultipartUpload", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.CompleteMultipartUploadOutput), convertError(results[1])
}
func (p *PluginBackend) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
_, err := p.callPluginFunc("AbortMultipartUpload", []any{ctx, input})
return err
}
func (p *PluginBackend) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
results, err := p.callPluginFunc("ListMultipartUploads", []any{ctx, input})
if err != nil {
return s3response.ListMultipartUploadsResult{}, err
}
return results[0].Interface().(s3response.ListMultipartUploadsResult), convertError(results[1])
}
func (p *PluginBackend) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
results, err := p.callPluginFunc("ListParts", []any{ctx, input})
if err != nil {
return s3response.ListPartsResult{}, err
}
return results[0].Interface().(s3response.ListPartsResult), convertError(results[1])
}
func (p *PluginBackend) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
results, err := p.callPluginFunc("UploadPart", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.UploadPartOutput), convertError(results[1])
}
func (p *PluginBackend) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
results, err := p.callPluginFunc("UploadPartCopy", []any{ctx, input})
if err != nil {
return s3response.CopyPartResult{}, err
}
return results[0].Interface().(s3response.CopyPartResult), convertError(results[1])
}
func (p *PluginBackend) PutObject(ctx context.Context, input s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
results, err := p.callPluginFunc("PutObject", []any{ctx, input})
if err != nil {
return s3response.PutObjectOutput{}, err
}
return results[0].Interface().(s3response.PutObjectOutput), convertError(results[1])
}
func (p *PluginBackend) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
results, err := p.callPluginFunc("HeadObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.HeadObjectOutput), convertError(results[1])
}
func (p *PluginBackend) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
results, err := p.callPluginFunc("GetObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.GetObjectOutput), convertError(results[1])
}
func (p *PluginBackend) GetObjectAcl(ctx context.Context, input *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
results, err := p.callPluginFunc("GetObjectAcl", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.GetObjectAclOutput), convertError(results[1])
}
func (p *PluginBackend) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
results, err := p.callPluginFunc("GetObjectAttributes", []any{ctx, input})
if err != nil {
return s3response.GetObjectAttributesResponse{}, err
}
return results[0].Interface().(s3response.GetObjectAttributesResponse), convertError(results[1])
}
func (p *PluginBackend) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
results, err := p.callPluginFunc("CopyObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.CopyObjectOutput), convertError(results[1])
}
func (p *PluginBackend) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
results, err := p.callPluginFunc("ListObjects", []any{ctx, input})
if err != nil {
return s3response.ListObjectsResult{}, err
}
return results[0].Interface().(s3response.ListObjectsResult), convertError(results[1])
}
func (p *PluginBackend) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
results, err := p.callPluginFunc("ListObjectsV2", []any{ctx, input})
if err != nil {
return s3response.ListObjectsV2Result{}, err
}
return results[0].Interface().(s3response.ListObjectsV2Result), convertError(results[1])
}
func (p *PluginBackend) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
results, err := p.callPluginFunc("DeleteObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.DeleteObjectOutput), convertError(results[1])
}
func (p *PluginBackend) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
results, err := p.callPluginFunc("DeleteObjects", []any{ctx, input})
if err != nil {
return s3response.DeleteResult{}, err
}
return results[0].Interface().(s3response.DeleteResult), convertError(results[1])
}
func (p *PluginBackend) PutObjectAcl(ctx context.Context, input *s3.PutObjectAclInput) error {
_, err := p.callPluginFunc("PutObjectAcl", []any{ctx, input})
return err
}
func (p *PluginBackend) ListObjectVersions(ctx context.Context, input *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
results, err := p.callPluginFunc("ListObjectVersions", []any{ctx, input})
if err != nil {
return s3response.ListVersionsResult{}, err
}
return results[0].Interface().(s3response.ListVersionsResult), convertError(results[1])
}
func (p *PluginBackend) RestoreObject(ctx context.Context, input *s3.RestoreObjectInput) error {
_, err := p.callPluginFunc("RestoreObject", []any{ctx, input})
return err
}
func (p *PluginBackend) SelectObjectContent(ctx context.Context, input *s3.SelectObjectContentInput) func(w *bufio.Writer) {
results, err := p.callPluginFunc("SelectObjectContent", []any{ctx, input})
if err != nil {
return func(w *bufio.Writer) {}
}
return results[0].Interface().(func(w *bufio.Writer))
}
func (p *PluginBackend) GetBucketTagging(ctx context.Context, bucket string) (map[string]string, error) {
results, err := p.callPluginFunc("GetBucketTagging", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().(map[string]string), convertError(results[1])
}
func (p *PluginBackend) PutBucketTagging(ctx context.Context, bucket string, tags map[string]string) error {
_, err := p.callPluginFunc("PutBucketTagging", []any{ctx, bucket, tags})
return err
}
func (p *PluginBackend) DeleteBucketTagging(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketTagging", []any{ctx, bucket})
return err
}
func (p *PluginBackend) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
results, err := p.callPluginFunc("GetObjectTagging", []any{ctx, bucket, object})
if err != nil {
return nil, err
}
return results[0].Interface().(map[string]string), convertError(results[1])
}
func (p *PluginBackend) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
_, err := p.callPluginFunc("PutObjectTagging", []any{ctx, bucket, object, tags})
return err
}
func (p *PluginBackend) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
_, err := p.callPluginFunc("DeleteObjectTagging", []any{ctx, bucket, object})
return err
}
func (p *PluginBackend) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
_, err := p.callPluginFunc("PutObjectLockConfiguration", []any{ctx, bucket, config})
return err
}
func (p *PluginBackend) GetObjectLockConfiguration(ctx context.Context, bucket string) ([]byte, error) {
results, err := p.callPluginFunc("GetObjectLockConfiguration", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) PutObjectRetention(ctx context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
_, err := p.callPluginFunc("PutObjectRetention", []any{ctx, bucket, object, versionId, bypass, retention})
return err
}
func (p *PluginBackend) GetObjectRetention(ctx context.Context, bucket, object, versionId string) ([]byte, error) {
results, err := p.callPluginFunc("GetObjectRetention", []any{ctx, bucket, object, versionId})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) PutObjectLegalHold(ctx context.Context, bucket, object, versionId string, status bool) error {
_, err := p.callPluginFunc("PutObjectLegalHold", []any{ctx, bucket, object, versionId, status})
return err
}
func (p *PluginBackend) GetObjectLegalHold(ctx context.Context, bucket, object, versionId string) (*bool, error) {
results, err := p.callPluginFunc("GetObjectLegalHold", []any{ctx, bucket, object, versionId})
if err != nil {
return nil, err
}
val := results[0].Interface()
if val == nil {
return nil, convertError(results[1])
}
return val.(*bool), convertError(results[1])
}
func (p *PluginBackend) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
_, err := p.callPluginFunc("ChangeBucketOwner", []any{ctx, bucket, acl})
return err
}
func (p *PluginBackend) ListBucketsAndOwners(ctx context.Context) ([]s3response.Bucket, error) {
results, err := p.callPluginFunc("ListBucketsAndOwners", []any{ctx})
if err != nil {
return nil, err
}
return results[0].Interface().([]s3response.Bucket), convertError(results[1])
}
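// convertError converts the reflect.Value of a plugin function's error
// return back into a concrete error, treating a nil interface value as
// success.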
func convertError(result reflect.Value) error {
if result.IsNil() {
return nil
}
err, ok := result.Interface().(error)
if !ok {
return fmt.Errorf("expected error, got %T", result.Interface())
}
return err
}
var _ backend.Backend = &PluginBackend{}

File diff suppressed because it is too large

View File

@@ -52,13 +52,9 @@ var (
defaultFilePerm uint32 = 0644
)
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, dofalloc bool, forceNoTmpFile bool) (*tmpfile, error) {
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, dofalloc bool) (*tmpfile, error) {
uid, gid, doChown := p.getChownIDs(acct)
if forceNoTmpFile {
return p.openMkTemp(dir, bucket, obj, size, dofalloc, uid, gid, doChown)
}
// O_TMPFILE allows for a file handle to an unnamed file in the filesystem.
// This can help reduce contention within the namespace (parent directories),
// etc. And will auto cleanup the inode on close if we never link this
@@ -72,7 +68,37 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
}
// O_TMPFILE not supported, try fallback
return p.openMkTemp(dir, bucket, obj, size, dofalloc, uid, gid, doChown)
err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
if err != nil {
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
return nil, err
}
tmp := &tmpfile{
f: f,
bucket: bucket,
objname: obj,
size: size,
needsChown: doChown,
uid: uid,
gid: gid,
}
// falloc is best effort, it's fine if this fails
if size > 0 && dofalloc {
tmp.falloc()
}
if doChown {
err := f.Chown(uid, gid)
if err != nil {
return nil, fmt.Errorf("set temp file ownership: %w", err)
}
}
return tmp, nil
}
// for O_TMPFILE, filename is /proc/self/fd/<fd> to be used
@@ -106,46 +132,6 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
return tmp, nil
}
func (p *Posix) openMkTemp(dir, bucket, obj string, size int64, dofalloc bool, uid, gid int, doChown bool) (*tmpfile, error) {
err := backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, err
}
tmp := &tmpfile{
f: f,
bucket: bucket,
objname: obj,
size: size,
needsChown: doChown,
uid: uid,
gid: gid,
}
// falloc is best effort, it's fine if this fails
if size > 0 && dofalloc {
tmp.falloc()
}
if doChown {
err := f.Chown(uid, gid)
if err != nil {
return nil, fmt.Errorf("set temp file ownership: %w", err)
}
}
return tmp, nil
}
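// Mapping EROFS to ErrMethodNotAllowed above surfaces writes against a
// read-only filesystem as a proper S3 API error rather than a bare
// I/O error.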
func (tmp *tmpfile) falloc() error {
err := syscall.Fallocate(int(tmp.f.Fd()), 0, 0, tmp.size)
if err != nil {
@@ -236,9 +222,7 @@ func (tmp *tmpfile) fallbackLink() error {
objPath := filepath.Join(tmp.bucket, tmp.objname)
err = os.Rename(tempname, objPath)
if err != nil {
// rename only works for files within the same filesystem
// if this fails fallback to copy
return backend.MoveFile(tempname, objPath, fs.FileMode(defaultFilePerm))
return fmt.Errorf("rename tmpfile: %w", err)
}
return nil

View File

@@ -38,7 +38,7 @@ type tmpfile struct {
size int64
}
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, _ bool, _ bool) (*tmpfile, error) {
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, _ bool) (*tmpfile, error) {
uid, gid, doChown := p.getChownIDs(acct)
// Create a temp file for upload while in progress (see link comments below).
@@ -80,17 +80,31 @@ func (tmp *tmpfile) link() error {
// this will no longer exist
defer os.Remove(tempname)
// We use Rename as the atomic operation for object puts. The upload is
// written to a temp file to not conflict with any other simultaneous
// uploads. The final operation is to move the temp file into place for
// the object. This ensures the object semantics of last upload completed
// wins and is not some combination of writes from simultaneous uploads.
objPath := filepath.Join(tmp.bucket, tmp.objname)
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
// reset default file mode because CreateTemp uses 0600
tmp.f.Chmod(defaultFilePerm)
err := tmp.f.Close()
err = tmp.f.Close()
if err != nil {
return fmt.Errorf("close tmpfile: %w", err)
}
return backend.MoveFile(tempname, objPath, defaultFilePerm)
err = os.Rename(tempname, objPath)
if err != nil {
return fmt.Errorf("rename tmpfile: %w", err)
}
return nil
}
func (tmp *tmpfile) Write(b []byte) (int, error) {

View File

@@ -36,11 +36,6 @@ func (s *S3Proxy) getClientWithCtx(ctx context.Context) (*s3.Client, error) {
if s.endpoint != "" {
return s3.NewFromConfig(cfg, func(o *s3.Options) {
o.BaseEndpoint = &s.endpoint
o.UsePathStyle = s.usePathStyle
// The http body stream is not seekable, so most operations cannot
// be retried here. The error is returned to the original client,
// which may retry the request itself.
o.Retryer = aws.NopRetryer{}
}), nil
}

View File

@@ -15,14 +15,20 @@
package s3proxy
import (
"bytes"
"context"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
"github.com/aws/aws-sdk-go-v2/service/s3"
@@ -34,12 +40,7 @@ import (
"github.com/versity/versitygw/s3response"
)
type metaPrefix string
const (
metaPrefixAcl metaPrefix = "vgw-meta-acl-"
metaPrefixPolicy metaPrefix = "vgw-meta-policy-"
)
const aclKey string = "versitygwAcl"
type S3Proxy struct {
backend.BackendUnsupported
@@ -50,48 +51,29 @@ type S3Proxy struct {
secret string
endpoint string
awsRegion string
metaBucket string
disableChecksum bool
sslSkipVerify bool
usePathStyle bool
debug bool
}
var _ backend.Backend = &S3Proxy{}
func NewWithClient(ctx context.Context, client *s3.Client, metaBucket string) (*S3Proxy, error) {
s := &S3Proxy{
metaBucket: metaBucket,
}
s.client = client
return s, s.validate(ctx)
}
func New(ctx context.Context, access, secret, endpoint, region, metaBucket string, disableChecksum, sslSkipVerify, usePathStyle, debug bool) (*S3Proxy, error) {
func New(access, secret, endpoint, region string, disableChecksum, sslSkipVerify, debug bool) (*S3Proxy, error) {
s := &S3Proxy{
access: access,
secret: secret,
endpoint: endpoint,
awsRegion: region,
metaBucket: metaBucket,
disableChecksum: disableChecksum,
sslSkipVerify: sslSkipVerify,
usePathStyle: usePathStyle,
debug: debug,
}
client, err := s.getClientWithCtx(ctx)
client, err := s.getClientWithCtx(context.Background())
if err != nil {
return nil, err
}
s.client = client
return s, s.validate(ctx)
}
func (s *S3Proxy) validate(ctx context.Context) error {
if s.metaBucket != "" && !s.bucketExists(ctx, s.metaBucket) {
return fmt.Errorf("the provided meta bucket doesn't exist")
}
return nil
return s, nil
}
func (s *S3Proxy) ListBuckets(ctx context.Context, input s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
@@ -106,33 +88,10 @@ func (s *S3Proxy) ListBuckets(ctx context.Context, input s3response.ListBucketsI
var buckets []s3response.ListAllMyBucketsEntry
for _, b := range output.Buckets {
if *b.Name == s.metaBucket {
continue
}
if input.IsAdmin || s.metaBucket == "" {
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
Name: *b.Name,
CreationDate: *b.CreationDate,
})
continue
}
data, err := s.getMetaBucketObjData(ctx, *b.Name, metaPrefixAcl, false)
if err != nil {
return s3response.ListAllMyBucketsResult{}, handleError(err)
}
acl, err := auth.ParseACL(data)
if err != nil {
return s3response.ListAllMyBucketsResult{}, err
}
if acl.Owner == input.Owner {
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
Name: *b.Name,
CreationDate: *b.CreationDate,
})
}
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
Name: *b.Name,
CreationDate: *b.CreationDate,
})
}
return s3response.ListAllMyBucketsResult{
@@ -171,52 +130,27 @@ func (s *S3Proxy) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
if input.GrantWriteACP != nil && *input.GrantWriteACP == "" {
input.GrantWriteACP = nil
}
if *input.Bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrBucketAlreadyExists)
}
acct, ok := ctx.Value("account").(auth.Account)
if !ok {
acct = auth.Account{}
}
if s.metaBucket != "" {
data, err := s.getMetaBucketObjData(ctx, *input.Bucket, metaPrefixAcl, true)
if err == nil {
acl, err := auth.ParseACL(data)
if err != nil {
return err
}
if acl.Owner == acct.Access {
return s3err.GetAPIError(s3err.ErrBucketAlreadyOwnedByYou)
}
return s3err.GetAPIError(s3err.ErrBucketAlreadyExists)
}
}
_, err := s.client.CreateBucket(ctx, input)
if err != nil {
return handleError(err)
}
// Store bucket default acl
if s.metaBucket != "" {
err = s.putMetaBucketObj(ctx, *input.Bucket, acl, metaPrefixAcl)
if err != nil {
// attempt to cleanup
_ = s.DeleteBucket(ctx, *input.Bucket)
return handleError(err)
}
}
var tagSet []types.Tag
tagSet = append(tagSet, types.Tag{
Key: backend.GetPtrFromString(aclKey),
Value: backend.GetPtrFromString(base64Encode(acl)),
})
return nil
_, err = s.client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: input.Bucket,
Tagging: &types.Tagging{
TagSet: tagSet,
},
})
return handleError(err)
}
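// In the tag-based variant, the default ACL is persisted as a bucket tag
// (base64-encoded under the versitygwAcl key) on the proxied backend, an
// alternative to storing it as an object in a dedicated meta bucket.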
func (s *S3Proxy) DeleteBucket(ctx context.Context, bucket string) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
_, err := s.client.DeleteBucket(ctx, &s3.DeleteBucketInput{
Bucket: &bucket,
})
@@ -224,9 +158,6 @@ func (s *S3Proxy) DeleteBucket(ctx context.Context, bucket string) error {
}
func (s *S3Proxy) PutBucketOwnershipControls(ctx context.Context, bucket string, ownership types.ObjectOwnership) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
_, err := s.client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
Bucket: &bucket,
OwnershipControls: &types.OwnershipControls{
@@ -241,9 +172,6 @@ func (s *S3Proxy) PutBucketOwnershipControls(ctx context.Context, bucket string,
}
func (s *S3Proxy) GetBucketOwnershipControls(ctx context.Context, bucket string) (types.ObjectOwnership, error) {
if bucket == s.metaBucket {
return "", s3err.GetAPIError(s3err.ErrAccessDenied)
}
var ownship types.ObjectOwnership
resp, err := s.client.GetBucketOwnershipControls(ctx, &s3.GetBucketOwnershipControlsInput{
Bucket: &bucket,
@@ -254,9 +182,6 @@ func (s *S3Proxy) GetBucketOwnershipControls(ctx context.Context, bucket string)
return resp.OwnershipControls.Rules[0].ObjectOwnership, nil
}
func (s *S3Proxy) DeleteBucketOwnershipControls(ctx context.Context, bucket string) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
_, err := s.client.DeleteBucketOwnershipControls(ctx, &s3.DeleteBucketOwnershipControlsInput{
Bucket: &bucket,
})
@@ -264,9 +189,6 @@ func (s *S3Proxy) DeleteBucketOwnershipControls(ctx context.Context, bucket stri
}
func (s *S3Proxy) PutBucketVersioning(ctx context.Context, bucket string, status types.BucketVersioningStatus) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
_, err := s.client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
Bucket: &bucket,
VersioningConfiguration: &types.VersioningConfiguration{
@@ -278,9 +200,6 @@ func (s *S3Proxy) PutBucketVersioning(ctx context.Context, bucket string, status
}
func (s *S3Proxy) GetBucketVersioning(ctx context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
if bucket == s.metaBucket {
return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
out, err := s.client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
Bucket: &bucket,
})
@@ -292,9 +211,6 @@ func (s *S3Proxy) GetBucketVersioning(ctx context.Context, bucket string) (s3res
}
func (s *S3Proxy) ListObjectVersions(ctx context.Context, input *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.ListVersionsResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.Delimiter != nil && *input.Delimiter == "" {
input.Delimiter = nil
}
@@ -332,16 +248,13 @@ func (s *S3Proxy) ListObjectVersions(ctx context.Context, input *s3.ListObjectVe
NextVersionIdMarker: out.NextVersionIdMarker,
Prefix: out.Prefix,
VersionIdMarker: input.VersionIdMarker,
Versions: convertObjectVersions(out.Versions),
Versions: out.Versions,
}, nil
}
var defTime = time.Time{}
func (s *S3Proxy) CreateMultipartUpload(ctx context.Context, input s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.CacheControl != nil && *input.CacheControl == "" {
input.CacheControl = nil
}
@@ -375,7 +288,7 @@ func (s *S3Proxy) CreateMultipartUpload(ctx context.Context, input s3response.Cr
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
if input.ObjectLockRetainUntilDate != nil && (*input.ObjectLockRetainUntilDate).Equal(defTime) {
if input.ObjectLockRetainUntilDate != nil && *input.ObjectLockRetainUntilDate == defTime {
input.ObjectLockRetainUntilDate = nil
}
if input.SSECustomerAlgorithm != nil && *input.SSECustomerAlgorithm == "" {
@@ -452,12 +365,7 @@ func (s *S3Proxy) CreateMultipartUpload(ctx context.Context, input s3response.Cr
}, nil
}
func (s *S3Proxy) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
var res s3response.CompleteMultipartUploadResult
if *input.Bucket == s.metaBucket {
return res, "", s3err.GetAPIError(s3err.ErrAccessDenied)
}
func (s *S3Proxy) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
if input.ChecksumCRC32 != nil && *input.ChecksumCRC32 == "" {
input.ChecksumCRC32 = nil
}
@@ -495,37 +403,15 @@ func (s *S3Proxy) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
input.SSECustomerKeyMD5 = nil
}
var versionid string
out, err := s.client.CompleteMultipartUpload(ctx, input)
if out != nil {
res = s3response.CompleteMultipartUploadResult{
Location: out.Location,
Bucket: out.Bucket,
Key: out.Key,
ETag: out.ETag,
ChecksumCRC32: out.ChecksumCRC32,
ChecksumCRC32C: out.ChecksumCRC32C,
ChecksumCRC64NVME: out.ChecksumCRC64NVME,
ChecksumSHA1: out.ChecksumSHA1,
ChecksumSHA256: out.ChecksumSHA256,
ChecksumType: &out.ChecksumType,
}
if out.VersionId != nil {
versionid = *out.VersionId
}
}
return res, versionid, handleError(err)
return out, handleError(err)
}
func (s *S3Proxy) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
if *input.Bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
if input.IfMatchInitiatedTime != nil && (*input.IfMatchInitiatedTime).Equal(defTime) {
if input.IfMatchInitiatedTime != nil && *input.IfMatchInitiatedTime == defTime {
input.IfMatchInitiatedTime = nil
}
_, err := s.client.AbortMultipartUpload(ctx, input)
@@ -533,9 +419,6 @@ func (s *S3Proxy) AbortMultipartUpload(ctx context.Context, input *s3.AbortMulti
}
func (s *S3Proxy) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.ListMultipartUploadsResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.Delimiter != nil && *input.Delimiter == "" {
input.Delimiter = nil
}
@@ -604,9 +487,6 @@ func (s *S3Proxy) ListMultipartUploads(ctx context.Context, input *s3.ListMultip
}
func (s *S3Proxy) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.ListPartsResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
@@ -681,9 +561,6 @@ func (s *S3Proxy) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3re
}
func (s *S3Proxy) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
if *input.Bucket == s.metaBucket {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ChecksumCRC32 != nil && *input.ChecksumCRC32 == "" {
input.ChecksumCRC32 = nil
}
@@ -724,19 +601,16 @@ func (s *S3Proxy) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s
}
func (s *S3Proxy) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.CopySourceIfMatch != nil && *input.CopySourceIfMatch == "" {
input.CopySourceIfMatch = nil
}
if input.CopySourceIfModifiedSince != nil && (*input.CopySourceIfModifiedSince).Equal(defTime) {
if input.CopySourceIfModifiedSince != nil && *input.CopySourceIfModifiedSince == defTime {
input.CopySourceIfModifiedSince = nil
}
if input.CopySourceIfNoneMatch != nil && *input.CopySourceIfNoneMatch == "" {
input.CopySourceIfNoneMatch = nil
}
if input.CopySourceIfUnmodifiedSince != nil && (*input.CopySourceIfUnmodifiedSince).Equal(defTime) {
if input.CopySourceIfUnmodifiedSince != nil && *input.CopySourceIfUnmodifiedSince == defTime {
input.CopySourceIfUnmodifiedSince = nil
}
if input.CopySourceRange != nil && *input.CopySourceRange == "" {
@@ -784,9 +658,6 @@ func (s *S3Proxy) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyIn
}
func (s *S3Proxy) PutObject(ctx context.Context, input s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
if *input.Bucket == s.metaBucket {
return s3response.PutObjectOutput{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.CacheControl != nil && *input.CacheControl == "" {
input.CacheControl = nil
}
@@ -941,22 +812,19 @@ func (s *S3Proxy) PutObject(ctx context.Context, input s3response.PutObjectInput
}
func (s *S3Proxy) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
if *input.Bucket == s.metaBucket {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
if input.IfMatch != nil && *input.IfMatch == "" {
input.IfMatch = nil
}
if input.IfModifiedSince != nil && (*input.IfModifiedSince).Equal(defTime) {
if input.IfModifiedSince != nil && *input.IfModifiedSince == defTime {
input.IfModifiedSince = nil
}
if input.IfNoneMatch != nil && *input.IfNoneMatch == "" {
input.IfNoneMatch = nil
}
if input.IfUnmodifiedSince != nil && (*input.IfUnmodifiedSince).Equal(defTime) {
if input.IfUnmodifiedSince != nil && *input.IfUnmodifiedSince == defTime {
input.IfUnmodifiedSince = nil
}
if input.PartNumber != nil && *input.PartNumber == 0 {
@@ -980,7 +848,7 @@ func (s *S3Proxy) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
if input.ResponseContentType != nil && *input.ResponseContentType == "" {
input.ResponseContentType = nil
}
if input.ResponseExpires != nil && (*input.ResponseExpires).Equal(defTime) {
if input.ResponseExpires != nil && *input.ResponseExpires == defTime {
input.ResponseExpires = nil
}
if input.SSECustomerAlgorithm != nil && *input.SSECustomerAlgorithm == "" {
@@ -1001,22 +869,19 @@ func (s *S3Proxy) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
}
func (s *S3Proxy) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
if *input.Bucket == s.metaBucket {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
if input.IfMatch != nil && *input.IfMatch == "" {
input.IfMatch = nil
}
if input.IfModifiedSince != nil && (*input.IfModifiedSince).Equal(defTime) {
if input.IfModifiedSince != nil && *input.IfModifiedSince == defTime {
input.IfModifiedSince = nil
}
if input.IfNoneMatch != nil && *input.IfNoneMatch == "" {
input.IfNoneMatch = nil
}
if input.IfUnmodifiedSince != nil && (*input.IfUnmodifiedSince).Equal(defTime) {
if input.IfUnmodifiedSince != nil && *input.IfUnmodifiedSince == defTime {
input.IfUnmodifiedSince = nil
}
if input.PartNumber != nil && *input.PartNumber == 0 {
@@ -1040,7 +905,7 @@ func (s *S3Proxy) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.
if input.ResponseContentType != nil && *input.ResponseContentType == "" {
input.ResponseContentType = nil
}
if input.ResponseExpires != nil && (*input.ResponseExpires).Equal(defTime) {
if input.ResponseExpires != nil && *input.ResponseExpires == defTime {
input.ResponseExpires = nil
}
if input.SSECustomerAlgorithm != nil && *input.SSECustomerAlgorithm == "" {
@@ -1065,9 +930,6 @@ func (s *S3Proxy) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.
}
func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
if *input.Bucket == s.metaBucket {
return s3response.GetObjectAttributesResponse{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
@@ -1126,10 +988,7 @@ func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAt
}, handleError(err)
}
func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
if *input.Bucket == s.metaBucket {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
if input.CacheControl != nil && *input.CacheControl == "" {
input.CacheControl = nil
}
@@ -1148,13 +1007,13 @@ func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInp
if input.CopySourceIfMatch != nil && *input.CopySourceIfMatch == "" {
input.CopySourceIfMatch = nil
}
if input.CopySourceIfModifiedSince != nil && (*input.CopySourceIfModifiedSince).Equal(defTime) {
if input.CopySourceIfModifiedSince != nil && *input.CopySourceIfModifiedSince == defTime {
input.CopySourceIfModifiedSince = nil
}
if input.CopySourceIfNoneMatch != nil && *input.CopySourceIfNoneMatch == "" {
input.CopySourceIfNoneMatch = nil
}
if input.CopySourceIfUnmodifiedSince != nil && (*input.CopySourceIfUnmodifiedSince).Equal(defTime) {
if input.CopySourceIfUnmodifiedSince != nil && *input.CopySourceIfUnmodifiedSince == defTime {
input.CopySourceIfUnmodifiedSince = nil
}
if input.CopySourceSSECustomerAlgorithm != nil && *input.CopySourceSSECustomerAlgorithm == "" {
@@ -1187,7 +1046,7 @@ func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInp
if input.GrantWriteACP != nil && *input.GrantWriteACP == "" {
input.GrantWriteACP = nil
}
if input.ObjectLockRetainUntilDate != nil && (*input.ObjectLockRetainUntilDate).Equal(defTime) {
if input.ObjectLockRetainUntilDate != nil && *input.ObjectLockRetainUntilDate == defTime {
input.ObjectLockRetainUntilDate = nil
}
if input.SSECustomerAlgorithm != nil && *input.SSECustomerAlgorithm == "" {
@@ -1264,39 +1123,10 @@ func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInp
StorageClass: input.StorageClass,
TaggingDirective: input.TaggingDirective,
})
if err != nil {
return s3response.CopyObjectOutput{}, handleError(err)
}
if out.CopyObjectResult == nil {
out.CopyObjectResult = &types.CopyObjectResult{}
}
return s3response.CopyObjectOutput{
BucketKeyEnabled: out.BucketKeyEnabled,
CopyObjectResult: &s3response.CopyObjectResult{
ChecksumCRC32: out.CopyObjectResult.ChecksumCRC32,
ChecksumCRC32C: out.CopyObjectResult.ChecksumCRC32C,
ChecksumCRC64NVME: out.CopyObjectResult.ChecksumCRC64NVME,
ChecksumSHA1: out.CopyObjectResult.ChecksumSHA1,
ChecksumSHA256: out.CopyObjectResult.ChecksumSHA256,
ChecksumType: out.CopyObjectResult.ChecksumType,
ETag: out.CopyObjectResult.ETag,
LastModified: out.CopyObjectResult.LastModified,
},
CopySourceVersionId: out.CopySourceVersionId,
Expiration: out.Expiration,
SSECustomerAlgorithm: out.SSECustomerAlgorithm,
SSECustomerKeyMD5: out.SSECustomerKeyMD5,
SSEKMSEncryptionContext: out.SSEKMSEncryptionContext,
SSEKMSKeyId: out.SSEKMSKeyId,
ServerSideEncryption: out.ServerSideEncryption,
VersionId: out.VersionId,
}, handleError(err)
return out, handleError(err)
}
func (s *S3Proxy) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.Delimiter != nil && *input.Delimiter == "" {
input.Delimiter = nil
}
@@ -1334,9 +1164,6 @@ func (s *S3Proxy) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (
}
func (s *S3Proxy) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
if *input.Bucket == s.metaBucket {
return s3response.ListObjectsV2Result{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ContinuationToken != nil && *input.ContinuationToken == "" {
input.ContinuationToken = nil
}
@@ -1378,16 +1205,13 @@ func (s *S3Proxy) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Inpu
}
func (s *S3Proxy) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
if *input.Bucket == s.metaBucket {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
if input.IfMatch != nil && *input.IfMatch == "" {
input.IfMatch = nil
}
if input.IfMatchLastModifiedTime != nil && (*input.IfMatchLastModifiedTime).Equal(defTime) {
if input.IfMatchLastModifiedTime != nil && *input.IfMatchLastModifiedTime == defTime {
input.IfMatchLastModifiedTime = nil
}
if input.IfMatchSize != nil && *input.IfMatchSize == 0 {
@@ -1405,9 +1229,6 @@ func (s *S3Proxy) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput)
}
func (s *S3Proxy) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.DeleteResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
@@ -1431,22 +1252,77 @@ func (s *S3Proxy) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInpu
}
func (s *S3Proxy) GetBucketAcl(ctx context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
data, err := s.getMetaBucketObjData(ctx, *input.Bucket, metaPrefixAcl, false)
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
tagout, err := s.client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
Bucket: input.Bucket,
})
if err != nil {
var ae smithy.APIError
if errors.As(err, &ae) {
// sdk issue workaround for missing NoSuchTagSet error type
// https://github.com/aws/aws-sdk-go-v2/issues/2878
if strings.Contains(ae.ErrorCode(), "NoSuchTagSet") {
return []byte{}, nil
}
if strings.Contains(ae.ErrorCode(), "NotImplemented") {
return []byte{}, nil
}
}
return nil, handleError(err)
}
return data, nil
for _, tag := range tagout.TagSet {
if *tag.Key == aclKey {
acl, err := base64Decode(*tag.Value)
if err != nil {
return nil, handleError(err)
}
return acl, nil
}
}
return []byte{}, nil
}
func (s *S3Proxy) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
return handleError(s.putMetaBucketObj(ctx, bucket, data, metaPrefixAcl))
tagout, err := s.client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
Bucket: &bucket,
})
if err != nil {
return handleError(err)
}
var found bool
for i, tag := range tagout.TagSet {
if *tag.Key == aclKey {
tagout.TagSet[i] = types.Tag{
Key: backend.GetPtrFromString(aclKey),
Value: backend.GetPtrFromString(base64Encode(data)),
}
found = true
break
}
}
if !found {
tagout.TagSet = append(tagout.TagSet, types.Tag{
Key: backend.GetPtrFromString(aclKey),
Value: backend.GetPtrFromString(base64Encode(data)),
})
}
_, err = s.client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: &bucket,
Tagging: &types.Tagging{
TagSet: tagout.TagSet,
},
})
return handleError(err)
}
func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
tagging := &types.Tagging{
TagSet: []types.Tag{},
}
@@ -1466,9 +1342,6 @@ func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, t
}
func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
if bucket == s.metaBucket {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
output, err := s.client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
Bucket: &bucket,
Key: &object,
@@ -1486,9 +1359,6 @@ func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (
}
func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
_, err := s.client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
Bucket: &bucket,
Key: &object,
@@ -1497,29 +1367,34 @@ func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object string
}
func (s *S3Proxy) PutBucketPolicy(ctx context.Context, bucket string, policy []byte) error {
return handleError(s.putMetaBucketObj(ctx, bucket, policy, metaPrefixPolicy))
_, err := s.client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
Bucket: &bucket,
Policy: backend.GetPtrFromString(string(policy)),
})
return handleError(err)
}
func (s *S3Proxy) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, error) {
data, err := s.getMetaBucketObjData(ctx, bucket, metaPrefixPolicy, false)
policy, err := s.client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
Bucket: &bucket,
})
if err != nil {
return nil, handleError(err)
}
return data, nil
result := []byte{}
if policy.Policy != nil {
result = []byte(*policy.Policy)
}
return result, nil
}
func (s *S3Proxy) DeleteBucketPolicy(ctx context.Context, bucket string) error {
key := getMetaKey(bucket, metaPrefixPolicy)
_, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: &s.metaBucket,
Key: &key,
_, err := s.client.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{
Bucket: &bucket,
})
if err != nil && !areErrSame(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
return handleError(err)
}
return nil
return handleError(err)
}
func (s *S3Proxy) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
@@ -1547,140 +1422,86 @@ func (s *S3Proxy) GetObjectLegalHold(ctx context.Context, bucket, object, versio
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (s *S3Proxy) ChangeBucketOwner(ctx context.Context, bucket, owner string) error {
return auth.UpdateBucketACLOwner(ctx, s, bucket, owner)
func (s *S3Proxy) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
var acll auth.ACL
if err := json.Unmarshal(acl, &acll); err != nil {
return fmt.Errorf("unmarshal acl: %w", err)
}
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/change-bucket-owner/?bucket=%v&owner=%v", s.endpoint, bucket, acll.Owner), nil)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
signer := v4.NewSigner()
hashedPayload := sha256.Sum256([]byte{})
hexPayload := hex.EncodeToString(hashedPayload[:])
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: s.access, SecretAccessKey: s.secret}, req, hexPayload, "s3", s.awsRegion, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
client := http.Client{}
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
	defer resp.Body.Close()
	if resp.StatusCode > 300 {
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		return fmt.Errorf("%v", string(body))
}
return nil
}
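
ChangeBucketOwner above and the request-building half of ListBucketsAndOwners below repeat the same SigV4 signing steps. A small helper could factor that out; the following is a minimal sketch assuming the s.endpoint, s.access, s.secret, and s.awsRegion fields behave as used above (the helper name doSignedAdminRequest is hypothetical, not part of the diff):

// doSignedAdminRequest is a hypothetical helper: it signs an empty-body
// admin request with SigV4 and returns the response body, treating any
// status above 300 as an error.
func (s *S3Proxy) doSignedAdminRequest(ctx context.Context, method, url string) ([]byte, error) {
	req, err := http.NewRequestWithContext(ctx, method, url, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create the request: %w", err)
	}
	hashedPayload := sha256.Sum256([]byte{})
	hexPayload := hex.EncodeToString(hashedPayload[:])
	req.Header.Set("X-Amz-Content-Sha256", hexPayload)
	signer := v4.NewSigner()
	if err := signer.SignHTTP(ctx, aws.Credentials{AccessKeyID: s.access, SecretAccessKey: s.secret},
		req, hexPayload, "s3", s.awsRegion, time.Now()); err != nil {
		return nil, fmt.Errorf("failed to sign the request: %w", err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send the request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode > 300 {
		return nil, fmt.Errorf("%v", string(body))
	}
	return body, nil
}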
func (s *S3Proxy) ListBucketsAndOwners(ctx context.Context) ([]s3response.Bucket, error) {
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/list-buckets", s.endpoint), nil)
if err != nil {
return []s3response.Bucket{}, fmt.Errorf("failed to send the request: %w", err)
}
signer := v4.NewSigner()
hashedPayload := sha256.Sum256([]byte{})
hexPayload := hex.EncodeToString(hashedPayload[:])
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: s.access, SecretAccessKey: s.secret}, req, hexPayload, "s3", s.awsRegion, time.Now())
if signErr != nil {
return []s3response.Bucket{}, fmt.Errorf("failed to sign the request: %w", err)
}
client := http.Client{}
resp, err := client.Do(req)
if err != nil {
return []s3response.Bucket{}, fmt.Errorf("failed to send the request: %w", err)
}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return []s3response.Bucket{}, err
	}
var buckets []s3response.Bucket
paginator := s3.NewListBucketsPaginator(s.client, &s3.ListBucketsInput{})
for paginator.HasMorePages() {
page, err := paginator.NextPage(ctx)
if err != nil {
return nil, handleError(err)
}
for _, bucket := range page.Buckets {
if *bucket.Name == s.metaBucket {
continue
}
aclJSON, err := s.getMetaBucketObjData(ctx, *bucket.Name, metaPrefixAcl, false)
if err != nil {
return nil, handleError(err)
}
acl, err := auth.ParseACL(aclJSON)
if err != nil {
return buckets, fmt.Errorf("parse acl tag: %w", err)
}
buckets = append(buckets, s3response.Bucket{
Name: *bucket.Name,
Owner: acl.Owner,
})
}
if err := json.Unmarshal(body, &buckets); err != nil {
return []s3response.Bucket{}, err
}
return buckets, nil
}
func (s *S3Proxy) bucketExists(ctx context.Context, bucket string) bool {
_, err := s.client.HeadBucket(ctx, &s3.HeadBucketInput{
Bucket: &bucket,
})
return err == nil
}
func (s *S3Proxy) putMetaBucketObj(ctx context.Context, bucket string, data []byte, prefix metaPrefix) error {
// if meta bucket is not provided, return successful response
if s.metaBucket == "" {
return nil
}
key := getMetaKey(bucket, prefix)
// store the provided bucket acl/policy as an object in meta bucket
_, err := s.client.PutObject(ctx, &s3.PutObjectInput{
Bucket: &s.metaBucket,
Key: &key,
Body: bytes.NewReader(data),
})
return err
}
// set checkExists to true when using this to check for bucket existence;
// in that case it will not return the default acl/policy if the metadata
// does not exist
func (s *S3Proxy) getMetaBucketObjData(ctx context.Context, bucket string, prefix metaPrefix, checkExists bool) ([]byte, error) {
	// return the default get bucket policy/acl behavior if the meta bucket is not provided
if s.metaBucket == "" {
switch prefix {
case metaPrefixAcl:
return []byte{}, nil
case metaPrefixPolicy:
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)
}
}
key := getMetaKey(bucket, prefix)
// get meta bucket object
res, err := s.client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &s.metaBucket,
Key: &key,
})
if areErrSame(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
if checkExists {
return nil, err
}
switch prefix {
case metaPrefixAcl:
// If bucket acl is not found, return default acl
return []byte{}, nil
case metaPrefixPolicy:
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)
}
}
if err != nil {
return nil, err
}
data, err := io.ReadAll(res.Body)
if err != nil {
return nil, fmt.Errorf("read meta object data: %w", err)
}
return data, nil
}
// areErrSame checks whether the provided err is a smithy.APIError
// and whether its error code and message match the provided apiErr
func areErrSame(err error, apiErr s3err.APIError) bool {
if err == nil {
return false
}
var ae smithy.APIError
if errors.As(err, &ae) {
if ae.ErrorCode() != apiErr.Code {
return false
}
// 404 errors are not well serialized by aws-sdk-go-v2
if ae.ErrorCode() != "NoSuchKey" && ae.ErrorMessage() != apiErr.Description {
return false
}
return true
}
return false
}
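
As a usage sketch, a hypothetical existence probe built on areErrSame could map NoSuchKey to a boolean rather than an error (metaObjExists is illustrative, not part of the diff):

// metaObjExists reports whether the meta object for key exists, treating
// NoSuchKey as "absent" instead of a failure.
func (s *S3Proxy) metaObjExists(ctx context.Context, key string) (bool, error) {
	res, err := s.client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: &s.metaBucket,
		Key:    &key,
	})
	if areErrSame(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	res.Body.Close()
	return true, nil
}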
// generates meta object key with bucket name and meta prefix
func getMetaKey(bucket string, prefix metaPrefix) string {
return string(prefix) + bucket
}
func handleError(err error) error {
if err == nil {
return nil
@@ -1701,6 +1522,18 @@ func handleError(err error) error {
return err
}
func base64Encode(input []byte) string {
return base64.StdEncoding.EncodeToString(input)
}
func base64Decode(encoded string) ([]byte, error) {
decoded, err := base64.StdEncoding.DecodeString(encoded)
if err != nil {
return nil, err
}
return decoded, nil
}
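
These helpers back the tag-based ACL storage above: PutBucketAcl writes the ACL bytes base64-encoded as the bucket tag value, and GetBucketAcl decodes them on read. A round-trip sketch of that symmetry (the ACL JSON is illustrative; assumes the standard bytes package is imported):

// aclRoundTrip demonstrates the encode/decode symmetry relied on above.
func aclRoundTrip() bool {
	acl := []byte(`{"Owner":"admin"}`) // illustrative ACL JSON
	encoded := base64Encode(acl)       // what PutBucketAcl stores in the tag
	decoded, err := base64Decode(encoded)
	return err == nil && bytes.Equal(acl, decoded) // always true
}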
func convertObjects(objs []types.Object) []s3response.Object {
result := make([]s3response.Object, 0, len(objs))
@@ -1720,24 +1553,3 @@ func convertObjects(objs []types.Object) []s3response.Object {
return result
}
func convertObjectVersions(versions []types.ObjectVersion) []s3response.ObjectVersion {
result := make([]s3response.ObjectVersion, 0, len(versions))
for _, v := range versions {
result = append(result, s3response.ObjectVersion{
ChecksumAlgorithm: v.ChecksumAlgorithm,
ChecksumType: v.ChecksumType,
ETag: v.ETag,
IsLatest: v.IsLatest,
Key: v.Key,
LastModified: v.LastModified,
Owner: v.Owner,
RestoreStatus: v.RestoreStatus,
Size: v.Size,
StorageClass: v.StorageClass,
VersionId: v.VersionId,
})
}
return result
}

View File

@@ -193,22 +193,23 @@ func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s
// CompleteMultipartUpload uses the scoutfs move blocks ioctl to avoid
// reading and copying the part data to the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
acct, ok := ctx.Value("account").(auth.Account)
if !ok {
acct = auth.Account{}
}
var res s3response.CompleteMultipartUploadResult
if input.Bucket == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
if input.Key == nil {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchKey)
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if input.UploadId == nil {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchUpload)
return nil, s3err.GetAPIError(s3err.ErrNoSuchUpload)
}
if input.MultipartUpload == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidRequest)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
bucket := *input.Bucket
@@ -218,22 +219,22 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return res, "", fmt.Errorf("stat bucket: %w", err)
return nil, fmt.Errorf("stat bucket: %w", err)
}
sum, err := s.checkUploadIDExists(bucket, object, uploadID)
if err != nil {
return res, "", err
return nil, err
}
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
checksums, err := s.retrieveChecksums(nil, bucket, filepath.Join(objdir, uploadID))
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get mp checksums: %w", err)
return nil, fmt.Errorf("get mp checksums: %w", err)
}
// ChecksumType should be the same as specified on CreateMultipartUpload
@@ -243,7 +244,7 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
checksumType = types.ChecksumType("null")
}
return res, "", s3err.GetChecksumTypeMismatchOnMpErr(checksumType)
return nil, s3err.GetChecksumTypeMismatchOnMpErr(checksumType)
}
// check all parts ok
@@ -254,13 +255,13 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
var partNumber int32
for i, part := range parts {
if part.PartNumber == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.PartNumber < 1 {
return res, "", s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
return nil, s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
}
if *part.PartNumber <= partNumber {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPartOrder)
return nil, s3err.GetAPIError(s3err.ErrInvalidPartOrder)
}
partNumber = *part.PartNumber
@@ -269,14 +270,14 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
fullPartPath := filepath.Join(bucket, partObjPath)
fi, err := os.Lstat(fullPartPath)
if err != nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
totalsize += fi.Size()
		// all parts except the last need to be greater than
		// the minimum allowed size (5 MiB)
if i < last && fi.Size() < backend.MinPartSize {
return res, "", s3err.GetAPIError(s3err.ErrEntityTooSmall)
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
}
b, err := s.meta.RetrieveAttribute(nil, bucket, partObjPath, etagkey)
@@ -284,24 +285,24 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
if err != nil {
etag = ""
}
if parts[i].ETag == nil || !backend.AreEtagsSame(etag, *parts[i].ETag) {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
if parts[i].ETag == nil || etag != *parts[i].ETag {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
partChecksum, err := s.retrieveChecksums(nil, bucket, partObjPath)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get part checksum: %w", err)
return nil, fmt.Errorf("get part checksum: %w", err)
}
		// If a checksum has been provided on mp initialization
err = validatePartChecksum(partChecksum, part)
if err != nil {
return res, "", err
return nil, err
}
}
if input.MpuObjectSize != nil && totalsize != *input.MpuObjectSize {
return res, "", s3err.GetIncorrectMpObjectSizeErr(totalsize, *input.MpuObjectSize)
return nil, s3err.GetIncorrectMpObjectSizeErr(totalsize, *input.MpuObjectSize)
}
	// use totalsize=0 because we won't be writing to the file, only moving
@@ -309,22 +310,22 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
f, err := s.openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, 0, acct)
if err != nil {
if errors.Is(err, syscall.EDQUOT) {
return res, "", s3err.GetAPIError(s3err.ErrQuotaExceeded)
return nil, s3err.GetAPIError(s3err.ErrQuotaExceeded)
}
return res, "", fmt.Errorf("open temp file: %w", err)
return nil, fmt.Errorf("open temp file: %w", err)
}
defer f.cleanup()
for _, part := range parts {
if part.PartNumber == nil || *part.PartNumber < 1 {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
fullPartPath := filepath.Join(bucket, partObjPath)
pf, err := os.Open(fullPartPath)
if err != nil {
return res, "", fmt.Errorf("open part %v: %v", *part.PartNumber, err)
return nil, fmt.Errorf("open part %v: %v", *part.PartNumber, err)
}
// scoutfs move data is a metadata only operation that moves the data
@@ -333,7 +334,7 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
err = moveData(pf, f.File())
pf.Close()
if err != nil {
return res, "", fmt.Errorf("move blocks part %v: %v", *part.PartNumber, err)
return nil, fmt.Errorf("move blocks part %v: %v", *part.PartNumber, err)
}
}
@@ -342,7 +343,7 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
objMeta := s.loadUserMetaData(bucket, upiddir, userMetaData)
err = s.storeObjectMetadata(f.File(), bucket, object, objMeta)
if err != nil {
return res, "", err
return nil, err
}
objname := filepath.Join(bucket, object)
@@ -351,50 +352,50 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
uid, gid, doChown := s.getChownIDs(acct)
err = backend.MkdirAll(dir, uid, gid, doChown, s.newDirPerm)
if err != nil {
return res, "", err
return nil, err
}
}
for k, v := range userMetaData {
err = s.meta.StoreAttribute(f.File(), bucket, object, fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
if err != nil {
return res, "", fmt.Errorf("set user attr %q: %w", k, err)
return nil, fmt.Errorf("set user attr %q: %w", k, err)
}
}
// load and set tagging
tagging, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, tagHdr)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object tagging: %w", err)
return nil, fmt.Errorf("get object tagging: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, tagHdr, tagging)
if err != nil {
return res, "", fmt.Errorf("set object tagging: %w", err)
return nil, fmt.Errorf("set object tagging: %w", err)
}
}
// load and set legal hold
lHold, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, objectLegalHoldKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object legal hold: %w", err)
return nil, fmt.Errorf("get object legal hold: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, objectLegalHoldKey, lHold)
if err != nil {
return res, "", fmt.Errorf("set object legal hold: %w", err)
return nil, fmt.Errorf("set object legal hold: %w", err)
}
}
// load and set retention
ret, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, objectRetentionKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object retention: %w", err)
return nil, fmt.Errorf("get object retention: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, objectRetentionKey, ret)
if err != nil {
return res, "", fmt.Errorf("set object retention: %w", err)
return nil, fmt.Errorf("set object retention: %w", err)
}
}
@@ -403,12 +404,12 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
err = s.meta.StoreAttribute(f.File(), bucket, object, etagkey, []byte(s3MD5))
if err != nil {
return res, "", fmt.Errorf("set etag attr: %w", err)
return nil, fmt.Errorf("set etag attr: %w", err)
}
err = f.link()
if err != nil {
return res, "", fmt.Errorf("link object in namespace: %w", err)
return nil, fmt.Errorf("link object in namespace: %w", err)
}
// cleanup tmp dirs
@@ -417,11 +418,11 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
// for same object name outstanding
os.Remove(filepath.Join(bucket, objdir))
return s3response.CompleteMultipartUploadResult{
return &s3.CompleteMultipartUploadOutput{
Bucket: &bucket,
ETag: &s3MD5,
Key: &object,
}, "", nil
}, nil
}
func (s *ScoutFS) storeObjectMetadata(f *os.File, bucket, object string, m objectMetadata) error {
@@ -727,6 +728,9 @@ func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.
}
func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
if input.Bucket == nil {
return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
bucket := *input.Bucket
prefix := ""
if input.Prefix != nil {
@@ -763,17 +767,20 @@ func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (
return s3response.ListObjectsResult{
CommonPrefixes: results.CommonPrefixes,
Contents: results.Objects,
Delimiter: backend.GetPtrFromString(delim),
Marker: backend.GetPtrFromString(marker),
NextMarker: backend.GetPtrFromString(results.NextMarker),
Prefix: backend.GetPtrFromString(prefix),
Delimiter: &delim,
IsTruncated: &results.Truncated,
Marker: &marker,
MaxKeys: &maxkeys,
Name: &bucket,
NextMarker: &results.NextMarker,
Prefix: &prefix,
}, nil
}
func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
if input.Bucket == nil {
return s3response.ListObjectsV2Result{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
bucket := *input.Bucket
prefix := ""
if input.Prefix != nil {
@@ -781,11 +788,7 @@ func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Inpu
}
marker := ""
if input.ContinuationToken != nil {
if input.StartAfter != nil {
marker = max(*input.StartAfter, *input.ContinuationToken)
} else {
marker = *input.ContinuationToken
}
marker = *input.ContinuationToken
}
delim := ""
if input.Delimiter != nil {
@@ -811,20 +814,16 @@ func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Inpu
return s3response.ListObjectsV2Result{}, fmt.Errorf("walk %v: %w", bucket, err)
}
count := int32(len(results.Objects))
return s3response.ListObjectsV2Result{
CommonPrefixes: results.CommonPrefixes,
Contents: results.Objects,
Delimiter: &delim,
IsTruncated: &results.Truncated,
ContinuationToken: &marker,
MaxKeys: &maxkeys,
Name: &bucket,
KeyCount: &count,
Delimiter: backend.GetPtrFromString(delim),
ContinuationToken: backend.GetPtrFromString(marker),
NextContinuationToken: backend.GetPtrFromString(results.NextMarker),
Prefix: backend.GetPtrFromString(prefix),
StartAfter: backend.GetPtrFromString(*input.StartAfter),
NextContinuationToken: &results.NextMarker,
Prefix: &prefix,
}, nil
}

View File

@@ -23,7 +23,6 @@ import (
"os"
"path/filepath"
"strconv"
"syscall"
"golang.org/x/sys/unix"
@@ -32,7 +31,6 @@ import (
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
"github.com/versity/versitygw/s3err"
)
func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
@@ -92,9 +90,6 @@ func (s *ScoutFS) openTmpFile(dir, bucket, obj string, size int64, acct auth.Acc
// file descriptor into the namespace.
fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, err
}
@@ -155,20 +150,10 @@ func (tmp *tmpfile) link() error {
}
defer dirf.Close()
for {
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if errors.Is(err, fs.ErrExist) {
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
continue
}
if err != nil {
return fmt.Errorf("link tmpfile: %w", err)
}
break
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if err != nil {
return fmt.Errorf("link tmpfile: %w", err)
}
err = tmp.f.Close()

View File

@@ -19,6 +19,7 @@ import (
"errors"
"fmt"
"io/fs"
"sort"
"strings"
"syscall"
@@ -37,38 +38,10 @@ type GetObjFunc func(path string, d fs.DirEntry) (s3response.Object, error)
var ErrSkipObj = errors.New("skip this object")
// map to store object common prefixes
type cpMap map[string]int
func (c cpMap) Add(key string) {
_, ok := c[key]
if !ok {
c[key] = len(c)
}
}
// Len returns the length of the map
func (c cpMap) Len() int {
return len(c)
}
// CpArray converts the map into a []types.CommonPrefix slice ordered by
// first insertion (the walk visits paths lexically, so the result is sorted)
func (c cpMap) CpArray() []types.CommonPrefix {
commonPrefixes := make([]types.CommonPrefix, c.Len())
for cp, i := range c {
pfx := cp
commonPrefixes[i] = types.CommonPrefix{
Prefix: &pfx,
}
}
return commonPrefixes
}
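
The map value is the first-insertion index, so CpArray emits prefixes in discovery order; because the walk visits paths lexically, that order is already sorted. A tiny illustration (assumes fmt is imported):

func cpMapDemo() {
	c := cpMap{}
	c.Add("a/")
	c.Add("b/")
	c.Add("a/") // duplicate: ignored, index unchanged
	for _, cp := range c.CpArray() {
		fmt.Println(*cp.Prefix) // prints a/ then b/
	}
}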
// Walk walks the supplied fs.FS and returns results compatible with list
// objects responses
func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
cpmap := cpMap{}
cpmap := make(map[string]struct{})
var objects []s3response.Object
// if max is 0, it should return empty non-truncated result
@@ -147,7 +120,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return fs.SkipAll
}
objects = append(objects, dirobj)
if (len(objects) + cpmap.Len()) == int(max) {
if (len(objects) + len(cpmap)) == int(max) {
newMarker = path
pastMax = true
}
@@ -201,7 +174,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
objects = append(objects, obj)
if (len(objects) + cpmap.Len()) == int(max) {
if (len(objects) + len(cpmap)) == int(max) {
newMarker = path
pastMax = true
}
@@ -245,7 +218,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return fs.SkipAll
}
objects = append(objects, obj)
if (len(objects) + cpmap.Len()) == int(max) {
if (len(objects) + len(cpmap)) == int(max) {
newMarker = path
pastMax = true
}
@@ -271,8 +244,8 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
truncated = true
return fs.SkipAll
}
cpmap.Add(cpref)
if (len(objects) + cpmap.Len()) == int(max) {
cpmap[cpref] = struct{}{}
if (len(objects) + len(cpmap)) == int(max) {
newMarker = cpref
pastMax = true
}
@@ -287,12 +260,25 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return WalkResults{}, err
}
var commonPrefixStrings []string
for k := range cpmap {
commonPrefixStrings = append(commonPrefixStrings, k)
}
sort.Strings(commonPrefixStrings)
commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
for _, cp := range commonPrefixStrings {
pfx := cp
commonPrefixes = append(commonPrefixes, types.CommonPrefix{
Prefix: &pfx,
})
}
if !truncated {
newMarker = ""
}
return WalkResults{
CommonPrefixes: cpmap.CpArray(),
CommonPrefixes: commonPrefixes,
Objects: objects,
Truncated: truncated,
NextMarker: newMarker,
@@ -310,7 +296,7 @@ func contains(a string, strs []string) bool {
type WalkVersioningResults struct {
CommonPrefixes []types.CommonPrefix
ObjectVersions []s3response.ObjectVersion
ObjectVersions []types.ObjectVersion
DelMarkers []types.DeleteMarkerEntry
Truncated bool
NextMarker string
@@ -318,7 +304,7 @@ type WalkVersioningResults struct {
}
type ObjVersionFuncResult struct {
ObjectVersions []s3response.ObjectVersion
ObjectVersions []types.ObjectVersion
DelMarkers []types.DeleteMarkerEntry
NextVersionIdMarker string
Truncated bool
@@ -329,8 +315,8 @@ type GetVersionsFunc func(path, versionIdMarker string, pastVersionIdMarker *boo
// WalkVersions walks the supplied fs.FS and returns results compatible with
// ListObjectVersions action response
func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyMarker, versionIdMarker string, max int, getObj GetVersionsFunc, skipdirs []string) (WalkVersioningResults, error) {
cpmap := cpMap{}
var objects []s3response.ObjectVersion
cpmap := make(map[string]struct{})
var objects []types.ObjectVersion
var delMarkers []types.DeleteMarkerEntry
var pastMarker bool
@@ -385,11 +371,11 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
if delimiter == "/" &&
prefix != path+"/" &&
strings.HasPrefix(path+"/", prefix) {
cpmap.Add(path + "/")
cpmap[path+"/"] = struct{}{}
return fs.SkipDir
}
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
if err == ErrSkipObj {
return nil
}
@@ -416,7 +402,7 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
if delimiter == "" {
// If no delimiter specified, then all files with matching
// prefix are included in results
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
if err == ErrSkipObj {
return nil
}
@@ -459,7 +445,7 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
suffix := strings.TrimPrefix(path, prefix)
before, _, found := strings.Cut(suffix, delimiter)
if !found {
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
if err == ErrSkipObj {
return nil
}
@@ -481,8 +467,8 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
// Common prefixes are a set, so should not have duplicates.
// These are abstractly a "directory", so need to include the
// delimiter at the end.
cpmap.Add(prefix + before + delimiter)
if (len(objects) + cpmap.Len()) == int(max) {
cpmap[prefix+before+delimiter] = struct{}{}
if (len(objects) + len(cpmap)) == int(max) {
nextMarker = path
truncated = true
@@ -495,8 +481,21 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
return WalkVersioningResults{}, err
}
var commonPrefixStrings []string
for k := range cpmap {
commonPrefixStrings = append(commonPrefixStrings, k)
}
sort.Strings(commonPrefixStrings)
commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
for _, cp := range commonPrefixStrings {
pfx := cp
commonPrefixes = append(commonPrefixes, types.CommonPrefix{
Prefix: &pfx,
})
}
return WalkVersioningResults{
CommonPrefixes: cpmap.CpArray(),
CommonPrefixes: commonPrefixes,
ObjectVersions: objects,
DelMarkers: delMarkers,
Truncated: truncated,

View File

@@ -100,11 +100,6 @@ func adminCommand() *cli.Command {
Usage: "secret access key for the new user",
Aliases: []string{"s"},
},
&cli.StringFlag{
Name: "role",
Usage: "the new user role",
Aliases: []string{"r"},
},
&cli.IntFlag{
Name: "user-id",
Usage: "userID for the new user",
@@ -316,14 +311,8 @@ func deleteUser(ctx *cli.Context) error {
}
func updateUser(ctx *cli.Context) error {
access, secret, userId, groupId, role := ctx.String("access"), ctx.String("secret"), ctx.Int("user-id"), ctx.Int("group-id"), auth.Role(ctx.String("role"))
access, secret, userId, groupId := ctx.String("access"), ctx.String("secret"), ctx.Int("user-id"), ctx.Int("group-id")
props := auth.MutableProps{}
if ctx.IsSet("role") {
if !role.IsValid() {
return fmt.Errorf("invalid user role: %v", role)
}
props.Role = role
}
if ctx.IsSet("secret") {
props.Secret = &secret
}

View File

@@ -50,7 +50,6 @@ var (
logWebhookURL, accessLog string
adminLogFile string
healthPath string
virtualDomain string
debug bool
pprof string
quiet bool
@@ -61,10 +60,10 @@ var (
ldapAccessAtr, ldapSecAtr, ldapRoleAtr string
ldapUserIdAtr, ldapGroupIdAtr string
vaultEndpointURL, vaultSecretStoragePath string
vaultAuthMethod, vaultMountPath string
vaultRootToken, vaultRoleId string
vaultRoleSecret, vaultServerCert string
vaultClientCert, vaultClientCertKey string
vaultMountPath, vaultRootToken string
vaultRoleId, vaultRoleSecret string
vaultServerCert, vaultClientCert string
vaultClientCertKey string
s3IamAccess, s3IamSecret string
s3IamRegion, s3IamBucket string
s3IamEndpoint string
@@ -228,13 +227,6 @@ func initFlags() []cli.Flag {
Destination: &quiet,
Aliases: []string{"q"},
},
&cli.StringFlag{
Name: "virtual-domain",
Usage: "enables the virtual host style bucket addressing with the specified arg as the base domain",
EnvVars: []string{"VGW_VIRTUAL_DOMAIN"},
Destination: &virtualDomain,
Aliases: []string{"vd"},
},
&cli.StringFlag{
Name: "access-log",
Usage: "enable server access logging to specified file",
@@ -380,12 +372,6 @@ func initFlags() []cli.Flag {
EnvVars: []string{"VGW_IAM_VAULT_SECRET_STORAGE_PATH"},
Destination: &vaultSecretStoragePath,
},
&cli.StringFlag{
Name: "iam-vault-auth-method",
Usage: "vault server auth method",
EnvVars: []string{"VGW_IAM_VAULT_AUTH_METHOD"},
Destination: &vaultAuthMethod,
},
&cli.StringFlag{
Name: "iam-vault-mount-path",
Usage: "vault server mount path",
@@ -539,19 +525,19 @@ func initFlags() []cli.Flag {
},
&cli.StringFlag{
Name: "ipa-user",
Usage: "Username used to connect to FreeIPA (requires permissions to read user vault contents)",
Usage: "Username used to connect to FreeIPA. Needs permissions to read user vault contents",
EnvVars: []string{"VGW_IPA_USER"},
Destination: &ipaUser,
},
&cli.StringFlag{
Name: "ipa-password",
Usage: "Password of the user used to connect to FreeIPA",
Usage: "Password of the user used to connect to FreeIPA.",
EnvVars: []string{"VGW_IPA_PASSWORD"},
Destination: &ipaPassword,
},
&cli.BoolFlag{
Name: "ipa-insecure",
Usage: "Disable verify TLS certificate of FreeIPA server",
Usage: "Verify TLS certificate of FreeIPA server. Default is 'true'.",
EnvVars: []string{"VGW_IPA_INSECURE"},
Destination: &ipaInsecure,
},
@@ -617,9 +603,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
if readonly {
opts = append(opts, s3api.WithReadOnly())
}
if virtualDomain != "" {
opts = append(opts, s3api.WithHostStyle(virtualDomain))
}
admApp := fiber.New(fiber.Config{
AppName: "versitygw",
@@ -664,7 +647,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
LDAPGroupIdAtr: ldapGroupIdAtr,
VaultEndpointURL: vaultEndpointURL,
VaultSecretStoragePath: vaultSecretStoragePath,
VaultAuthMethod: vaultAuthMethod,
VaultMountPath: vaultMountPath,
VaultRootToken: vaultRootToken,
VaultRoleId: vaultRoleId,

View File

@@ -15,60 +15,50 @@
package main
import (
"errors"
"fmt"
"plugin"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/plugins"
vgwplugin "github.com/versity/versitygw/backend/plugin"
)
var (
pluginPath string
pluginConfig string
)
func pluginCommand() *cli.Command {
return &cli.Command{
Name: "plugin",
Usage: "load a backend from a plugin",
Description: "Runs a s3 gateway and redirects the requests to the backend defined in the plugin",
Action: runPluginBackend,
Usage: "plugin storage backend",
Description: `This tells the gateway to load the backend from a dynamic runtime plugin.`,
Action: runPlugin,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "config",
Usage: "location of the config file",
Aliases: []string{"c"},
Name: "file",
Usage: "path to plugin shared object file",
Value: "",
Required: true,
EnvVars: []string{"VGW_PLUGIN_FILE"},
Destination: &pluginPath,
Aliases: []string{"f"},
},
&cli.StringFlag{
Name: "config",
Usage: "configuration option for the plugin",
Value: "",
Required: true,
EnvVars: []string{"VGW_PLUGIN_CONFIG"},
Destination: &pluginConfig,
Aliases: []string{"c"},
},
},
}
}
func runPluginBackend(ctx *cli.Context) error {
if ctx.NArg() == 0 {
return fmt.Errorf("no plugin file provided to be loaded")
}
pluginPath := ctx.Args().Get(0)
config := ctx.String("config")
p, err := plugin.Open(pluginPath)
func runPlugin(ctx *cli.Context) error {
be, err := vgwplugin.NewPluginBackend(pluginPath, pluginConfig)
if err != nil {
return err
return fmt.Errorf("init plugin backend: %w", err)
}
backendSymbol, err := p.Lookup("Backend")
if err != nil {
return err
}
backendPluginPtr, ok := backendSymbol.(*plugins.BackendPlugin)
if !ok {
return errors.New("plugin is not of type *plugins.BackendPlugin")
}
if backendPluginPtr == nil {
return errors.New("variable Backend is nil")
}
be, err := (*backendPluginPtr).New(config)
if err != nil {
return err
}
return runGateway(ctx.Context, be)
}
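
For reference, the lookup code above implies that a plugin shared object exports a package-level variable named Backend of type plugins.BackendPlugin, whose New method builds the backend from the config string. A minimal sketch of such a plugin, assuming BackendPlugin is an interface with New(config string) (backend.Backend, error) (inferred from the call above; the type and file names are illustrative):

// Hypothetical plugin source, built with: go build -buildmode=plugin -o mybackend.so
package main

import (
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/plugins"
)

type myBackendPlugin struct{}

// New constructs the backend from the gateway-supplied config string.
func (myBackendPlugin) New(config string) (backend.Backend, error) {
	// return a concrete backend.Backend implementation here
	return nil, nil
}

// Backend is the symbol the gateway resolves via plugin.Open + Lookup("Backend").
var Backend plugins.BackendPlugin = myBackendPlugin{}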

View File

@@ -31,7 +31,6 @@ var (
dirPerms uint
sidecar string
nometa bool
forceNoTmpFile bool
)
func posixCommand() *cli.Command {
@@ -94,12 +93,6 @@ will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
EnvVars: []string{"VGW_META_NONE"},
Destination: &nometa,
},
&cli.BoolFlag{
Name: "disableotmp",
Usage: "disable O_TMPFILE support for new objects",
EnvVars: []string{"VGW_DISABLE_OTMP"},
Destination: &forceNoTmpFile,
},
},
}
}
@@ -120,12 +113,11 @@ func runPosix(ctx *cli.Context) error {
}
opts := posix.PosixOpts{
ChownUID: chownuid,
ChownGID: chowngid,
BucketLinks: bucketlinks,
VersioningDir: versioningDir,
NewDirPerm: fs.FileMode(dirPerms),
ForceNoTmpFile: forceNoTmpFile,
ChownUID: chownuid,
ChownGID: chowngid,
BucketLinks: bucketlinks,
VersioningDir: versioningDir,
NewDirPerm: fs.FileMode(dirPerms),
}
var ms meta.MetadataStorer

View File

@@ -26,10 +26,8 @@ var (
s3proxySecret string
s3proxyEndpoint string
s3proxyRegion string
s3proxyMetaBucket string
s3proxyDisableChecksum bool
s3proxySslSkipVerify bool
s3proxyUsePathStyle bool
s3proxyDebug bool
)
@@ -73,12 +71,6 @@ to an s3 storage backend service.`,
EnvVars: []string{"VGW_S3_REGION"},
Destination: &s3proxyRegion,
},
&cli.StringFlag{
Name: "meta-bucket",
Usage: "s3 service meta bucket to store buckets acl/policy",
EnvVars: []string{"VGW_S3_META_BUCKET"},
Destination: &s3proxyMetaBucket,
},
&cli.BoolFlag{
Name: "disable-checksum",
Usage: "disable gateway to server object checksums",
@@ -93,13 +85,6 @@ to an s3 storage backend service.`,
Value: false,
Destination: &s3proxySslSkipVerify,
},
&cli.BoolFlag{
Name: "use-path-style",
Usage: "use path style addressing for s3 proxy",
EnvVars: []string{"VGW_S3_USE_PATH_STYLE"},
Value: false,
Destination: &s3proxyUsePathStyle,
},
&cli.BoolFlag{
Name: "debug",
Usage: "output extra debug tracing",
@@ -112,8 +97,8 @@ to an s3 storage backend service.`,
}
func runS3(ctx *cli.Context) error {
be, err := s3proxy.New(ctx.Context, s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
s3proxyMetaBucket, s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyUsePathStyle, s3proxyDebug)
be, err := s3proxy.New(s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyDebug)
if err != nil {
return fmt.Errorf("init s3 backend: %w", err)
}

View File

@@ -34,7 +34,7 @@ var (
totalReqs int
upload bool
download bool
hostStyle bool
pathStyle bool
checksumDisable bool
versioningEnabled bool
azureTests bool
@@ -74,12 +74,6 @@ func initTestFlags() []cli.Flag {
Destination: &endpoint,
Aliases: []string{"e"},
},
&cli.BoolFlag{
Name: "host-style",
Usage: "Use host-style bucket addressing",
Value: false,
Destination: &hostStyle,
},
&cli.BoolFlag{
Name: "debug",
Usage: "enable debug mode",
@@ -130,11 +124,6 @@ func initTestCommands() []*cli.Command {
},
},
},
{
Name: "scoutfs",
Usage: "Tests scoutfs full flow",
Action: getAction(integration.TestScoutfs),
},
{
Name: "iam",
Usage: "Tests iam service",
@@ -197,6 +186,12 @@ func initTestCommands() []*cli.Command {
Value: 1,
Destination: &concurrency,
},
&cli.BoolFlag{
Name: "pathStyle",
Usage: "Use Pathstyle bucket addressing",
Value: false,
Destination: &pathStyle,
},
&cli.BoolFlag{
Name: "checksumDis",
Usage: "Disable server checksum",
@@ -228,8 +223,8 @@ func initTestCommands() []*cli.Command {
if debug {
opts = append(opts, integration.WithDebug())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
if pathStyle {
opts = append(opts, integration.WithPathStyle())
}
if checksumDisable {
opts = append(opts, integration.WithDisableChecksum())
@@ -292,9 +287,6 @@ func initTestCommands() []*cli.Command {
if checksumDisable {
opts = append(opts, integration.WithDisableChecksum())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
}
s3conf := integration.NewS3Conf(opts...)
@@ -324,9 +316,6 @@ func getAction(tf testFunc) func(*cli.Context) error {
if azureTests {
opts = append(opts, integration.WithAzureMode())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
}
s := integration.NewS3Conf(opts...)
tf(s)
@@ -362,9 +351,6 @@ func extractIntTests() (commands []*cli.Command) {
if versioningEnabled {
opts = append(opts, integration.WithVersioningEnabled())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
}
s := integration.NewS3Conf(opts...)
err := testFunc(s)

View File

@@ -99,26 +99,6 @@ ROOT_SECRET_ACCESS_KEY=
# endpoint is unauthenticated, and returns a 200 status for GET.
#VGW_HEALTH=
# Enable VGW_READ_ONLY to only allow read operations to the S3 server. No write
# operations will be allowed.
#VGW_READ_ONLY=false
# The VGW_VIRTUAL_DOMAIN option enables the virtual host style bucket
# addressing. The path style addressing is the default, and remains enabled
# even when virtual host style is enabled. The VGW_VIRTUAL_DOMAIN option
# specifies the domain name that will be used for the virtual host style
# addressing. For virtual addressing, access to a bucket is in the request
# form:
# https://<bucket>.<VGW_VIRTUAL_DOMAIN>/
# for example: https://mybucket.example.com/ where
# VGW_VIRTUAL_DOMAIN=example.com
# and all subdomains of VGW_VIRTUAL_DOMAIN should be reserved for buckets.
# This means that virtual host addressing will generally require a DNS
# entry for each bucket that needs to be accessed.
# The default path style request is of the form:
# https://<VGW_ENDPOINT>/<bucket>
#VGW_VIRTUAL_DOMAIN=
###############
# Access Logs #
###############
@@ -260,24 +240,6 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_IAM_LDAP_USER_ID_ATR=
#VGW_IAM_LDAP_GROUP_ID_ATR=
# The FreeIPA options will enable the FreeIPA IAM service with accounts stored
# in an external FreeIPA service. Currently the FreeIPA IAM service only
# supports account retrieval. Creating and modifying accounts must be done
# outside of the versitygw service.
# FreeIPA server url e.g. https://ipa.example.test
#VGW_IPA_HOST=
# A name of the user vault containing their secret
#VGW_IPA_VAULT_NAME=
# Username used to connect to FreeIPA (requires permissions to read user vault
# contents)
#VGW_IPA_USER=
# Password of the user used to connect to FreeIPA
#VGW_IPA_PASSWORD=
# Disable TLS certificate verification for the FreeIPA server
#VGW_IPA_INSECURE=false
# FreeIPA IAM debug output
#VGW_IPA_DEBUG=false
###############
# IAM caching #
###############
@@ -355,40 +317,6 @@ ROOT_SECRET_ACCESS_KEY=
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755
# To enable object versions, the VGW_VERSIONING_DIR option must be set to the
# directory that will be used to store the object versions. The version
# directory must NOT be a subdirectory of the VGW_BACKEND_ARG directory.
#VGW_VERSIONING_DIR=
# The gateway uses xattrs to store metadata for objects by default. For systems
# that do not support xattrs, the VGW_META_SIDECAR option can be set to a
# directory that will be used to store the metadata for objects. This is
# currently experimental, and may have issues for some edge cases.
#VGW_META_SIDECAR=
# The VGW_META_NONE option will disable the metadata functionality for the
# gateway. This will cause the gateway to not store any metadata for objects
# or buckets. This includes bucket ACLs and Policy. This may be useful for
# read-only access to pre-existing data where the gateway should not modify
# the data. It is recommended to enable VGW_READ_ONLY (Global Options) along
# with this.
#VGW_META_NONE=false
# The gateway will use O_TMPFILE for writing objects while uploading and
# link the file to the final object name when the upload is complete if the
# filesystem supports O_TMPFILE. This creates an atomic object creation
# that is not visible to other clients or racing uploads until the upload
# is complete. This will not work if there is a different filesystem mounted
# below the bucket level than where the bucket resides. The VGW_DISABLE_OTMP
# option can be set to true to disable this functionality and force the
# fallback mode that is otherwise used when O_TMPFILE is not available.
# This fallback will create a temporary
# file in the bucket directory and rename it to the final object name when
# the upload is complete if the final location is in the same filesystem, or
# copy the file to the final location if the final location is in a different
# filesystem. This fallback mode is still atomic, but may be less efficient
# than O_TMPFILE when the data needs to be copied into the final location.
#VGW_DISABLE_OTMP=false
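
For illustration, the mechanism this option controls looks roughly like the following sketch of the O_TMPFILE write-then-link pattern (mirroring the scoutfs code earlier in this diff; the function name and paths are illustrative, and it assumes fmt and golang.org/x/sys/unix are imported):

// writeObjectAtomically sketches the O_TMPFILE path: write to an anonymous
// file in dir, then link it into the namespace at objPath in one step.
func writeObjectAtomically(dir, objPath string, data []byte) error {
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0644)
	if err != nil {
		return err // no O_TMPFILE support: fall back to tmp file + rename/copy
	}
	defer unix.Close(fd)
	if _, err := unix.Write(fd, data); err != nil {
		return err
	}
	// publish atomically: the object only appears at objPath once complete
	return unix.Linkat(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd),
		unix.AT_FDCWD, objPath, unix.AT_SYMLINK_FOLLOW)
}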
###########
# scoutfs #
###########

82
go.mod
View File

@@ -5,73 +5,73 @@ go 1.23.0
toolchain go1.24.1
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
github.com/DataDog/datadog-go/v5 v5.6.0
github.com/aws/aws-sdk-go-v2 v1.36.5
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0
github.com/aws/smithy-go v1.22.4
github.com/go-ldap/ldap/v3 v3.4.11
github.com/gofiber/fiber/v2 v2.52.8
github.com/aws/aws-sdk-go-v2 v1.36.3
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.1
github.com/aws/smithy-go v1.22.3
github.com/go-ldap/ldap/v3 v3.4.10
github.com/gofiber/fiber/v2 v2.52.6
github.com/google/go-cmp v0.7.0
github.com/google/uuid v1.6.0
github.com/hashicorp/vault-client-go v0.4.3
github.com/nats-io/nats.go v1.43.0
github.com/oklog/ulid/v2 v2.1.1
github.com/pkg/xattr v0.4.12
github.com/segmentio/kafka-go v0.4.48
github.com/nats-io/nats.go v1.41.0
github.com/oklog/ulid/v2 v2.1.0
github.com/pkg/xattr v0.4.10
github.com/segmentio/kafka-go v0.4.47
github.com/smira/go-statsd v1.3.4
github.com/urfave/cli/v2 v2.27.7
github.com/valyala/fasthttp v1.63.0
github.com/urfave/cli/v2 v2.27.6
github.com/valyala/fasthttp v1.60.0
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
golang.org/x/sync v0.16.0
golang.org/x/sys v0.34.0
golang.org/x/sync v0.13.0
golang.org/x/sys v0.32.0
)
require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.18 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/nats-io/nkeys v0.4.11 // indirect
github.com/nats-io/nkeys v0.4.10 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/text v0.27.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/crypto v0.37.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/time v0.11.0 // indirect
)
require (
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.17
github.com/aws/aws-sdk-go-v2/credentials v1.17.70
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/andybalholm/brotli v1.1.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.13
github.com/aws/aws-sdk-go-v2/credentials v1.17.66
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.71
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect

214
go.sum
View File

@@ -1,15 +1,15 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0 h1:Bg8m3nq/X1DeePkAbCfb6ml6F3F0IunEhE8TMh+lY48=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
@@ -23,50 +23,50 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/aws/aws-sdk-go-v2 v1.36.5 h1:0OF9RiEMEdDdZEMqF9MRjevyxAQcf6gY+E7vwBILFj0=
github.com/aws/aws-sdk-go-v2 v1.36.5/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY=
github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0=
github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8=
github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0=
github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83 h1:08otkOELsIi0toRRGMytlJhOctcN8xfKfKFR2NXz3kE=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83/go.mod h1:dGsGb2wI8JDWeMAhjVPP+z+dqvYjL6k6o+EujcRNk5c=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 h1:SsytQyTMHMDPspp+spo7XwXTP44aJZZAC7fBV2C5+5s=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36/go.mod h1:Q1lnJArKRXkenyog6+Y+zr7WDpk4e6XlR6gs20bbeNo=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 h1:i2vNHQiXUvKhs3quBR6aqlgJaiaexz/aNvdCktW/kAM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36/go.mod h1:UdyGa7Q91id/sdyHPwth+043HhmP6yP9MBHgbZM0xo8=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
github.com/aws/aws-sdk-go-v2/config v1.29.13 h1:RgdPqWoE8nPpIekpVpDJsBckbqT4Liiaq9f35pbTh1Y=
github.com/aws/aws-sdk-go-v2/config v1.29.13/go.mod h1:NI28qs/IOUIRhsR7GQ/JdexoqRN9tDxkIrYZq0SOF44=
github.com/aws/aws-sdk-go-v2/credentials v1.17.66 h1:aKpEKaTy6n4CEJeYI1MNj97oSDLi4xro3UzQfwf5RWE=
github.com/aws/aws-sdk-go-v2/credentials v1.17.66/go.mod h1:xQ5SusDmHb/fy55wU0QqTy0yNfLqxzec59YcsRZB+rI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.71 h1:s43gLuY+zGmtpx+KybfFP4IckopmTfDOPdlf/L++N5I=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.71/go.mod h1:KH6wWmY3O3c/jVAjHk0MGzVAFDxkOSt42Eoe4ZO4ge0=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36/go.mod h1:gDhdAV6wL3PmPqBhiPbnlS447GoWs8HTTOYef9/9Inw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 h1:nAP2GYbfh8dd2zGZqFRSMlq+/F6cMPBUuCsGAMkN074=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4/go.mod h1:LT10DsiGjLWh4GbjInf9LQejkYEhBgBCjLG5+lvk4EE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 h1:t0E6FzREdtCsiLIoLCWsYliNsRBgyGD/MCK571qk4MI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17/go.mod h1:ygpklyoaypuyDvOM5ujWGrYWpAK3h7ugnmKCU/76Ys4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 h1:qcLWgdhq45sDM9na4cvXax9dyLitn8EYBRl8Ak4XtG4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17/go.mod h1:M+jkjBFZ2J6DJrjMv2+vkBbuht6kxJYtJiwoVgX4p4U=
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0 h1:5Y75q0RPQoAbieyOuGLhjV9P3txvYgXv2lg0UwJOfmE=
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E=
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0=
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w=
github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw=
github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.1 h1:2Ku1xwAohSSXHR1tpAnyVDSQSxoDMA+/NZBytW+f4qg=
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.1/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.18 h1:xz7WvTMfSStb9Y8NpCT82FXLNC3QasqBfuAFHY4Pk5g=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.18/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -74,29 +74,33 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU=
github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM=
github.com/gofiber/fiber/v2 v2.52.8 h1:xl4jJQ0BV5EJTA2aWiKw/VddRpHrKeZLF0QPUxqn0x4=
github.com/gofiber/fiber/v2 v2.52.8/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU=
github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY=
github.com/gofiber/fiber/v2 v2.52.6 h1:Rfp+ILPiYSvvVuIPvxrBns+HJp8qGLDnLJawAu27XVI=
github.com/gofiber/fiber/v2 v2.52.6/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/vault-client-go v0.4.3 h1:zG7STGVgn/VK6rnZc0k8PGbfv2x/sJExRKHSUg3ljWc=
@@ -113,8 +117,8 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
@@ -128,14 +132,14 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug=
github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko=
github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo=
github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
@@ -143,12 +147,12 @@ github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFu
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/xattr v0.4.12 h1:rRTkSyFNTRElv6pkA3zpjHpQ90p/OdHQC1GmGh1aTjM=
github.com/pkg/xattr v0.4.12/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -156,8 +160,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/segmentio/kafka-go v0.4.48 h1:9jyu9CWK4W5W+SroCe8EffbrRZVqAOkuaLd/ApID4Vs=
github.com/segmentio/kafka-go v0.4.48/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smira/go-statsd v1.3.4 h1:kBYWcLSGT+qC6JVbvfz48kX7mQys32fjDOPrfmsSx2c=
github.com/smira/go-statsd v1.3.4/go.mod h1:RjdsESPgDODtg1VpVVf9MJrEW2Hw0wtRNbmB1CAhu6A=
@@ -166,17 +170,18 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g=
github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.63.0 h1:DisIL8OjB7ul2d7cBaMRcKTQDYnrGy56R4FCiuDP0Ns=
github.com/valyala/fasthttp v1.63.0/go.mod h1:REc4IeW+cAEyLrRPa5A81MIjvz0QE1laoTX2EaPHKJM=
github.com/valyala/fasthttp v1.60.0 h1:kBRYS0lOhVJ6V+bYN8PqAHELKHtXqwq9zNMLKx1MBsw=
github.com/valyala/fasthttp v1.60.0/go.mod h1:iY4kDgV3Gc6EqhRZ8icqcmlG6bqhcDXfuHgTO4FXCvc=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44 h1:Wx1o3pNrCzsHIIDyZ2MLRr6tF/1FhAr7HNDn80QqDWE=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
@@ -194,28 +199,46 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -231,14 +254,23 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -246,19 +278,25 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -1,35 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package plugins
import "github.com/versity/versitygw/backend"
// BackendPlugin defines an interface for creating backend
// implementation instances.
// Plugins implementing this interface can be built as shared
// libraries using Go's plugin system (build with `go build -buildmode=plugin`).
// The shared library should export an instance of
// this interface in a variable named `Backend`.
type BackendPlugin interface {
// New creates and initializes a new backend.Backend instance.
// The config parameter specifies the path of the file containing
// the configuration for the backend.
//
// Implementations of this method should perform the necessary steps to
// establish a connection to the underlying storage system or service
// (e.g., network storage system, distributed storage system, cloud storage)
// and configure it according to the provided configuration.
New(config string) (backend.Backend, error)
}
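As a sketch only (not part of this diff): a minimal out-of-tree plugin satisfying this interface, plus the loading side, could look like the following. The myPlugin type, error strings, and config path are invented; only the exported `Backend` variable name, the import paths, and `-buildmode=plugin` come from the doc comment above.
// mybackend.go — build with: go build -buildmode=plugin -o mybackend.so .
package main
import (
	"fmt"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/plugins"
)
// myPlugin is a hypothetical BackendPlugin implementation.
type myPlugin struct{}
// New would parse the config file, connect to the storage system,
// and return the initialized backend; the real work is elided here.
func (myPlugin) New(config string) (backend.Backend, error) {
	return nil, fmt.Errorf("myPlugin: not implemented")
}
// Backend is the exported symbol the gateway looks up in the shared object.
var Backend plugins.BackendPlugin = myPlugin{}
On the gateway side, note that plugin.Lookup on a package-level variable returns a pointer to that variable, so a loader would assert *plugins.BackendPlugin and dereference before calling New:
// loader sketch, gateway side (imports "plugin", "fmt", and the packages above)
func loadBackendPlugin(soPath, config string) (backend.Backend, error) {
	p, err := plugin.Open(soPath)
	if err != nil {
		return nil, err
	}
	sym, err := p.Lookup("Backend")
	if err != nil {
		return nil, err
	}
	// Lookup on a variable yields a pointer to it
	bp, ok := sym.(*plugins.BackendPlugin)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for Backend symbol", sym)
	}
	return (*bp).New(config)
}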

View File

@@ -15,10 +15,13 @@
package controllers
import (
"encoding/json"
"encoding/xml"
"fmt"
"net/http"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
@@ -97,16 +100,7 @@ func (c AdminController) UpdateUser(ctx *fiber.Ctx) error {
})
}
err := props.Validate()
if err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminInvalidUserRole),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
})
}
err = c.iam.UpdateUserAccount(access, props)
err := c.iam.UpdateUserAccount(access, props)
if err != nil {
if strings.Contains(err.Error(), "user not found") {
err = s3err.GetAPIError(s3err.ErrAdminUserNotFound)
@@ -169,7 +163,27 @@ func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
})
}
err = c.be.ChangeBucketOwner(ctx.Context(), bucket, owner)
acl := auth.ACL{
Owner: owner,
Grantees: []auth.Grantee{
{
Permission: auth.PermissionFullControl,
Access: owner,
Type: types.TypeCanonicalUser,
},
},
}
aclParsed, err := json.Marshal(acl)
if err != nil {
return SendResponse(ctx, fmt.Errorf("failed to marshal the bucket acl: %w", err),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminChangeBucketOwner,
})
}
err = c.be.ChangeBucketOwner(ctx.Context(), bucket, aclParsed)
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.l,

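The hunk above moves default-ACL construction into the controller: changing a bucket's owner now hands the backend a marshaled ACL granting the new owner full control, instead of a bare owner string. Under the new contract a backend can treat the bytes as opaque; a sketch (myBackend and storeBucketACL are hypothetical):
func (b *myBackend) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
	// persist the controller-built ACL document verbatim as bucket metadata
	return b.storeBucketACL(ctx, bucket, acl)
}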
View File

@@ -324,7 +324,7 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
}
adminController := AdminController{
be: &BackendMock{
ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket, owner string) error {
ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, acl []byte) error {
return nil
},
},

View File

@@ -26,13 +26,13 @@ var _ backend.Backend = &BackendMock{}
// AbortMultipartUploadFunc: func(contextMoqParam context.Context, abortMultipartUploadInput *s3.AbortMultipartUploadInput) error {
// panic("mock out the AbortMultipartUpload method")
// },
// ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, owner string) error {
// ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, acl []byte) error {
// panic("mock out the ChangeBucketOwner method")
// },
// CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
// CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
// panic("mock out the CompleteMultipartUpload method")
// },
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
// panic("mock out the CopyObject method")
// },
// CreateBucketFunc: func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error {
@@ -196,13 +196,13 @@ type BackendMock struct {
AbortMultipartUploadFunc func(contextMoqParam context.Context, abortMultipartUploadInput *s3.AbortMultipartUploadInput) error
// ChangeBucketOwnerFunc mocks the ChangeBucketOwner method.
ChangeBucketOwnerFunc func(contextMoqParam context.Context, bucket string, owner string) error
ChangeBucketOwnerFunc func(contextMoqParam context.Context, bucket string, acl []byte) error
// CompleteMultipartUploadFunc mocks the CompleteMultipartUpload method.
CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error)
CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
// CopyObjectFunc mocks the CopyObject method.
CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error)
CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error)
// CreateBucketFunc mocks the CreateBucket method.
CreateBucketFunc func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error
@@ -369,8 +369,8 @@ type BackendMock struct {
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
// Owner is the owner argument value.
Owner string
// ACL is the acl argument value.
ACL []byte
}
// CompleteMultipartUpload holds details about calls to the CompleteMultipartUpload method.
CompleteMultipartUpload []struct {
@@ -864,23 +864,23 @@ func (mock *BackendMock) AbortMultipartUploadCalls() []struct {
}
// ChangeBucketOwner calls ChangeBucketOwnerFunc.
func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, bucket string, owner string) error {
func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, bucket string, acl []byte) error {
if mock.ChangeBucketOwnerFunc == nil {
panic("BackendMock.ChangeBucketOwnerFunc: method is nil but Backend.ChangeBucketOwner was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
Owner string
ACL []byte
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
Owner: owner,
ACL: acl,
}
mock.lockChangeBucketOwner.Lock()
mock.calls.ChangeBucketOwner = append(mock.calls.ChangeBucketOwner, callInfo)
mock.lockChangeBucketOwner.Unlock()
return mock.ChangeBucketOwnerFunc(contextMoqParam, bucket, owner)
return mock.ChangeBucketOwnerFunc(contextMoqParam, bucket, acl)
}
// ChangeBucketOwnerCalls gets all the calls that were made to ChangeBucketOwner.
@@ -890,12 +890,12 @@ func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, buck
func (mock *BackendMock) ChangeBucketOwnerCalls() []struct {
ContextMoqParam context.Context
Bucket string
Owner string
ACL []byte
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
Owner string
ACL []byte
}
mock.lockChangeBucketOwner.RLock()
calls = mock.calls.ChangeBucketOwner
@@ -904,7 +904,7 @@ func (mock *BackendMock) ChangeBucketOwnerCalls() []struct {
}
// CompleteMultipartUpload calls CompleteMultipartUploadFunc.
func (mock *BackendMock) CompleteMultipartUpload(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
func (mock *BackendMock) CompleteMultipartUpload(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
if mock.CompleteMultipartUploadFunc == nil {
panic("BackendMock.CompleteMultipartUploadFunc: method is nil but Backend.CompleteMultipartUpload was just called")
}
@@ -940,7 +940,7 @@ func (mock *BackendMock) CompleteMultipartUploadCalls() []struct {
}
// CopyObject calls CopyObjectFunc.
func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
if mock.CopyObjectFunc == nil {
panic("BackendMock.CopyObjectFunc: method is nil but Backend.CopyObject was just called")
}
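Tests built on this generated mock can pair the new ChangeBucketOwner signature with the recorded-calls accessor shown above; a sketch (bucket name and assertions are illustrative):
func TestChangeBucketOwnerRecorded(t *testing.T) {
	be := &BackendMock{
		ChangeBucketOwnerFunc: func(_ context.Context, _ string, _ []byte) error {
			return nil
		},
	}
	if err := be.ChangeBucketOwner(context.Background(), "mybucket", []byte(`{}`)); err != nil {
		t.Fatal(err)
	}
	// ChangeBucketOwnerCalls returns the recorded arguments, including the raw ACL
	calls := be.ChangeBucketOwnerCalls()
	if len(calls) != 1 || calls[0].Bucket != "mybucket" {
		t.Fatalf("unexpected ChangeBucketOwner calls: %+v", calls)
	}
}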

File diff suppressed because it is too large

View File

@@ -32,7 +32,6 @@ import (
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
@@ -100,7 +99,8 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access", Role: "admin:"})
ctx.Locals("account", auth.Account{Access: "valid access", Role: "admin:"})
ctx.Locals("isDebug", false)
return ctx.Next()
})
app.Get("/", s3ApiController.ListBuckets)
@@ -116,7 +116,8 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
}
appErr.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access", Role: "admin:"})
ctx.Locals("account", auth.Account{Access: "valid access", Role: "admin:"})
ctx.Locals("isDebug", false)
return ctx.Next()
})
appErr.Get("/", s3ApiControllerErr.ListBuckets)
@@ -219,9 +220,10 @@ func TestS3ApiController_GetActions(t *testing.T) {
},
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Get("/:bucket/:key/*", s3ApiController.GetActions)
@@ -411,9 +413,10 @@ func TestS3ApiController_ListActions(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
@@ -435,9 +438,10 @@ func TestS3ApiController_ListActions(t *testing.T) {
}
appError := fiber.New()
appError.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
appError.Get("/:bucket", s3ApiControllerError.ListActions)
@@ -703,9 +707,10 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
}
// Mock ctx.Locals
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{Owner: "valid access"})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{Owner: "valid access"})
return ctx.Next()
})
app.Put("/:bucket", s3ApiController.PutBucketActions)
@@ -884,6 +889,15 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
wantErr: false,
statusCode: 400,
},
{
name: "Create-bucket-invalid-bucket-name",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/aa", nil),
},
wantErr: false,
statusCode: 400,
},
{
name: "Create-bucket-success",
app: app,
@@ -960,9 +974,9 @@ func TestS3ApiController_PutActions(t *testing.T) {
PutObjectAclFunc: func(context.Context, *s3.PutObjectAclInput) error {
return nil
},
CopyObjectFunc: func(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
return s3response.CopyObjectOutput{
CopyObjectResult: &s3response.CopyObjectResult{},
CopyObjectFunc: func(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{},
}, nil
},
PutObjectFunc: func(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
@@ -989,9 +1003,10 @@ func TestS3ApiController_PutActions(t *testing.T) {
},
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Put("/:bucket/:key/*", s3ApiController.PutActions)
@@ -1277,9 +1292,10 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
@@ -1362,9 +1378,10 @@ func TestS3ApiController_DeleteObjects(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Post("/:bucket", s3ApiController.DeleteObjects)
@@ -1441,9 +1458,10 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Delete("/:bucket/:key/*", s3ApiController.DeleteActions)
@@ -1464,9 +1482,10 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
}}
appErr.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
appErr.Delete("/:bucket/:key/*", s3ApiControllerErr.DeleteActions)
@@ -1546,10 +1565,11 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
utils.ContextKeyRegion.Set(ctx, "us-east-1")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
ctx.Locals("region", "us-east-1")
return ctx.Next()
})
@@ -1563,16 +1583,17 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
return acldata, nil
},
HeadBucketFunc: func(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrBucketNotEmpty)
return nil, s3err.GetAPIError(3)
},
},
}
appErr.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
utils.ContextKeyRegion.Set(ctx, "us-east-1")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
ctx.Locals("region", "us-east-1")
return ctx.Next()
})
@@ -1649,9 +1670,10 @@ func TestS3ApiController_HeadObject(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Head("/:bucket/:key/*", s3ApiController.HeadObject)
@@ -1671,9 +1693,10 @@ func TestS3ApiController_HeadObject(t *testing.T) {
}
appErr.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
appErr.Head("/:bucket/:key/*", s3ApiControllerErr.HeadObject)
@@ -1742,8 +1765,8 @@ func TestS3ApiController_CreateActions(t *testing.T) {
RestoreObjectFunc: func(context.Context, *s3.RestoreObjectInput) error {
return nil
},
CompleteMultipartUploadFunc: func(context.Context, *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
return s3response.CompleteMultipartUploadResult{}, "", nil
CompleteMultipartUploadFunc: func(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return &s3.CompleteMultipartUploadOutput{}, nil
},
CreateMultipartUploadFunc: func(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
return s3response.InitiateMultipartUploadResult{}, nil
@@ -1775,9 +1798,10 @@ func TestS3ApiController_CreateActions(t *testing.T) {
`
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Post("/:bucket/:key/*", s3ApiController.CreateActions)

View File

@@ -1,226 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package debuglogger
import (
"fmt"
"log"
"net/http"
"strings"
"sync/atomic"
"github.com/gofiber/fiber/v2"
)
type Color string
const (
green Color = "\033[32m"
yellow Color = "\033[33m"
blue Color = "\033[34m"
Purple Color = "\033[0;35m"
reset = "\033[0m"
borderChar = "─"
boxWidth = 120
)
// Logs http request details: headers, body, params, query args
func LogFiberRequestDetails(ctx *fiber.Ctx) {
// Log the full request url
fullURL := ctx.Protocol() + "://" + ctx.Hostname() + ctx.OriginalURL()
fmt.Printf("%s[URL]: %s%s\n", green, fullURL, reset)
// log request headers
wrapInBox(green, "REQUEST HEADERS", boxWidth, func() {
for key, value := range ctx.Request().Header.All() {
printWrappedLine(yellow, string(key), string(value))
}
})
// skip request body log for PutObject and UploadPart
skipBodyLog := isLargeDataAction(ctx)
if !skipBodyLog {
body := ctx.Request().Body()
if len(body) != 0 {
printBoxTitleLine(blue, "REQUEST BODY", boxWidth, false)
fmt.Printf("%s%s%s\n", blue, body, reset)
printHorizontalBorder(blue, boxWidth, false)
}
}
if ctx.Request().URI().QueryArgs().Len() != 0 {
for key, value := range ctx.Request().URI().QueryArgs().All() {
log.Printf("%s: %s", key, value)
}
}
}
// Logs http response details: body, headers
func LogFiberResponseDetails(ctx *fiber.Ctx) {
wrapInBox(green, "RESPONSE HEADERS", boxWidth, func() {
for key, value := range ctx.Response().Header.All() {
printWrappedLine(yellow, string(key), string(value))
}
})
_, ok := ctx.Locals("skip-res-body-log").(bool)
if !ok {
body := ctx.Response().Body()
if len(body) != 0 {
PrintInsideHorizontalBorders(blue, "RESPONSE BODY", string(body), boxWidth)
}
}
}
var debugEnabled atomic.Bool
// SetDebugEnabled sets the debug mode
func SetDebugEnabled() {
debugEnabled.Store(true)
}
// Logf is the same as 'fmt.Printf' with a debug prefix,
// a color added, and '\n' appended at the end
func Logf(format string, v ...any) {
if !debugEnabled.Load() {
return
}
debugPrefix := "[DEBUG]: "
fmt.Printf(string(yellow)+debugPrefix+format+reset+"\n", v...)
}
// Infof prints a green info line with an "[INFO]: " prefix
func Infof(format string, v ...any) {
if !debugEnabled.Load() {
return
}
debugPrefix := "[INFO]: "
fmt.Printf(string(green)+debugPrefix+format+reset+"\n", v...)
}
// PrintInsideHorizontalBorders prints the text between horizontal
// borders, with the title centered in the upper border
func PrintInsideHorizontalBorders(color Color, title, text string, width int) {
if !debugEnabled.Load() {
return
}
printBoxTitleLine(color, title, width, false)
fmt.Printf("%s%s%s\n", color, text, reset)
printHorizontalBorder(color, width, false)
}
// Prints the box title line, with or without corner characters: "┌", "┐"
// e.g. ┌────────────────[ RESPONSE HEADERS ]────────────────┐
func printBoxTitleLine(color Color, title string, length int, closing bool) {
leftCorner, rightCorner := "┌", "┐"
if !closing {
leftCorner, rightCorner = borderChar, borderChar
}
// Calculate how many border characters are needed
titleFormatted := fmt.Sprintf("[ %s ]", title)
borderSpace := length - len(titleFormatted) - 2 // 2 for corners
leftLen := borderSpace / 2
rightLen := borderSpace - leftLen
// Build the line
line := leftCorner +
strings.Repeat(borderChar, leftLen) +
titleFormatted +
strings.Repeat(borderChar, rightLen) +
rightCorner
fmt.Println(string(color) + line + reset)
}
// Prints a horizontal border line, with or without corner characters: "└", "┘"
func printHorizontalBorder(color Color, length int, closing bool) {
leftCorner, rightCorner := "└", "┘"
if !closing {
leftCorner, rightCorner = borderChar, borderChar
}
line := leftCorner + strings.Repeat(borderChar, length-2) + rightCorner + reset
fmt.Println(string(color) + line)
}
// wrapInBox wraps the output of a function call (fn) inside a styled box with a title.
func wrapInBox(color Color, title string, length int, fn func()) {
printBoxTitleLine(color, title, length, true)
fn()
printHorizontalBorder(color, length, true)
}
// getLen returns the length of the provided string,
// enforcing a minimum of 13 for shorter strings
func getLen(str string) int {
if len(str) < 13 {
return 13
}
return len(str)
}
// prints a formatted key-value pair within a box layout,
// wrapping the value text if it exceeds the allowed width.
func printWrappedLine(keyColor Color, key, value string) {
prefix := fmt.Sprintf("%s│%s %s%-13s%s : ", green, reset, keyColor, key, reset)
prefixLen := len(prefix) - len(green) - len(reset) - len(keyColor) - len(reset)
// the actual prefix size without colors
actualPrefixLen := getLen(key) + 5
lineWidth := boxWidth - prefixLen
valueLines := wrapText(value, lineWidth)
for i, line := range valueLines {
if i == 0 {
if len(line) < lineWidth {
line += strings.Repeat(" ", lineWidth-len(line))
}
fmt.Printf("%s%s%s %s│%s\n", prefix, reset, line, green, reset)
} else {
line = strings.Repeat(" ", actualPrefixLen-2) + line
if len(line) < boxWidth-4 {
line += strings.Repeat(" ", boxWidth-len(line)-4)
}
fmt.Printf("%s│ %s%s %s│%s\n", green, reset, line, green, reset)
}
}
}
// wrapText splits the input text into lines of at most `width` characters each.
func wrapText(text string, width int) []string {
var lines []string
for len(text) > width {
lines = append(lines, text[:width])
text = text[width:]
}
if text != "" {
lines = append(lines, text)
}
return lines
}
// TODO: remove this and use utils.IsBigDataAction after refactoring
// and creating 'internal' package
func isLargeDataAction(ctx *fiber.Ctx) bool {
if ctx.Method() == http.MethodPut && len(strings.Split(ctx.Path(), "/")) >= 3 {
if !ctx.Request().URI().QueryArgs().Has("tagging") && ctx.Get("X-Amz-Copy-Source") == "" && !ctx.Request().URI().QueryArgs().Has("acl") {
return true
}
}
return false
}
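For reference, typical call sites of the helpers deleted above would have looked like this (format arguments are made up; the import path matches the package header shown elsewhere in this diff):
package main
import "github.com/versity/versitygw/s3api/debuglogger"
func main() {
	debuglogger.SetDebugEnabled()
	// yellow "[DEBUG]: ..." line
	debuglogger.Logf("parsed auth header for access key %s", "AKIDEXAMPLE")
	// green "[INFO]: ..." line
	debuglogger.Infof("bucket %q created", "photos")
	// body printed between horizontal borders, title centered in the top one
	debuglogger.PrintInsideHorizontalBorders(debuglogger.Purple, "RESPONSE BODY", "<xml/>", 120)
}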

View File

@@ -24,7 +24,6 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
@@ -35,6 +34,7 @@ var (
func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fiber.Handler {
return func(ctx *fiber.Ctx) error {
isRoot, acct := ctx.Locals("isRoot").(bool), ctx.Locals("account").(auth.Account)
path := ctx.Path()
pathParts := strings.Split(path, "/")
bucket := pathParts[1]
@@ -53,7 +53,6 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fibe
!ctx.Request().URI().QueryArgs().Has("object-lock") &&
!ctx.Request().URI().QueryArgs().Has("ownershipControls") &&
!ctx.Request().URI().QueryArgs().Has("cors") {
isRoot, acct := utils.ContextKeyIsRoot.Get(ctx).(bool), utils.ContextKeyAccount.Get(ctx).(auth.Account)
if err := auth.MayCreateBucket(acct, isRoot); err != nil {
return controllers.SendXMLResponse(ctx, nil, err, &controllers.MetaOpts{Logger: logger, Action: "CreateBucket"})
}
@@ -78,10 +77,10 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fibe
// if owner is not set, set default owner to root account
if parsedAcl.Owner == "" {
parsedAcl.Owner = utils.ContextKeyRootAccessKey.Get(ctx).(string)
parsedAcl.Owner = ctx.Locals("rootAccess").(string)
}
utils.ContextKeyParsedAcl.Set(ctx, parsedAcl)
ctx.Locals("parsedAcl", parsedAcl)
return ctx.Next()
}
}

View File

@@ -21,14 +21,13 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
func IsAdmin(logger s3log.AuditLogger) fiber.Handler {
return func(ctx *fiber.Ctx) error {
acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
acct := ctx.Locals("account").(auth.Account)
if acct.Role != auth.RoleAdmin {
path := ctx.Path()
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminAccessDenied),

View File

@@ -33,8 +33,7 @@ import (
)
const (
iso8601Format = "20060102T150405Z"
maxObjSizeLimit = 5 * 1024 * 1024 * 1024 // 5gb
iso8601Format = "20060102T150405Z"
)
type RootUserConfig struct {
@@ -46,15 +45,14 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
acct := accounts{root: root, iam: iam}
return func(ctx *fiber.Ctx) error {
// The bucket is public, no need to check this signature
if utils.ContextKeyPublicBucket.IsSet(ctx) {
return ctx.Next()
}
// If ContextKeyAuthenticated is set in the context locals, this was the presigned URL case
if utils.ContextKeyAuthenticated.IsSet(ctx) {
// If the account is set in the context locals, this was the presigned URL case
_, ok := ctx.Locals("account").(auth.Account)
if ok {
return ctx.Next()
}
ctx.Locals("region", region)
ctx.Locals("startTime", time.Now())
authorization := ctx.Get("Authorization")
if authorization == "" {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty), logger, mm)
@@ -73,7 +71,8 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
}, logger, mm)
}
utils.ContextKeyIsRoot.Set(ctx, authData.Access == root.Access)
ctx.Locals("isRoot", authData.Access == root.Access)
ctx.Locals("rootAccess", root.Access)
account, err := acct.getAccount(authData.Access)
if err == auth.ErrNoSuchUser {
@@ -82,8 +81,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
utils.ContextKeyAccount.Set(ctx, account)
ctx.Locals("account", account)
// Check X-Amz-Date header
date := ctx.Get("X-Amz-Date")
@@ -107,16 +105,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
return sendResponse(ctx, err, logger, mm)
}
var contentLength int64
contentLengthStr := ctx.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
//TODO: not sure if InvalidRequest should be returned in this case
if err != nil {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
}
}
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if utils.IsBigDataAction(ctx) {
// for streaming PUT actions, authorization is deferred
@@ -138,18 +126,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
return ctx.Next()
}
// Content-Length has to be set for data uploads: PutObject, UploadPart
if contentLengthStr == "" {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingContentLength), logger, mm)
}
// the upload limit for big data actions (PutObject, UploadPart)
// is 5 GiB; if the size exceeds the limit, return an 'EntityTooLarge' error
if contentLength > maxObjSizeLimit {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrEntityTooLarge), logger, mm)
}
return ctx.Next()
@@ -166,6 +142,15 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
}
}
var contentLength int64
contentLengthStr := ctx.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
if err != nil {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
}
}
err = utils.CheckValidSignature(ctx, authData, account.Secret, hashPayload, tdate, contentLength, debug)
if err != nil {
return sendResponse(ctx, err, logger, mm)

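Condensing the checks this hunk moves around: one side of the diff gates non-streaming data uploads on Content-Length roughly like the sketch below (the helper name is invented; the constant and error codes appear verbatim in the hunks above).
const maxObjSizeLimit = 5 * 1024 * 1024 * 1024 // 5 GiB
func checkUploadSize(contentLengthStr string) error {
	// Content-Length has to be set for data uploads: PutObject, UploadPart
	if contentLengthStr == "" {
		return s3err.GetAPIError(s3err.ErrMissingContentLength)
	}
	contentLength, err := strconv.ParseInt(contentLengthStr, 10, 64)
	if err != nil {
		return s3err.GetAPIError(s3err.ErrInvalidRequest)
	}
	if contentLength > maxObjSizeLimit {
		return s3err.GetAPIError(s3err.ErrEntityTooLarge)
	}
	return nil
}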
View File

@@ -18,15 +18,14 @@ import (
"io"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/utils"
)
func wrapBodyReader(ctx *fiber.Ctx, wr func(io.Reader) io.Reader) {
r, ok := utils.ContextKeyBodyReader.Get(ctx).(io.Reader)
r, ok := ctx.Locals("body-reader").(io.Reader)
if !ok {
r = ctx.Request().BodyStream()
}
r = wr(r)
utils.ContextKeyBodyReader.Set(ctx, r)
ctx.Locals("body-reader", r)
}
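Since wrapBodyReader layers each new reader over whatever the previous middleware installed, decoders compose; a sketch of a caller (newChunkDecoder is hypothetical):
func withChunkDecoding(ctx *fiber.Ctx) {
	wrapBodyReader(ctx, func(r io.Reader) io.Reader {
		// newChunkDecoder would strip aws-chunked framing from the stream
		return newChunkDecoder(r)
	})
}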

View File

@@ -1,58 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"net/http"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
// BucketObjectNameValidator extracts and validates
// the bucket and object names from the request URI.
func BucketObjectNameValidator(l s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
return func(ctx *fiber.Ctx) error {
// skip the check for admin apis
if ctx.Method() == http.MethodPatch {
return ctx.Next()
}
path := ctx.Path()
// skip the check if the operation isn't bucket/object scoped
// e.g. ListBuckets
if path == "/" {
return ctx.Next()
}
bucket, object := parsePath(path)
// check if the provided bucket name is valid
if !utils.IsValidBucketName(bucket) {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidBucketName), l, mm)
}
// check if the provided object name is valid
// skip for empty object names, e.g. bucket operations: HeadBucket...
if object != "" && !utils.IsObjectNameValid(object) {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrBadRequest), l, mm)
}
return ctx.Next()
}
}

View File

@@ -1,40 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"fmt"
"strings"
"github.com/gofiber/fiber/v2"
)
// HostStyleParser is a middleware which parses the bucket name
// from the 'Host' header and prepends it to the request URL path
func HostStyleParser(virtualDomain string) fiber.Handler {
return func(ctx *fiber.Ctx) error {
host := string(ctx.Request().Host())
// the host should match this pattern: '<bucket_name>.<virtual_domain>'
bucket, _, found := strings.Cut(host, "."+virtualDomain)
if !found || bucket == "" {
return ctx.Next()
}
path := ctx.Path()
pathStyleUrl := fmt.Sprintf("/%v%v", bucket, path)
ctx.Path(pathStyleUrl)
return ctx.Next()
}
}
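To make the rewrite concrete, here is a minimal sketch of the same cut the middleware performs, with an assumed virtual domain and host (all values are illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	virtualDomain := "s3.example.com" // assumed gateway setting
	host := "photos.s3.example.com"   // host-style request
	path := "/2024/cat.jpg"

	// Everything before ".<virtual_domain>" is treated as the bucket name.
	bucket, _, found := strings.Cut(host, "."+virtualDomain)
	if found && bucket != "" {
		fmt.Printf("/%v%v\n", bucket, path) // /photos/2024/cat.jpg
	}
}

Requests whose host does not end in the virtual domain fall through unchanged, so path-style addressing keeps working.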

View File

@@ -15,15 +15,30 @@
package middlewares
import (
"fmt"
"log"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/debuglogger"
)
func DebugLogger() fiber.Handler {
func RequestLogger(isDebug bool) fiber.Handler {
return func(ctx *fiber.Ctx) error {
debuglogger.LogFiberRequestDetails(ctx)
err := ctx.Next()
debuglogger.LogFiberResponseDetails(ctx)
return err
ctx.Locals("isDebug", isDebug)
if isDebug {
log.Println("Request headers: ")
ctx.Request().Header.VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
if ctx.Request().URI().QueryArgs().Len() != 0 {
fmt.Println()
log.Println("Request query arguments: ")
ctx.Request().URI().QueryArgs().VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
}
}
return ctx.Next()
}
}

View File

@@ -16,7 +16,7 @@ package middlewares
import (
"io"
"strconv"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
@@ -30,24 +30,20 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, logger
acct := accounts{root: root, iam: iam}
return func(ctx *fiber.Ctx) error {
// The bucket is public, no need to check this signature
if utils.ContextKeyPublicBucket.IsSet(ctx) {
return ctx.Next()
}
if ctx.Query("X-Amz-Signature") == "" {
return ctx.Next()
}
// Set the "authenticated" key in the context in case authentication succeeds;
// otherwise the middleware will return the caught error
utils.ContextKeyAuthenticated.Set(ctx, true)
ctx.Locals("region", region)
ctx.Locals("startTime", time.Now())
authData, err := utils.ParsePresignedURIParts(ctx)
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
utils.ContextKeyIsRoot.Set(ctx, authData.Access == root.Access)
ctx.Locals("isRoot", authData.Access == root.Access)
ctx.Locals("rootAccess", root.Access)
account, err := acct.getAccount(authData.Access)
if err == auth.ErrNoSuchUser {
@@ -56,28 +52,9 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, logger
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
utils.ContextKeyAccount.Set(ctx, account)
var contentLength int64
contentLengthStr := ctx.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
//TODO: confirm that InvalidRequest is the correct error for an unparsable Content-Length
if err != nil {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
}
}
ctx.Locals("account", account)
if utils.IsBigDataAction(ctx) {
// Content-Length has to be set for data uploads: PutObject, UploadPart
if contentLengthStr == "" {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingContentLength), logger, mm)
}
// the upload limit for big data actions (PutObject, UploadPart)
// is 5 GiB. If the size exceeds the limit, return an 'EntityTooLarge' error
if contentLength > maxObjSizeLimit {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrEntityTooLarge), logger, mm)
}
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
return utils.NewPresignedAuthReader(ctx, r, authData, account.Secret, debug)
})

View File

@@ -1,298 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"io"
"strings"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
func AuthorizePublicBucketAccess(be backend.Backend, l s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
return func(ctx *fiber.Ctx) error {
// skip for authenticated requests
if ctx.Query("X-Amz-Algorithm") != "" || ctx.Get("Authorization") != "" {
return ctx.Next()
}
bucket, object := parsePath(ctx.Path())
action, permission, err := detectS3Action(ctx, object == "")
if err != nil {
return sendResponse(ctx, err, l, mm)
}
err = auth.VerifyPublicAccess(ctx.Context(), be, action, permission, bucket, object)
if err != nil {
return sendResponse(ctx, err, l, mm)
}
if utils.IsBigDataAction(ctx) {
payloadType := ctx.Get("X-Amz-Content-Sha256")
if utils.IsUnsignedStreamingPayload(payloadType) {
checksumType, err := utils.ExtractChecksumType(ctx)
if err != nil {
return sendResponse(ctx, err, l, mm)
}
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr io.Reader
cr, err = utils.NewUnsignedChunkReader(r, checksumType)
return cr
})
if err != nil {
return sendResponse(ctx, err, l, mm)
}
}
}
utils.ContextKeyPublicBucket.Set(ctx, true)
return ctx.Next()
}
}
func detectS3Action(ctx *fiber.Ctx, isBucketAction bool) (auth.Action, auth.Permission, error) {
path := ctx.Path()
// ListBuckets is not publicly available
if path == "/" {
//TODO: Still not clear what kind of error should be returned in this case (ListBuckets)
return "", auth.PermissionRead, s3err.GetAPIError(s3err.ErrAccessDenied)
}
queryArgs := ctx.Context().QueryArgs()
switch ctx.Method() {
case fiber.MethodPatch:
// Admin apis should always be protected
return "", "", s3err.GetAPIError(s3err.ErrAccessDenied)
case fiber.MethodHead:
// HeadBucket
if isBucketAction {
return auth.ListBucketAction, auth.PermissionRead, nil
}
// HeadObject
return auth.GetObjectAction, auth.PermissionRead, nil
case fiber.MethodGet:
if isBucketAction {
if queryArgs.Has("tagging") {
// GetBucketTagging
return auth.GetBucketTaggingAction, auth.PermissionRead, nil
} else if queryArgs.Has("ownershipControls") {
// GetBucketOwnershipControls
return auth.GetBucketOwnershipControlsAction, auth.PermissionRead, s3err.GetAPIError(s3err.ErrAnonymousGetBucketOwnership)
} else if queryArgs.Has("versioning") {
// GetBucketVersioning
return auth.GetBucketVersioningAction, auth.PermissionRead, nil
} else if queryArgs.Has("policy") {
// GetBucketPolicy
return auth.GetBucketPolicyAction, auth.PermissionRead, nil
} else if queryArgs.Has("cors") {
// GetBucketCors
return auth.GetBucketCorsAction, auth.PermissionRead, nil
} else if queryArgs.Has("versions") {
// ListObjectVersions
return auth.ListBucketVersionsAction, auth.PermissionRead, nil
} else if queryArgs.Has("object-lock") {
// GetObjectLockConfiguration
return auth.GetBucketObjectLockConfigurationAction, auth.PermissionReadAcp, nil
} else if queryArgs.Has("acl") {
// GetBucketAcl
return auth.GetBucketAclAction, auth.PermissionRead, nil
} else if queryArgs.Has("uploads") {
// ListMultipartUploads
return auth.ListBucketMultipartUploadsAction, auth.PermissionRead, nil
} else if queryArgs.GetUintOrZero("list-type") == 2 {
// ListObjectsV2
return auth.ListBucketAction, auth.PermissionRead, nil
}
// All the other requests are considered as ListObjects in the router
// no matter what kind of query arguments are provided apart from the ones above
return auth.ListBucketAction, auth.PermissionRead, nil
}
if queryArgs.Has("tagging") {
// GetObjectTagging
return auth.GetObjectTaggingAction, auth.PermissionRead, nil
} else if queryArgs.Has("retention") {
// GetObjectRetention
return auth.GetObjectRetentionAction, auth.PermissionRead, nil
} else if queryArgs.Has("legal-hold") {
// GetObjectLegalHold
return auth.GetObjectLegalHoldAction, auth.PermissionReadAcp, nil
} else if queryArgs.Has("acl") {
// GetObjectAcl
return auth.GetObjectAclAction, auth.PermissionRead, nil
} else if queryArgs.Has("attributes") {
// GetObjectAttributes
return auth.GetObjectAttributesAction, auth.PermissionRead, nil
} else if queryArgs.Has("uploadId") {
// ListParts
return auth.ListMultipartUploadPartsAction, auth.PermissionRead, nil
}
// All the other requests are considered as GetObject in the router
// no matter what kind of query arguments are provided apart from the ones above
if queryArgs.Has("versionId") {
return auth.GetObjectVersionAction, auth.PermissionRead, nil
}
return auth.GetObjectAction, auth.PermissionRead, nil
case fiber.MethodPut:
if isBucketAction {
if queryArgs.Has("tagging") {
// PutBucketTagging
return auth.PutBucketTaggingAction, auth.PermissionWrite, nil
}
if queryArgs.Has("ownershipControls") {
// PutBucketOwnershipControls
return auth.PutBucketOwnershipControlsAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousPutBucketOwnership)
}
if queryArgs.Has("versioning") {
// PutBucketVersioning
return auth.PutBucketVersioningAction, auth.PermissionWrite, nil
}
if queryArgs.Has("object-lock") {
// PutObjectLockConfiguration
return auth.PutBucketObjectLockConfigurationAction, auth.PermissionWrite, nil
}
if queryArgs.Has("cors") {
// PutBucketCors
return auth.PutBucketCorsAction, auth.PermissionWrite, nil
}
if queryArgs.Has("policy") {
// PutBucketPolicy
return auth.PutBucketPolicyAction, auth.PermissionWrite, nil
}
if queryArgs.Has("acl") {
// PutBucketAcl
return auth.PutBucketAclAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousRequest)
}
// All the other requests are considered as 'CreateBucket' in the router
return "", "", s3err.GetAPIError(s3err.ErrAnonymousRequest)
}
if queryArgs.Has("tagging") {
// PutObjectTagging
return auth.PutObjectTaggingAction, auth.PermissionWrite, nil
}
if queryArgs.Has("retention") {
// PutObjectRetention
return auth.PutObjectRetentionAction, auth.PermissionWrite, nil
}
if queryArgs.Has("legal-hold") {
// PutObjectLegalHold
return auth.PutObjectLegalHoldAction, auth.PermissionWrite, nil
}
if queryArgs.Has("acl") {
// PutObjectAcl
return auth.PutObjectAclAction, auth.PermissionWriteAcp, s3err.GetAPIError(s3err.ErrAnonymousRequest)
}
if queryArgs.Has("uploadId") && queryArgs.Has("partNumber") {
if ctx.Get("X-Amz-Copy-Source") != "" {
// UploadPartCopy
//TODO: Add public access check for copy-source
// Return AccessDenied for now
return auth.PutObjectAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAccessDenied)
}
utils.ContextKeyBodyReader.Set(ctx, ctx.Request().BodyStream())
// UploadPart
return auth.PutObjectAction, auth.PermissionWrite, nil
}
if ctx.Get("X-Amz-Copy-Source") != "" {
return auth.PutObjectAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousCopyObject)
}
utils.ContextKeyBodyReader.Set(ctx, ctx.Request().BodyStream())
// All the other requests are considered as 'PutObject' in the router
return auth.PutObjectAction, auth.PermissionWrite, nil
case fiber.MethodPost:
if isBucketAction {
// DeleteObjects
// FIXME: should be fixed with https://github.com/versity/versitygw/issues/1327
// Return AccessDenied for now
return auth.DeleteObjectAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if queryArgs.Has("restore") {
return auth.RestoreObjectAction, auth.PermissionWrite, nil
}
if queryArgs.Has("select") && ctx.Query("select-type") == "2" {
// SelectObjectContent
return auth.GetObjectAction, auth.PermissionRead, s3err.GetAPIError(s3err.ErrAnonymousRequest)
}
if queryArgs.Has("uploadId") {
// CompleteMultipartUpload
return auth.PutObjectAction, auth.PermissionWrite, nil
}
// All the other requests are considered as 'CreateMultipartUpload' in the router
return "", "", s3err.GetAPIError(s3err.ErrAnonymousCreateMp)
case fiber.MethodDelete:
if isBucketAction {
if queryArgs.Has("tagging") {
// DeleteBucketTagging
return auth.PutBucketTaggingAction, auth.PermissionWrite, nil
}
if queryArgs.Has("ownershipControls") {
// DeleteBucketOwnershipControls
return auth.PutBucketOwnershipControlsAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousPutBucketOwnership)
}
if queryArgs.Has("policy") {
// DeleteBucketPolicy
return auth.PutBucketPolicyAction, auth.PermissionWrite, nil
}
if queryArgs.Has("cors") {
// DeleteBucketCors
return auth.PutBucketCorsAction, auth.PermissionWrite, nil
}
// All the other requests are considered as 'DeleteBucket' in the router
return auth.DeleteBucketAction, auth.PermissionWrite, nil
}
if queryArgs.Has("tagging") {
// DeleteObjectTagging
return auth.PutObjectTaggingAction, auth.PermissionWrite, nil
}
if queryArgs.Has("uploadId") {
// AbortMultipartUpload
return auth.AbortMultipartUploadAction, auth.PermissionWrite, nil
}
// All the other requests are considered as 'DeleteObject' in the router
return auth.DeleteObjectAction, auth.PermissionWrite, nil
default:
// If no action is detected, return AccessDenied
return "", "", s3err.GetAPIError(s3err.ErrAccessDenied)
}
}
// parsePath extracts the bucket and object names from the path
func parsePath(path string) (string, string) {
p := strings.TrimPrefix(path, "/")
bucket, object, _ := strings.Cut(p, "/")
return bucket, object
}
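A quick illustration of how parsePath splits request paths (the local parse duplicates the logic above so the sketch runs standalone; expected outputs in comments):

package main

import (
	"fmt"
	"strings"
)

// parse duplicates parsePath above for a standalone demo.
func parse(path string) (string, string) {
	p := strings.TrimPrefix(path, "/")
	bucket, object, _ := strings.Cut(p, "/")
	return bucket, object
}

func main() {
	for _, p := range []string{"/mybucket", "/mybucket/a/b.txt", "/"} {
		b, o := parse(p)
		fmt.Printf("%q -> bucket=%q object=%q\n", p, b, o)
	}
	// "/mybucket"         -> bucket="mybucket" object=""
	// "/mybucket/a/b.txt" -> bucket="mybucket" object="a/b.txt"
	// "/"                 -> bucket="" object=""
}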

View File

@@ -1,37 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/s3api/utils"
)
func SetDefaultValues(root RootUserConfig, region string) fiber.Handler {
return func(ctx *fiber.Ctx) error {
// These are necessary for the server access logs
utils.ContextKeyRegion.Set(ctx, region)
utils.ContextKeyStartTime.Set(ctx, time.Now())
utils.ContextKeyRootAccessKey.Set(ctx, root.Access)
// Set the account and isRoot to some default values, to avoid panics
// in case of public buckets
utils.ContextKeyAccount.Set(ctx, auth.Account{})
utils.ContextKeyIsRoot.Set(ctx, false)
return ctx.Next()
}
}
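The defaults matter because a hard type assertion on an unset fiber local panics at runtime. A minimal sketch of the failure mode, with a plain map standing in for ctx.Locals (the account type here is hypothetical):

package main

import "fmt"

type account struct{ Access string }

func main() {
	locals := map[string]any{} // stands in for fiber's per-request locals

	// Without a default, a hard assertion on a missing key panics:
	//   acct := locals["account"].(account) // panic: interface conversion
	// Seeding the default up front makes the assertion safe everywhere:
	locals["account"] = account{}
	acct := locals["account"].(account)
	fmt.Printf("%+v\n", acct) // {Access:}
}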

View File

@@ -26,7 +26,7 @@ import (
func DecodeURL(logger s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
return func(ctx *fiber.Ctx) error {
unescp, err := url.PathUnescape(string(ctx.Request().URI().PathOriginal()))
unescp, err := url.QueryUnescape(string(ctx.Request().URI().PathOriginal()))
if err != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidURI), &controllers.MetaOpts{Logger: logger, MetricsMng: mm})
}

View File

@@ -42,7 +42,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
app.Patch("/delete-user", middlewares.IsAdmin(logger), adminController.DeleteUser)
// UpdateUser admin api
app.Patch("/update-user", middlewares.IsAdmin(logger), adminController.UpdateUser)
app.Patch("update-user", middlewares.IsAdmin(logger), adminController.UpdateUser)
// ListUsers admin api
app.Patch("/list-users", middlewares.IsAdmin(logger), adminController.ListUsers)

View File

@@ -29,16 +29,15 @@ import (
)
type S3ApiServer struct {
app *fiber.App
backend backend.Backend
router *S3ApiRouter
port string
cert *tls.Certificate
quiet bool
debug bool
readonly bool
health string
virtualDomain string
app *fiber.App
backend backend.Backend
router *S3ApiRouter
port string
cert *tls.Certificate
quiet bool
debug bool
readonly bool
health string
}
func New(
@@ -77,25 +76,7 @@ func New(
})
}
app.Use(middlewares.DecodeURL(l, mm))
// initialize host-style parser if a virtual domain is specified
if server.virtualDomain != "" {
app.Use(middlewares.HostStyleParser(server.virtualDomain))
}
// initialize the default value setter middleware
app.Use(middlewares.SetDefaultValues(root, region))
// initialize the debug logger in debug mode
if server.debug {
app.Use(middlewares.DebugLogger())
}
// initialize the bucket/object name validator
app.Use(middlewares.BucketObjectNameValidator(l, mm))
// Public buckets access checker
app.Use(middlewares.AuthorizePublicBucketAccess(be, l, mm))
app.Use(middlewares.RequestLogger(server.debug))
// Authentication middlewares
app.Use(middlewares.VerifyPresignedV4Signature(root, iam, l, mm, region, server.debug))
@@ -140,11 +121,6 @@ func WithReadOnly() Option {
return func(s *S3ApiServer) { s.readonly = true }
}
// WithHostStyle enables host-style bucket addressing on the server
func WithHostStyle(virtualDomain string) Option {
return func(s *S3ApiServer) { s.virtualDomain = virtualDomain }
}
func (sa *S3ApiServer) Serve() (err error) {
if sa.cert != nil {
return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)

View File

@@ -18,19 +18,13 @@ import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3err"
)
const (
maxObjSizeLimit = 5 * 1024 * 1024 * 1024 // 5 GiB
)
type payloadType string
const (
@@ -88,28 +82,11 @@ func (c checksumType) isValid() bool {
c == checksumTypeCrc64nvme
}
// Extracts and validates the checksum type from the 'X-Amz-Trailer' header
func ExtractChecksumType(ctx *fiber.Ctx) (checksumType, error) {
trailer := ctx.Get("X-Amz-Trailer")
chType := checksumType(strings.ToLower(trailer))
if chType != "" && !chType.isValid() {
debuglogger.Logf("invalid value for 'X-Amz-Trailer': %v", chType)
return "", s3err.GetAPIError(s3err.ErrTrailerHeaderNotSupported)
}
return chType, nil
}
// IsSpecialPayload checks for special authorization types
func IsSpecialPayload(str string) bool {
return specialValues[payloadType(str)]
}
// Checks if the provided string is unsigned payload trailer type
func IsUnsignedStreamingPayload(str string) bool {
return payloadType(str) == payloadTypeStreamingUnsignedTrailer
}
// IsChunkEncoding checks for streaming/unsigned authorization types
func IsStreamingPayload(str string) bool {
pt := payloadType(str)
@@ -119,36 +96,18 @@ func IsStreamingPayload(str string) bool {
}
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
decContLengthStr := ctx.Get("X-Amz-Decoded-Content-Length")
if decContLengthStr == "" {
debuglogger.Logf("missing required header 'X-Amz-Decoded-Content-Length'")
return nil, s3err.GetAPIError(s3err.ErrMissingContentLength)
decContLength := ctx.Get("X-Amz-Decoded-Content-Length")
if decContLength == "" {
return nil, s3err.GetAPIError(s3err.ErrMissingDecodedContentLength)
}
decContLength, err := strconv.ParseInt(decContLengthStr, 10, 64)
//TODO: confirm that InvalidRequest is the correct error for an unparsable X-Amz-Decoded-Content-Length
if err != nil {
debuglogger.Logf("invalid value for 'X-Amz-Decoded-Content-Length': %v", decContLengthStr)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if decContLength > maxObjSizeLimit {
debuglogger.Logf("the object size exceeds the allowed limit: (size): %v, (limit): %v", decContLength, maxObjSizeLimit)
return nil, s3err.GetAPIError(s3err.ErrEntityTooLarge)
}
contentSha256 := payloadType(ctx.Get("X-Amz-Content-Sha256"))
if !contentSha256.isValid() {
//TODO: Add proper APIError
debuglogger.Logf("invalid value for 'X-Amz-Content-Sha256': %v", contentSha256)
return nil, fmt.Errorf("invalid x-amz-content-sha256: %v", string(contentSha256))
}
checksumType, err := ExtractChecksumType(ctx)
if err != nil {
return nil, err
}
if contentSha256 != payloadTypeStreamingSigned && checksumType == "" {
debuglogger.Logf("empty value for required trailer header 'X-Amz-Trailer': %v", checksumType)
checksumType := checksumType(strings.ToLower(ctx.Get("X-Amz-Trailer")))
if contentSha256 != payloadTypeStreamingSigned && !checksumType.isValid() {
return nil, s3err.GetAPIError(s3err.ErrTrailerHeaderNotSupported)
}
@@ -163,7 +122,6 @@ func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secr
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER
default:
debuglogger.Logf("unsupported chunk reader algorithm: %v", contentSha256)
return nil, getPayloadTypeNotSupportedErr(contentSha256)
}
}

View File

@@ -1,65 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"github.com/gofiber/fiber/v2"
)
// Region, StartTime, IsRoot, Account, AccessKey context locals
// are set to default values in middlewares.SetDefaultValues
// to avoid nil interface conversions
type ContextKey string
const (
ContextKeyRegion ContextKey = "region"
ContextKeyStartTime ContextKey = "start-time"
ContextKeyIsRoot ContextKey = "is-root"
ContextKeyRootAccessKey ContextKey = "root-access-key"
ContextKeyAccount ContextKey = "account"
ContextKeyAuthenticated ContextKey = "authenticated"
ContextKeyPublicBucket ContextKey = "public-bucket"
ContextKeyParsedAcl ContextKey = "parsed-acl"
ContextKeySkipResBodyLog ContextKey = "skip-res-body-log"
ContextKeyBodyReader ContextKey = "body-reader"
)
func (ck ContextKey) Values() []ContextKey {
return []ContextKey{
ContextKeyRegion,
ContextKeyStartTime,
ContextKeyIsRoot,
ContextKeyRootAccessKey,
ContextKeyAccount,
ContextKeyAuthenticated,
ContextKeyPublicBucket,
ContextKeyParsedAcl,
ContextKeySkipResBodyLog,
ContextKeyBodyReader,
}
}
func (ck ContextKey) Set(ctx *fiber.Ctx, val any) {
ctx.Locals(string(ck), val)
}
func (ck ContextKey) IsSet(ctx *fiber.Ctx) bool {
val := ctx.Locals(string(ck))
return val != nil
}
func (ck ContextKey) Get(ctx *fiber.Ctx) any {
return ctx.Locals(string(ck))
}
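A short sketch of the typed keys in use, replacing raw string locals; the handler name and placement are hypothetical:

package middlewares // illustrative placement

import (
	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3api/utils"
)

// exampleHandler shows the accessor pattern: key names live in one place,
// and reads keep the explicit type assertion.
func exampleHandler(ctx *fiber.Ctx) error {
	utils.ContextKeyIsRoot.Set(ctx, true)
	if isRoot, ok := utils.ContextKeyIsRoot.Get(ctx).(bool); ok && isRoot {
		// root-only branch
	}
	return ctx.Next()
}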

View File

@@ -1,180 +0,0 @@
// Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
// Jean-loup Gailly Mark Adler
// jloup@gzip.org madler@alumni.caltech.edu
// Original implementation is from
// https://github.com/vimeo/go-util/blob/8cd4c737f091d9317f72b25df78ce6cf869f7d30/crc32combine/crc32combine.go
// extended for crc64 support.
// Following is ported from C to Go in 2016 by Justin Ruggles, with minimal alteration.
// Used uint for unsigned long. Used uint32 for input arguments in order to match
// the Go hash/crc32 package. zlib CRC32 combine (https://github.com/madler/zlib)
package utils
import (
"hash/crc64"
)
const crc64NVME = 0x9a6c_9329_ac4b_c9b5
var crc64NVMETable = crc64.MakeTable(crc64NVME)
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
var sum uint64
for vec != 0 {
if vec&1 != 0 {
sum ^= mat[0]
}
vec >>= 1
mat = mat[1:]
}
return sum
}
func gf2MatrixSquare(square, mat []uint64) {
if len(square) != len(mat) {
panic("square matrix size mismatch")
}
for n := range mat {
square[n] = gf2MatrixTimes(mat, mat[n])
}
}
// crc32Combine returns the combined CRC-32 hash value of the two passed CRC-32
// hash values crc1 and crc2. poly represents the generator polynomial
// and len2 specifies the byte length that the crc2 hash covers.
func crc32Combine(poly uint32, crc1, crc2 uint32, len2 int64) uint32 {
// degenerate case (also disallow negative lengths)
if len2 <= 0 {
return crc1
}
even := make([]uint64, 32) // even-power-of-two zeros operator
odd := make([]uint64, 32) // odd-power-of-two zeros operator
// put operator for one zero bit in odd
odd[0] = uint64(poly) // CRC-32 polynomial
row := uint64(1)
for n := 1; n < 32; n++ {
odd[n] = row
row <<= 1
}
// put operator for two zero bits in even
gf2MatrixSquare(even, odd)
// put operator for four zero bits in odd
gf2MatrixSquare(odd, even)
// apply len2 zeros to crc1 (first square will put the operator for one
// zero byte, eight zero bits, in even)
crc1n := uint64(crc1)
for {
// apply zeros operator for this bit of len2
gf2MatrixSquare(even, odd)
if len2&1 != 0 {
crc1n = gf2MatrixTimes(even, crc1n)
}
len2 >>= 1
// if no more bits set, then done
if len2 == 0 {
break
}
// another iteration of the loop with odd and even swapped
gf2MatrixSquare(odd, even)
if len2&1 != 0 {
crc1n = gf2MatrixTimes(odd, crc1n)
}
len2 >>= 1
// if no more bits set, then done
if len2 == 0 {
break
}
}
// return combined crc
crc1n ^= uint64(crc2)
return uint32(crc1n)
}
// crc64Combine returns the combined CRC-64 hash value of the two passed CRC-64
// hash values crc1 and crc2. poly represents the generator polynomial
// and len2 specifies the byte length that the crc2 hash covers.
func crc64Combine(poly uint64, crc1, crc2 uint64, len2 int64) uint64 {
// degenerate case (also disallow negative lengths)
if len2 <= 0 {
return crc1
}
even := make([]uint64, 64) // even-power-of-two zeros operator
odd := make([]uint64, 64) // odd-power-of-two zeros operator
// put operator for one zero bit in odd
odd[0] = poly // CRC-64 polynomial
row := uint64(1)
for n := 1; n < 64; n++ {
odd[n] = row
row <<= 1
}
// put operator for two zero bits in even
gf2MatrixSquare(even, odd)
// put operator for four zero bits in odd
gf2MatrixSquare(odd, even)
// apply len2 zeros to crc1 (first square will put the operator for one
// zero byte, eight zero bits, in even)
crc1n := crc1
for {
// apply zeros operator for this bit of len2
gf2MatrixSquare(even, odd)
if len2&1 != 0 {
crc1n = gf2MatrixTimes(even, crc1n)
}
len2 >>= 1
// if no more bits set, then done
if len2 == 0 {
break
}
// another iteration of the loop with odd and even swapped
gf2MatrixSquare(odd, even)
if len2&1 != 0 {
crc1n = gf2MatrixTimes(odd, crc1n)
}
len2 >>= 1
// if no more bits set, then done
if len2 == 0 {
break
}
}
// return combined crc
crc1n ^= crc2
return crc1n
}
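The pairwise combine extends to any number of parts by folding left to right. A sketch meant to sit alongside the code above (foldCRC32 is a hypothetical helper, not from the source):

package utils

import "hash/crc32"

// foldCRC32 combines per-part CRC32 (IEEE) values into the checksum of the
// concatenated data by repeatedly applying crc32Combine above.
func foldCRC32(parts [][]byte) uint32 {
	tab := crc32.MakeTable(crc32.IEEE)
	combined := crc32.Checksum(parts[0], tab)
	for _, p := range parts[1:] {
		combined = crc32Combine(crc32.IEEE, combined, crc32.Checksum(p, tab), int64(len(p)))
	}
	return combined
}

The fold works because each step shifts the running CRC past len(p) zero bytes in GF(2) and XORs in the part CRC, which is exactly what hashing the concatenated data would produce.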

View File

@@ -1,57 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"hash/crc32"
"hash/crc64"
"testing"
)
func TestCRC32Combine(t *testing.T) {
data := []byte("The quick brown fox jumps over the lazy dog")
mid := len(data) / 2
part1 := data[:mid]
part2 := data[mid:]
var poly uint32 = crc32.IEEE
tab := crc32.MakeTable(poly)
crc1 := crc32.Checksum(part1, tab)
crc2 := crc32.Checksum(part2, tab)
combined := crc32Combine(poly, crc1, crc2, int64(len(part2)))
full := crc32.Checksum(data, tab)
if combined != full {
t.Errorf("crc32Combine failed: got %08x, want %08x", combined, full)
}
}
func TestCRC64Combine(t *testing.T) {
data := []byte("The quick brown fox jumps over the lazy dog")
mid := len(data) / 2
part1 := data[:mid]
part2 := data[mid:]
var poly uint64 = crc64NVME
tab := crc64NVMETable
crc1 := crc64.Checksum(part1, tab)
crc2 := crc64.Checksum(part2, tab)
combined := crc64Combine(poly, crc1, crc2, int64(len(part2)))
full := crc64.Checksum(data, tab)
if combined != full {
t.Errorf("crc64Combine failed: got %016x, want %016x", combined, full)
}
}

View File

@@ -26,6 +26,7 @@ import (
"hash/crc32"
"hash/crc64"
"io"
"math/bits"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
@@ -88,7 +89,7 @@ func NewHashReader(r io.Reader, expectedSum string, ht HashType) (*HashReader, e
case HashTypeCRC32C:
hash = crc32.New(crc32.MakeTable(crc32.Castagnoli))
case HashTypeCRC64NVME:
hash = crc64.New(crc64NVMETable)
hash = crc64.New(crc64.MakeTable(bits.Reverse64(0xad93d23594c93659)))
case HashTypeNone:
hash = noop{}
default:
@@ -184,7 +185,7 @@ func (hr *HashReader) Type() HashType {
return hr.hashType
}
// Base64SumString converts the hash bytes to the b64 encoded string checksum value
// Md5SumString converts the hash bytes to the string checksum value
func Base64SumString(b []byte) string {
return base64.StdEncoding.EncodeToString(b)
}
@@ -197,108 +198,6 @@ func (n noop) Reset() {}
func (n noop) Size() int { return 0 }
func (n noop) BlockSize() int { return 1 }
// IsChecksumComposable tests if the final full object crc can be calculated
// based on the part crc values.
func IsChecksumComposable(algo types.ChecksumAlgorithm) bool {
switch algo {
case types.ChecksumAlgorithmCrc32, types.ChecksumAlgorithmCrc32c, types.ChecksumAlgorithmCrc64nvme:
return true
default:
return false
}
}
// AddCRCChecksum calculates the composite CRC checksum after adding the part crc.
// Only CRC32, CRC32C, and CRC64NVME are supported. The input checksums must be base64-encoded strings.
func AddCRCChecksum(algo types.ChecksumAlgorithm, crc, partCrc string, partLen int64) (string, error) {
switch algo {
case types.ChecksumAlgorithmCrc32:
data, err := base64.StdEncoding.DecodeString(partCrc)
if err != nil {
return "", fmt.Errorf("base64 decode partCrc: %w", err)
}
if len(data) != 4 {
return "", fmt.Errorf("invalid crc32 part checksum length: %d", len(data))
}
currentCRC, err := base64.StdEncoding.DecodeString(crc)
if err != nil {
return "", fmt.Errorf("base64 decode crc: %w", err)
}
if len(currentCRC) != 4 {
return "", fmt.Errorf("invalid crc32 checksum length: %d", len(currentCRC))
}
currentVal := uint32(currentCRC[0])<<24 | uint32(currentCRC[1])<<16 | uint32(currentCRC[2])<<8 | uint32(currentCRC[3])
val := uint32(data[0])<<24 | uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
composite := crc32Combine(crc32.IEEE, currentVal, val, partLen)
out := []byte{
byte(composite >> 24),
byte(composite >> 16),
byte(composite >> 8),
byte(composite),
}
return base64.StdEncoding.EncodeToString(out), nil
case types.ChecksumAlgorithmCrc32c:
data, err := base64.StdEncoding.DecodeString(partCrc)
if err != nil {
return "", fmt.Errorf("base64 decode partCrc: %w", err)
}
if len(data) != 4 {
return "", fmt.Errorf("invalid crc32 part checksum length: %d", len(data))
}
currentCRC, err := base64.StdEncoding.DecodeString(crc)
if err != nil {
return "", fmt.Errorf("base64 decode crc: %w", err)
}
if len(currentCRC) != 4 {
return "", fmt.Errorf("invalid crc32 checksum length: %d", len(currentCRC))
}
currentVal := uint32(currentCRC[0])<<24 | uint32(currentCRC[1])<<16 | uint32(currentCRC[2])<<8 | uint32(currentCRC[3])
val := uint32(data[0])<<24 | uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
composite := crc32Combine(crc32.Castagnoli, currentVal, val, partLen)
// Convert composite to big-endian bytes
out := []byte{
byte(composite >> 24),
byte(composite >> 16),
byte(composite >> 8),
byte(composite),
}
return base64.StdEncoding.EncodeToString(out), nil
case types.ChecksumAlgorithmCrc64nvme:
data, err := base64.StdEncoding.DecodeString(partCrc)
if err != nil {
return "", fmt.Errorf("base64 decode partCrc: %w", err)
}
if len(data) != 8 {
return "", fmt.Errorf("invalid crc64 part checksum length: %d", len(data))
}
currentCRC, err := base64.StdEncoding.DecodeString(crc)
if err != nil {
return "", fmt.Errorf("base64 decode crc: %w", err)
}
if len(currentCRC) != 8 {
return "", fmt.Errorf("invalid crc64 checksum length: %d", len(currentCRC))
}
currentVal := uint64(currentCRC[0])<<56 | uint64(currentCRC[1])<<48 | uint64(currentCRC[2])<<40 | uint64(currentCRC[3])<<32 |
uint64(currentCRC[4])<<24 | uint64(currentCRC[5])<<16 | uint64(currentCRC[6])<<8 | uint64(currentCRC[7])
val := uint64(data[0])<<56 | uint64(data[1])<<48 | uint64(data[2])<<40 | uint64(data[3])<<32 |
uint64(data[4])<<24 | uint64(data[5])<<16 | uint64(data[6])<<8 | uint64(data[7])
composite := crc64Combine(crc64NVME, currentVal, val, partLen)
out := []byte{
byte(composite >> 56), byte(composite >> 48), byte(composite >> 40), byte(composite >> 32),
byte(composite >> 24), byte(composite >> 16), byte(composite >> 8), byte(composite),
}
return base64.StdEncoding.EncodeToString(out), nil
default:
return "", fmt.Errorf("composite checksum not supported for algorithm: %v", algo)
}
}
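In a multipart-completion path this folds naturally over the stored part metadata. A hedged sketch (composeCRC and its inputs are hypothetical; only AddCRCChecksum comes from the code above):

package utils

import "github.com/aws/aws-sdk-go-v2/service/s3/types"

// composeCRC folds base64-encoded per-part CRC32 values, each paired with
// its part length in bytes, into the whole-object checksum.
func composeCRC(checksums []string, sizes []int64) (string, error) {
	crc := checksums[0]
	for i := 1; i < len(checksums); i++ {
		var err error
		crc, err = AddCRCChecksum(types.ChecksumAlgorithmCrc32, crc, checksums[i], sizes[i])
		if err != nil {
			return "", err
		}
	}
	return crc, nil
}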
// NewCompositeChecksumReader initializes a composite checksum
// processor, which decodes and validates the provided
// checksums and returns the final checksum based on

View File

@@ -1,120 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"encoding/base64"
"hash/crc32"
"hash/crc64"
"testing"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
func TestAddCRCChecksum_CRC32(t *testing.T) {
data := []byte("this is a test buffer for crc32")
mid := len(data) / 2
part1 := data[:mid]
part2 := data[mid:]
crc1 := crc32.Checksum(part1, crc32.IEEETable)
crc2 := crc32.Checksum(part2, crc32.IEEETable)
crcFull := crc32.Checksum(data, crc32.IEEETable)
crc1b := []byte{byte(crc1 >> 24), byte(crc1 >> 16), byte(crc1 >> 8), byte(crc1)}
crc2b := []byte{byte(crc2 >> 24), byte(crc2 >> 16), byte(crc2 >> 8), byte(crc2)}
crc1b64 := base64.StdEncoding.EncodeToString(crc1b)
crc2b64 := base64.StdEncoding.EncodeToString(crc2b)
combined, err := AddCRCChecksum(types.ChecksumAlgorithmCrc32, crc1b64, crc2b64, int64(len(part2)))
if err != nil {
t.Fatalf("AddCRCChecksum failed: %v", err)
}
combinedBytes, err := base64.StdEncoding.DecodeString(combined)
if err != nil {
t.Fatalf("base64 decode failed: %v", err)
}
combinedVal := uint32(combinedBytes[0])<<24 | uint32(combinedBytes[1])<<16 | uint32(combinedBytes[2])<<8 | uint32(combinedBytes[3])
if combinedVal != crcFull {
t.Errorf("CRC32 combine mismatch: got %x, want %x", combinedVal, crcFull)
}
}
func TestAddCRCChecksum_CRC32c(t *testing.T) {
data := []byte("this is a test buffer for crc32c")
mid := len(data) / 2
part1 := data[:mid]
part2 := data[mid:]
castagnoli := crc32.MakeTable(crc32.Castagnoli)
crc1 := crc32.Checksum(part1, castagnoli)
crc2 := crc32.Checksum(part2, castagnoli)
crcFull := crc32.Checksum(data, castagnoli)
crc1b := []byte{byte(crc1 >> 24), byte(crc1 >> 16), byte(crc1 >> 8), byte(crc1)}
crc2b := []byte{byte(crc2 >> 24), byte(crc2 >> 16), byte(crc2 >> 8), byte(crc2)}
crc1b64 := base64.StdEncoding.EncodeToString(crc1b)
crc2b64 := base64.StdEncoding.EncodeToString(crc2b)
combined, err := AddCRCChecksum(types.ChecksumAlgorithmCrc32c, crc1b64, crc2b64, int64(len(part2)))
if err != nil {
t.Fatalf("AddCRCChecksum failed: %v", err)
}
combinedBytes, err := base64.StdEncoding.DecodeString(combined)
if err != nil {
t.Fatalf("base64 decode failed: %v", err)
}
combinedVal := uint32(combinedBytes[0])<<24 | uint32(combinedBytes[1])<<16 | uint32(combinedBytes[2])<<8 | uint32(combinedBytes[3])
if combinedVal != crcFull {
t.Errorf("CRC32c combine mismatch: got %x, want %x", combinedVal, crcFull)
}
}
func TestAddCRCChecksum_CRC64NVME(t *testing.T) {
data := []byte("this is a test buffer for crc64nvme")
mid := len(data) / 2
part1 := data[:mid]
part2 := data[mid:]
table := crc64NVMETable
crc1 := crc64.Checksum(part1, table)
crc2 := crc64.Checksum(part2, table)
crcFull := crc64.Checksum(data, table)
crc1b := []byte{
byte(crc1 >> 56), byte(crc1 >> 48), byte(crc1 >> 40), byte(crc1 >> 32),
byte(crc1 >> 24), byte(crc1 >> 16), byte(crc1 >> 8), byte(crc1),
}
crc2b := []byte{
byte(crc2 >> 56), byte(crc2 >> 48), byte(crc2 >> 40), byte(crc2 >> 32),
byte(crc2 >> 24), byte(crc2 >> 16), byte(crc2 >> 8), byte(crc2),
}
crc1b64 := base64.StdEncoding.EncodeToString(crc1b)
crc2b64 := base64.StdEncoding.EncodeToString(crc2b)
combined, err := AddCRCChecksum(types.ChecksumAlgorithmCrc64nvme, crc1b64, crc2b64, int64(len(part2)))
if err != nil {
t.Fatalf("AddCRCChecksum failed: %v", err)
}
combinedBytes, err := base64.StdEncoding.DecodeString(combined)
if err != nil {
t.Fatalf("base64 decode failed: %v", err)
}
combinedVal := uint64(combinedBytes[0])<<56 | uint64(combinedBytes[1])<<48 | uint64(combinedBytes[2])<<40 | uint64(combinedBytes[3])<<32 |
uint64(combinedBytes[4])<<24 | uint64(combinedBytes[5])<<16 | uint64(combinedBytes[6])<<8 | uint64(combinedBytes[7])
if combinedVal != crcFull {
t.Errorf("CRC64NVME combine mismatch: got %x, want %x", combinedVal, crcFull)
}
}

55
s3api/utils/logger.go Normal file
View File

@@ -0,0 +1,55 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"fmt"
"log"
"github.com/gofiber/fiber/v2"
)
func LogCtxDetails(ctx *fiber.Ctx, respBody []byte) {
isDebug, ok := ctx.Locals("isDebug").(bool)
_, notLogReqBody := ctx.Locals("logReqBody").(bool)
_, notLogResBody := ctx.Locals("logResBody").(bool)
if isDebug && ok {
// Log request body
if !notLogReqBody {
fmt.Println()
log.Printf("Request Body: %s", ctx.Request().Body())
}
// Log path parameters
fmt.Println()
log.Println("Path parameters: ")
for key, val := range ctx.AllParams() {
log.Printf("%s: %s", key, val)
}
// Log response headers
fmt.Println()
log.Println("Response Headers: ")
ctx.Response().Header.VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
// Log response body
if !notLogResBody && len(respBody) > 0 {
fmt.Println()
log.Printf("Response body %s", ctx.Response().Body())
}
}
}
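Per the assertions above, storing any bool under "logReqBody" or "logResBody" suppresses the corresponding body dump. A sketch of a handler opting out so streamed object data does not flood the debug log (handler name and placement are hypothetical):

package controllers

import "github.com/gofiber/fiber/v2"

// getObject opts out of response-body logging before writing the response;
// LogCtxDetails sees the "logResBody" local and skips the body dump.
func getObject(ctx *fiber.Ctx) error {
	ctx.Locals("logResBody", true)
	// ... stream the object body ...
	return nil
}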

View File

@@ -1,24 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
func IsObjectNameValid(name string) bool {
switch clean(name) {
case "", ".", "..", "/":
return false
}
return isObjectLocal(name)
}

View File

@@ -1,171 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// code modified from golang std library src/internal/filepathlite/path.go
// to support path separator '/' for all platforms.
package utils
import (
"strings"
)
const separator = '/'
// isObjectLocal checks if the given path would result in an object
// that is local to the bucket.
func isObjectLocal(path string) bool {
if path == "" || path == "." {
return true
}
path = strings.Join([]string{".", path}, string(separator))
hasDots := false
for p := path; p != ""; {
var part string
part, p, _ = strings.Cut(p, "/")
if part == "." || part == ".." {
hasDots = true
break
}
}
if hasDots {
path = clean(path)
}
if path == ".." || strings.HasPrefix(path, "../") {
return false
}
return true
}
func clean(path string) string {
originalPath := path
if path == "" {
return originalPath + "."
}
rooted := isPathSeparator(path[0])
// Invariants:
// reading from path; r is index of next byte to process.
// writing to buf; w is index of next byte to write.
// dotdot is index in buf where .. must stop, either because
// it is the leading slash or it is a leading ../../.. prefix.
n := len(path)
out := lazybuf{path: path, volAndPath: originalPath, volLen: 0}
r, dotdot := 0, 0
if rooted {
out.append(separator)
r, dotdot = 1, 1
}
for r < n {
switch {
case isPathSeparator(path[r]):
// empty path element
r++
case path[r] == '.' && (r+1 == n || isPathSeparator(path[r+1])):
// . element
r++
case path[r] == '.' && path[r+1] == '.' && (r+2 == n || isPathSeparator(path[r+2])):
// .. element: remove to last separator
r += 2
switch {
case out.w > dotdot:
// can backtrack
out.w--
for out.w > dotdot && !isPathSeparator(out.index(out.w)) {
out.w--
}
case !rooted:
// cannot backtrack, but not rooted, so append .. element.
if out.w > 0 {
out.append(separator)
}
out.append('.')
out.append('.')
dotdot = out.w
}
default:
// real path element.
// add slash if needed
if rooted && out.w != 1 || !rooted && out.w != 0 {
out.append(separator)
}
// copy element
for ; r < n && !isPathSeparator(path[r]); r++ {
out.append(path[r])
}
}
}
// Turn empty string into "."
if out.w == 0 {
out.append('.')
}
return FromSlash(out.string())
}
func isPathSeparator(c uint8) bool {
return c == '/'
}
func FromSlash(path string) string {
if separator == '/' {
return path
}
return replaceStringByte(path, '/', separator)
}
func replaceStringByte(s string, old, new byte) string {
if strings.IndexByte(s, old) == -1 {
return s
}
n := []byte(s)
for i := range n {
if n[i] == old {
n[i] = new
}
}
return string(n)
}
// A lazybuf is a lazily constructed path buffer.
// It supports append, reading previously appended bytes,
// and retrieving the final string. It does not allocate a buffer
// to hold the output until that output diverges from s.
type lazybuf struct {
path string
buf []byte
w int
volAndPath string
volLen int
}
func (b *lazybuf) index(i int) byte {
if b.buf != nil {
return b.buf[i]
}
return b.path[i]
}
func (b *lazybuf) append(c byte) {
if b.buf == nil {
if b.w < len(b.path) && b.path[b.w] == c {
b.w++
return
}
b.buf = make([]byte, len(b.path))
copy(b.buf, b.path[:b.w])
}
b.buf[b.w] = c
b.w++
}
func (b *lazybuf) string() string {
if b.buf == nil {
return b.volAndPath[:b.volLen+b.w]
}
return b.volAndPath[:b.volLen] + string(b.buf[:b.w])
}
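For '/'-separated inputs, the clean above behaves like the standard library's path.Clean, which makes the dot-dot rules easy to check from outside the package (a sketch; expected outputs in comments):

package main

import (
	"fmt"
	"path"
)

func main() {
	for _, p := range []string{"./a/b/../c", "./../x", "a//b///c", ""} {
		fmt.Printf("%-12q -> %q\n", p, path.Clean(p))
	}
	// "./a/b/../c" -> "a/c"
	// "./../x"     -> "../x"  (escapes the root; isObjectLocal rejects it)
	// "a//b///c"   -> "a/b/c" (empty elements dropped)
	// ""           -> "."
}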

View File

@@ -1,64 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils_test
import (
"testing"
"github.com/versity/versitygw/s3api/utils"
)
func TestIsObjectNameValid(t *testing.T) {
tests := []struct {
name string
input string
want bool
}{
// valid names
{"simple file", "file.txt", true},
{"nested file", "dir/file.txt", true},
{"absolute nested file", "/dir/file.txt", true},
{"trailing slash", "dir/", true},
{"slash prefix", "/file.txt", true}, // treated as local after joined with bucket
{"dot slash prefix", "./file.txt", true},
// invalid names
{"dot dot only", "..", false},
{"dot only", ".", false},
{"dot slash", "./", false},
{"dot slash dot dot", "./..", false},
{"cleans to dot", "./../.", false},
{"empty", "", false},
{"file escapes 1", "../file.txt", false},
{"file escapes 2", "dir/../../file.txt", false},
{"file escapes 3", "../../../file.txt", false},
{"dir escapes 1", "../dir/", false},
{"dir escapes 2", "dir/../../dir/", false},
{"dir escapes 3", "../../../dir/", false},
{"dot escapes 1", "../.", false},
{"dot escapes 2", "dir/../../.", false},
{"dot escapes 3", "../../../.", false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := utils.IsObjectNameValid(tt.input)
if got != tt.want {
t.Errorf("%v: IsObjectNameValid(%q) = %v, want %v",
tt.name, tt.input, got, tt.want)
}
})
}
}

View File

@@ -180,7 +180,7 @@ func ParsePresignedURIParts(ctx *fiber.Ctx) (AuthData, error) {
return a, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
}
if ContextKeyRegion.Get(ctx) != creds[2] {
if ctx.Locals("region") != creds[2] {
return a, s3err.APIError{
Code: "SignatureDoesNotMatch",
Description: fmt.Sprintf("Credential should be scoped to a valid Region, not %v", creds[2]),

View File

@@ -31,7 +31,6 @@ import (
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3err"
)
@@ -90,17 +89,11 @@ func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string,
if chType != "" {
checksumHasher, err := getHasher(chType)
if err != nil {
debuglogger.Logf("failed to initialize hash calculator: %v", err)
return nil, err
}
chRdr.checksumHash = checksumHasher
}
if chType == "" {
debuglogger.Infof("initializing signed chunk reader")
} else {
debuglogger.Infof("initializing signed chunk reader with '%v' trailing checksum", chType)
}
return chRdr, nil
}
@@ -157,13 +150,11 @@ func (cr *ChunkReader) getStringToSignPrefix(algo string) string {
func (cr *ChunkReader) getChunkStringToSign() string {
prefix := cr.getStringToSignPrefix(streamPayloadAlgo)
chunkHash := cr.chunkHash.Sum(nil)
strToSign := fmt.Sprintf("%s\n%s\n%s\n%s",
return fmt.Sprintf("%s\n%s\n%s\n%s",
prefix,
cr.prevSig,
zeroLenSig,
hex.EncodeToString(chunkHash))
debuglogger.PrintInsideHorizontalBorders(debuglogger.Purple, "STRING TO SIGN", strToSign, 64)
return strToSign
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html#example-signature-calculations-trailing-header
@@ -175,15 +166,11 @@ func (cr *ChunkReader) getTrailerChunkStringToSign() string {
prefix := cr.getStringToSignPrefix(streamPayloadTrailerAlgo)
strToSign := fmt.Sprintf("%s\n%s\n%s",
return fmt.Sprintf("%s\n%s\n%s",
prefix,
cr.prevSig,
sig,
)
debuglogger.PrintInsideHorizontalBorders(debuglogger.Purple, "TRAILER STRING TO SIGN", strToSign, 64)
return strToSign
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html#example-signature-calculations-trailing-header
@@ -193,7 +180,6 @@ func (cr *ChunkReader) verifyTrailerSignature() error {
sig := hex.EncodeToString(hmac256(cr.signingKey, []byte(strToSign)))
if sig != cr.trailerSig {
debuglogger.Logf("incorrect trailing signature: (calculated): %v, (got): %v", sig, cr.trailerSig)
return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
@@ -206,7 +192,6 @@ func (cr *ChunkReader) verifyChecksum() error {
checksum := base64.StdEncoding.EncodeToString(checksumHash)
if checksum != cr.parsedChecksum {
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(cr.trailer), "x-amz-checksum-")))
debuglogger.Logf("incorrect trailing checksum: (calculated): %v, (got): %v", checksum, cr.parsedChecksum)
return s3err.GetChecksumBadDigestErr(algo)
}
@@ -220,7 +205,6 @@ func (cr *ChunkReader) checkSignature() error {
cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))
if cr.prevSig != cr.parsedSig {
debuglogger.Logf("incorrect signature: (calculated): %v, (got) %v", cr.prevSig, cr.parsedSig)
return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
cr.parsedSig = ""
@@ -246,20 +230,18 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
}
}
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n], &n)
if err == errskipHeader {
cr.chunkDataLeft = 0
return 0, nil
}
if err != nil {
debuglogger.Logf("failed to parse chunk headers: %v", err)
return 0, err
}
cr.parsedSig = sig
// If we hit the final chunk, calculate and validate the final
// chunk signature and finish reading
if chunkSize == 0 {
debuglogger.Infof("final chunk parsed:\nchunk size: %v\nsignature: %v\nbuffer offset: %v", chunkSize, sig, bufOffset)
cr.chunkHash.Reset()
err := cr.checkSignature()
if err != nil {
@@ -267,7 +249,6 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
}
if cr.trailer != "" {
debuglogger.Infof("final chunk trailers parsed:\nchecksum: %v\ntrailing signature: %v", cr.parsedChecksum, cr.trailerSig)
err := cr.verifyChecksum()
if err != nil {
return 0, err
@@ -280,7 +261,6 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
return 0, io.EOF
}
debuglogger.Infof("chunk headers parsed:\nchunk size: %v\nsignature: %v\nbuffer offset: %v", chunkSize, sig, bufOffset)
// move data up to remove chunk header
copy(p, p[bufOffset:n])
@@ -296,7 +276,6 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
if (chunkSize + int64(n)) > math.MaxInt {
debuglogger.Logf("exceeding the limit of maximum integer allowed: (value): %v, (limit): %v", chunkSize+int64(n), math.MaxInt)
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
return n + int(chunkSize), err
@@ -319,7 +298,6 @@ func getSigningKey(secret, region string, date time.Time) []byte {
dateRegionKey := hmac256(dateKey, []byte(region))
dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
debuglogger.Infof("signing key: %s", hex.EncodeToString(signingKey))
return signingKey
}
@@ -341,14 +319,12 @@ const (
// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte, l *int) (int64, string, int, error) {
stashLen := len(cr.stash)
if stashLen > maxHeaderSize {
debuglogger.Logf("the stash length exceeds the maximum allowed chunk header size: (stash len): %v, (header limit): %v", stashLen, maxHeaderSize)
return 0, "", 0, errInvalidChunkFormat
}
if cr.stash != nil {
debuglogger.Logf("recovering the stash: (stash len): %v", stashLen)
tmp := make([]byte, stashLen+len(header))
copy(tmp, cr.stash)
copy(tmp[len(cr.stash):], header)
@@ -360,35 +336,33 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
// After the first chunk each chunk header should start
// with "\n\r\n"
if !cr.isFirstHeader {
if !cr.isFirstHeader && stashLen == 0 {
err := readAndSkip(rdr, '\r', '\n')
if err != nil {
debuglogger.Logf("failed to read chunk header first 2 bytes: (should be): \\r\\n, (got): %q", header[:2])
return cr.handleRdrErr(err, header)
}
copy(header, header[2:])
*l = *l - 2
}
// read and parse the chunk size
chunkSizeStr, err := readAndTrim(rdr, ';')
if err != nil {
debuglogger.Logf("failed to read chunk size: %v", err)
return cr.handleRdrErr(err, header)
}
chunkSize, err := strconv.ParseInt(chunkSizeStr, 16, 64)
if err != nil {
debuglogger.Logf("failed to parse chunk size: (size): %v, (err): %v", chunkSizeStr, err)
return 0, "", 0, errInvalidChunkFormat
}
// read the chunk signature
err = readAndSkip(rdr, 'c', 'h', 'u', 'n', 'k', '-', 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', '=')
if err != nil {
debuglogger.Logf("failed to read 'chunk-signature=': %v", err)
return cr.handleRdrErr(err, header)
}
sig, err := readAndTrim(rdr, '\r')
if err != nil {
debuglogger.Logf("failed to read '\\r', after chunk signature: %v", err)
return cr.handleRdrErr(err, header)
}
@@ -397,17 +371,14 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
if cr.trailer != "" {
err = readAndSkip(rdr, '\n')
if err != nil {
debuglogger.Logf("failed to read \\n before the trailer: %v", err)
return cr.handleRdrErr(err, header)
}
// parse and validate the trailing header
trailer, err := readAndTrim(rdr, ':')
if err != nil {
debuglogger.Logf("failed to read trailer prefix: %v", err)
return cr.handleRdrErr(err, header)
}
if trailer != string(cr.trailer) {
debuglogger.Logf("incorrect trailer prefix: (expected): %v, (got): %v", cr.trailer, trailer)
return 0, "", 0, errInvalidChunkFormat
}
@@ -416,36 +387,30 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
// parse the checksum
checksum, err := readAndTrim(rdr, '\r')
if err != nil {
debuglogger.Logf("failed to read checksum value: %v", err)
return cr.handleRdrErr(err, header)
}
if !IsValidChecksum(checksum, algo) {
debuglogger.Logf("invalid checksum value: %v", checksum)
return 0, "", 0, s3err.GetInvalidTrailingChecksumHeaderErr(trailer)
}
err = readAndSkip(rdr, '\n')
if err != nil {
debuglogger.Logf("failed to read \\n after checksum: %v", err)
return cr.handleRdrErr(err, header)
}
// parse the trailing signature
trailerSigPrefix, err := readAndTrim(rdr, ':')
if err != nil {
debuglogger.Logf("failed to read trailing signature prefix: %v", err)
return cr.handleRdrErr(err, header)
}
if trailerSigPrefix != trailerSignatureHeader {
debuglogger.Logf("invalid trailing signature prefix: (expected): %v, (got): %v", trailerSignatureHeader, trailerSigPrefix)
return 0, "", 0, errInvalidChunkFormat
}
trailerSig, err := readAndTrim(rdr, '\r')
if err != nil {
debuglogger.Logf("failed to read trailing signature: %v", err)
return cr.handleRdrErr(err, header)
}
@@ -456,7 +421,6 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
// "\r\n\r\n" is followed after the last chunk
err = readAndSkip(rdr, '\n', '\r', '\n')
if err != nil {
debuglogger.Logf("failed to read \\n\\r\\n at the end of chunk header: %v", err)
return cr.handleRdrErr(err, header)
}
@@ -465,30 +429,19 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
err = readAndSkip(rdr, '\n')
if err != nil {
debuglogger.Logf("failed to read \\n at the end of chunk header: %v", err)
return cr.handleRdrErr(err, header)
}
// find the index of chunk ending: '\r\n'
// skip the first 2 bytes as it is the starting '\r\n'
// the first chunk doesn't contain the starting '\r\n', but
// anyway, trimming the first 2 bytes doesn't pollute the logic.
ind := bytes.Index(header[2:], []byte{'\r', '\n'})
ind := bytes.Index(header, []byte{'\r', '\n'})
cr.isFirstHeader = false
// the offset is the found index + 4 - the stash length
// where:
// ind is the index of '\r\n'
// 4 specifies the trimmed 2 bytes plus 2 to shift the index at the end of '\r\n'
offset := ind + 4 - stashLen
return chunkSize, sig, offset, nil
return chunkSize, sig, ind + len(chunkHdrDelim) - stashLen, nil
}
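For illustration of the size/offset arithmetic above, a minimal standalone sketch of the aws-chunked framing (the header string, signature value, and sizes are invented; only the framing matches the parser):

package main

import (
	"bytes"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical signed chunk header: a leading "\r\n" terminating the
	// previous chunk's data, the hex-encoded size, and a per-chunk
	// signature, terminated by another "\r\n".
	header := []byte("\r\n400;chunk-signature=deadbeef\r\n<chunk data>")

	// The chunk size is the hex string between the leading "\r\n" and ';'.
	semi := bytes.IndexByte(header, ';')
	sizeStr := strings.TrimPrefix(string(header[:semi]), "\r\n")
	size, _ := strconv.ParseInt(sizeStr, 16, 64) // 0x400 = 1024 bytes

	// Mirroring the header[2:] variant above: search after the 2 skipped
	// bytes, then shift by 4 (the 2 skipped bytes plus the 2-byte "\r\n");
	// a non-empty stash would be subtracted here (stashLen is 0 in this sketch).
	ind := bytes.Index(header[2:], []byte{'\r', '\n'})
	offset := ind + 4

	fmt.Println(size, offset)            // 1024 32
	fmt.Println(string(header[offset:])) // <chunk data>
}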
// Stashes the header in cr.stash and returns "errskipHeader"
func (cr *ChunkReader) stashAndSkipHeader(header []byte) (int64, string, int, error) {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
debuglogger.Logf("stashing the header: (header length): %v", len(header))
return 0, "", 0, errskipHeader
}
@@ -498,7 +451,6 @@ func (cr *ChunkReader) stashAndSkipHeader(header []byte) (int64, string, int, er
func (cr *ChunkReader) handleRdrErr(err error, header []byte) (int64, string, int, error) {
if err == io.EOF {
if cr.isEOF {
debuglogger.Logf("incomplete chunk encoding, EOF reached")
return 0, "", 0, errInvalidChunkFormat
}
return cr.stashAndSkipHeader(header)

View File

@@ -29,8 +29,6 @@ import (
"math/bits"
"strconv"
"strings"
"github.com/versity/versitygw/s3api/debuglogger"
)
var (
@@ -44,28 +42,27 @@ type UnsignedChunkReader struct {
expectedChecksum string
hasher hash.Hash
stash []byte
chunkCounter int
offset int
}
func NewUnsignedChunkReader(r io.Reader, ct checksumType) (*UnsignedChunkReader, error) {
hasher, err := getHasher(ct)
if err != nil {
debuglogger.Logf("failed to initialize hash calculator: %v", err)
return nil, err
}
debuglogger.Infof("initializing unsigned chunk reader")
return &UnsignedChunkReader{
reader: bufio.NewReader(r),
checksumType: ct,
stash: make([]byte, 0),
hasher: hasher,
chunkCounter: 1,
}, nil
}
func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
// First read any stashed data
if len(ucr.stash) != 0 {
debuglogger.Infof("recovering the stash: (stash length): %v", len(ucr.stash))
n := copy(p, ucr.stash)
ucr.offset += n
@@ -92,24 +89,22 @@ func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
// Read and cache the payload
_, err = io.ReadFull(rdr, payload)
if err != nil {
debuglogger.Logf("failed to read chunk data: %v", err)
return 0, err
}
// Skip the trailing "\r\n"
if err := ucr.readAndSkip('\r', '\n'); err != nil {
debuglogger.Logf("failed to read trailing \\r\\n after chunk data: %v", err)
return 0, err
}
// Copy the payload into the io.Reader buffer
n := copy(p[ucr.offset:], payload)
ucr.offset += n
ucr.chunkCounter++
if int64(n) < chunkSize {
// stash the remaining data
ucr.stash = payload[n:]
debuglogger.Infof("stashing the remaining data: (stash length): %v", len(ucr.stash))
dataRead := ucr.offset
ucr.offset = 0
return dataRead, nil
@@ -118,7 +113,6 @@ func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
// Read and validate trailers
if err := ucr.readTrailer(); err != nil {
debuglogger.Logf("failed to read trailer: %v", err)
return 0, err
}
@@ -148,19 +142,15 @@ func (ucr *UnsignedChunkReader) readAndSkip(data ...byte) error {
func (ucr *UnsignedChunkReader) extractChunkSize() (int64, error) {
line, err := ucr.reader.ReadString('\n')
if err != nil {
debuglogger.Logf("failed to parse chunk size: %v", err)
return 0, errMalformedEncoding
}
line = strings.TrimSpace(line)
chunkSize, err := strconv.ParseInt(line, 16, 64)
if err != nil {
debuglogger.Logf("failed to convert chunk size: %v", err)
return 0, errMalformedEncoding
}
debuglogger.Infof("chunk size extracted: %v", chunkSize)
return chunkSize, nil
}
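As a worked example of this hex size-line parsing (stream contents invented for illustration):

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// An unsigned chunk starts with its size as a hex line, e.g. "2000\r\n".
	rdr := bufio.NewReader(strings.NewReader("2000\r\npayload bytes..."))

	line, _ := rdr.ReadString('\n') // "2000\r\n"
	line = strings.TrimSpace(line)  // "2000"
	size, _ := strconv.ParseInt(line, 16, 64)

	fmt.Println(size) // 8192 (0x2000)
}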
@@ -171,7 +161,6 @@ func (ucr *UnsignedChunkReader) readTrailer() error {
for {
v, err := ucr.reader.ReadByte()
if err != nil {
debuglogger.Logf("failed to read byte: %v", err)
if err == io.EOF {
return io.ErrUnexpectedEOF
}
@@ -184,14 +173,12 @@ func (ucr *UnsignedChunkReader) readTrailer() error {
var tmp [3]byte
_, err = io.ReadFull(ucr.reader, tmp[:])
if err != nil {
debuglogger.Logf("failed to read chunk ending: \\n\\r\\n: %v", err)
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if !bytes.Equal(tmp[:], trailerDelim) {
debuglogger.Logf("incorrect trailer delimiter: (expected): \\n\\r\\n, (got): %q", tmp[:])
return errMalformedEncoding
}
break
@@ -202,18 +189,15 @@ func (ucr *UnsignedChunkReader) readTrailer() error {
trailerHeader = strings.TrimSpace(trailerHeader)
trailerHeaderParts := strings.Split(trailerHeader, ":")
if len(trailerHeaderParts) != 2 {
debuglogger.Logf("invalid trailer header parts: %v", trailerHeaderParts)
return errMalformedEncoding
}
if trailerHeaderParts[0] != string(ucr.checksumType) {
debuglogger.Logf("invalid checksum type: %v", trailerHeaderParts[0])
//TODO: handle the error
return errMalformedEncoding
}
ucr.expectedChecksum = trailerHeaderParts[1]
debuglogger.Infof("parsed the trailing header:\n%v:%v", trailerHeaderParts[0], trailerHeaderParts[1])
// Validate checksum
return ucr.validateChecksum()
@@ -225,7 +209,6 @@ func (ucr *UnsignedChunkReader) validateChecksum() error {
checksum := base64.StdEncoding.EncodeToString(csum)
if checksum != ucr.expectedChecksum {
debuglogger.Logf("incorrect checksum: (expected): %v, (got): %v", ucr.expectedChecksum, checksum)
return fmt.Errorf("actual checksum: %v, expected checksum: %v", checksum, ucr.expectedChecksum)
}

View File

@@ -17,7 +17,6 @@ package utils
import (
"bytes"
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
@@ -29,9 +28,9 @@ import (
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/encoding/httpbinding"
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
@@ -41,17 +40,21 @@ var (
bucketNameIpRegexp = regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`)
)
const (
upperhex = "0123456789ABCDEF"
)
func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
metadata = make(map[string]string)
headers.DisableNormalizing()
for key, value := range headers.AllInOrder() {
headers.VisitAllInOrder(func(key, value []byte) {
hKey := string(key)
if strings.HasPrefix(strings.ToLower(hKey), "x-amz-meta-") {
trimmedKey := hKey[11:]
headerValue := string(value)
metadata[trimmedKey] = headerValue
}
}
})
headers.EnableNormalizing()
return
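The prefix handling above amounts to the following self-contained sketch (header names invented; the real function walks fasthttp headers with normalization disabled):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Only x-amz-meta-* headers become user metadata; the match is
	// case-insensitive and the 11-byte prefix is stripped from the key.
	headers := map[string]string{
		"Content-Type":       "text/plain",
		"x-amz-meta-Owner":   "alice",
		"X-Amz-Meta-Project": "versity",
	}

	metadata := make(map[string]string)
	for k, v := range headers {
		if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
			metadata[k[11:]] = v
		}
	}
	fmt.Println(metadata) // map[Owner:alice Project:versity]
}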
@@ -66,20 +69,20 @@ func createHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength
body = bytes.NewReader(req.Body())
}
uri := ctx.OriginalURL()
escapedURI := escapeOriginalURI(ctx)
httpReq, err := http.NewRequest(string(req.Header.Method()), uri, body)
httpReq, err := http.NewRequest(string(req.Header.Method()), escapedURI, body)
if err != nil {
return nil, errors.New("error in creating an http request")
}
// Set the request headers
for key, value := range req.Header.All() {
req.Header.VisitAll(func(key, value []byte) {
keyStr := string(key)
if includeHeader(keyStr, signedHdrs) {
httpReq.Header.Add(keyStr, string(value))
}
}
})
// make sure all headers in the signed headers are present
for _, header := range signedHdrs {
@@ -121,10 +124,11 @@ func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, cont
body = bytes.NewReader(req.Body())
}
uri, _, _ := strings.Cut(ctx.OriginalURL(), "?")
uri := string(ctx.Request().URI().Path())
uri = httpbinding.EscapePath(uri, false)
isFirst := true
for key, value := range ctx.Request().URI().QueryArgs().All() {
ctx.Request().URI().QueryArgs().VisitAll(func(key, value []byte) {
_, ok := signedQueryArgs[string(key)]
if !ok {
escapeValue := url.QueryEscape(string(value))
@@ -135,19 +139,19 @@ func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, cont
uri += fmt.Sprintf("&%s=%s", key, escapeValue)
}
}
}
})
httpReq, err := http.NewRequest(string(req.Header.Method()), uri, body)
if err != nil {
return nil, errors.New("error in creating an http request")
}
// Set the request headers
for key, value := range req.Header.All() {
req.Header.VisitAll(func(key, value []byte) {
keyStr := string(key)
if includeHeader(keyStr, signedHdrs) {
httpReq.Header.Add(keyStr, string(value))
}
}
})
// Check if Content-Length in signed headers
// If content length is non 0, then the header will be included
@@ -177,11 +181,9 @@ func ParseUint(str string) (int32, error) {
}
num, err := strconv.ParseInt(str, 10, 32)
if err != nil {
debuglogger.Logf("invalid intager provided: %v\n", err)
return 1000, fmt.Errorf("invalid int: %w", err)
}
if num < 0 {
debuglogger.Logf("negative intager provided: %v\n", num)
return 1000, fmt.Errorf("negative uint: %v", num)
}
if num > 1000 {
@@ -210,18 +212,15 @@ func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser, bodysize int) {
func IsValidBucketName(bucket string) bool {
if len(bucket) < 3 || len(bucket) > 63 {
debuglogger.Logf("bucket name length should be in 3-63 range, got: %v\n", len(bucket))
return false
}
// Checks to contain only digits, lowercase letters, dot, hyphen.
// Checks to start and end with only digits and lowercase letters.
if !bucketNameRegexp.MatchString(bucket) {
debuglogger.Logf("invalid bucket name: %v\n", bucket)
return false
}
// Checks not to be a valid IP address
if bucketNameIpRegexp.MatchString(bucket) {
debuglogger.Logf("bucket name is an ip address: %v\n", bucket)
return false
}
return true
@@ -297,30 +296,28 @@ func FilterObjectAttributes(attrs map[s3response.ObjectAttributes]struct{}, outp
func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]struct{}, error) {
attrs := map[s3response.ObjectAttributes]struct{}{}
var err error
for key, value := range ctx.Request().Header.All() {
ctx.Request().Header.VisitAll(func(key, value []byte) {
if string(key) == "X-Amz-Object-Attributes" {
if len(value) == 0 {
break
return
}
oattrs := strings.Split(string(value), ",")
for _, a := range oattrs {
attr := s3response.ObjectAttributes(a)
if !attr.IsValid() {
debuglogger.Logf("invalid object attribute: %v\n", attr)
err = s3err.GetAPIError(s3err.ErrInvalidObjectAttributes)
break
}
attrs[attr] = struct{}{}
}
}
}
})
if err != nil {
return nil, err
}
if len(attrs) == 0 {
debuglogger.Logf("empty get object attributes")
return nil, s3err.GetAPIError(s3err.ErrObjectAttributesInvalidHeader)
}
@@ -339,7 +336,6 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
objLockDate := ctx.Get("X-Amz-Object-Lock-Retain-Until-Date")
if (objLockDate != "" && objLockModeHdr == "") || (objLockDate == "" && objLockModeHdr != "") {
debuglogger.Logf("one of 2 required params is missing: (lock date): %v, (lock mode): %v\n", objLockDate, objLockModeHdr)
return nil, s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders)
}
@@ -347,11 +343,9 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
if objLockDate != "" {
rDate, err := time.Parse(time.RFC3339, objLockDate)
if err != nil {
debuglogger.Logf("failed to parse retain until date: %v\n", err)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if rDate.Before(time.Now()) {
debuglogger.Logf("expired retain until date: %v\n", rDate.Format(time.RFC3339))
return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
}
retainUntilDate = rDate
@@ -362,14 +356,12 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
if objLockMode != "" &&
objLockMode != types.ObjectLockModeCompliance &&
objLockMode != types.ObjectLockModeGovernance {
debuglogger.Logf("invalid object lock mode: %v\n", objLockMode)
return nil, s3err.GetAPIError(s3err.ErrInvalidObjectLockMode)
}
legalHold := types.ObjectLockLegalHoldStatus(legalHoldHdr)
if legalHold != "" && legalHold != types.ObjectLockLegalHoldStatusOff && legalHold != types.ObjectLockLegalHoldStatusOn {
debuglogger.Logf("invalid object lock legal hold status: %v\n", legalHold)
return nil, s3err.GetAPIError(s3err.ErrInvalidLegalHoldStatus)
}
@@ -389,66 +381,114 @@ func IsValidOwnership(val types.ObjectOwnership) bool {
case types.ObjectOwnershipObjectWriter:
return true
default:
debuglogger.Logf("invalid object ownership: %v\n", val)
return false
}
}
type ChecksumValues map[types.ChecksumAlgorithm]string
func escapeOriginalURI(ctx *fiber.Ctx) string {
path := ctx.Path()
// Headers concatenates the checksum algorithms by prefixing each
// with 'x-amz-checksum-'
// e.g.
// "x-amz-checksum-crc64nvme, x-amz-checksum-sha1"
func (cv ChecksumValues) Headers() string {
result := ""
isFirst := true
// Escape the URI original path
escapedURI := escapePath(path)
for key := range cv {
if !isFirst {
result += ", "
}
isFirst = false
result += fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(key)))
// Add the URI query params
query := string(ctx.Request().URI().QueryArgs().QueryString())
if query != "" {
escapedURI = escapedURI + "?" + query
}
return result
return escapedURI
}
func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, ChecksumValues, error) {
// Escapes the path string
// Most of the parts copied from std url
func escapePath(s string) string {
hexCount := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
hexCount++
}
}
if hexCount == 0 {
return s
}
var buf [64]byte
var t []byte
required := len(s) + 2*hexCount
if required <= len(buf) {
t = buf[:required]
} else {
t = make([]byte, required)
}
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case shouldEscape(c):
t[j] = '%'
t[j+1] = upperhex[c>>4]
t[j+2] = upperhex[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
// Checks if the character needs to be escaped
func shouldEscape(c byte) bool {
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '_', '.', '~', '/':
return false
}
return true
}
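Put together, escapePath behaves like this compact sketch (same character classes as shouldEscape; the sample path is invented):

package main

import "fmt"

const upperhex = "0123456789ABCDEF"

// escapePathSketch mirrors the behavior described above: RFC 3986
// unreserved characters and the path separator '/' pass through;
// every other byte becomes %XX with uppercase hex digits.
func escapePathSketch(s string) string {
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
			out = append(out, c)
		case c == '-', c == '_', c == '.', c == '~', c == '/':
			out = append(out, c)
		default:
			out = append(out, '%', upperhex[c>>4], upperhex[c&15])
		}
	}
	return string(out)
}

func main() {
	fmt.Println(escapePathSketch("/test-bucket/my key&")) // /test-bucket/my%20key%26
}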
func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, map[types.ChecksumAlgorithm]string, error) {
sdkAlgorithm := types.ChecksumAlgorithm(strings.ToUpper(ctx.Get("X-Amz-Sdk-Checksum-Algorithm")))
err := IsChecksumAlgorithmValid(sdkAlgorithm)
if err != nil {
debuglogger.Logf("invalid checksum algorithm: %v\n", sdkAlgorithm)
return "", nil, err
}
checksums := ChecksumValues{}
checksums := map[types.ChecksumAlgorithm]string{}
var hdrErr error
// Parse and validate checksum headers
for key, value := range ctx.Request().Header.All() {
ctx.Request().Header.VisitAll(func(key, value []byte) {
// Skip `X-Amz-Checksum-Type` as it's a special header
if !strings.HasPrefix(string(key), "X-Amz-Checksum-") || string(key) == "X-Amz-Checksum-Type" {
continue
if hdrErr != nil || !strings.HasPrefix(string(key), "X-Amz-Checksum-") || string(key) == "X-Amz-Checksum-Type" {
return
}
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(key), "X-Amz-Checksum-")))
err := IsChecksumAlgorithmValid(algo)
if err != nil {
debuglogger.Logf("invalid checksum header: %s\n", key)
hdrErr = s3err.GetAPIError(s3err.ErrInvalidChecksumHeader)
break
return
}
checksums[algo] = string(value)
}
})
if hdrErr != nil {
return sdkAlgorithm, nil, hdrErr
}
if len(checksums) > 1 {
debuglogger.Logf("multiple checksum headers provided: %v\n", checksums.Headers())
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
}
@@ -478,22 +518,15 @@ var checksumLengths = map[types.ChecksumAlgorithm]int{
func IsValidChecksum(checksum string, algorithm types.ChecksumAlgorithm) bool {
decoded, err := base64.StdEncoding.DecodeString(checksum)
if err != nil {
debuglogger.Logf("failed to parse checksum base64: %v\n", err)
return false
}
expectedLength, exists := checksumLengths[algorithm]
if !exists {
debuglogger.Logf("unknown checksum algorithm: %v\n", algorithm)
return false
}
isValid := len(decoded) == expectedLength
if !isValid {
debuglogger.Logf("decoded checksum length: (expected): %v, (got): %v\n", expectedLength, len(decoded))
}
return isValid
return len(decoded) == expectedLength
}
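The effect of the decoded-length check, assuming CRC32's 4-byte digest (values computed locally, not taken from the gateway):

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	// A CRC32 digest is 4 bytes, so its base64 encoding must decode back
	// to exactly 4 bytes for the checksum to be considered well-formed.
	sum := crc32.ChecksumIEEE([]byte("hello world"))
	var raw [4]byte
	binary.BigEndian.PutUint32(raw[:], sum)
	encoded := base64.StdEncoding.EncodeToString(raw[:])

	decoded, err := base64.StdEncoding.DecodeString(encoded)
	fmt.Println(err == nil, len(decoded) == 4) // true true
}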
func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
@@ -504,7 +537,6 @@ func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
alg != types.ChecksumAlgorithmSha1 &&
alg != types.ChecksumAlgorithmSha256 &&
alg != types.ChecksumAlgorithmCrc64nvme {
debuglogger.Logf("invalid checksum algorithm: %v\n", alg)
return s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm)
}
@@ -516,7 +548,6 @@ func IsChecksumTypeValid(t types.ChecksumType) error {
if t != "" &&
t != types.ChecksumTypeComposite &&
t != types.ChecksumTypeFullObject {
debuglogger.Logf("invalid checksum type: %v\n", t)
return s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type")
}
return nil
@@ -560,7 +591,6 @@ func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType
typeSchema := checksumMap[algo]
_, ok := typeSchema[t]
if !ok {
debuglogger.Logf("checksum type and algorithm mismatch: (type): %v, (algorithm): %v\n", t, algo)
return s3err.GetChecksumSchemaMismatchErr(algo, t)
}
@@ -582,7 +612,6 @@ func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, type
// Verify if checksum algorithm is provided, if
// checksum type is specified
if chType != "" && algo == "" {
debuglogger.Logf("checksum type can only be used with checksum algorithm: (type): %v\n", chType)
return algo, chType, s3err.GetAPIError(s3err.ErrChecksumTypeWithAlgo)
}
@@ -605,62 +634,3 @@ func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, type
return algo, chType, nil
}
// TagLimit specifies the allowed tag count in a tag set
type TagLimit int
const (
// Tag limit for bucket tagging
TagLimitBucket TagLimit = 50
// Tag limit for object tagging
TagLimitObject TagLimit = 10
)
// Parses and validates tagging
func ParseTagging(data []byte, limit TagLimit) (map[string]string, error) {
var tagging s3response.TaggingInput
err := xml.Unmarshal(data, &tagging)
if err != nil {
debuglogger.Logf("invalid taggging: %s", data)
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
}
tLen := len(tagging.TagSet.Tags)
if tLen > int(limit) {
switch limit {
case TagLimitObject:
debuglogger.Logf("bucket tagging length exceeds %v: %v", limit, tLen)
return nil, s3err.GetAPIError(s3err.ErrObjectTaggingLimited)
case TagLimitBucket:
debuglogger.Logf("object tagging length exceeds %v: %v", limit, tLen)
return nil, s3err.GetAPIError(s3err.ErrBucketTaggingLimited)
}
}
tagSet := make(map[string]string, tLen)
for _, tag := range tagging.TagSet.Tags {
// validate tag key
if len(tag.Key) == 0 || len(tag.Key) > 128 {
debuglogger.Logf("tag key should 0 < tag.Key <= 128, key: %v", tag.Key)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}
// validate tag value
if len(tag.Value) > 256 {
debuglogger.Logf("invalid long tag value: (length): %v, (value): %v", len(tag.Value), tag.Value)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}
// make sure there are no duplicate keys
_, ok := tagSet[tag.Key]
if ok {
debuglogger.Logf("duplicate tag key: %v", tag.Key)
return nil, s3err.GetAPIError(s3err.ErrDuplicateTagKey)
}
tagSet[tag.Key] = tag.Value
}
return tagSet, nil
}
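A usage sketch of the tagging document this parser accepts (the struct mirrors the S3 Tagging schema; type and variable names are local to the example):

package main

import (
	"encoding/xml"
	"fmt"
)

type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  struct {
		Tags []struct {
			Key   string `xml:"Key"`
			Value string `xml:"Value"`
		} `xml:"Tag"`
	} `xml:"TagSet"`
}

func main() {
	doc := []byte(`<Tagging><TagSet>
  <Tag><Key>env</Key><Value>prod</Value></Tag>
  <Tag><Key>team</Key><Value>storage</Value></Tag>
</TagSet></Tagging>`)

	var t tagging
	if err := xml.Unmarshal(doc, &t); err != nil {
		panic(err)
	}
	// Duplicate keys and the 10/50 limits would be rejected as above;
	// two unique tags pass both limits.
	set := make(map[string]string, len(t.TagSet.Tags))
	for _, tag := range t.TagSet.Tags {
		set[tag.Key] = tag.Value
	}
	fmt.Println(set) // map[env:prod team:storage]
}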

View File

@@ -16,9 +16,6 @@ package utils
import (
"bytes"
"encoding/xml"
"errors"
"math/rand"
"net/http"
"reflect"
"testing"
@@ -28,7 +25,6 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
@@ -418,6 +414,128 @@ func TestIsValidOwnership(t *testing.T) {
}
}
func Test_shouldEscape(t *testing.T) {
type args struct {
c byte
}
tests := []struct {
name string
args args
want bool
}{
{
name: "shouldn't-escape-alphanum",
args: args{
c: 'h',
},
want: false,
},
{
name: "shouldn't-escape-unreserved-char",
args: args{
c: '_',
},
want: false,
},
{
name: "shouldn't-escape-unreserved-number",
args: args{
c: '0',
},
want: false,
},
{
name: "shouldn't-escape-path-separator",
args: args{
c: '/',
},
want: false,
},
{
name: "should-escape-special-char-1",
args: args{
c: '&',
},
want: true,
},
{
name: "should-escape-special-char-2",
args: args{
c: '*',
},
want: true,
},
{
name: "should-escape-special-char-3",
args: args{
c: '(',
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := shouldEscape(tt.args.c); got != tt.want {
t.Errorf("shouldEscape() = %v, want %v", got, tt.want)
}
})
}
}
func Test_escapePath(t *testing.T) {
type args struct {
s string
}
tests := []struct {
name string
args args
want string
}{
{
name: "empty-string",
args: args{
s: "",
},
want: "",
},
{
name: "alphanum-path",
args: args{
s: "/test-bucket/test-key",
},
want: "/test-bucket/test-key",
},
{
name: "path-with-unescapable-chars",
args: args{
s: "/test~bucket/test.key",
},
want: "/test~bucket/test.key",
},
{
name: "path-with-escapable-chars",
args: args{
s: "/bucket-*(/test=key&",
},
want: "/bucket-%2A%28/test%3Dkey%26",
},
{
name: "path-with-space",
args: args{
s: "/test-bucket/my key",
},
want: "/test-bucket/my%20key",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := escapePath(tt.args.s); got != tt.want {
t.Errorf("escapePath() = %v, want %v", got, tt.want)
}
})
}
}
func TestIsChecksumAlgorithmValid(t *testing.T) {
type args struct {
alg types.ChecksumAlgorithm
@@ -739,162 +857,3 @@ func Test_checkChecksumTypeAndAlgo(t *testing.T) {
})
}
}
func TestParseTagging(t *testing.T) {
genRandStr := func(lgth int) string {
b := make([]byte, lgth)
for i := range b {
b[i] = byte(rand.Intn(95) + 32) // 126 - 32 + 1 = 95 printable characters
}
return string(b)
}
getTagSet := func(lgth int) s3response.TaggingInput {
res := s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{},
},
}
for i := 0; i < lgth; i++ {
res.TagSet.Tags = append(res.TagSet.Tags, s3response.Tag{
Key: genRandStr(10),
Value: genRandStr(20),
})
}
return res
}
type args struct {
data s3response.TaggingInput
overrideXML []byte
limit TagLimit
}
tests := []struct {
name string
args args
want map[string]string
wantErr error
}{
{
name: "valid tags within limit",
args: args{
data: s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{
{Key: "key1", Value: "value1"},
{Key: "key2", Value: "value2"},
},
},
},
limit: TagLimitObject,
},
want: map[string]string{"key1": "value1", "key2": "value2"},
wantErr: nil,
},
{
name: "malformed XML",
args: args{
overrideXML: []byte("invalid xml"),
limit: TagLimitObject,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrMalformedXML),
},
{
name: "exceeds bucket tag limit",
args: args{
data: getTagSet(51),
limit: TagLimitBucket,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrBucketTaggingLimited),
},
{
name: "exceeds object tag limit",
args: args{
data: getTagSet(11),
limit: TagLimitObject,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrObjectTaggingLimited),
},
{
name: "invalid 0 length tag key",
args: args{
data: s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{{Key: "", Value: "value1"}},
},
},
limit: TagLimitObject,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrInvalidTagKey),
},
{
name: "invalid long tag key",
args: args{
data: s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{{Key: genRandStr(130), Value: "value1"}},
},
},
limit: TagLimitObject,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrInvalidTagKey),
},
{
name: "invalid long tag value",
args: args{
data: s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{{Key: "key", Value: genRandStr(257)}},
},
},
limit: TagLimitBucket,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrInvalidTagValue),
},
{
name: "duplicate tag key",
args: args{
data: s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{
{Key: "key", Value: "value1"},
{Key: "key", Value: "value2"},
},
},
},
limit: TagLimitObject,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrDuplicateTagKey),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var data []byte
if tt.args.overrideXML != nil {
data = tt.args.overrideXML
} else {
var err error
data, err = xml.Marshal(tt.args.data)
if err != nil {
t.Fatalf("error marshalling input: %v", err)
}
}
got, err := ParseTagging(data, tt.args.limit)
if !errors.Is(err, tt.wantErr) {
t.Errorf("expected error %v, got %v", tt.wantErr, err)
}
if err == nil && !reflect.DeepEqual(got, tt.want) {
t.Errorf("expected result %v, got %v", tt.want, got)
}
})
}
}

View File

@@ -59,11 +59,6 @@ type ErrorCode int
const (
ErrNone ErrorCode = iota
ErrAccessDenied
ErrAnonymousRequest
ErrAnonymousCreateMp
ErrAnonymousCopyObject
ErrAnonymousPutBucketOwnership
ErrAnonymousGetBucketOwnership
ErrMethodNotAllowed
ErrBucketNotEmpty
ErrVersionedBucketNotEmpty
@@ -89,12 +84,7 @@ const (
ErrInvalidCopyDest
ErrInvalidCopySource
ErrInvalidCopySourceRange
ErrInvalidTagKey
ErrInvalidTagValue
ErrDuplicateTagKey
ErrBucketTaggingLimited
ErrObjectTaggingLimited
ErrInvalidURLEncodedTagging
ErrInvalidTag
ErrAuthHeaderEmpty
ErrSignatureVersionNotSupported
ErrMalformedPOSTRequest
@@ -123,7 +113,7 @@ const (
ErrSignatureTerminationStr
ErrSignatureIncorrService
ErrContentSHA256Mismatch
ErrMissingContentLength
ErrMissingDecodedContentLength
ErrInvalidAccessKeyID
ErrRequestNotReadyYet
ErrMissingDateHeader
@@ -167,7 +157,6 @@ const (
ErrChecksumTypeWithAlgo
ErrInvalidChecksumHeader
ErrTrailerHeaderNotSupported
ErrBadRequest
// Non-AWS errors
ErrExistingObjectIsDirectory
@@ -192,31 +181,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Access Denied.",
HTTPStatusCode: http.StatusForbidden,
},
ErrAnonymousRequest: {
Code: "AccessDenied",
Description: "Anonymous users cannot invoke this API. Please authenticate.",
HTTPStatusCode: http.StatusForbidden,
},
ErrAnonymousCreateMp: {
Code: "AccessDenied",
Description: "Anonymous users cannot initiate multipart uploads. Please authenticate.",
HTTPStatusCode: http.StatusForbidden,
},
ErrAnonymousCopyObject: {
Code: "AccessDenied",
Description: "Anonymous users cannot copy objects. Please authenticate.",
HTTPStatusCode: http.StatusForbidden,
},
ErrAnonymousPutBucketOwnership: {
Code: "AccessDenied",
Description: "s3:PutBucketOwnershipControls does not support Anonymous requests!",
HTTPStatusCode: http.StatusForbidden,
},
ErrAnonymousGetBucketOwnership: {
Code: "AccessDenied",
Description: "s3:GetBucketOwnershipControls does not support Anonymous requests!",
HTTPStatusCode: http.StatusForbidden,
},
ErrMethodNotAllowed: {
Code: "MethodNotAllowed",
Description: "The specified method is not allowed against this resource.",
@@ -342,34 +306,9 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTagKey: {
Code: "InvalidTag",
Description: "The TagKey you have provided is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTagValue: {
Code: "InvalidTag",
Description: "The TagValue you have provided is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrDuplicateTagKey: {
Code: "InvalidTag",
Description: "Cannot provide multiple Tags with the same key",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketTaggingLimited: {
Code: "BadRequest",
Description: "Bucket tag count cannot be greater than 50",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectTaggingLimited: {
Code: "BadRequest",
Description: "Object tags cannot be greater than 10",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidURLEncodedTagging: {
ErrInvalidTag: {
Code: "InvalidArgument",
Description: "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates.",
Description: "The Tag value you have provided is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMalformedXML: {
@@ -517,7 +456,7 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The provided 'x-amz-content-sha256' header does not match what was computed.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingContentLength: {
ErrMissingDecodedContentLength: {
Code: "MissingContentLength",
Description: "You must provide the Content-Length HTTP header.",
HTTPStatusCode: http.StatusLengthRequired,
@@ -554,7 +493,7 @@ var errorCodeResponse = map[ErrorCode]APIError{
},
ErrInvalidRange: {
Code: "InvalidRange",
Description: "The requested range is not satisfiable",
Description: "The requested range is not valid for the request. Try another range.",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrInvalidURI: {
@@ -727,11 +666,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The value specified in the x-amz-trailer header is not supported",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBadRequest: {
Code: "400",
Description: "Bad Request",
HTTPStatusCode: http.StatusBadRequest,
},
// non aws errors
ErrExistingObjectIsDirectory: {

View File

@@ -22,7 +22,6 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/s3api/utils"
)
type S3EventSender interface {
@@ -142,20 +141,15 @@ func InitEventSender(cfg *EventConfig) (S3EventSender, error) {
func createEventSchema(ctx *fiber.Ctx, meta EventMeta, configId ConfigurationId) EventSchema {
path := strings.Split(ctx.Path(), "/")
var bucket, object string
if len(path) > 1 {
bucket, object = path[1], strings.Join(path[2:], "/")
}
acc := utils.ContextKeyAccount.Get(ctx).(auth.Account)
bucket, object := path[1], strings.Join(path[2:], "/")
acc := ctx.Locals("account").(auth.Account)
return EventSchema{
Records: []EventRecord{
{
EventVersion: "2.2",
EventSource: "aws:s3",
AwsRegion: utils.ContextKeyRegion.Get(ctx).(string),
AwsRegion: ctx.Locals("region").(string),
EventTime: time.Now().Format(time.RFC3339),
EventName: meta.EventName,
UserIdentity: EventUserIdentity{

View File

@@ -24,7 +24,6 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
)
@@ -69,16 +68,10 @@ func (f *FileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
access := "-"
reqURI := ctx.OriginalURL()
path := strings.Split(ctx.Path(), "/")
var bucket, object string
if len(path) > 1 {
bucket, object = path[1], strings.Join(path[2:], "/")
}
bucket, object := path[1], strings.Join(path[2:], "/")
errorCode := ""
httpStatus := 200
startTime, ok := utils.ContextKeyStartTime.Get(ctx).(time.Time)
if !ok {
startTime = time.Now()
}
startTime := ctx.Locals("startTime").(time.Time)
tlsConnState := ctx.Context().TLSConnectionState()
if tlsConnState != nil {
lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)
@@ -96,9 +89,9 @@ func (f *FileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
}
}
acct, ok := utils.ContextKeyAccount.Get(ctx).(auth.Account)
if ok {
access = acct.Access
switch ctx.Locals("account").(type) {
case auth.Account:
access = ctx.Locals("account").(auth.Account).Access
}
lf.BucketOwner = meta.BucketOwner
@@ -122,7 +115,7 @@ func (f *FileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
lf.HostID = ctx.Get("X-Amz-Id-2")
lf.SignatureVersion = "SigV4"
lf.AuthenticationType = "AuthHeader"
lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", utils.ContextKeyRegion.Get(ctx).(string))
lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", ctx.Locals("region").(string))
lf.AccessPointARN = fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/"))
lf.AclRequired = "Yes"

View File

@@ -22,7 +22,6 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/s3api/utils"
)
// FileLogger is a local file audit log
@@ -58,10 +57,7 @@ func (f *AdminFileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMe
access := "-"
reqURI := ctx.OriginalURL()
errorCode := ""
startTime, ok := utils.ContextKeyStartTime.Get(ctx).(time.Time)
if !ok {
startTime = time.Now()
}
startTime := ctx.Locals("startTime").(time.Time)
tlsConnState := ctx.Context().TLSConnectionState()
if tlsConnState != nil {
lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)
@@ -72,9 +68,9 @@ func (f *AdminFileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMe
errorCode = err.Error()
}
switch utils.ContextKeyAccount.Get(ctx).(type) {
switch ctx.Locals("account").(type) {
case auth.Account:
access = utils.ContextKeyAccount.Get(ctx).(auth.Account).Access
access = ctx.Locals("account").(auth.Account).Access
}
lf.Time = time.Now()

View File

@@ -28,7 +28,6 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
)
@@ -66,16 +65,10 @@ func (wl *WebhookLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMet
access := "-"
reqURI := ctx.OriginalURL()
path := strings.Split(ctx.Path(), "/")
var bucket, object string
if len(path) > 1 {
bucket, object = path[1], strings.Join(path[2:], "/")
}
bucket, object := path[1], strings.Join(path[2:], "/")
errorCode := ""
httpStatus := 200
startTime, ok := utils.ContextKeyStartTime.Get(ctx).(time.Time)
if !ok {
startTime = time.Now()
}
startTime := ctx.Locals("startTime").(time.Time)
tlsConnState := ctx.Context().TLSConnectionState()
if tlsConnState != nil {
lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)
@@ -93,9 +86,9 @@ func (wl *WebhookLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMet
}
}
acct, ok := utils.ContextKeyAccount.Get(ctx).(auth.Account)
if ok {
access = acct.Access
switch ctx.Locals("account").(type) {
case auth.Account:
access = ctx.Locals("account").(auth.Account).Access
}
lf.BucketOwner = meta.BucketOwner
@@ -119,7 +112,7 @@ func (wl *WebhookLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMet
lf.HostID = ctx.Get("X-Amz-Id-2")
lf.SignatureVersion = "SigV4"
lf.AuthenticationType = "AuthHeader"
lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", utils.ContextKeyRegion.Get(ctx).(string))
lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", ctx.Locals("region").(string))
lf.AccessPointARN = fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/"))
lf.AclRequired = "Yes"

View File

@@ -62,7 +62,7 @@ func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
Alias: (*Alias)(&p),
}
aux.LastModified = p.LastModified.UTC().Format(time.RFC3339)
aux.LastModified = p.LastModified.UTC().Format(iso8601TimeFormat)
return e.EncodeElement(aux, start)
}
@@ -172,7 +172,7 @@ type ListObjectsV2Result struct {
Name *string
Prefix *string
StartAfter *string
ContinuationToken *string `xml:"ContinuationToken,omitempty"`
ContinuationToken *string
NextContinuationToken *string
KeyCount *int32
MaxKeys *int32
@@ -198,14 +198,15 @@ type Object struct {
func (o Object) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias Object
aux := &struct {
LastModified string `xml:"LastModified,omitempty"`
LastModified *string `xml:"LastModified,omitempty"`
*Alias
}{
Alias: (*Alias)(&o),
}
if o.LastModified != nil {
aux.LastModified = o.LastModified.UTC().Format(time.RFC3339)
formattedTime := o.LastModified.UTC().Format(iso8601TimeFormat)
aux.LastModified = &formattedTime
}
return e.EncodeElement(aux, start)
@@ -232,7 +233,7 @@ func (u Upload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
Alias: (*Alias)(&u),
}
aux.Initiated = u.Initiated.UTC().Format(time.RFC3339)
aux.Initiated = u.Initiated.UTC().Format(iso8601TimeFormat)
return e.EncodeElement(aux, start)
}
@@ -329,7 +330,7 @@ func (r ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement
Alias: (*Alias)(&r),
}
aux.CreationDate = r.CreationDate.UTC().Format(time.RFC3339)
aux.CreationDate = r.CreationDate.UTC().Format(iso8601TimeFormat)
return e.EncodeElement(aux, start)
}
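The alias pattern recurring in these marshallers can be distilled as follows; a minimal sketch using only the standard library (type names invented):

package main

import (
	"encoding/xml"
	"fmt"
	"os"
	"time"
)

type entry struct {
	Name         string
	CreationDate time.Time
}

// MarshalXML uses the alias pattern from above: the alias type drops the
// custom marshaller (avoiding infinite recursion), and the shadowing
// string field carries the formatted timestamp.
func (r entry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type Alias entry
	aux := &struct {
		CreationDate string
		*Alias
	}{
		Alias: (*Alias)(&r),
	}
	aux.CreationDate = r.CreationDate.UTC().Format(time.RFC3339)
	return e.EncodeElement(aux, start)
}

func main() {
	enc := xml.NewEncoder(os.Stdout)
	if err := enc.Encode(entry{Name: "bucket-a", CreationDate: time.Unix(0, 0)}); err != nil {
		panic(err)
	}
	fmt.Println()
	// <entry><CreationDate>1970-01-01T00:00:00Z</CreationDate><Name>bucket-a</Name></entry>
}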
@@ -343,44 +344,11 @@ type CanonicalUser struct {
DisplayName string
}
type CopyObjectOutput struct {
BucketKeyEnabled *bool
CopyObjectResult *CopyObjectResult
CopySourceVersionId *string
Expiration *string
SSECustomerAlgorithm *string
SSECustomerKeyMD5 *string
SSEKMSEncryptionContext *string
SSEKMSKeyId *string
ServerSideEncryption types.ServerSideEncryption
VersionId *string
}
type CopyObjectResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumCRC64NVME *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumType types.ChecksumType
ETag *string
LastModified *time.Time
}
func (r CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias CopyObjectResult
aux := &struct {
LastModified string `xml:"LastModified,omitempty"`
*Alias
}{
Alias: (*Alias)(&r),
}
if r.LastModified != nil {
aux.LastModified = r.LastModified.UTC().Format(time.RFC3339)
}
return e.EncodeElement(aux, start)
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
LastModified time.Time
ETag string
CopySourceVersionId string `xml:"-"`
}
type CopyPartResult struct {
@@ -397,35 +365,20 @@ type CopyPartResult struct {
CopySourceVersionId string `xml:"-"`
}
func (r CopyPartResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias CopyPartResult
func (r CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias CopyObjectResult
aux := &struct {
LastModified string `xml:"LastModified,omitempty"`
LastModified string `xml:"LastModified"`
*Alias
}{
Alias: (*Alias)(&r),
}
if !r.LastModified.IsZero() {
aux.LastModified = r.LastModified.UTC().Format(time.RFC3339)
}
aux.LastModified = r.LastModified.UTC().Format(iso8601TimeFormat)
return e.EncodeElement(aux, start)
}
type CompleteMultipartUploadResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"`
Location *string
Bucket *string
Key *string
ETag *string
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumCRC64NVME *string
ChecksumType *types.ChecksumType
}
type AccessControlPolicy struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlPolicy" json:"-"`
Owner CanonicalUser
@@ -480,37 +433,7 @@ type ListVersionsResult struct {
NextVersionIdMarker *string
Prefix *string
VersionIdMarker *string
Versions []ObjectVersion `xml:"Version"`
}
type ObjectVersion struct {
ChecksumAlgorithm []types.ChecksumAlgorithm
ChecksumType types.ChecksumType
ETag *string
IsLatest *bool
Key *string
LastModified *time.Time
Owner *types.Owner
RestoreStatus *types.RestoreStatus
Size *int64
StorageClass types.ObjectVersionStorageClass
VersionId *string
}
func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias ObjectVersion
aux := &struct {
LastModified string `xml:"LastModified"`
*Alias
}{
Alias: (*Alias)(&o),
}
if o.LastModified != nil {
aux.LastModified = o.LastModified.UTC().Format(time.RFC3339)
}
return e.EncodeElement(aux, start)
Versions []types.ObjectVersion `xml:"Version"`
}
type GetBucketVersioningOutput struct {

View File

@@ -25,8 +25,5 @@ USERNAME_TWO=HIJKLMN
PASSWORD_TWO=OPQRSTU
TEST_FILE_FOLDER=$PWD/versity-gwtest-files
RECREATE_BUCKETS=true
DELETE_BUCKETS_AFTER_TEST=true
REMOVE_TEST_FILE_FOLDER=true
AUTOGENERATE_USERS=true
USER_AUTOGENERATION_PREFIX=versitygw-docker-
VERSIONING_DIR=/tmp/versioning

View File

@@ -21,7 +21,6 @@ RUN apt-get update && \
jq \
bc \
libxml2-utils \
xmlstarlet \
ca-certificates && \
update-ca-certificates && \
rm -rf /var/lib/apt/lists/*

View File

@@ -110,11 +110,6 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up
**ACL_AWS_ACCESS_KEY_ID**, **ACL_AWS_ACCESS_SECRET_KEY**: for direct mode, the ID and key for the S3 user in the **ACL_AWS_CANONICAL_ID** account.
**USER_ID_{role}_{id}**, **USERNAME_{role}_{id}**, **PASSWORD_{role}_{id}**: for setup_user_v2 non-autocreated users, the format for the user.
* example: USER_ID_USER_1={name}: user ID corresponding to the first user with **user** permissions in the test.
####
### Non-Secret
**VERSITY_EXE**: location of the versity executable relative to test folder.
@@ -151,13 +146,11 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up
**DIRECT**: if **true**, bypass versitygw and run directly against s3 (for comparison and validity-checking purposes).
**DIRECT_DISPLAY_NAME**: AWS ACL main user display name if **DIRECT** is set to **true**.
**DIRECT_AWS_USER_ID**: AWS policy 12-digit user ID if **DIRECT** is set to **true**.
**DIRECT_DISPLAY_NAME**: username if **DIRECT** is set to **true**.
**COVERAGE_DB**: database to store client command coverage info and usage counts, if using.
**USERNAME_ONE**, **PASSWORD_ONE**, **USERNAME_TWO**, **PASSWORD_TWO**: setup_user (v1), credentials for users created and tested for non-root user **versitygw** operations (non-setup_user_v2).
**USERNAME_ONE**, **PASSWORD_ONE**, **USERNAME_TWO**, **PASSWORD_TWO**: credentials for users created and tested for non-root user **versitygw** operations.
**TEST_FILE_FOLDER**: where to put temporary test files.
@@ -169,22 +162,10 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up
**TIME_LOG**: optional log to show duration of individual tests
**DIRECT_S3_ROOT_ACCOUNT_NAME**: for direct mode, S3 username for user with root permissions
**DIRECT_S3_ROOT_ACCOUNT_NAME**: for direct mode, S3 username
**DELETE_BUCKETS_AFTER_TEST**: whether or not to delete buckets after individual tests, useful for debugging if the post-test bucket state needs to be checked
**AUTOGENERATE_USERS**: setup_user_v2, whether or not to autocreate users for tests. If set to **false**, users must be pre-created (see `Secret` section above).
**USER_AUTOGENERATION_PREFIX**: setup_user_v2, if **AUTOGENERATE_USERS** is set to **true**, the prefix for the autocreated username.
**CREATE_STATIC_USERS_IF_NONEXISTENT**: setup_user_v2, if **AUTOGENERATE_USERS** is set to **false**, create the static users if they don't already exist, but don't delete them afterward, as happens with user autogeneration
**DIRECT_POST_COMMAND_DELAY**: in direct mode, time to wait before sending new commands to try to prevent propagation delay issues
**SKIP_ACL_TESTING**: avoid ACL tests for systems which do not use ACLs
**MAX_FILE_DOWNLOAD_CHUNK_SIZE**: when set, will divide the download of large files with GetObject into chunks of the given size. Useful for direct testing with slower connections.
## REST Scripts
REST scripts are included for calls to S3's REST API in the `./tests/rest_scripts/` folder. To call a script, the following parameters are needed:

View File

@@ -27,21 +27,6 @@ abort_multipart_upload() {
return 0
}
abort_multipart_upload_rest() {
if ! check_param_count "abort_multipart_upload_rest" "bucket, key, upload ID" 3 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$3" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/abort_multipart_upload.sh); then
log 2 "error aborting multipart upload: $result"
return 1
fi
if [ "$result" != "204" ]; then
log 2 "expected '204' response, actual was '$result' (error: $(cat "$TEST_FILE_FOLDER"/result.txt)"
return 1
fi
return 0
}
abort_multipart_upload_with_user() {
if [ $# -ne 5 ]; then
log 2 "'abort multipart upload' command requires bucket, key, upload ID, username, password"

View File

@@ -27,18 +27,8 @@ send_command() {
fi
# shellcheck disable=SC2154
echo "${masked_args[*]}" >> "$COMMAND_LOG"
"$@"
return $?
fi
local command_result=0
"$@" || command_result=$?
if [ "$command_result" -ne 0 ]; then
if [ "$1" == "curl" ]; then
echo ", curl response code: $command_result"
elif [ "$command_result" -ne 1 ]; then
echo " ($1 response code: $command_result)"
fi
fi
if [ "$DIRECT" == "true" ]; then
sleep "$DIRECT_POST_COMMAND_DELAY"
fi
return $command_result
"$@"
}

View File

@@ -28,66 +28,4 @@ complete_multipart_upload() {
fi
log 5 "complete multipart upload error: $error"
return 0
}
complete_multipart_upload_rest() {
if ! check_param_count_v2 "bucket, key, upload ID, parts payload" 4 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$3" PARTS="$4" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/complete_multipart_upload.sh); then
log 2 "error completing multipart upload: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "complete multipart upload returned code $result: $(cat "$TEST_FILE_FOLDER/result.txt")"
return 1
fi
}
complete_multipart_upload_rest_nonexistent_param() {
if ! check_param_count_v2 "bucket, key, upload ID, parts payload" 4 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$3" PARTS="$4" ALGORITHM_PARAMETER="true" CHECKSUM_ALGORITHM="crc32c" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/complete_multipart_upload.sh 2>&1); then
log 2 "error completing multipart upload: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "complete multipart upload returned code $result: $(cat "$TEST_FILE_FOLDER/result.txt")"
return 1
fi
}
complete_multipart_upload_rest_incorrect_checksum() {
if ! check_param_count_v2 "bucket, key, upload ID, parts payload, type, algorithm, correct hash" 7 $#; then
return 1
fi
checksum="$7"
if [ "${checksum:0:1}" == "a" ]; then
checksum="b${checksum:1}"
else
checksum="a${checksum:1}"
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$3" PARTS="$4" CHECKSUM_TYPE="$5" CHECKSUM_ALGORITHM="$6" CHECKSUM_HASH="$checksum" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/complete_multipart_upload.sh 2>&1); then
log 2 "error completing multipart upload: $result"
return 1
fi
if ! check_rest_expected_error "$result" "$TEST_FILE_FOLDER/result.txt" 400 "BadDigest" "did not match"; then
log 2 "expected '400', was $result: $(cat "$TEST_FILE_FOLDER/result.txt")"
return 1
fi
}
complete_multipart_upload_rest_invalid_checksum() {
if ! check_param_count_v2 "bucket, key, upload ID, parts payload, type, algorithm, correct hash" 7 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$3" PARTS="$4" CHECKSUM_TYPE="$5" CHECKSUM_ALGORITHM="$6" CHECKSUM_HASH="$7" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/complete_multipart_upload.sh 2>&1); then
log 2 "error completing multipart upload: $result"
return 1
fi
if ! check_rest_expected_error "$result" "$TEST_FILE_FOLDER/result.txt" 400 "InvalidRequest" "header is invalid"; then
log 2 "expected '400', was $result: $(cat "$TEST_FILE_FOLDER/result.txt")"
return 1
fi
}

View File

@@ -44,13 +44,13 @@ copy_object() {
}
copy_object_empty() {
record_command "copy-object" "client:s3api"
record-command "copy-object" "client:s3api"
error=$(send_command aws --no-verify-ssl s3api copy-object 2>&1) || local result=$?
if [[ $result -eq 0 ]]; then
log 2 "copy object with empty parameters returned no error"
return 1
fi
if [[ $error != *"the following arguments are required: --bucket, --copy-source, --key"* ]]; then
if [[ $error != *"the following arguments are required: --bucket, --copy-source, --key" ]]; then
log 2 "copy object with no params returned mismatching error: $error"
return 1
fi

View File

@@ -20,14 +20,15 @@ source ./tests/report.sh
# param: bucket name
# return 0 for success, 1 for failure
create_bucket() {
log 6 "create_bucket"
if ! check_param_count "create_bucket" "command type, bucket" 2 $#; then
if [ $# -ne 2 ]; then
log 2 "create bucket missing command type, bucket name"
return 1
fi
record_command "create-bucket" "client:$1"
local exit_code=0
local error
log 6 "create bucket"
if [[ $1 == 's3' ]]; then
error=$(send_command aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]]; then
@@ -49,8 +50,8 @@ create_bucket() {
}
create_bucket_with_user() {
log 6 "create_bucket_with_user"
if ! check_param_count "create_bucket_with_user" "command type, bucket, access ID, secret key" 4 $#; then
if [ $# -ne 4 ]; then
log 2 "create bucket missing command type, bucket name, access, secret"
return 1
fi
local exit_code=0
@@ -72,9 +73,9 @@ create_bucket_with_user() {
}
create_bucket_object_lock_enabled() {
log 6 "create_bucket_object_lock_enabled"
record_command "create-bucket" "client:s3api"
if ! check_param_count "create_bucket_object_lock_enabled" "bucket" 1 $#; then
if [ $# -ne 1 ]; then
log 2 "create bucket missing bucket name"
return 1
fi
@@ -89,39 +90,3 @@ create_bucket_object_lock_enabled() {
fi
return 0
}
create_bucket_rest_with_invalid_acl() {
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$BUCKET_ONE_NAME" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ACL="public-reads" OBJECT_OWNERSHIP="BucketOwnerPreferred" ./tests/rest_scripts/create_bucket.sh 2>&1); then
log 2 "error creating bucket: $result"
return 1
fi
if ! check_rest_expected_error "$result" "$TEST_FILE_FOLDER/result.txt" "400" "InvalidArgument" ""; then
log 2 "error checking XML CreateBucket error"
return 1
fi
return 0
}
create_bucket_rest_expect_error() {
if ! check_param_count_v2 "bucket name, params, response code, error code, message" 5 $#; then
return 1
fi
env_vars="BUCKET_NAME=$1 $2"
if ! send_rest_command_expect_error "$env_vars" "./tests/rest_scripts/create_bucket.sh" "$3" "$4" "$5"; then
log 2 "error sending REST command and checking error"
return 1
fi
return 0
}
create_bucket_rest_expect_success() {
if ! check_param_count_v2 "bucket name, params" 2 $#; then
return 1
fi
env_vars="BUCKET_NAME=$1 $2"
if ! send_rest_command_expect_success "$env_vars" "./tests/rest_scripts/create_bucket.sh" "200"; then
log 2 "error sending REST command and checking error"
return 1
fi
return 0
}

View File

@@ -14,63 +14,13 @@
# specific language governing permissions and limitations
# under the License.
create_multipart_upload_rest() {
if ! check_param_count_v2 "bucket name, key" 2 $#; then
return 1
fi
if ! result=$(BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/output.txt" COMMAND_LOG=$COMMAND_LOG ./tests/rest_scripts/create_multipart_upload.sh); then
log 2 "error creating multipart upload: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "put-object-retention returned code $result: $(cat "$TEST_FILE_FOLDER/output.txt")"
return 1
fi
if ! upload_id=$(get_element_text "$TEST_FILE_FOLDER/output.txt" "InitiateMultipartUploadResult" "UploadId"); then
log 2 "error getting upload ID: $upload_id"
return 1
fi
echo "$upload_id"
return 0
}
create_multipart_upload_rest_with_checksum_type_and_algorithm() {
if ! check_param_count_v2 "bucket, key, checksum type, checksum algorithm" 4 $#; then
return 1
fi
if ! result=$(COMMAND_LOG=$COMMAND_LOG BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/output.txt" CHECKSUM_TYPE="$3" CHECKSUM_ALGORITHM="$4" ./tests/rest_scripts/create_multipart_upload.sh 2>&1); then
log 2 "error creating multipart upload: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "expected '200', was '$result' ($(cat "$TEST_FILE_FOLDER/output.txt"))"
return 1
fi
if ! upload_id=$(get_element_text "$TEST_FILE_FOLDER/output.txt" "InitiateMultipartUploadResult" "UploadId"); then
log 2 "error getting upload ID: $upload_id"
return 1
fi
echo "$upload_id"
return 0
}
create_multipart_upload_rest_with_checksum_type_and_algorithm_error() {
if ! check_param_count_v2 "bucket, key, checksum type, checksum algorithm, handle fn, response, code, error" 8 $#; then
return 1
fi
if ! result=$(COMMAND_LOG=$COMMAND_LOG BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/output.txt" CHECKSUM_TYPE="$3" CHECKSUM_ALGORITHM="$4" ./tests/rest_scripts/create_multipart_upload.sh 2>&1); then
log 2 "error creating multipart upload: $result"
return 1
fi
if ! "$5" "$result" "$TEST_FILE_FOLDER/output.txt" "$6" "$7" "$8"; then
log 2 "error checking result"
return 1
fi
}
create_multipart_upload_s3api() {
# initialize a multipart upload
# params: bucket, key
# return 0 for success, 1 for failure
create_multipart_upload() {
record_command "create-multipart-upload" "client:s3api"
if ! check_param_count_v2 "bucket, key" 2 $#; then
if [ $# -ne 2 ]; then
log 2 "create multipart upload function must have bucket, key"
return 1
fi
@@ -87,33 +37,32 @@ create_multipart_upload_s3api() {
return 0
}
create_multipart_upload_s3api_custom() {
if ! check_param_count_gt "at least bucket and key" 2 $#; then
create_multipart_upload_with_user() {
record_command "create-multipart-upload" "client:s3api"
if [ $# -ne 4 ]; then
log 2 "create multipart upload function must have bucket, key, username, password"
return 1
fi
local multipart_data
log 5 "additional create multipart params"
for i in "$@"; do
log 5 "$i"
done
log 5 "${*:3}"
log 5 "aws --no-verify-ssl s3api create-multipart-upload --bucket $1 --key $2 ${*:3}"
multipart_data=$(send_command aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1) || local result=$?
if [[ $result -ne 0 ]]; then
log 2 "error creating custom multipart data command: $multipart_data"
if ! multipart_data=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" send_command aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1); then
log 2 "Error creating multipart upload: $multipart_data"
return 1
fi
if ! upload_id=$(echo "$multipart_data" | grep -v "InsecureRequestWarning" | jq -r '.UploadId' 2>&1); then
log 2 "error parsing upload ID: $upload_id"
return 1
fi
log 5 "multipart data: $multipart_data"
upload_id=$(echo "$multipart_data" | grep -v "InsecureRequestWarning" | jq '.UploadId')
upload_id="${upload_id//\"/}"
log 5 "upload id: $upload_id"
echo "$upload_id"
return 0
}
create_multipart_upload_s3api_params() {
create_multipart_upload_params() {
record_command "create-multipart-upload" "client:s3api"
if ! check_param_count_v2 "bucket, key, content type, metadata, object lock legal hold status, \
object lock mode, object lock retain until date, and tagging" 8 $#; then
if [ $# -ne 8 ]; then
log 2 "create multipart upload function with params must have bucket, key, content type, metadata, object lock legal hold status, " \
"object lock mode, object lock retain until date, and tagging"
return 1
fi
local multipart_data
@@ -135,22 +84,43 @@ create_multipart_upload_s3api_params() {
return 0
}
create_multipart_upload_s3api_with_user() {
create_multipart_upload_custom() {
record_command "create-multipart-upload" "client:s3api"
if ! check_param_count_v2 "bucket, key, username, password" 4 $#; then
if [ $# -lt 2 ]; then
log 2 "create multipart upload custom function must have at least bucket and key"
return 1
fi
if ! multipart_data=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" send_command aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1); then
log 2 "Error creating multipart upload: $multipart_data"
return 1
fi
if ! upload_id=$(echo "$multipart_data" | grep -v "InsecureRequestWarning" | jq -r '.UploadId' 2>&1); then
log 2 "error parsing upload ID: $upload_id"
local multipart_data
log 5 "additional create multipart params"
for i in "$@"; do
log 5 "$i"
done
log 5 "${*:3}"
log 5 "aws --no-verify-ssl s3api create-multipart-upload --bucket $1 --key $2 ${*:3}"
multipart_data=$(send_command aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1) || local result=$?
if [[ $result -ne 0 ]]; then
log 2 "error creating custom multipart data command: $multipart_data"
return 1
fi
log 5 "multipart data: $multipart_data"
upload_id=$(echo "$multipart_data" | grep -v "InsecureRequestWarning" | jq '.UploadId')
upload_id="${upload_id//\"/}"
echo "$upload_id"
log 5 "upload id: $upload_id"
return 0
}
create_multipart_upload_rest() {
if [ $# -ne 2 ]; then
log 2 "'create_multipart_upload_rest' requires bucket name, key"
return 1
fi
if ! result=$(BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/output.txt" COMMAND_LOG=$COMMAND_LOG ./tests/rest_scripts/create_multipart_upload.sh); then
log 2 "error creating multipart upload: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "put-object-retention returned code $result: $(cat "$TEST_FILE_FOLDER/output.txt")"
return 1
fi
return 0
}

View File

@@ -50,16 +50,4 @@ delete_bucket() {
return 1
fi
return 0
}
delete_bucket_rest() {
if ! check_param_count_gt "bucket, env vars (optional)" 1 $#; then
return 1
fi
env_vars="BUCKET_NAME=$1 $2"
if ! send_rest_command_expect_success "$env_vars" "./tests/rest_scripts/delete_bucket.sh" "204"; then
log 2 "error sending REST command and checking error"
return 1
fi
return 0
}

View File

@@ -38,21 +38,6 @@ delete_bucket_policy() {
return 0
}
delete_bucket_policy_rest() {
if ! check_param_count "delete_bucket_policy_rest" "bucket" 1 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/delete_bucket_policy.sh 2>&1); then
log 2 "error deleting bucket policy: $result"
return 1
fi
if [ "$result" != "204" ]; then
log 2 "expected '204', was '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
return 1
fi
return 0
}
delete_bucket_policy_with_user() {
record_command "delete-bucket-policy" "client:s3api"
if [[ $# -ne 3 ]]; then

View File

@@ -18,7 +18,8 @@
delete_object() {
log 6 "delete_object"
record_command "delete-object" "client:$1"
if ! check_param_count "delete_object" "command type, bucket, key" 3 $#; then
if [ $# -ne 3 ]; then
log 2 "delete object command requires command type, bucket, key"
return 1
fi
local exit_code=0
@@ -46,20 +47,20 @@ delete_object() {
}
delete_object_bypass_retention() {
if ! check_param_count "delete_object_bypass_retention" "bucket, key, user, password" 4 $#; then
if [[ $# -ne 4 ]]; then
log 2 "'delete-object with bypass retention' requires bucket, key, user, password"
return 1
fi
if ! result=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" \
COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" BYPASS_GOVERNANCE_RETENTION="true" \
OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/delete_object.sh 2>&1); then
log 2 "error deleting object: $result"
if ! delete_object_error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" send_command aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --bypass-governance-retention 2>&1); then
log 2 "error deleting object with bypass retention: $delete_object_error"
return 1
fi
return 0
}
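Deleting an object under governance-mode retention requires both the s3:BypassGovernanceRetention permission and the x-amz-bypass-governance-retention: true request header, which is what the BYPASS_GOVERNANCE_RETENTION variable presumably toggles inside the REST script. A hypothetical raw-request equivalent (SigV4 Authorization construction omitted; the header must also appear in SignedHeaders for the signature to validate):

curl -ks -w "%{http_code}" -X DELETE "$AWS_ENDPOINT_URL/$bucket/$key" \
-H "x-amz-bypass-governance-retention: true" \
-H "Authorization: AWS4-HMAC-SHA256 ..." \
-o "$TEST_FILE_FOLDER/result.txt"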
delete_object_version() {
if ! check_param_count "delete_object_version" "bucket, key, version ID" 3 $#; then
if [[ $# -ne 3 ]]; then
log 2 "'delete_object_version' requires bucket, key, version ID"
return 1
fi
if ! delete_object_error=$(send_command aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --version-id "$3" 2>&1); then
@@ -69,24 +70,9 @@ delete_object_version() {
return 0
}
delete_object_version_rest() {
if ! check_param_count "delete_object_version_rest" "bucket name, object name, version ID" 3 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" VERSION_ID="$3" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/delete_object.sh 2>&1); then
log 2 "error deleting object: $result"
return 1
fi
if [ "$result" != "204" ]; then
delete_object_error=$(cat "$TEST_FILE_FOLDER/result.txt")
log 2 "expected '204', was '$result' ($delete_object_error)"
return 1
fi
return 0
}
delete_object_version_bypass_retention() {
if ! check_param_count "delete_object_version_bypass_retention" "bucket, key, version ID" 3 $#; then
if [[ $# -ne 3 ]]; then
log 2 "'delete_object_version_bypass_retention' requires bucket, key, version ID"
return 1
fi
if ! delete_object_error=$(send_command aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --version-id "$3" --bypass-governance-retention 2>&1); then
@@ -96,32 +82,17 @@ delete_object_version_bypass_retention() {
return 0
}
delete_object_version_rest_bypass_retention() {
if ! check_param_count "delete_object_version_rest_bypass_retention" "bucket, key, version ID" 3 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" VERSION_ID="$3" BYPASS_GOVERNANCE_RETENTION="true" \
OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/delete_object.sh 2>&1); then
log 2 "error deleting object: $result"
return 1
fi
if [ "$result" != "204" ]; then
log 2 "expected '204', was '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
return 1
fi
return 0
}
delete_object_with_user() {
record_command "delete-object" "client:$1"
if ! check_param_count "delete_object_version_bypass_retention" "command type, bucket, key, access ID, secret key" 5 $#; then
if [ $# -ne 5 ]; then
log 2 "delete object with user command requires command type, bucket, key, access ID, secret key"
return 1
fi
local exit_code=0
if [[ $1 == 's3' ]]; then
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" send_command aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" send_command aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" send_command aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" --bypass-governance-retention 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
delete_object_error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm --access_key="$4" --secret_key="$5" "s3://$2/$3" 2>&1) || exit_code=$?
else
@@ -130,23 +101,48 @@ delete_object_with_user() {
fi
if [ $exit_code -ne 0 ]; then
log 2 "error deleting object: $delete_object_error"
export delete_object_error
return 1
fi
return 0
}
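A typical call supplies the client type first, then bucket, key, and the alternate user's credentials; on failure the error text is exported in delete_object_error so callers can assert on access-denied messages. Hypothetical usage (the USERNAME_ONE/PASSWORD_ONE variables are illustrative):

# hypothetical check that a restricted user cannot delete the object
if delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$key" "$USERNAME_ONE" "$PASSWORD_ONE"; then
log 2 "expected delete to fail for restricted user"
return 1
fi
if [[ $delete_object_error != *"AccessDenied"* ]]; then
log 2 "unexpected error: $delete_object_error"
return 1
fi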
delete_object_rest() {
if ! check_param_count "delete_object_rest" "bucket, key" 2 $#; then
if [ $# -ne 2 ]; then
log 2 "'delete_object_rest' requires bucket name, object name"
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/delete_object.sh 2>&1); then
log 2 "error deleting object: $result"
generate_hash_for_payload ""
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
# shellcheck disable=SC2154
canonical_request="DELETE
/$1/$2
host:$aws_endpoint_url_address
x-amz-content-sha256:UNSIGNED-PAYLOAD
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-date
UNSIGNED-PAYLOAD"
if ! generate_sts_string "$current_date_time" "$canonical_request"; then
log 2 "error generating sts string"
return 1
fi
if [ "$result" != "204" ]; then
delete_object_error=$(cat "$TEST_FILE_FOLDER/result.txt")
log 2 "expected '204', was '$result' ($delete_object_error)"
get_signature
# shellcheck disable=SC2154
reply=$(send_command curl -ks -w "%{http_code}" -X DELETE "$header://$aws_endpoint_url_address/$1/$2" \
-H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-H "x-amz-content-sha256: UNSIGNED-PAYLOAD" \
-H "x-amz-date: $current_date_time" \
-o "$TEST_FILE_FOLDER"/delete_object_error.txt 2>&1)
if [[ "$reply" != "204" ]]; then
log 2 "delete object command returned error: $(cat "$TEST_FILE_FOLDER"/delete_object_error.txt)"
return 1
fi
return 0
}
}
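generate_sts_string and get_signature are defined elsewhere in the suite; for reference, the standard SigV4 derivation they presumably implement hashes the canonical request into a string to sign, then runs an HMAC chain over the date, region, service, and request type. A self-contained sketch using openssl (an assumption about the helpers; names here are illustrative):

hmac_sha256() {
# $1: openssl key spec ("key:<string>" or "hexkey:<hex>"), $2: data; prints hex digest
printf '%s' "$2" | openssl dgst -sha256 -mac HMAC -macopt "$1" | awk '{print $NF}'
}

sigv4_signature() {
# $1: x-amz-date value (e.g. 20250101T000000Z), $2: full canonical request text
local date_time=$1 canonical_request=$2
local ymd=${date_time%%T*}
local cr_hash string_to_sign k_date k_region k_service k_signing
cr_hash=$(printf '%s' "$canonical_request" | openssl dgst -sha256 | awk '{print $NF}')
string_to_sign="AWS4-HMAC-SHA256
$date_time
$ymd/$AWS_REGION/s3/aws4_request
$cr_hash"
k_date=$(hmac_sha256 "key:AWS4$AWS_SECRET_ACCESS_KEY" "$ymd")
k_region=$(hmac_sha256 "hexkey:$k_date" "$AWS_REGION")
k_service=$(hmac_sha256 "hexkey:$k_region" "s3")
k_signing=$(hmac_sha256 "hexkey:$k_service" "aws4_request")
hmac_sha256 "hexkey:$k_signing" "$string_to_sign"
}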

View File

@@ -37,26 +37,6 @@ get_bucket_ownership_controls() {
return 0
}
get_bucket_ownership_controls_rest() {
if ! check_param_count "get_bucket_ownership_controls_rest" "bucket" 1 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/ownershipControls.txt" ./tests/rest_scripts/get_bucket_ownership_controls.sh); then
log 2 "error getting bucket ownership controls: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "GetBucketOwnershipControls returned response code: $result, reply: $(cat "$TEST_FILE_FOLDER/ownershipControls.txt")"
return 1
fi
log 5 "controls: $(cat "$TEST_FILE_FOLDER/ownershipControls.txt")"
if ! rule=$(xmllint --xpath '//*[local-name()="ObjectOwnership"]/text()' "$TEST_FILE_FOLDER/ownershipControls.txt" 2>&1); then
log 2 "error getting ownership rule: $rule"
return 1
fi
echo "$rule"
}
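The local-name() XPath matches elements regardless of the default namespace on the S3 response, which xmllint would otherwise require registering explicitly. For example:

# the namespace-agnostic query matches even with the S3 xmlns present
printf '%s' '<OwnershipControls xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ObjectOwnership>BucketOwnerEnforced</ObjectOwnership></Rule></OwnershipControls>' \
| xmllint --xpath '//*[local-name()="ObjectOwnership"]/text()' -
# prints: BucketOwnerEnforced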
get_object_ownership_rule() {
if [[ -n "$SKIP_BUCKET_OWNERSHIP_CONTROLS" ]]; then
log 5 "Skipping get bucket ownership controls"

Some files were not shown because too many files have changed in this diff.