Compare commits


1 Commit

Author SHA1 Message Date
Ben McClelland
db9cefa27c feat: add plugin backend
This new plugin backend allows loading a shared object to
implement the backend storage logic. See the following for
caveats with Go plugins: https://pkg.go.dev/plugin#hdr-Warnings.

This also requires cgo to be enabled in the builds, which is
currently disabled for GitHub releases. So for now this requires
building the gateway from source.
2025-04-11 11:02:14 -07:00
130 changed files with 3481 additions and 5745 deletions

View File

@@ -1,5 +1,5 @@
name: azurite functional tests
permissions: {}
on: pull_request
jobs:

View File

@@ -1,5 +1,5 @@
name: docker bats tests
permissions: {}
on: pull_request
jobs:

View File

@@ -1,4 +1,5 @@
name: Publish Docker image
on:
release:
types: [published]

View File

@@ -1,5 +1,5 @@
name: functional tests
permissions: {}
on: pull_request
jobs:

View File

@@ -1,10 +1,9 @@
name: general
permissions: {}
on: pull_request
jobs:
build:
name: Go Basic Checks
name: Build
runs-on: ubuntu-latest
steps:
@@ -24,6 +23,9 @@ jobs:
run: |
go get -v -t -d ./...
- name: Build
run: make
- name: Test
run: go test -coverprofile profile.txt -race -v -timeout 30s -tags=github ./...
@@ -33,26 +35,4 @@ jobs:
- name: Run govulncheck
run: govulncheck ./...
shell: bash
verify-build:
name: Verify Build Targets
needs: build
runs-on: ubuntu-latest
strategy:
matrix:
os: [darwin, freebsd, linux]
arch: [amd64, arm64]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: Build for ${{ matrix.os }}/${{ matrix.arch }}
run: |
GOOS=${{ matrix.os }} GOARCH=${{ matrix.arch }} go build -o versitygw-${{ matrix.os }}-${{ matrix.arch }} cmd/versitygw/*.go
shell: bash

View File

@@ -1,12 +1,16 @@
name: goreleaser
permissions:
contents: write
on:
push:
# run only against tags
tags:
- '*'
permissions:
contents: write
# packages: write
# issues: write
jobs:
goreleaser:
runs-on: ubuntu-latest
@@ -25,10 +29,10 @@ jobs:
go-version: stable
- name: Run Releaser
uses: goreleaser/goreleaser-action@v6
uses: goreleaser/goreleaser-action@v5
with:
distribution: goreleaser
version: '~> v2'
version: latest
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.TOKEN }}

View File

@@ -1,13 +0,0 @@
name: host style tests
permissions: {}
on: pull_request
jobs:
build-and-run:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: run host-style tests
run: make test-host-style

View File

@@ -1,5 +1,4 @@
name: shellcheck
permissions: {}
on: pull_request
jobs:

View File

@@ -1,5 +1,4 @@
name: staticcheck
permissions: {}
on: pull_request
jobs:

View File

@@ -1,5 +1,4 @@
name: system tests
permissions: {}
on: pull_request
jobs:
build:
@@ -13,79 +12,66 @@ jobs:
IAM_TYPE: folder
RUN_SET: "mc-non-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "mc, posix, file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "mc-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "REST, posix, non-static, all, folder IAM"
IAM_TYPE: folder
RUN_SET: "rest"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3, posix, non-file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3-non-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3, posix, file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, bucket|object|multipart, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, policy, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-policy"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, user, non-static, s3 IAM"
IAM_TYPE: s3
RUN_SET: "s3api-user"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, bucket, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-bucket"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
- set: "s3api, posix, multipart, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-multipart"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
- set: "s3api, posix, object, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-object"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
- set: "s3api, posix, policy, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-policy"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
- set: "s3api, posix, user, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-user"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
# TODO fix/debug s3 gateway
#- set: "s3api, s3, multipart|object, non-static, folder IAM"
@@ -102,19 +88,16 @@ jobs:
IAM_TYPE: folder
RUN_SET: "s3cmd-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3cmd, posix, non-user, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-non-user"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3cmd, posix, user, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-user"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
steps:
- name: Check out code into the Go module directory
@@ -123,7 +106,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "stable"
go-version: 'stable'
id: go
- name: Get Dependencies
@@ -139,7 +122,6 @@ jobs:
- name: Install s3cmd
run: |
sudo apt-get update
sudo apt-get install s3cmd
- name: Install mc
@@ -147,10 +129,9 @@ jobs:
curl https://dl.min.io/client/mc/release/linux-amd64/mc --create-dirs -o /usr/local/bin/mc
chmod 755 /usr/local/bin/mc
- name: Install xml libraries (for rest)
- name: Install xmllint (for rest)
run: |
sudo apt-get update
sudo apt-get install libxml2-utils xmlstarlet
sudo apt-get install libxml2-utils
# see https://github.com/versity/versitygw/issues/1034
- name: Install AWS cli
@@ -169,7 +150,6 @@ jobs:
RUN_VERSITYGW: true
BACKEND: ${{ matrix.BACKEND }}
RECREATE_BUCKETS: ${{ matrix.RECREATE_BUCKETS }}
DELETE_BUCKETS_AFTER_TEST: ${{ matrix.DELETE_BUCKETS_AFTER_TEST }}
CERT: ${{ github.workspace }}/cert.pem
KEY: ${{ github.workspace }}/versitygw.pem
LOCAL_FOLDER: /tmp/gw
@@ -193,8 +173,6 @@ jobs:
COMMAND_LOG: command.log
TIME_LOG: time.log
PYTHON_ENV_FOLDER: ${{ github.workspace }}/env
AUTOGENERATE_USERS: true
USER_AUTOGENERATION_PREFIX: github-actions-test-
run: |
make testbin
export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST

View File

@@ -1,5 +1,3 @@
version: 2
before:
hooks:
- go mod tidy
@@ -25,7 +23,7 @@ builds:
- -X=main.Build={{.Commit}} -X=main.BuildTime={{.Date}} -X=main.Version={{.Version}}
archives:
- formats: [ 'tar.gz' ]
- format: tar.gz
# this name template makes the OS and Arch compatible with the results of uname.
name_template: >-
{{ .ProjectName }}_v{{ .Version }}_
@@ -45,7 +43,7 @@ archives:
# use zip for windows archives
format_overrides:
- goos: windows
formats: [ 'zip' ]
format: zip
# Additional files/globs you want to add to the archive.
#
@@ -60,7 +58,7 @@ checksum:
name_template: 'checksums.txt'
snapshot:
version_template: "{{ incpatch .Version }}-{{.ShortCommit}}"
name_template: "{{ incpatch .Version }}-next"
changelog:
sort: asc
@@ -88,7 +86,7 @@ nfpms:
license: Apache 2.0
ids:
builds:
- versitygw
formats:

View File

@@ -72,11 +72,6 @@ dist:
rm -f VERSION
gzip -f $(TARFILE)
.PHONY: snapshot
snapshot:
# brew install goreleaser/tap/goreleaser
goreleaser release --snapshot --skip publish --clean
# Creates and runs an S3 gateway instance in a docker container
.PHONY: up-posix
up-posix:
@@ -96,9 +91,3 @@ up-azurite:
.PHONY: up-app
up-app:
$(DOCKERCOMPOSE) up
# Run the host-style tests in docker containers
.PHONY: test-host-style
test-host-style:
docker compose -f tests/host-style-tests/docker-compose.yml up --build --abort-on-container-exit --exit-code-from test

View File

@@ -18,8 +18,6 @@ import (
"errors"
"fmt"
"time"
"github.com/versity/versitygw/s3err"
)
type Role string
@@ -59,19 +57,10 @@ type ListUserAccountsResult struct {
// Mutable props, which could be changed when updating an IAM account
type MutableProps struct {
Secret *string `json:"secret"`
Role Role `json:"role"`
UserID *int `json:"userID"`
GroupID *int `json:"groupID"`
}
func (m MutableProps) Validate() error {
if m.Role != "" && !m.Role.IsValid() {
return s3err.GetAPIError(s3err.ErrAdminInvalidUserRole)
}
return nil
}
func updateAcc(acc *Account, props MutableProps) {
if props.Secret != nil {
acc.Secret = *props.Secret
@@ -82,9 +71,6 @@ func updateAcc(acc *Account, props MutableProps) {
if props.UserID != nil {
acc.UserID = *props.UserID
}
if props.Role != "" {
acc.Role = props.Role
}
}
// IAMService is the interface for all IAM service implementations

View File

@@ -290,49 +290,93 @@ func (s *IAMServiceInternal) readIAMData() ([]byte, error) {
func (s *IAMServiceInternal) storeIAM(update UpdateAcctFunc) error {
// We are going to be racing with other running gateways without any
// coordination. So the strategy here is to read the current file data,
// update the data, write back out to a temp file, then rename the
// temp file to the original file. This rename will replace the
// original file with the new file. This is atomic and should always
// allow for a consistent view of the data. There is a small
// window where the file could be read and then updated by
// another process. In this case any updates the other process did
// will be lost. This is a limitation of the internal IAM service.
// This should be rare, and even when it does happen should result
// in a valid IAM file, just without the other process's updates.
// coordination. So the strategy here is to read the current file data.
// If the file doesn't exist, then we assume someone else is currently
// updating the file. So we just need to keep retrying. We also need
// to make sure the data is consistent within a single update. So racing
// writes to a file would possibly leave this in some invalid state.
// We can get atomic updates with rename: read the data, update
// the data, write to a temp file, then rename the temp file back to the
// data file. This should always result in a complete data image.
iamFname := filepath.Join(s.dir, iamFile)
backupFname := filepath.Join(s.dir, iamBackupFile)
// There is at least one unsolved failure mode here.
// If a gateway removes the data file and then crashes, all other
// gateways will retry forever thinking that the original will eventually
// write the file.
b, err := os.ReadFile(iamFname)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("read iam file: %w", err)
}
retries := 0
fname := filepath.Join(s.dir, iamFile)
// save copy of data
datacopy := make([]byte, len(b))
copy(datacopy, b)
for {
b, err := os.ReadFile(fname)
if errors.Is(err, fs.ErrNotExist) {
// racing with someone else updating
// keep retrying after backoff
retries++
if retries < maxretry {
time.Sleep(backoff)
continue
}
// make a backup copy in case something happens
err = s.writeUsingTempFile(b, backupFname)
if err != nil {
return fmt.Errorf("write backup iam file: %w", err)
}
// we have been unsuccessful trying to read the iam file
// so this must be the case where something happened and
// the file did not get updated successfully, and probably
// isn't going to be. The recovery procedure would be to
// copy the backup file into place of the original.
return fmt.Errorf("no iam file, needs backup recovery")
}
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("read iam file: %w", err)
}
b, err = update(b)
if err != nil {
return fmt.Errorf("update iam data: %w", err)
}
// reset retries on successful read
retries = 0
err = s.writeUsingTempFile(b, iamFname)
if err != nil {
return fmt.Errorf("write iam file: %w", err)
err = os.Remove(fname)
if errors.Is(err, fs.ErrNotExist) {
// racing with someone else updating
// keep retrying after backoff
time.Sleep(backoff)
continue
}
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove old iam file: %w", err)
}
// save copy of data
datacopy := make([]byte, len(b))
copy(datacopy, b)
// make a backup copy in case we crash before update
// this happens after the remove, so there is a small window where
// something can go wrong, but the remove acts as a barrier that keeps
// other gateways from writing the backup at the same time. Only one
// gateway will successfully remove the file.
os.WriteFile(filepath.Join(s.dir, iamBackupFile), b, iamMode)
b, err = update(b)
if err != nil {
// update failed, try to write old data back out
os.WriteFile(fname, datacopy, iamMode)
return fmt.Errorf("update iam data: %w", err)
}
err = s.writeTempFile(b)
if err != nil {
// update failed, try to write old data back out
os.WriteFile(fname, datacopy, iamMode)
return err
}
break
}
return nil
}
func (s *IAMServiceInternal) writeUsingTempFile(b []byte, fname string) error {
func (s *IAMServiceInternal) writeTempFile(b []byte) error {
fname := filepath.Join(s.dir, iamFile)
f, err := os.CreateTemp(s.dir, iamFile)
if err != nil {
return fmt.Errorf("create temp file: %w", err)
@@ -340,7 +384,6 @@ func (s *IAMServiceInternal) writeUsingTempFile(b []byte, fname string) error {
defer os.Remove(f.Name())
_, err = f.Write(b)
f.Close()
if err != nil {
return fmt.Errorf("write temp file: %w", err)
}
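
The write-temp-then-rename strategy described in the comments above is worth seeing in isolation. Below is a minimal standalone sketch of the pattern, not the gateway's actual code; the helper name and file names are hypothetical, and it assumes a POSIX filesystem where rename within a directory is atomic.

package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
)

// atomicWrite writes data to a temp file in dir, then renames it over
// dir/name. Readers always observe either the old or the new contents,
// never a partial write, because the rename is the atomic commit point.
func atomicWrite(dir, name string, data []byte, mode os.FileMode) error {
    f, err := os.CreateTemp(dir, name)
    if err != nil {
        return fmt.Errorf("create temp file: %w", err)
    }
    // best-effort cleanup; harmless after a successful rename
    defer os.Remove(f.Name())

    if _, err := f.Write(data); err != nil {
        f.Close()
        return fmt.Errorf("write temp file: %w", err)
    }
    if err := f.Chmod(mode); err != nil {
        f.Close()
        return fmt.Errorf("chmod temp file: %w", err)
    }
    if err := f.Close(); err != nil {
        return fmt.Errorf("close temp file: %w", err)
    }
    return os.Rename(f.Name(), filepath.Join(dir, name))
}

func main() {
    // hypothetical file name and contents, for illustration only
    if err := atomicWrite(os.TempDir(), "users.json", []byte(`{}`), 0o600); err != nil {
        log.Fatal(err)
    }
}

As the comments in the diff note, rename gives a consistent view but not coordination: concurrent writers can still lose each other's updates, which is why storeIAM layers the remove/retry/backup protocol on top.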

View File

@@ -30,7 +30,6 @@ import (
"net/http"
"net/http/cookiejar"
"net/url"
"slices"
"strconv"
"strings"
)
@@ -53,6 +52,7 @@ type IpaIAMService struct {
var _ IAMService = &IpaIAMService{}
func NewIpaIAMService(rootAcc Account, host, vaultName, username, password string, isInsecure, debug bool) (*IpaIAMService, error) {
ipa := IpaIAMService{
id: 0,
version: IpaVersion,
@@ -72,7 +72,6 @@ func NewIpaIAMService(rootAcc Account, host, vaultName, username, password strin
mTLSConfig := &tls.Config{InsecureSkipVerify: isInsecure}
tr := &http.Transport{
TLSClientConfig: mTLSConfig,
Proxy: http.ProxyFromEnvironment,
}
ipa.client = http.Client{Jar: jar, Transport: tr}
@@ -103,7 +102,13 @@ func NewIpaIAMService(rootAcc Account, host, vaultName, username, password strin
ipa.kraTransportKey = cert.PublicKey.(*rsa.PublicKey)
isSupported := slices.Contains(vaultConfig.Wrapping_supported_algorithms, "aes-128-cbc")
isSupported := false
for _, algo := range vaultConfig.Wrapping_supported_algorithms {
if algo == "aes-128-cbc" {
isSupported = true
break
}
}
if !isSupported {
return nil,

View File

@@ -139,9 +139,6 @@ func (ld *LdapIAMService) UpdateUserAccount(access string, props MutableProps) e
if props.UserID != nil {
req.Replace(ld.userIdAtr, []string{fmt.Sprint(*props.UserID)})
}
if props.Role != "" {
req.Replace(ld.roleAtr, []string{string(props.Role)})
}
err := ld.conn.Modify(req)
//TODO: Handle non existing user case

View File

@@ -181,9 +181,11 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
return err
}
acl, err := auth.ParseACL(aclBytes)
if err != nil {
return err
var acl auth.ACL
if len(aclBytes) > 0 {
if err := json.Unmarshal(aclBytes, &acl); err != nil {
return fmt.Errorf("unmarshal acl: %w", err)
}
}
if acl.Owner == acct.Access {
@@ -293,7 +295,7 @@ func (az *Azure) DeleteBucketOwnershipControls(ctx context.Context, bucket strin
}
func (az *Azure) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
tags, err := backend.ParseObjectTags(getString(po.Tagging))
tags, err := parseTags(po.Tagging)
if err != nil {
return s3response.PutObjectOutput{}, err
}
@@ -416,7 +418,7 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.G
var opts *azblob.DownloadStreamOptions
if *input.Range != "" {
offset, count, isValid, err := backend.ParseObjectRange(*resp.ContentLength, *input.Range)
offset, count, isValid, err := backend.ParseGetObjectRange(*resp.ContentLength, *input.Range)
if err != nil {
return nil, err
}
@@ -505,26 +507,10 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
if err != nil {
return nil, azureErrToS3Err(err)
}
var size int64
if resp.ContentLength != nil {
size = *resp.ContentLength
}
startOffset, length, isValid, err := backend.ParseObjectRange(size, getString(input.Range))
if err != nil {
return nil, err
}
var contentRange string
if isValid {
contentRange = fmt.Sprintf("bytes %v-%v/%v",
startOffset, startOffset+length-1, size)
}
result := &s3.HeadObjectOutput{
ContentRange: &contentRange,
AcceptRanges: backend.GetPtrFromString("bytes"),
ContentLength: &length,
AcceptRanges: resp.AcceptRanges,
ContentLength: resp.ContentLength,
ContentType: resp.ContentType,
ContentEncoding: resp.ContentEncoding,
ContentLanguage: resp.ContentLanguage,
@@ -605,9 +591,9 @@ func (az *Azure) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s
return s3response.ListObjectsResult{}, azureErrToS3Err(err)
}
acl, err := auth.ParseACL(aclBytes)
if err != nil {
return s3response.ListObjectsResult{}, err
var acl auth.ACL
if err := json.Unmarshal(aclBytes, &acl); err != nil {
return s3response.ListObjectsResult{}, fmt.Errorf("unmarshal acl: %w", err)
}
Pager:
@@ -708,9 +694,8 @@ func (az *Azure) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input
return s3response.ListObjectsV2Result{}, azureErrToS3Err(err)
}
acl, err = auth.ParseACL(aclBytes)
if err != nil {
return s3response.ListObjectsV2Result{}, err
if err := json.Unmarshal(aclBytes, &acl); err != nil {
return s3response.ListObjectsV2Result{}, fmt.Errorf("unmarshal acl: %w", err)
}
}
@@ -822,14 +807,14 @@ func (az *Azure) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput
}, nil
}
func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
dstClient, err := az.getBlobClient(*input.Bucket, *input.Key)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource {
if input.MetadataDirective != types.MetadataDirectiveReplace {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
}
// Set object meta http headers
@@ -841,7 +826,7 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
BlobContentType: input.ContentType,
}, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
meta := input.Metadata
@@ -856,14 +841,14 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
// Set object metadata
_, err = dstClient.SetMetadata(ctx, parseMetadata(meta), nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
// Set object legal hold
if input.ObjectLockLegalHoldStatus != "" {
err = az.PutObjectLegalHold(ctx, *input.Bucket, *input.Key, "", input.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
}
// Set object retention
@@ -877,28 +862,28 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
retParsed, err := json.Marshal(retention)
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("parse object retention: %w", err)
return nil, fmt.Errorf("parse object retention: %w", err)
}
err = az.PutObjectRetention(ctx, *input.Bucket, *input.Key, "", true, retParsed)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
}
// Set object Tagging, if tagging directive is "REPLACE"
if input.TaggingDirective == types.TaggingDirectiveReplace {
tags, err := backend.ParseObjectTags(getString(input.Tagging))
tags, err := parseTags(input.Tagging)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
_, err = dstClient.SetTags(ctx, tags, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
}
return s3response.CopyObjectOutput{
CopyObjectResult: &s3response.CopyObjectResult{
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{
LastModified: res.LastModified,
ETag: (*string)(res.ETag),
},
@@ -907,13 +892,13 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
srcBucket, srcObj, _, err := backend.ParseCopySource(*input.CopySource)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
// Get the source object
downloadResp, err := az.client.DownloadStream(ctx, srcBucket, srcObj, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
pInput := s3response.PutObjectInput{
@@ -951,28 +936,28 @@ func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInpu
// Create the destination object
resp, err := az.PutObject(ctx, pInput)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
// Copy the object tagging, if tagging directive is "COPY"
if input.TaggingDirective == types.TaggingDirectiveCopy {
srcClient, err := az.getBlobClient(srcBucket, srcObj)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
res, err := srcClient.GetTags(ctx, nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
_, err = dstClient.SetTags(ctx, parseAzTags(res.BlobTagSet), nil)
if err != nil {
return s3response.CopyObjectOutput{}, azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
}
return s3response.CopyObjectOutput{
CopyObjectResult: &s3response.CopyObjectResult{
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{
ETag: &resp.ETag,
},
}, nil
@@ -1049,9 +1034,20 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
}
// parse object tags
tags, err := backend.ParseObjectTags(getString(input.Tagging))
if err != nil {
return s3response.InitiateMultipartUploadResult{}, err
tagsStr := getString(input.Tagging)
tags := map[string]string{}
if tagsStr != "" {
tagParts := strings.Split(tagsStr, "&")
for _, prt := range tagParts {
p := strings.Split(prt, "=")
if len(p) != 2 {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidTag)
}
if len(p[0]) > 128 || len(p[1]) > 256 {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidTag)
}
tags[p[0]] = p[1]
}
}
// set blob legal hold status in metadata
@@ -1091,7 +1087,7 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
// Create an empty blob in .sgwtmp/multipart/<uploadId>/<object hash>
// The blob indicates multipart upload initialization and holds the mp metadata
// e.g. tagging, content-type, metadata, object lock status ...
_, err = az.client.UploadBuffer(ctx, *input.Bucket, tmpPath, []byte{}, opts)
_, err := az.client.UploadBuffer(ctx, *input.Bucket, tmpPath, []byte{}, opts)
if err != nil {
return s3response.InitiateMultipartUploadResult{}, azureErrToS3Err(err)
}
@@ -1365,44 +1361,42 @@ func (az *Azure) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultip
// Copies the multipart metadata from .sgwtmp namespace into the newly created blob
// Deletes the multipart upload 'blob' from .sgwtmp namespace
// It indicates the end of the multipart upload
func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
var res s3response.CompleteMultipartUploadResult
func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
tmpPath := createMetaTmpPath(*input.Key, *input.UploadId)
blobClient, err := az.getBlobClient(*input.Bucket, tmpPath)
if err != nil {
return res, "", err
return nil, err
}
props, err := blobClient.GetProperties(ctx, nil)
if err != nil {
return res, "", parseMpError(err)
return nil, parseMpError(err)
}
tags, err := blobClient.GetTags(ctx, nil)
if err != nil {
return res, "", parseMpError(err)
return nil, parseMpError(err)
}
client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
if err != nil {
return res, "", err
return nil, err
}
blockIds := []string{}
blockList, err := client.GetBlockList(ctx, blockblob.BlockListTypeUncommitted, nil)
if err != nil {
return res, "", azureErrToS3Err(err)
return nil, azureErrToS3Err(err)
}
if len(blockList.UncommittedBlocks) != len(input.MultipartUpload.Parts) {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
uncommittedBlocks := map[int32]*blockblob.Block{}
for _, el := range blockList.UncommittedBlocks {
ptNumber, err := decodeBlockId(backend.GetStringFromPtr(el.Name))
if err != nil {
return res, "", fmt.Errorf("invalid block name: %w", err)
return nil, fmt.Errorf("invalid block name: %w", err)
}
uncommittedBlocks[int32(ptNumber)] = el
@@ -1414,35 +1408,35 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete
last := len(blockList.UncommittedBlocks) - 1
for i, part := range input.MultipartUpload.Parts {
if part.PartNumber == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.PartNumber < 1 {
return res, "", s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
return nil, s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
}
if *part.PartNumber <= partNumber {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPartOrder)
return nil, s3err.GetAPIError(s3err.ErrInvalidPartOrder)
}
partNumber = *part.PartNumber
block, ok := uncommittedBlocks[*part.PartNumber]
if !ok {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.ETag != *block.Name {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
// all parts except the last need to be greater than
// the minimum allowed size (5 MiB)
if i < last && *block.Size < backend.MinPartSize {
return res, "", s3err.GetAPIError(s3err.ErrEntityTooSmall)
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
}
totalSize += *block.Size
blockIds = append(blockIds, *block.Name)
}
if input.MpuObjectSize != nil && totalSize != *input.MpuObjectSize {
return res, "", s3err.GetIncorrectMpObjectSizeErr(totalSize, *input.MpuObjectSize)
return nil, s3err.GetIncorrectMpObjectSizeErr(totalSize, *input.MpuObjectSize)
}
opts := &blockblob.CommitBlockListOptions{
@@ -1459,20 +1453,20 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete
resp, err := client.CommitBlockList(ctx, blockIds, opts)
if err != nil {
return res, "", parseMpError(err)
return nil, parseMpError(err)
}
// cleanup the multipart upload
_, err = blobClient.Delete(ctx, nil)
if err != nil {
return res, "", parseMpError(err)
return nil, parseMpError(err)
}
return s3response.CompleteMultipartUploadResult{
return &s3.CompleteMultipartUploadOutput{
Bucket: input.Bucket,
Key: input.Key,
ETag: (*string)(resp.ETag),
}, "", nil
}, nil
}
func (az *Azure) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
@@ -1824,6 +1818,24 @@ func parseAzMetadata(m map[string]*string) map[string]string {
return meta
}
func parseTags(tagstr *string) (map[string]string, error) {
tagsStr := getString(tagstr)
tags := make(map[string]string)
if tagsStr != "" {
tagParts := strings.Split(tagsStr, "&")
for _, prt := range tagParts {
p := strings.Split(prt, "=")
if len(p) != 2 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
}
tags[p[0]] = p[1]
}
}
return tags, nil
}
func parseAzTags(tagSet []*blob.Tags) map[string]string {
tags := map[string]string{}
for _, tag := range tagSet {
@@ -1964,9 +1976,11 @@ func (az *Azure) deleteContainerMetaData(ctx context.Context, bucket, key string
}
func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
var acl auth.ACL
data, ok := meta[string(key)]
if !ok {
return &auth.ACL{}, nil
return &acl, nil
}
value, err := decodeString(*data)
@@ -1974,9 +1988,13 @@ func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
return nil, err
}
acl, err := auth.ParseACL(value)
if len(value) == 0 {
return &acl, nil
}
err = json.Unmarshal(value, &acl)
if err != nil {
return nil, err
return nil, fmt.Errorf("unmarshal acl: %w", err)
}
return &acl, nil

View File

@@ -40,7 +40,7 @@ func azErrToS3err(azErr *azcore.ResponseError) s3err.APIError {
case "BlobNotFound":
return s3err.GetAPIError(s3err.ErrNoSuchKey)
case "TagsTooLarge":
return s3err.GetAPIError(s3err.ErrInvalidTagValue)
return s3err.GetAPIError(s3err.ErrInvalidTag)
case "Requested Range Not Satisfiable":
return s3err.GetAPIError(s3err.ErrInvalidRange)
}

View File

@@ -52,7 +52,7 @@ type Backend interface {
// multipart operations
CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (_ s3response.CompleteMultipartUploadResult, versionid string, _ error)
CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error
ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)
ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error)
@@ -65,7 +65,7 @@ type Backend interface {
GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error)
GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error)
CopyObject(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error)
CopyObject(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error)
ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error)
ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error)
DeleteObject(context.Context, *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
@@ -100,6 +100,10 @@ type Backend interface {
ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error)
}
// InterfaceVersion tracks changes to the Backend interface for plugins.
// Increment this when the Backend interface changes.
const InterfaceVersion = 1
type BackendUnsupported struct{}
var _ Backend = &BackendUnsupported{}
@@ -166,8 +170,8 @@ func (BackendUnsupported) DeleteBucketCors(_ context.Context, bucket string) err
func (BackendUnsupported) CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
return s3response.CompleteMultipartUploadResult{}, "", s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -200,8 +204,8 @@ func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
return s3response.GetObjectAttributesResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) CopyObject(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
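
BackendUnsupported pairs naturally with the new InterfaceVersion constant: a partial backend embeds it and overrides only the operations it supports, with everything else returning ErrNotImplemented. A hedged sketch, assuming the value-receiver methods shown in this diff:

package main

import (
    "context"

    "github.com/versity/versitygw/backend"
    "github.com/versity/versitygw/s3response"
)

// myBackend overrides only ListBuckets; every other Backend method is
// promoted from the embedded BackendUnsupported and reports
// ErrNotImplemented to the client.
type myBackend struct {
    backend.BackendUnsupported
}

func (myBackend) ListBuckets(ctx context.Context, input s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
    return s3response.ListAllMyBucketsResult{}, nil // illustrative stub
}

var _ backend.Backend = &myBackend{}

func main() {}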

View File

@@ -17,17 +17,11 @@ package backend
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"io/fs"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
@@ -89,11 +83,11 @@ var (
errInvalidCopySourceRange = s3err.GetAPIError(s3err.ErrInvalidCopySourceRange)
)
// ParseObjectRange parses input range header and returns startoffset, length, isValid
// ParseGetObjectRange parses input range header and returns startoffset, length, isValid
// and error. If no endoffset specified, then length is set to the object size
// for invalid inputs, it returns no error, but isValid=false
// `InvalidRange` error is returned only if startoffset is greater than the object size
func ParseObjectRange(size int64, acceptRange string) (int64, int64, bool, error) {
func ParseGetObjectRange(size int64, acceptRange string) (int64, int64, bool, error) {
if acceptRange == "" {
return 0, size, false, nil
}
@@ -114,17 +108,15 @@ func ParseObjectRange(size int64, acceptRange string) (int64, int64, bool, error
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil && bRange[0] != "" {
if err != nil {
return 0, size, false, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
if bRange[1] == "" {
if bRange[0] == "" {
return 0, size, false, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
return startOffset, size - startOffset, true, nil
}
@@ -133,22 +125,12 @@ func ParseObjectRange(size int64, acceptRange string) (int64, int64, bool, error
return 0, size, false, nil
}
if startOffset > endOffset {
if endOffset < startOffset {
return 0, size, false, nil
}
// for ranges like 'bytes=-100' return the last bytes specified with 'endOffset'
if bRange[0] == "" {
endOffset = min(endOffset, size)
return size - endOffset, endOffset, true, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
if endOffset >= size {
endOffset = size - 1
return startOffset, size - startOffset, true, nil
}
return startOffset, endOffset - startOffset + 1, true, nil
@@ -230,81 +212,27 @@ func ParseCopySource(copySourceHeader string) (string, string, string, error) {
}
// ParseObjectTags parses the url encoded input string into
// map[string]string with unescaped key/value pair
func ParseObjectTags(tagging string) (map[string]string, error) {
if tagging == "" {
// map[string]string key-value tag set
func ParseObjectTags(t string) (map[string]string, error) {
if t == "" {
return nil, nil
}
tagSet := make(map[string]string)
tagging := make(map[string]string)
for tagging != "" {
var tag string
tag, tagging, _ = strings.Cut(tagging, "&")
// if 'tag' before the first appearance of '&' is empty continue
if tag == "" {
continue
tagParts := strings.Split(t, "&")
for _, prt := range tagParts {
p := strings.Split(prt, "=")
if len(p) != 2 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
}
key, value, found := strings.Cut(tag, "=")
// if key is empty, but "=" is present, return invalid url encoding err
if found && key == "" {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
if len(p[0]) > 128 || len(p[1]) > 256 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
}
// return invalid tag key, if the key is longer than 128
if len(key) > 128 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}
// return invalid tag value, if tag value is longer than 256
if len(value) > 256 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}
// query unescape tag key
key, err := url.QueryUnescape(key)
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
}
// query unescape tag value
value, err = url.QueryUnescape(value)
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
}
// check tag key to be valid
if !isValidTagComponent(key) {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}
// check tag value to be valid
if !isValidTagComponent(value) {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}
// duplicate keys are not allowed: return invalid url encoding err
_, ok := tagSet[key]
if ok {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
}
tagSet[key] = value
tagging[p[0]] = p[1]
}
return tagSet, nil
}
var validTagComponent = regexp.MustCompile(`^[a-zA-Z0-9:/_.\-+ ]+$`)
// isValidTagComponent matches strings which contain letters, decimal digits,
// and special chars: '/', '_', '-', '+', '.', ' ' (space)
func isValidTagComponent(str string) bool {
if str == "" {
return true
}
return validTagComponent.Match([]byte(str))
return tagging, nil
}
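
For reference, a short usage sketch of the tag parser; the input string and expected values are illustrative, and it assumes the versitygw module is importable:

package main

import (
    "fmt"
    "log"

    "github.com/versity/versitygw/backend"
)

func main() {
    // parses "k=v&k2=v2" pairs into a map; malformed pairs and
    // over-long keys (>128) or values (>256) return an invalid-tag error
    tags, err := backend.ParseObjectTags("env=prod&team=storage")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(tags["env"], tags["team"]) // prod storage
}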
func GetMultipartMD5(parts []types.CompletedPart) string {
@@ -341,65 +269,3 @@ func (f *FileSectionReadCloser) Read(p []byte) (int, error) {
func (f *FileSectionReadCloser) Close() error {
return f.F.Close()
}
// MoveFile moves a file from source to destination.
func MoveFile(source, destination string, perm os.FileMode) error {
// We use Rename as the atomic operation for object puts. The upload is
// written to a temp file to not conflict with any other simultaneous
// uploads. The final operation is to move the temp file into place for
// the object. This ensures the object semantics of last upload completed
// wins and is not some combination of writes from simultaneous uploads.
err := os.Rename(source, destination)
if err == nil || !errors.Is(err, syscall.EXDEV) {
return err
}
// Rename can fail if the source and destination are not on the same
// filesystem. The fallback is to copy the file and then remove the source.
// We need to be careful that the destination does not exist before copying
// to prevent any other simultaneous writes to the file.
sourceFile, err := os.Open(source)
if err != nil {
return fmt.Errorf("open source: %w", err)
}
defer sourceFile.Close()
var destFile *os.File
for {
destFile, err = os.OpenFile(destination, os.O_CREATE|os.O_EXCL|os.O_WRONLY, perm)
if err != nil {
if errors.Is(err, fs.ErrExist) {
if removeErr := os.Remove(destination); removeErr != nil {
return fmt.Errorf("remove existing destination: %w", removeErr)
}
continue
}
return fmt.Errorf("create destination: %w", err)
}
break
}
defer destFile.Close()
_, err = io.Copy(destFile, sourceFile)
if err != nil {
return fmt.Errorf("copy data: %w", err)
}
err = os.Remove(source)
if err != nil {
return fmt.Errorf("remove source: %w", err)
}
return nil
}
// GenerateEtag generates a new quoted etag from the provided hash.Hash
func GenerateEtag(h hash.Hash) string {
dataSum := h.Sum(nil)
return fmt.Sprintf("\"%s\"", hex.EncodeToString(dataSum[:]))
}
// AreEtagsSame compares 2 etags by ignoring quotes
func AreEtagsSame(e1, e2 string) bool {
return strings.Trim(e1, `"`) == strings.Trim(e2, `"`)
}
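
The range-parsing semantics documented above (startoffset, length, isValid, error) can be exercised directly. A sketch under the assumption that the function is exported as ParseObjectRange; the diff shows the same function under the ParseGetObjectRange name on the other side of the rename, and suffix-range handling differs between the two versions:

package main

import (
    "fmt"

    "github.com/versity/versitygw/backend"
)

func main() {
    const size = 100 // hypothetical object size in bytes

    // bounded range: startoffset 0, length 10, isValid true
    fmt.Println(backend.ParseObjectRange(size, "bytes=0-9"))

    // suffix range: one side of the diff returns the last 20 bytes,
    // the other treats an empty start offset as invalid
    fmt.Println(backend.ParseObjectRange(size, "bytes=-20"))

    // a start offset past the object size is the documented error case
    fmt.Println(backend.ParseObjectRange(size, "bytes=200-"))
}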

backend/plugin/plugin.go (new file, 516 lines)
View File

@@ -0,0 +1,516 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package vgwplugin
import (
"bufio"
"context"
"fmt"
"plugin"
"reflect"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
// The plugin backend is used to dynamically load a Go plugin at runtime.
// It loads the plugin and calls the InitPlugin function to initialize it.
// A config string option is passed to initialize the plugin; it is expected that the
// plugin will handle its own configuration and initialization from this.
// If the plugin cannot be loaded or initialized, it returns an error.
// The InitPlugin function should be defined in the plugin and should have
// the signature func(configfile string) (version int, err error).
// The plugin should also implement the backend.Backend interface functions.
// However, the plugin does not need to implement all functions of the
// backend.Backend interface. It can implement only the functions it needs.
// Any non-implemented functions will return an error indicating that
// the function is not implemented.
// The plugin file should be compiled with the same Go version as the
// application using it. The plugin file should be built with the
// -buildmode=plugin flag.
// Example: go build -buildmode=plugin -o myplugin.so myplugin.go
// See the following for caveats and details:
// https://pkg.go.dev/plugin#hdr-Warnings
// PluginBackend implements the backend.Backend interface using Go plugins.
type PluginBackend struct {
p *plugin.Plugin
}
// NewPluginBackend creates a new PluginBackend. The path parameter should
// point to the compiled plugin file (e.g., .so file).
func NewPluginBackend(path, config string) (*PluginBackend, error) {
p, err := plugin.Open(path)
if err != nil {
return nil, fmt.Errorf("failed to open plugin: %w", err)
}
initSymbol, err := p.Lookup("InitPlugin")
if err != nil {
return nil, fmt.Errorf("failed to lookup InitPlugin symbol: %w", err)
}
initFunc, ok := initSymbol.(func(string) (int, error))
if !ok {
return nil, fmt.Errorf("InitPlugin symbol is not a func() (int, error)")
}
version, err := initFunc(config)
if err != nil {
return nil, fmt.Errorf("InitPlugin failed: %w", err)
}
if version != backend.InterfaceVersion {
return nil, fmt.Errorf("plugin interface version mismatch: gateway %v, plugin %v",
backend.InterfaceVersion, version)
}
return &PluginBackend{p: p}, nil
}
func (p *PluginBackend) callPluginFunc(name string, args []any) ([]reflect.Value, error) {
symbol, err := p.p.Lookup(name)
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
symbolValue := reflect.ValueOf(symbol)
if symbolValue.Kind() != reflect.Func {
return nil, fmt.Errorf("symbol %s is not a function", name)
}
numIn := symbolValue.Type().NumIn()
if len(args) != numIn {
return nil, fmt.Errorf("incorrect number of arguments for function %s, expected %d, got %d", name, numIn, len(args))
}
in := make([]reflect.Value, len(args))
for i := range args {
in[i] = reflect.ValueOf(args[i])
}
return symbolValue.Call(in), nil
}
func (p *PluginBackend) String() string { return "Plugin Gateway" }
func (p *PluginBackend) Shutdown() {}
func (p *PluginBackend) ListBuckets(ctx context.Context, input s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
results, err := p.callPluginFunc("ListBuckets", []any{ctx, input})
if err != nil {
return s3response.ListAllMyBucketsResult{}, err
}
return results[0].Interface().(s3response.ListAllMyBucketsResult), convertError(results[1])
}
func (p *PluginBackend) HeadBucket(ctx context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
results, err := p.callPluginFunc("HeadBucket", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.HeadBucketOutput), convertError(results[1])
}
func (p *PluginBackend) GetBucketAcl(ctx context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
results, err := p.callPluginFunc("GetBucketAcl", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, defaultACL []byte) error {
_, err := p.callPluginFunc("CreateBucket", []any{ctx, input, defaultACL})
return err
}
func (p *PluginBackend) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
_, err := p.callPluginFunc("PutBucketAcl", []any{ctx, bucket, data})
return err
}
func (p *PluginBackend) DeleteBucket(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucket", []any{ctx, bucket})
return err
}
func (p *PluginBackend) PutBucketVersioning(ctx context.Context, bucket string, status types.BucketVersioningStatus) error {
_, err := p.callPluginFunc("PutBucketVersioning", []any{ctx, bucket, status})
return err
}
func (p *PluginBackend) GetBucketVersioning(ctx context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
results, err := p.callPluginFunc("GetBucketVersioning", []any{ctx, bucket})
if err != nil {
return s3response.GetBucketVersioningOutput{}, err
}
return results[0].Interface().(s3response.GetBucketVersioningOutput), convertError(results[1])
}
func (p *PluginBackend) PutBucketPolicy(ctx context.Context, bucket string, policy []byte) error {
_, err := p.callPluginFunc("PutBucketPolicy", []any{ctx, bucket, policy})
return err
}
func (p *PluginBackend) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, error) {
results, err := p.callPluginFunc("GetBucketPolicy", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) DeleteBucketPolicy(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketPolicy", []any{ctx, bucket})
return err
}
func (p *PluginBackend) PutBucketOwnershipControls(ctx context.Context, bucket string, ownership types.ObjectOwnership) error {
_, err := p.callPluginFunc("PutBucketOwnershipControls", []any{ctx, bucket, ownership})
return err
}
func (p *PluginBackend) GetBucketOwnershipControls(ctx context.Context, bucket string) (types.ObjectOwnership, error) {
results, err := p.callPluginFunc("GetBucketOwnershipControls", []any{ctx, bucket})
if err != nil {
return "", err
}
return results[0].Interface().(types.ObjectOwnership), convertError(results[1])
}
func (p *PluginBackend) DeleteBucketOwnershipControls(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketOwnershipControls", []any{ctx, bucket})
return err
}
func (p *PluginBackend) PutBucketCors(ctx context.Context, data []byte) error {
_, err := p.callPluginFunc("PutBucketCors", []any{ctx, data})
return err
}
func (p *PluginBackend) GetBucketCors(ctx context.Context, bucket string) ([]byte, error) {
results, err := p.callPluginFunc("GetBucketCors", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) DeleteBucketCors(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketCors", []any{ctx, bucket})
return err
}
func (p *PluginBackend) CreateMultipartUpload(ctx context.Context, input s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
results, err := p.callPluginFunc("CreateMultipartUpload", []any{ctx, input})
if err != nil {
return s3response.InitiateMultipartUploadResult{}, err
}
return results[0].Interface().(s3response.InitiateMultipartUploadResult), convertError(results[1])
}
func (p *PluginBackend) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
results, err := p.callPluginFunc("CompleteMultipartUpload", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.CompleteMultipartUploadOutput), convertError(results[1])
}
func (p *PluginBackend) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
_, err := p.callPluginFunc("AbortMultipartUpload", []any{ctx, input})
return err
}
func (p *PluginBackend) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
results, err := p.callPluginFunc("ListMultipartUploads", []any{ctx, input})
if err != nil {
return s3response.ListMultipartUploadsResult{}, err
}
return results[0].Interface().(s3response.ListMultipartUploadsResult), convertError(results[1])
}
func (p *PluginBackend) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
results, err := p.callPluginFunc("ListParts", []any{ctx, input})
if err != nil {
return s3response.ListPartsResult{}, err
}
return results[0].Interface().(s3response.ListPartsResult), convertError(results[1])
}
func (p *PluginBackend) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
results, err := p.callPluginFunc("UploadPart", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.UploadPartOutput), convertError(results[1])
}
func (p *PluginBackend) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
results, err := p.callPluginFunc("UploadPartCopy", []any{ctx, input})
if err != nil {
return s3response.CopyPartResult{}, err
}
return results[0].Interface().(s3response.CopyPartResult), convertError(results[1])
}
func (p *PluginBackend) PutObject(ctx context.Context, input s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
results, err := p.callPluginFunc("PutObject", []any{ctx, input})
if err != nil {
return s3response.PutObjectOutput{}, err
}
return results[0].Interface().(s3response.PutObjectOutput), convertError(results[1])
}
func (p *PluginBackend) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
results, err := p.callPluginFunc("HeadObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.HeadObjectOutput), convertError(results[1])
}
func (p *PluginBackend) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
results, err := p.callPluginFunc("GetObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.GetObjectOutput), convertError(results[1])
}
func (p *PluginBackend) GetObjectAcl(ctx context.Context, input *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
results, err := p.callPluginFunc("GetObjectAcl", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.GetObjectAclOutput), convertError(results[1])
}
func (p *PluginBackend) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
results, err := p.callPluginFunc("GetObjectAttributes", []any{ctx, input})
if err != nil {
return s3response.GetObjectAttributesResponse{}, err
}
return results[0].Interface().(s3response.GetObjectAttributesResponse), convertError(results[1])
}
func (p *PluginBackend) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
results, err := p.callPluginFunc("CopyObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.CopyObjectOutput), convertError(results[1])
}
func (p *PluginBackend) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
results, err := p.callPluginFunc("ListObjects", []any{ctx, input})
if err != nil {
return s3response.ListObjectsResult{}, err
}
return results[0].Interface().(s3response.ListObjectsResult), convertError(results[1])
}
func (p *PluginBackend) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
results, err := p.callPluginFunc("ListObjectsV2", []any{ctx, input})
if err != nil {
return s3response.ListObjectsV2Result{}, err
}
return results[0].Interface().(s3response.ListObjectsV2Result), convertError(results[1])
}
func (p *PluginBackend) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
results, err := p.callPluginFunc("DeleteObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.DeleteObjectOutput), convertError(results[1])
}
func (p *PluginBackend) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
results, err := p.callPluginFunc("DeleteObjects", []any{ctx, input})
if err != nil {
return s3response.DeleteResult{}, err
}
return results[0].Interface().(s3response.DeleteResult), convertError(results[1])
}
func (p *PluginBackend) PutObjectAcl(ctx context.Context, input *s3.PutObjectAclInput) error {
_, err := p.callPluginFunc("PutObjectAcl", []any{ctx, input})
return err
}
func (p *PluginBackend) ListObjectVersions(ctx context.Context, input *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
results, err := p.callPluginFunc("ListObjectVersions", []any{ctx, input})
if err != nil {
return s3response.ListVersionsResult{}, err
}
return results[0].Interface().(s3response.ListVersionsResult), convertError(results[1])
}
func (p *PluginBackend) RestoreObject(ctx context.Context, input *s3.RestoreObjectInput) error {
_, err := p.callPluginFunc("RestoreObject", []any{ctx, input})
return err
}
func (p *PluginBackend) SelectObjectContent(ctx context.Context, input *s3.SelectObjectContentInput) func(w *bufio.Writer) {
results, err := p.callPluginFunc("SelectObjectContent", []any{ctx, input})
if err != nil {
return func(w *bufio.Writer) {}
}
return results[0].Interface().(func(w *bufio.Writer))
}
func (p *PluginBackend) GetBucketTagging(ctx context.Context, bucket string) (map[string]string, error) {
results, err := p.callPluginFunc("GetBucketTagging", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().(map[string]string), convertError(results[1])
}
func (p *PluginBackend) PutBucketTagging(ctx context.Context, bucket string, tags map[string]string) error {
_, err := p.callPluginFunc("PutBucketTagging", []any{ctx, bucket, tags})
return err
}
func (p *PluginBackend) DeleteBucketTagging(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketTagging", []any{ctx, bucket})
return err
}
func (p *PluginBackend) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
results, err := p.callPluginFunc("GetObjectTagging", []any{ctx, bucket, object})
if err != nil {
return nil, err
}
return results[0].Interface().(map[string]string), convertError(results[1])
}
func (p *PluginBackend) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
_, err := p.callPluginFunc("PutObjectTagging", []any{ctx, bucket, object, tags})
return err
}
func (p *PluginBackend) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
_, err := p.callPluginFunc("DeleteObjectTagging", []any{ctx, bucket, object})
return err
}
func (p *PluginBackend) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
_, err := p.callPluginFunc("PutObjectLockConfiguration", []any{ctx, bucket, config})
return err
}
func (p *PluginBackend) GetObjectLockConfiguration(ctx context.Context, bucket string) ([]byte, error) {
results, err := p.callPluginFunc("GetObjectLockConfiguration", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) PutObjectRetention(ctx context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
_, err := p.callPluginFunc("PutObjectRetention", []any{ctx, bucket, object, versionId, bypass, retention})
return err
}
func (p *PluginBackend) GetObjectRetention(ctx context.Context, bucket, object, versionId string) ([]byte, error) {
results, err := p.callPluginFunc("GetObjectRetention", []any{ctx, bucket, object, versionId})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) PutObjectLegalHold(ctx context.Context, bucket, object, versionId string, status bool) error {
_, err := p.callPluginFunc("PutObjectLegalHold", []any{ctx, bucket, object, versionId, status})
return err
}
func (p *PluginBackend) GetObjectLegalHold(ctx context.Context, bucket, object, versionId string) (*bool, error) {
results, err := p.callPluginFunc("GetObjectLegalHold", []any{ctx, bucket, object, versionId})
if err != nil {
return nil, err
}
val := results[0].Interface()
if val == nil {
return nil, convertError(results[1])
}
return val.(*bool), convertError(results[1])
}
func (p *PluginBackend) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
_, err := p.callPluginFunc("ChangeBucketOwner", []any{ctx, bucket, acl})
return err
}
func (p *PluginBackend) ListBucketsAndOwners(ctx context.Context) ([]s3response.Bucket, error) {
results, err := p.callPluginFunc("ListBucketsAndOwners", []any{ctx})
if err != nil {
return nil, err
}
return results[0].Interface().([]s3response.Bucket), convertError(results[1])
}
func convertError(result reflect.Value) error {
if result.IsNil() {
return nil
}
err, ok := result.Interface().(error)
if !ok {
return fmt.Errorf("expected error, got %T", result.Interface())
}
return err
}
var _ backend.Backend = &PluginBackend{}
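
All of the plugin methods above funnel through callPluginFunc, which is outside this hunk. A minimal sketch of how such a reflection-based dispatcher could look, assuming the plugin's backend value is held in a hypothetical field p.impl (obtained via plugin.Open/plugin.Lookup); this is illustrative, not the actual versitygw implementation:

// Hypothetical sketch of a reflection-based dispatcher; p.impl would hold
// the value returned by plugin.Lookup. Not the actual implementation.
func (p *PluginBackend) callPluginFunc(name string, args []any) ([]reflect.Value, error) {
	m := reflect.ValueOf(p.impl).MethodByName(name)
	if !m.IsValid() {
		return nil, fmt.Errorf("plugin backend does not implement %q", name)
	}
	in := make([]reflect.Value, len(args))
	for i, a := range args {
		// note: reflect.ValueOf(nil) is invalid, so nil arguments would
		// need special handling in a real dispatcher
		in[i] = reflect.ValueOf(a)
	}
	return m.Call(in), nil
}

Plugins themselves are compiled with go build -buildmode=plugin and loaded with plugin.Open; the dispatcher then only needs the looked-up value.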

View File

@@ -18,6 +18,7 @@ import (
"context"
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
@@ -72,11 +73,6 @@ type Posix struct {
// newDirPerm is the permission to set on newly created directories
newDirPerm fs.FileMode
// forceNoTmpFile is a flag to disable the use of O_TMPFILE even
// if the filesystem supports it. This is needed for cases where
// there are different filesystems mounted below the bucket level.
forceNoTmpFile bool
}
var _ backend.Backend = &Posix{}
@@ -93,7 +89,7 @@ const (
contentDispHdr = "content-disposition"
cacheCtrlHdr = "cache-control"
expiresHdr = "expires"
emptyMD5 = "\"d41d8cd98f00b204e9800998ecf8427e\""
emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e"
aclkey = "acl"
ownershipkey = "ownership"
etagkey = "etag"
@@ -112,23 +108,13 @@ const (
skipFalloc = false
)
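
For reference, the emptyMD5 constant above is the MD5 digest of zero bytes, i.e. the ETag of an empty object; the change only drops the surrounding quotes from the stored constant. A quick check:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := md5.Sum(nil)                     // MD5 of the empty input
	fmt.Println(hex.EncodeToString(sum[:])) // d41d8cd98f00b204e9800998ecf8427e
}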
// PosixOpts are the options for the Posix backend
type PosixOpts struct {
// ChownUID sets the UID of the object to the UID of the user on PUT
ChownUID bool
// ChownGID sets the GID of the object to the GID of the user on PUT
ChownGID bool
// BucketLinks enables symlinks to directories to be treated as buckets
BucketLinks bool
// VersioningDir sets the version directory to enable object versioning
ChownUID bool
ChownGID bool
BucketLinks bool
VersioningDir string
// NewDirPerm specifies the permission to set on newly created directories
NewDirPerm fs.FileMode
// SideCarDir sets the directory to store sidecar metadata
SideCarDir string
// ForceNoTmpFile disables the use of O_TMPFILE even if the filesystem
// supports it
ForceNoTmpFile bool
NewDirPerm fs.FileMode
SideCarDir string
}
func New(rootdir string, meta meta.MetadataStorer, opts PosixOpts) (*Posix, error) {
@@ -178,17 +164,16 @@ func New(rootdir string, meta meta.MetadataStorer, opts PosixOpts) (*Posix, erro
}
return &Posix{
meta: meta,
rootfd: f,
rootdir: rootdir,
euid: os.Geteuid(),
egid: os.Getegid(),
chownuid: opts.ChownUID,
chowngid: opts.ChownGID,
bucketlinks: opts.BucketLinks,
versioningDir: verioningdirAbs,
newDirPerm: opts.NewDirPerm,
forceNoTmpFile: opts.ForceNoTmpFile,
meta: meta,
rootfd: f,
rootdir: rootdir,
euid: os.Geteuid(),
egid: os.Getegid(),
chownuid: opts.ChownUID,
chowngid: opts.ChownGID,
bucketlinks: opts.BucketLinks,
versioningDir: verioningdirAbs,
newDirPerm: opts.NewDirPerm,
}, nil
}
@@ -299,7 +284,7 @@ func (p *Posix) ListBuckets(_ context.Context, input s3response.ListBucketsInput
continue
}
aclJSON, err := p.meta.RetrieveAttribute(nil, fi.Name(), "", aclkey)
aclTag, err := p.meta.RetrieveAttribute(nil, fi.Name(), "", aclkey)
if errors.Is(err, meta.ErrNoSuchKey) {
// skip buckets without acl tag
continue
@@ -308,9 +293,10 @@ func (p *Posix) ListBuckets(_ context.Context, input s3response.ListBucketsInput
return s3response.ListAllMyBucketsResult{}, fmt.Errorf("get acl tag: %w", err)
}
acl, err := auth.ParseACL(aclJSON)
var acl auth.ACL
err = json.Unmarshal(aclTag, &acl)
if err != nil {
return s3response.ListAllMyBucketsResult{}, err
return s3response.ListAllMyBucketsResult{}, fmt.Errorf("parse acl tag: %w", err)
}
if acl.Owner == input.Owner {
@@ -369,10 +355,9 @@ func (p *Posix) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, a
if err != nil {
return fmt.Errorf("get bucket acl: %w", err)
}
acl, err := auth.ParseACL(aclJSON)
if err != nil {
return err
var acl auth.ACL
if err := json.Unmarshal(aclJSON, &acl); err != nil {
return fmt.Errorf("unmarshal acl: %w", err)
}
if acl.Owner == acct.Access {
@@ -706,8 +691,7 @@ func (p *Posix) createObjVersion(bucket, key string, size int64, acc auth.Accoun
versionBucketPath := filepath.Join(p.versioningDir, bucket)
versioningKey := filepath.Join(genObjVersionKey(key), versionId)
versionTmpPath := filepath.Join(versionBucketPath, metaTmpDir)
f, err := p.openTmpFile(versionTmpPath, versionBucketPath, versioningKey,
size, acc, doFalloc, p.forceNoTmpFile)
f, err := p.openTmpFile(versionTmpPath, versionBucketPath, versioningKey, size, acc, doFalloc)
if err != nil {
return versionPath, err
}
@@ -821,7 +805,7 @@ func (p *Posix) isObjDeleteMarker(bucket, object string) (bool, error) {
// delete markers from the versioning directory and returns
func (p *Posix) fileToObjVersions(bucket string) backend.GetVersionsFunc {
return func(path, versionIdMarker string, pastVersionIdMarker *bool, availableObjCount int, d fs.DirEntry) (*backend.ObjVersionFuncResult, error) {
var objects []s3response.ObjectVersion
var objects []types.ObjectVersion
var delMarkers []types.DeleteMarkerEntry
// if the number of available objects is 0, return truncated response
if availableObjCount <= 0 {
@@ -856,7 +840,7 @@ func (p *Posix) fileToObjVersions(bucket string) backend.GetVersionsFunc {
size := int64(0)
versionId := "null"
objects = append(objects, s3response.ObjectVersion{
objects = append(objects, types.ObjectVersion{
ETag: &etag,
Key: &key,
LastModified: backend.GetTimePtr(fi.ModTime()),
@@ -924,7 +908,7 @@ func (p *Posix) fileToObjVersions(bucket string) backend.GetVersionsFunc {
return nil, fmt.Errorf("get checksum: %w", err)
}
objects = append(objects, s3response.ObjectVersion{
objects = append(objects, types.ObjectVersion{
ETag: &etag,
Key: &path,
LastModified: backend.GetTimePtr(fi.ModTime()),
@@ -977,7 +961,7 @@ func (p *Posix) fileToObjVersions(bucket string) backend.GetVersionsFunc {
// First find the null versionId object (if it exists)
// before starting the object versions listing
var nullVersionIdObj *s3response.ObjectVersion
var nullVersionIdObj *types.ObjectVersion
var nullObjDelMarker *types.DeleteMarkerEntry
nf, err := os.Stat(filepath.Join(versionPath, nullVersionId))
if err != nil && !errors.Is(err, fs.ErrNotExist) {
@@ -1015,7 +999,7 @@ func (p *Posix) fileToObjVersions(bucket string) backend.GetVersionsFunc {
return nil, fmt.Errorf("get checksum: %w", err)
}
nullVersionIdObj = &s3response.ObjectVersion{
nullVersionIdObj = &types.ObjectVersion{
ETag: &etag,
Key: &path,
LastModified: backend.GetTimePtr(nf.ModTime()),
@@ -1136,7 +1120,7 @@ func (p *Posix) fileToObjVersions(bucket string) backend.GetVersionsFunc {
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return nil, fmt.Errorf("get checksum: %w", err)
}
objects = append(objects, s3response.ObjectVersion{
objects = append(objects, types.ObjectVersion{
ETag: &etag,
Key: &path,
LastModified: backend.GetTimePtr(f.ModTime()),
@@ -1372,25 +1356,23 @@ func getPartChecksum(algo types.ChecksumAlgorithm, part types.CompletedPart) str
}
}
func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
acct, ok := ctx.Value("account").(auth.Account)
if !ok {
acct = auth.Account{}
}
var res s3response.CompleteMultipartUploadResult
if input.Bucket == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidBucketName)
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
if input.Key == nil {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchKey)
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if input.UploadId == nil {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchUpload)
return nil, s3err.GetAPIError(s3err.ErrNoSuchUpload)
}
if input.MultipartUpload == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidRequest)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
bucket := *input.Bucket
@@ -1400,22 +1382,22 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return res, "", fmt.Errorf("stat bucket: %w", err)
return nil, fmt.Errorf("stat bucket: %w", err)
}
sum, err := p.checkUploadIDExists(bucket, object, uploadID)
if err != nil {
return res, "", err
return nil, err
}
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
checksums, err := p.retrieveChecksums(nil, bucket, filepath.Join(objdir, uploadID))
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get mp checksums: %w", err)
return nil, fmt.Errorf("get mp checksums: %w", err)
}
var checksumAlgorithm types.ChecksumAlgorithm
if checksums.Algorithm != "" {
@@ -1429,7 +1411,7 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
checksumType = types.ChecksumType("null")
}
return res, "", s3err.GetChecksumTypeMismatchOnMpErr(checksumType)
return nil, s3err.GetChecksumTypeMismatchOnMpErr(checksumType)
}
// check all parts ok
@@ -1440,13 +1422,13 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
var partNumber int32
for i, part := range parts {
if part.PartNumber == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.PartNumber < 1 {
return res, "", s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
return nil, s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
}
if *part.PartNumber <= partNumber {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPartOrder)
return nil, s3err.GetAPIError(s3err.ErrInvalidPartOrder)
}
partNumber = *part.PartNumber
@@ -1455,14 +1437,14 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
fullPartPath := filepath.Join(bucket, partObjPath)
fi, err := os.Lstat(fullPartPath)
if err != nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
totalsize += fi.Size()
// all parts except the last need to be greater than
// the minimum allowed size (5 MiB)
if i < last && fi.Size() < backend.MinPartSize {
return res, "", s3err.GetAPIError(s3err.ErrEntityTooSmall)
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
}
b, err := p.meta.RetrieveAttribute(nil, bucket, partObjPath, etagkey)
@@ -1470,24 +1452,24 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
if err != nil {
etag = ""
}
if parts[i].ETag == nil || !backend.AreEtagsSame(etag, *parts[i].ETag) {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
if parts[i].ETag == nil || etag != *parts[i].ETag {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
partChecksum, err := p.retrieveChecksums(nil, bucket, partObjPath)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get part checksum: %w", err)
return nil, fmt.Errorf("get part checksum: %w", err)
}
// If a checksum has been provided on mp initialization
err = validatePartChecksum(partChecksum, part)
if err != nil {
return res, "", err
return nil, err
}
}
if input.MpuObjectSize != nil && totalsize != *input.MpuObjectSize {
return res, "", s3err.GetIncorrectMpObjectSizeErr(totalsize, *input.MpuObjectSize)
return nil, s3err.GetIncorrectMpObjectSizeErr(totalsize, *input.MpuObjectSize)
}
var hashRdr *utils.HashReader
@@ -1496,22 +1478,22 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
case types.ChecksumTypeFullObject:
hashRdr, err = utils.NewHashReader(nil, "", utils.HashType(strings.ToLower(string(checksumAlgorithm))))
if err != nil {
return res, "", fmt.Errorf("initialize hash reader: %w", err)
return nil, fmt.Errorf("initialize hash reader: %w", err)
}
case types.ChecksumTypeComposite:
compositeChecksumRdr, err = utils.NewCompositeChecksumReader(utils.HashType(strings.ToLower(string(checksumAlgorithm))))
if err != nil {
return res, "", fmt.Errorf("initialize composite checksum reader: %w", err)
return nil, fmt.Errorf("initialize composite checksum reader: %w", err)
}
}
f, err := p.openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object,
totalsize, acct, skipFalloc, p.forceNoTmpFile)
totalsize, acct, skipFalloc)
if err != nil {
if errors.Is(err, syscall.EDQUOT) {
return res, "", s3err.GetAPIError(s3err.ErrQuotaExceeded)
return nil, s3err.GetAPIError(s3err.ErrQuotaExceeded)
}
return res, "", fmt.Errorf("open temp file: %w", err)
return nil, fmt.Errorf("open temp file: %w", err)
}
defer f.cleanup()
@@ -1520,7 +1502,7 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
fullPartPath := filepath.Join(bucket, partObjPath)
pf, err := os.Open(fullPartPath)
if err != nil {
return res, "", fmt.Errorf("open part %v: %v", *part.PartNumber, err)
return nil, fmt.Errorf("open part %v: %v", *part.PartNumber, err)
}
var rdr io.Reader = pf
@@ -1530,7 +1512,7 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
} else if checksums.Type == types.ChecksumTypeComposite {
err := compositeChecksumRdr.Process(getPartChecksum(checksumAlgorithm, part))
if err != nil {
return res, "", fmt.Errorf("process %v part checksum: %w", *part.PartNumber, err)
return nil, fmt.Errorf("process %v part checksum: %w", *part.PartNumber, err)
}
}
@@ -1538,9 +1520,9 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
pf.Close()
if err != nil {
if errors.Is(err, syscall.EDQUOT) {
return res, "", s3err.GetAPIError(s3err.ErrQuotaExceeded)
return nil, s3err.GetAPIError(s3err.ErrQuotaExceeded)
}
return res, "", fmt.Errorf("copy part %v: %v", part.PartNumber, err)
return nil, fmt.Errorf("copy part %v: %v", part.PartNumber, err)
}
}
@@ -1550,7 +1532,7 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
objMeta := p.loadObjectMetaData(bucket, upiddir, nil, userMetaData)
err = p.storeObjectMetadata(f.File(), bucket, object, objMeta)
if err != nil {
return res, "", err
return nil, err
}
objname := filepath.Join(bucket, object)
@@ -1559,13 +1541,13 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
uid, gid, doChown := p.getChownIDs(acct)
err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
if err != nil {
return res, "", err
return nil, err
}
}
vStatus, err := p.getBucketVersioningStatus(ctx, bucket)
if err != nil {
return res, "", err
return nil, err
}
vEnabled := p.isBucketVersioningEnabled(vStatus)
@@ -1575,7 +1557,7 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
if p.versioningEnabled() && vEnabled && err == nil && !d.IsDir() {
_, err := p.createObjVersion(bucket, object, d.Size(), acct)
if err != nil {
return res, "", fmt.Errorf("create object version: %w", err)
return nil, fmt.Errorf("create object version: %w", err)
}
}
@@ -1586,38 +1568,38 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
err := p.meta.StoreAttribute(f.File(), bucket, object, versionIdKey, []byte(versionID))
if err != nil {
return res, "", fmt.Errorf("set versionId attr: %w", err)
return nil, fmt.Errorf("set versionId attr: %w", err)
}
}
for k, v := range userMetaData {
err = p.meta.StoreAttribute(f.File(), bucket, object, fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
if err != nil {
return res, "", fmt.Errorf("set user attr %q: %w", k, err)
return nil, fmt.Errorf("set user attr %q: %w", k, err)
}
}
// load and set tagging
tagging, err := p.meta.RetrieveAttribute(nil, bucket, upiddir, tagHdr)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object tagging: %w", err)
return nil, fmt.Errorf("get object tagging: %w", err)
}
if err == nil {
err := p.meta.StoreAttribute(f.File(), bucket, object, tagHdr, tagging)
if err != nil {
return res, "", fmt.Errorf("set object tagging: %w", err)
return nil, fmt.Errorf("set object tagging: %w", err)
}
}
// load and set legal hold
lHold, err := p.meta.RetrieveAttribute(nil, bucket, upiddir, objectLegalHoldKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object legal hold: %w", err)
return nil, fmt.Errorf("get object legal hold: %w", err)
}
if err == nil {
err := p.meta.StoreAttribute(f.File(), bucket, object, objectLegalHoldKey, lHold)
if err != nil {
return res, "", fmt.Errorf("set object legal hold: %w", err)
return nil, fmt.Errorf("set object legal hold: %w", err)
}
}
@@ -1645,50 +1627,50 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
switch checksumAlgorithm {
case types.ChecksumAlgorithmCrc32:
if input.ChecksumCRC32 != nil && *input.ChecksumCRC32 != sum {
return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
return nil, s3err.GetChecksumBadDigestErr(checksumAlgorithm)
}
checksum.CRC32 = &sum
crc32 = &sum
case types.ChecksumAlgorithmCrc32c:
if input.ChecksumCRC32C != nil && *input.ChecksumCRC32C != sum {
return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
return nil, s3err.GetChecksumBadDigestErr(checksumAlgorithm)
}
checksum.CRC32C = &sum
crc32c = &sum
case types.ChecksumAlgorithmSha1:
if input.ChecksumSHA1 != nil && *input.ChecksumSHA1 != sum {
return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
return nil, s3err.GetChecksumBadDigestErr(checksumAlgorithm)
}
checksum.SHA1 = &sum
sha1 = &sum
case types.ChecksumAlgorithmSha256:
if input.ChecksumSHA256 != nil && *input.ChecksumSHA256 != sum {
return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
return nil, s3err.GetChecksumBadDigestErr(checksumAlgorithm)
}
checksum.SHA256 = &sum
sha256 = &sum
case types.ChecksumAlgorithmCrc64nvme:
if input.ChecksumCRC64NVME != nil && *input.ChecksumCRC64NVME != sum {
return res, "", s3err.GetChecksumBadDigestErr(checksumAlgorithm)
return nil, s3err.GetChecksumBadDigestErr(checksumAlgorithm)
}
checksum.CRC64NVME = &sum
crc64nvme = &sum
}
err := p.storeChecksums(f.File(), bucket, object, checksum)
if err != nil {
return res, "", fmt.Errorf("store object checksum: %w", err)
return nil, fmt.Errorf("store object checksum: %w", err)
}
}
// load and set retention
ret, err := p.meta.RetrieveAttribute(nil, bucket, upiddir, objectRetentionKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object retention: %w", err)
return nil, fmt.Errorf("get object retention: %w", err)
}
if err == nil {
err := p.meta.StoreAttribute(f.File(), bucket, object, objectRetentionKey, ret)
if err != nil {
return res, "", fmt.Errorf("set object retention: %w", err)
return nil, fmt.Errorf("set object retention: %w", err)
}
}
@@ -1697,12 +1679,12 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
err = p.meta.StoreAttribute(f.File(), bucket, object, etagkey, []byte(s3MD5))
if err != nil {
return res, "", fmt.Errorf("set etag attr: %w", err)
return nil, fmt.Errorf("set etag attr: %w", err)
}
err = f.link()
if err != nil {
return res, "", fmt.Errorf("link object in namespace: %w", err)
return nil, fmt.Errorf("link object in namespace: %w", err)
}
// cleanup tmp dirs
@@ -1711,17 +1693,18 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
// for same object name outstanding, this will fail if there are
os.Remove(filepath.Join(bucket, objdir))
return s3response.CompleteMultipartUploadResult{
return &s3.CompleteMultipartUploadOutput{
Bucket: &bucket,
ETag: &s3MD5,
Key: &object,
VersionId: &versionID,
ChecksumCRC32: crc32,
ChecksumCRC32C: crc32c,
ChecksumSHA1: sha1,
ChecksumSHA256: sha256,
ChecksumCRC64NVME: crc64nvme,
ChecksumType: &checksums.Type,
}, versionID, nil
ChecksumType: checksums.Type,
}, nil
}
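
The s3MD5 value set as the object's ETag above is computed outside this hunk; S3's conventional multipart ETag is the MD5 over the concatenated raw part digests, suffixed with the part count. A sketch of that convention (helper name is illustrative):

// multipartETag sketches the conventional S3 multipart ETag:
// MD5(md5(part1) || md5(part2) || ...) plus "-<part count>".
func multipartETag(partETags []string) (string, error) {
	h := md5.New()
	for _, e := range partETags {
		raw, err := hex.DecodeString(strings.Trim(e, "\""))
		if err != nil {
			return "", err
		}
		h.Write(raw)
	}
	return fmt.Sprintf("\"%s-%d\"", hex.EncodeToString(h.Sum(nil)), len(partETags)), nil
}

Composite checksums (types.ChecksumTypeComposite above) follow the same digest-of-digests pattern over the configured algorithm instead of MD5.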
func validatePartChecksum(checksum s3response.Checksum, part types.CompletedPart) error {
@@ -2200,12 +2183,6 @@ func (p *Posix) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3resp
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return lpr, fmt.Errorf("get mp checksum: %w", err)
}
if checksum.Algorithm == "" {
checksum.Algorithm = types.ChecksumAlgorithm("null")
}
if checksum.Type == "" {
checksum.Type = types.ChecksumType("null")
}
parts := make([]s3response.Part, 0, len(ents))
for i, e := range ents {
@@ -2339,7 +2316,7 @@ func (p *Posix) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.
partPath := filepath.Join(mpPath, fmt.Sprintf("%v", *part))
f, err := p.openTmpFile(filepath.Join(bucket, objdir),
bucket, partPath, length, acct, doFalloc, p.forceNoTmpFile)
bucket, partPath, length, acct, doFalloc)
if err != nil {
if errors.Is(err, syscall.EDQUOT) {
return nil, s3err.GetAPIError(s3err.ErrQuotaExceeded)
@@ -2410,7 +2387,8 @@ func (p *Posix) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.
return nil, fmt.Errorf("write part data: %w", err)
}
etag := backend.GenerateEtag(hash)
dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum)
err = p.meta.StoreAttribute(f.File(), bucket, partPath, etagkey, []byte(etag))
if err != nil {
return nil, fmt.Errorf("set etag attr: %w", err)
@@ -2558,7 +2536,7 @@ func (p *Posix) UploadPartCopy(ctx context.Context, upi *s3.UploadPartCopyInput)
}
f, err := p.openTmpFile(filepath.Join(*upi.Bucket, objdir),
*upi.Bucket, partPath, length, acct, doFalloc, p.forceNoTmpFile)
*upi.Bucket, partPath, length, acct, doFalloc)
if err != nil {
if errors.Is(err, syscall.EDQUOT) {
return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrQuotaExceeded)
@@ -2649,7 +2627,8 @@ func (p *Posix) UploadPartCopy(ctx context.Context, upi *s3.UploadPartCopyInput)
}
}
etag := backend.GenerateEtag(hash)
dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum)
err = p.meta.StoreAttribute(f.File(), *upi.Bucket, partPath, etagkey, []byte(etag))
if err != nil {
return s3response.CopyPartResult{}, fmt.Errorf("set etag attr: %w", err)
@@ -2799,7 +2778,7 @@ func (p *Posix) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3
}
f, err := p.openTmpFile(filepath.Join(*po.Bucket, metaTmpDir),
*po.Bucket, *po.Key, contentLength, acct, doFalloc, p.forceNoTmpFile)
*po.Bucket, *po.Key, contentLength, acct, doFalloc)
if err != nil {
if errors.Is(err, syscall.EDQUOT) {
return s3response.PutObjectOutput{}, s3err.GetAPIError(s3err.ErrQuotaExceeded)
@@ -2858,7 +2837,8 @@ func (p *Posix) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3
}
}
etag := backend.GenerateEtag(hash)
dataSum := hash.Sum(nil)
etag := fmt.Sprintf("\"%v\"", hex.EncodeToString(dataSum[:]))
// if versioning is enabled, generate a new versionID for the object
var versionID string
@@ -3151,9 +3131,7 @@ func (p *Posix) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (
acct = auth.Account{}
}
f, err := p.openTmpFile(filepath.Join(bucket, metaTmpDir),
bucket, object, srcObjVersion.Size(), acct, doFalloc,
p.forceNoTmpFile)
f, err := p.openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, srcObjVersion.Size(), acct, doFalloc)
if err != nil {
return nil, fmt.Errorf("open tmp file: %w", err)
}
@@ -3367,6 +3345,9 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
if input.Key == nil {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if input.Range == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidRange)
}
var versionId string
if input.VersionId != nil {
versionId = *input.VersionId
@@ -3449,7 +3430,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetO
}
objSize := fi.Size()
startOffset, length, isValid, err := backend.ParseObjectRange(objSize, *input.Range)
startOffset, length, isValid, err := backend.ParseGetObjectRange(objSize, *input.Range)
if err != nil {
return nil, err
}
@@ -3635,34 +3616,20 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
return nil, fmt.Errorf("stat part: %w", err)
}
size := part.Size()
startOffset, length, isValid, err := backend.ParseObjectRange(size, getString(input.Range))
if err != nil {
return nil, err
}
var contentRange string
if isValid {
contentRange = fmt.Sprintf("bytes %v-%v/%v",
startOffset, startOffset+length-1, size)
}
b, err := p.meta.RetrieveAttribute(nil, bucket, partPath, etagkey)
etag := string(b)
if err != nil {
etag = ""
}
partsCount := int32(len(ents))
size := part.Size()
return &s3.HeadObjectOutput{
AcceptRanges: backend.GetPtrFromString("bytes"),
LastModified: backend.GetTimePtr(part.ModTime()),
ETag: &etag,
PartsCount: &partsCount,
ContentLength: &length,
ContentLength: &size,
StorageClass: types.StorageClassStandard,
ContentRange: &contentRange,
}, nil
}
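
backend.ParseObjectRange itself is outside this diff; a minimal sketch of the single-range parse it presumably performs over a "bytes=start-end" header, with suffix ranges ("bytes=-N") and multi-range requests omitted, and a hypothetical helper name:

// parseRange sketches single-range parsing; not the actual
// backend.ParseObjectRange.
func parseRange(size int64, rng string) (offset, length int64, valid bool, err error) {
	if rng == "" {
		return 0, size, false, nil
	}
	var start, end int64
	n, _ := fmt.Sscanf(rng, "bytes=%d-%d", &start, &end)
	if n == 0 {
		return 0, 0, false, fmt.Errorf("invalid range %q", rng)
	}
	if n == 1 {
		end = size - 1 // open-ended "bytes=start-"
	}
	if start < 0 || start >= size || end < start {
		return 0, 0, false, fmt.Errorf("unsatisfiable range %q", rng)
	}
	if end >= size {
		end = size - 1
	}
	return start, end - start + 1, true, nil
}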
@@ -3754,17 +3721,6 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
size := fi.Size()
startOffset, length, isValid, err := backend.ParseObjectRange(size, getString(input.Range))
if err != nil {
return nil, err
}
var contentRange string
if isValid {
contentRange = fmt.Sprintf("bytes %v-%v/%v",
startOffset, startOffset+length-1, size)
}
var objectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
status, err := p.GetObjectLegalHold(ctx, bucket, object, versionId)
if err == nil {
@@ -3799,9 +3755,7 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
}
return &s3.HeadObjectOutput{
ContentLength: &length,
AcceptRanges: backend.GetPtrFromString("bytes"),
ContentRange: &contentRange,
ContentLength: &size,
ContentType: objMeta.ContentType,
ContentEncoding: objMeta.ContentEncoding,
ContentDisposition: objMeta.ContentDisposition,
@@ -3861,51 +3815,51 @@ func (p *Posix) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttr
}, nil
}
func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
if input.Bucket == nil {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
if input.Key == nil {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
}
if input.CopySource == nil {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidCopySource)
return nil, s3err.GetAPIError(s3err.ErrInvalidCopySource)
}
if input.ExpectedBucketOwner == nil {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidRequest)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
srcBucket, srcObject, srcVersionId, err := backend.ParseCopySource(*input.CopySource)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
dstBucket := *input.Bucket
dstObject := *input.Key
_, err = os.Stat(srcBucket)
if errors.Is(err, fs.ErrNotExist) {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("stat bucket: %w", err)
return nil, fmt.Errorf("stat bucket: %w", err)
}
vStatus, err := p.getBucketVersioningStatus(ctx, srcBucket)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
vEnabled := p.isBucketVersioningEnabled(vStatus)
if srcVersionId != "" {
if !p.versioningEnabled() || !vEnabled {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidVersionId)
return nil, s3err.GetAPIError(s3err.ErrInvalidVersionId)
}
vId, err := p.meta.RetrieveAttribute(nil, srcBucket, srcObject, versionIdKey)
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return s3response.CopyObjectOutput{}, fmt.Errorf("get src object version id: %w", err)
return nil, fmt.Errorf("get src object version id: %w", err)
}
if string(vId) != srcVersionId {
@@ -3916,37 +3870,37 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
_, err = os.Stat(dstBucket)
if errors.Is(err, fs.ErrNotExist) {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("stat bucket: %w", err)
return nil, fmt.Errorf("stat bucket: %w", err)
}
objPath := joinPathWithTrailer(srcBucket, srcObject)
f, err := os.Open(objPath)
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
if p.versioningEnabled() && vEnabled {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNoSuchVersion)
return nil, s3err.GetAPIError(s3err.ErrNoSuchVersion)
}
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if errors.Is(err, syscall.ENAMETOOLONG) {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrKeyTooLong)
return nil, s3err.GetAPIError(s3err.ErrKeyTooLong)
}
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("open object: %w", err)
return nil, fmt.Errorf("open object: %w", err)
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("stat object: %w", err)
return nil, fmt.Errorf("stat object: %w", err)
}
if strings.HasSuffix(srcObject, "/") && !fi.IsDir() {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if !strings.HasSuffix(srcObject, "/") && fi.IsDir() {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
mdmap := make(map[string]string)
@@ -3964,7 +3918,7 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
dstObjdPath := joinPathWithTrailer(dstBucket, dstObject)
if dstObjdPath == objPath {
if input.MetadataDirective == types.MetadataDirectiveCopy {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
return &s3.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
}
// Delete the object metadata
@@ -3972,7 +3926,7 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
err := p.meta.DeleteAttribute(dstBucket, dstObject,
fmt.Sprintf("%v.%v", metaHdr, k))
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return s3response.CopyObjectOutput{}, fmt.Errorf("delete user metadata: %w", err)
return nil, fmt.Errorf("delete user metadata: %w", err)
}
}
// Store the new metadata
@@ -3980,13 +3934,13 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
err := p.meta.StoreAttribute(nil, dstBucket, dstObject,
fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("set user attr %q: %w", k, err)
return nil, fmt.Errorf("set user attr %q: %w", k, err)
}
}
checksums, err := p.retrieveChecksums(nil, dstBucket, dstObject)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return s3response.CopyObjectOutput{}, fmt.Errorf("get obj checksums: %w", err)
return nil, fmt.Errorf("get obj checksums: %w", err)
}
chType = checksums.Type
@@ -3997,18 +3951,18 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
if checksums.Algorithm != input.ChecksumAlgorithm {
f, err := os.Open(dstObjdPath)
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("open obj file: %w", err)
return nil, fmt.Errorf("open obj file: %w", err)
}
defer f.Close()
hashReader, err := utils.NewHashReader(f, "", utils.HashType(strings.ToLower(string(input.ChecksumAlgorithm))))
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("initialize hash reader: %w", err)
return nil, fmt.Errorf("initialize hash reader: %w", err)
}
_, err = hashReader.Read(nil)
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("read err: %w", err)
return nil, fmt.Errorf("read err: %w", err)
}
checksums = s3response.Checksum{}
@@ -4038,7 +3992,7 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
err = p.storeChecksums(f, dstBucket, dstObject, checksums)
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("store checksum: %w", err)
return nil, fmt.Errorf("store checksum: %w", err)
}
}
}
@@ -4047,7 +4001,7 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
etag = string(b)
vId, _ := p.meta.RetrieveAttribute(nil, dstBucket, dstObject, versionIdKey)
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
version = backend.GetPtrFromString(string(vId))
@@ -4062,18 +4016,18 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
Expires: input.Expires,
})
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
if input.TaggingDirective == types.TaggingDirectiveReplace {
tags, err := backend.ParseObjectTags(getString(input.Tagging))
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
err = p.PutObjectTagging(ctx, dstBucket, dstObject, tags)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
}
} else {
@@ -4081,7 +4035,7 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
checksums, err := p.retrieveChecksums(f, srcBucket, srcObject)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return s3response.CopyObjectOutput{}, fmt.Errorf("get obj checksum: %w", err)
return nil, fmt.Errorf("get obj checksum: %w", err)
}
// If any checksum algorithm is provided, replace, otherwise
@@ -4127,7 +4081,7 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
res, err := p.PutObject(ctx, putObjectInput)
if err != nil {
return s3response.CopyObjectOutput{}, err
return nil, err
}
// copy the source object tagging after the destination object
@@ -4135,12 +4089,12 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
if input.TaggingDirective == types.TaggingDirectiveCopy {
tagging, err := p.meta.RetrieveAttribute(nil, srcBucket, srcObject, tagHdr)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return s3response.CopyObjectOutput{}, fmt.Errorf("get source object tagging: %w", err)
return nil, fmt.Errorf("get source object tagging: %w", err)
}
if err == nil {
err := p.meta.StoreAttribute(nil, dstBucket, dstObject, tagHdr, tagging)
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("set destination object tagging: %w", err)
return nil, fmt.Errorf("set destination object tagging: %w", err)
}
}
}
@@ -4157,11 +4111,11 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
fi, err = os.Stat(dstObjdPath)
if err != nil {
return s3response.CopyObjectOutput{}, fmt.Errorf("stat dst object: %w", err)
return nil, fmt.Errorf("stat dst object: %w", err)
}
return s3response.CopyObjectOutput{
CopyObjectResult: &s3response.CopyObjectResult{
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{
ETag: &etag,
LastModified: backend.GetTimePtr(fi.ModTime()),
ChecksumCRC32: crc32,
@@ -4233,13 +4187,12 @@ func (p *Posix) fileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
// All the objects in the bucket are owned by the bucket owner
if fetchOwner {
aclJSON, err := p.meta.RetrieveAttribute(nil, bucket, "", aclkey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
if err != nil {
return s3response.Object{}, fmt.Errorf("get bucket acl: %w", err)
}
acl, err := auth.ParseACL(aclJSON)
if err != nil {
return s3response.Object{}, err
var acl auth.ACL
if err := json.Unmarshal(aclJSON, &acl); err != nil {
return s3response.Object{}, fmt.Errorf("unmarshal acl: %w", err)
}
owner = &types.Owner{
@@ -4340,7 +4293,11 @@ func (p *Posix) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input)
marker := ""
if input.ContinuationToken != nil {
if input.StartAfter != nil {
marker = max(*input.StartAfter, *input.ContinuationToken)
if *input.StartAfter > *input.ContinuationToken {
marker = *input.StartAfter
} else {
marker = *input.ContinuationToken
}
} else {
marker = *input.ContinuationToken
}
@@ -4951,14 +4908,17 @@ func (p *Posix) ListBucketsAndOwners(ctx context.Context) (buckets []s3response.
}
for _, fi := range fis {
aclJSON, err := p.meta.RetrieveAttribute(nil, fi.Name(), "", aclkey)
aclTag, err := p.meta.RetrieveAttribute(nil, fi.Name(), "", aclkey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return buckets, fmt.Errorf("get acl tag: %w", err)
}
acl, err := auth.ParseACL(aclJSON)
if err != nil {
return buckets, fmt.Errorf("parse acl tag: %w", err)
var acl auth.ACL
if len(aclTag) > 0 {
err = json.Unmarshal(aclTag, &acl)
if err != nil {
return buckets, fmt.Errorf("parse acl tag: %w", err)
}
}
buckets = append(buckets, s3response.Bucket{

View File

@@ -52,13 +52,9 @@ var (
defaultFilePerm uint32 = 0644
)
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, dofalloc bool, forceNoTmpFile bool) (*tmpfile, error) {
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, dofalloc bool) (*tmpfile, error) {
uid, gid, doChown := p.getChownIDs(acct)
if forceNoTmpFile {
return p.openMkTemp(dir, bucket, obj, size, dofalloc, uid, gid, doChown)
}
// O_TMPFILE allows for a file handle to an unnamed file in the filesystem.
// This can help reduce contention within the namespace (parent directories),
// and it will auto cleanup the inode on close if we never link this
@@ -72,7 +68,37 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
}
// O_TMPFILE not supported, try fallback
return p.openMkTemp(dir, bucket, obj, size, dofalloc, uid, gid, doChown)
err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
if err != nil {
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
return nil, err
}
tmp := &tmpfile{
f: f,
bucket: bucket,
objname: obj,
size: size,
needsChown: doChown,
uid: uid,
gid: gid,
}
// falloc is best effort; it's fine if this fails
if size > 0 && dofalloc {
tmp.falloc()
}
if doChown {
err := f.Chown(uid, gid)
if err != nil {
return nil, fmt.Errorf("set temp file ownership: %w", err)
}
}
return tmp, nil
}
// for O_TMPFILE, filename is /proc/self/fd/<fd> to be used
@@ -106,46 +132,6 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
return tmp, nil
}
func (p *Posix) openMkTemp(dir, bucket, obj string, size int64, dofalloc bool, uid, gid int, doChown bool) (*tmpfile, error) {
err := backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, err
}
tmp := &tmpfile{
f: f,
bucket: bucket,
objname: obj,
size: size,
needsChown: doChown,
uid: uid,
gid: gid,
}
// falloc is best effort; it's fine if this fails
if size > 0 && dofalloc {
tmp.falloc()
}
if doChown {
err := f.Chown(uid, gid)
if err != nil {
return nil, fmt.Errorf("set temp file ownership: %w", err)
}
}
return tmp, nil
}
func (tmp *tmpfile) falloc() error {
err := syscall.Fallocate(int(tmp.f.Fd()), 0, 0, tmp.size)
if err != nil {
@@ -236,9 +222,7 @@ func (tmp *tmpfile) fallbackLink() error {
objPath := filepath.Join(tmp.bucket, tmp.objname)
err = os.Rename(tempname, objPath)
if err != nil {
// rename only works for files within the same filesystem;
// if it fails, fall back to copy
return backend.MoveFile(tempname, objPath, fs.FileMode(defaultFilePerm))
return fmt.Errorf("rename tmpfile: %w", err)
}
return nil
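
The O_TMPFILE path described above boils down to opening an anonymous inode and later giving it a name with linkat(2) on /proc/self/fd. A Linux-only sketch using golang.org/x/sys/unix, with an illustrative helper name and the CreateTemp fallback elided:

// Linux-only sketch of the O_TMPFILE pattern; real code falls back to
// os.CreateTemp when O_TMPFILE is unsupported (e.g. EOPNOTSUPP).
func writeViaTmpfile(dir, objPath string, data []byte) error {
	fd, err := unix.Open(dir, unix.O_TMPFILE|unix.O_WRONLY, 0644)
	if err != nil {
		return err // caller would fall back to a named temp file here
	}
	defer unix.Close(fd)
	if _, err := unix.Write(fd, data); err != nil {
		return err
	}
	// Naming the inode makes the object visible; if we return early, the
	// unnamed inode is reclaimed automatically on close.
	return unix.Linkat(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd),
		unix.AT_FDCWD, objPath, unix.AT_SYMLINK_FOLLOW)
}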

View File

@@ -38,7 +38,7 @@ type tmpfile struct {
size int64
}
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, _ bool, _ bool) (*tmpfile, error) {
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, _ bool) (*tmpfile, error) {
uid, gid, doChown := p.getChownIDs(acct)
// Create a temp file for upload while in progress (see link comments below).
@@ -80,17 +80,31 @@ func (tmp *tmpfile) link() error {
// this will no longer exist
defer os.Remove(tempname)
// We use Rename as the atomic operation for object puts. The upload is
// written to a temp file so it does not conflict with any other
// simultaneous uploads. The final operation moves the temp file into
// place for the object. This ensures last-completed-upload-wins
// semantics rather than some combination of writes from simultaneous
// uploads.
objPath := filepath.Join(tmp.bucket, tmp.objname)
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
// reset default file mode because CreateTemp uses 0600
tmp.f.Chmod(defaultFilePerm)
err := tmp.f.Close()
err = tmp.f.Close()
if err != nil {
return fmt.Errorf("close tmpfile: %w", err)
}
return backend.MoveFile(tempname, objPath, defaultFilePerm)
err = os.Rename(tempname, objPath)
if err != nil {
return fmt.Errorf("rename tmpfile: %w", err)
}
return nil
}
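
backend.MoveFile referenced above is not shown here; os.Rename fails with EXDEV when source and destination sit on different filesystems, so a move helper typically falls back to copy-then-rename. A sketch under that assumption (imports io, io/fs, os, path/filepath; not the actual implementation):

// moveFile: try rename first, then copy into a temp file beside the
// destination and rename that into place.
func moveFile(src, dst string, perm fs.FileMode) error {
	if err := os.Rename(src, dst); err == nil {
		return nil
	}
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	tmp, err := os.CreateTemp(filepath.Dir(dst), ".move-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op once the final rename succeeds
	if _, err := io.Copy(tmp, in); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Rename(tmp.Name(), dst); err != nil {
		return err
	}
	return os.Remove(src)
}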
func (tmp *tmpfile) Write(b []byte) (int, error) {

View File

@@ -36,7 +36,6 @@ func (s *S3Proxy) getClientWithCtx(ctx context.Context) (*s3.Client, error) {
if s.endpoint != "" {
return s3.NewFromConfig(cfg, func(o *s3.Options) {
o.BaseEndpoint = &s.endpoint
o.UsePathStyle = s.usePathStyle
}), nil
}
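
UsePathStyle switches the client from virtual-hosted addressing (https://bucket.endpoint/key) to path-style (https://endpoint/bucket/key), which matters for custom endpoints without wildcard DNS. Enabling it on an aws-sdk-go-v2 client looks like the following, with a placeholder endpoint:

// Path-style addressing on an aws-sdk-go-v2 S3 client; cfg is an
// aws.Config loaded elsewhere, endpoint value is a placeholder.
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
	o.BaseEndpoint = aws.String("https://s3.example.internal")
	o.UsePathStyle = true
})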

View File

@@ -15,9 +15,9 @@
package s3proxy
import (
"bytes"
"context"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
@@ -25,6 +25,7 @@ import (
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
@@ -39,12 +40,7 @@ import (
"github.com/versity/versitygw/s3response"
)
type metaPrefix string
const (
metaPrefixAcl metaPrefix = "vgw-meta-acl-"
metaPrefixPolicy metaPrefix = "vgw-meta-policy-"
)
const aclKey string = "versitygwAcl"
type S3Proxy struct {
backend.BackendUnsupported
@@ -55,52 +51,29 @@ type S3Proxy struct {
secret string
endpoint string
awsRegion string
metaBucket string
disableChecksum bool
sslSkipVerify bool
usePathStyle bool
debug bool
}
var _ backend.Backend = &S3Proxy{}
func NewWithClient(ctx context.Context, client *s3.Client, metaBucket string) (*S3Proxy, error) {
s := &S3Proxy{
metaBucket: metaBucket,
}
s.client = client
if s.metaBucket != "" && !s.bucketExists(ctx, s.metaBucket) {
return nil, fmt.Errorf("the provided meta bucket doesn't exist")
}
return s, s.validate(ctx)
}
func New(ctx context.Context, access, secret, endpoint, region, metaBucket string, disableChecksum, sslSkipVerify, usePathStyle, debug bool) (*S3Proxy, error) {
func New(access, secret, endpoint, region string, disableChecksum, sslSkipVerify, debug bool) (*S3Proxy, error) {
s := &S3Proxy{
access: access,
secret: secret,
endpoint: endpoint,
awsRegion: region,
metaBucket: metaBucket,
disableChecksum: disableChecksum,
sslSkipVerify: sslSkipVerify,
usePathStyle: usePathStyle,
debug: debug,
}
client, err := s.getClientWithCtx(ctx)
client, err := s.getClientWithCtx(context.Background())
if err != nil {
return nil, err
}
s.client = client
return s, s.validate(ctx)
}
func (s *S3Proxy) validate(ctx context.Context) error {
if s.metaBucket != "" && !s.bucketExists(ctx, s.metaBucket) {
return fmt.Errorf("the provided meta bucket doesn't exist")
}
return nil
return s, nil
}
func (s *S3Proxy) ListBuckets(ctx context.Context, input s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
@@ -157,29 +130,27 @@ func (s *S3Proxy) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
if input.GrantWriteACP != nil && *input.GrantWriteACP == "" {
input.GrantWriteACP = nil
}
if *input.Bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrBucketAlreadyOwnedByYou)
}
_, err := s.client.CreateBucket(ctx, input)
if err != nil {
return handleError(err)
}
// Store bucket default acl
if s.metaBucket != "" {
err = s.putMetaBucketObj(ctx, *input.Bucket, acl, metaPrefixAcl)
if err != nil {
return handleError(err)
}
}
var tagSet []types.Tag
tagSet = append(tagSet, types.Tag{
Key: backend.GetPtrFromString(aclKey),
Value: backend.GetPtrFromString(base64Encode(acl)),
})
return nil
_, err = s.client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: input.Bucket,
Tagging: &types.Tagging{
TagSet: tagSet,
},
})
return handleError(err)
}
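
On the removed side, bucket ACLs were persisted through putMetaBucketObj/getMetaBucketObjData against a dedicated meta bucket rather than bucket tags; those helpers are not in this diff. A plausible sketch, keyed by the metaPrefix constants above (hypothetical, not the actual helper):

// Hypothetical sketch: per-bucket metadata stored as objects in the meta
// bucket, keyed as "<prefix><bucket>".
func (s *S3Proxy) putMetaBucketObj(ctx context.Context, bucket string, data []byte, prefix metaPrefix) error {
	key := string(prefix) + bucket
	_, err := s.client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: &s.metaBucket,
		Key:    &key,
		Body:   bytes.NewReader(data),
	})
	return err
}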
func (s *S3Proxy) DeleteBucket(ctx context.Context, bucket string) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
_, err := s.client.DeleteBucket(ctx, &s3.DeleteBucketInput{
Bucket: &bucket,
})
@@ -187,9 +158,6 @@ func (s *S3Proxy) DeleteBucket(ctx context.Context, bucket string) error {
}
func (s *S3Proxy) PutBucketOwnershipControls(ctx context.Context, bucket string, ownership types.ObjectOwnership) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
_, err := s.client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
Bucket: &bucket,
OwnershipControls: &types.OwnershipControls{
@@ -204,9 +172,6 @@ func (s *S3Proxy) PutBucketOwnershipControls(ctx context.Context, bucket string,
}
func (s *S3Proxy) GetBucketOwnershipControls(ctx context.Context, bucket string) (types.ObjectOwnership, error) {
if bucket == s.metaBucket {
return "", s3err.GetAPIError(s3err.ErrAccessDenied)
}
var ownship types.ObjectOwnership
resp, err := s.client.GetBucketOwnershipControls(ctx, &s3.GetBucketOwnershipControlsInput{
Bucket: &bucket,
@@ -217,9 +182,6 @@ func (s *S3Proxy) GetBucketOwnershipControls(ctx context.Context, bucket string)
return resp.OwnershipControls.Rules[0].ObjectOwnership, nil
}
func (s *S3Proxy) DeleteBucketOwnershipControls(ctx context.Context, bucket string) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
_, err := s.client.DeleteBucketOwnershipControls(ctx, &s3.DeleteBucketOwnershipControlsInput{
Bucket: &bucket,
})
@@ -227,9 +189,6 @@ func (s *S3Proxy) DeleteBucketOwnershipControls(ctx context.Context, bucket stri
}
func (s *S3Proxy) PutBucketVersioning(ctx context.Context, bucket string, status types.BucketVersioningStatus) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
_, err := s.client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
Bucket: &bucket,
VersioningConfiguration: &types.VersioningConfiguration{
@@ -241,9 +200,6 @@ func (s *S3Proxy) PutBucketVersioning(ctx context.Context, bucket string, status
}
func (s *S3Proxy) GetBucketVersioning(ctx context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
if bucket == s.metaBucket {
return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
out, err := s.client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
Bucket: &bucket,
})
@@ -255,9 +211,6 @@ func (s *S3Proxy) GetBucketVersioning(ctx context.Context, bucket string) (s3res
}
func (s *S3Proxy) ListObjectVersions(ctx context.Context, input *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.ListVersionsResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.Delimiter != nil && *input.Delimiter == "" {
input.Delimiter = nil
}
@@ -295,16 +248,13 @@ func (s *S3Proxy) ListObjectVersions(ctx context.Context, input *s3.ListObjectVe
NextVersionIdMarker: out.NextVersionIdMarker,
Prefix: out.Prefix,
VersionIdMarker: input.VersionIdMarker,
Versions: convertObjectVersions(out.Versions),
Versions: out.Versions,
}, nil
}
var defTime = time.Time{}
func (s *S3Proxy) CreateMultipartUpload(ctx context.Context, input s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.CacheControl != nil && *input.CacheControl == "" {
input.CacheControl = nil
}
@@ -415,12 +365,7 @@ func (s *S3Proxy) CreateMultipartUpload(ctx context.Context, input s3response.Cr
}, nil
}
func (s *S3Proxy) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
var res s3response.CompleteMultipartUploadResult
if *input.Bucket == s.metaBucket {
return res, "", s3err.GetAPIError(s3err.ErrAccessDenied)
}
func (s *S3Proxy) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
if input.ChecksumCRC32 != nil && *input.ChecksumCRC32 == "" {
input.ChecksumCRC32 = nil
}
@@ -458,33 +403,11 @@ func (s *S3Proxy) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
input.SSECustomerKeyMD5 = nil
}
var versionid string
out, err := s.client.CompleteMultipartUpload(ctx, input)
if out != nil {
res = s3response.CompleteMultipartUploadResult{
Location: out.Location,
Bucket: out.Bucket,
Key: out.Key,
ETag: out.ETag,
ChecksumCRC32: out.ChecksumCRC32,
ChecksumCRC32C: out.ChecksumCRC32C,
ChecksumCRC64NVME: out.ChecksumCRC64NVME,
ChecksumSHA1: out.ChecksumSHA1,
ChecksumSHA256: out.ChecksumSHA256,
ChecksumType: &out.ChecksumType,
}
if out.VersionId != nil {
versionid = *out.VersionId
}
}
return res, versionid, handleError(err)
return out, handleError(err)
}
func (s *S3Proxy) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
if *input.Bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
@@ -496,9 +419,6 @@ func (s *S3Proxy) AbortMultipartUpload(ctx context.Context, input *s3.AbortMulti
}
func (s *S3Proxy) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.ListMultipartUploadsResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.Delimiter != nil && *input.Delimiter == "" {
input.Delimiter = nil
}
@@ -567,9 +487,6 @@ func (s *S3Proxy) ListMultipartUploads(ctx context.Context, input *s3.ListMultip
}
func (s *S3Proxy) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.ListPartsResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
@@ -644,9 +561,6 @@ func (s *S3Proxy) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3re
}
func (s *S3Proxy) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
if *input.Bucket == s.metaBucket {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ChecksumCRC32 != nil && *input.ChecksumCRC32 == "" {
input.ChecksumCRC32 = nil
}
@@ -687,9 +601,6 @@ func (s *S3Proxy) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s
}
func (s *S3Proxy) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.CopySourceIfMatch != nil && *input.CopySourceIfMatch == "" {
input.CopySourceIfMatch = nil
}
@@ -747,9 +658,6 @@ func (s *S3Proxy) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyIn
}
func (s *S3Proxy) PutObject(ctx context.Context, input s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
if *input.Bucket == s.metaBucket {
return s3response.PutObjectOutput{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.CacheControl != nil && *input.CacheControl == "" {
input.CacheControl = nil
}
@@ -904,9 +812,6 @@ func (s *S3Proxy) PutObject(ctx context.Context, input s3response.PutObjectInput
}
func (s *S3Proxy) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
if *input.Bucket == s.metaBucket {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
@@ -964,9 +869,6 @@ func (s *S3Proxy) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
}
func (s *S3Proxy) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
if *input.Bucket == s.metaBucket {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
@@ -1028,9 +930,6 @@ func (s *S3Proxy) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.
}
func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
if *input.Bucket == s.metaBucket {
return s3response.GetObjectAttributesResponse{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
@@ -1089,10 +988,7 @@ func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAt
}, handleError(err)
}
func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
if *input.Bucket == s.metaBucket {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
if input.CacheControl != nil && *input.CacheControl == "" {
input.CacheControl = nil
}
@@ -1227,39 +1123,10 @@ func (s *S3Proxy) CopyObject(ctx context.Context, input s3response.CopyObjectInp
StorageClass: input.StorageClass,
TaggingDirective: input.TaggingDirective,
})
if err != nil {
return s3response.CopyObjectOutput{}, handleError(err)
}
if out.CopyObjectResult == nil {
out.CopyObjectResult = &types.CopyObjectResult{}
}
return s3response.CopyObjectOutput{
BucketKeyEnabled: out.BucketKeyEnabled,
CopyObjectResult: &s3response.CopyObjectResult{
ChecksumCRC32: out.CopyObjectResult.ChecksumCRC32,
ChecksumCRC32C: out.CopyObjectResult.ChecksumCRC32C,
ChecksumCRC64NVME: out.CopyObjectResult.ChecksumCRC64NVME,
ChecksumSHA1: out.CopyObjectResult.ChecksumSHA1,
ChecksumSHA256: out.CopyObjectResult.ChecksumSHA256,
ChecksumType: out.CopyObjectResult.ChecksumType,
ETag: out.CopyObjectResult.ETag,
LastModified: out.CopyObjectResult.LastModified,
},
CopySourceVersionId: out.CopySourceVersionId,
Expiration: out.Expiration,
SSECustomerAlgorithm: out.SSECustomerAlgorithm,
SSECustomerKeyMD5: out.SSECustomerKeyMD5,
SSEKMSEncryptionContext: out.SSEKMSEncryptionContext,
SSEKMSKeyId: out.SSEKMSKeyId,
ServerSideEncryption: out.ServerSideEncryption,
VersionId: out.VersionId,
}, handleError(err)
return out, handleError(err)
}
func (s *S3Proxy) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.Delimiter != nil && *input.Delimiter == "" {
input.Delimiter = nil
}
@@ -1297,9 +1164,6 @@ func (s *S3Proxy) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (
}
func (s *S3Proxy) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
if *input.Bucket == s.metaBucket {
return s3response.ListObjectsV2Result{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ContinuationToken != nil && *input.ContinuationToken == "" {
input.ContinuationToken = nil
}
@@ -1341,9 +1205,6 @@ func (s *S3Proxy) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Inpu
}
func (s *S3Proxy) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
if *input.Bucket == s.metaBucket {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
@@ -1368,9 +1229,6 @@ func (s *S3Proxy) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput)
}
func (s *S3Proxy) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
if *input.Bucket == s.metaBucket {
return s3response.DeleteResult{}, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
@@ -1394,22 +1252,77 @@ func (s *S3Proxy) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInpu
}
func (s *S3Proxy) GetBucketAcl(ctx context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
data, err := s.getMetaBucketObjData(ctx, *input.Bucket, metaPrefixAcl)
if input.ExpectedBucketOwner != nil && *input.ExpectedBucketOwner == "" {
input.ExpectedBucketOwner = nil
}
tagout, err := s.client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
Bucket: input.Bucket,
})
if err != nil {
var ae smithy.APIError
if errors.As(err, &ae) {
// sdk issue workaround for missing NoSuchTagSet error type
// https://github.com/aws/aws-sdk-go-v2/issues/2878
if strings.Contains(ae.ErrorCode(), "NoSuchTagSet") {
return []byte{}, nil
}
if strings.Contains(ae.ErrorCode(), "NotImplemented") {
return []byte{}, nil
}
}
return nil, handleError(err)
}
return data, nil
for _, tag := range tagout.TagSet {
if *tag.Key == aclKey {
acl, err := base64Decode(*tag.Value)
if err != nil {
return nil, handleError(err)
}
return acl, nil
}
}
return []byte{}, nil
}
func (s *S3Proxy) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
return handleError(s.putMetaBucketObj(ctx, bucket, data, metaPrefixAcl))
tagout, err := s.client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
Bucket: &bucket,
})
if err != nil {
return handleError(err)
}
var found bool
for i, tag := range tagout.TagSet {
if *tag.Key == aclKey {
tagout.TagSet[i] = types.Tag{
Key: backend.GetPtrFromString(aclKey),
Value: backend.GetPtrFromString(base64Encode(data)),
}
found = true
break
}
}
if !found {
tagout.TagSet = append(tagout.TagSet, types.Tag{
Key: backend.GetPtrFromString(aclKey),
Value: backend.GetPtrFromString(base64Encode(data)),
})
}
_, err = s.client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
Bucket: &bucket,
Tagging: &types.Tagging{
TagSet: tagout.TagSet,
},
})
return handleError(err)
}
func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
tagging := &types.Tagging{
TagSet: []types.Tag{},
}
@@ -1429,9 +1342,6 @@ func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, t
}
func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
if bucket == s.metaBucket {
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
output, err := s.client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
Bucket: &bucket,
Key: &object,
@@ -1449,9 +1359,6 @@ func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (
}
func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
if bucket == s.metaBucket {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
_, err := s.client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
Bucket: &bucket,
Key: &object,
@@ -1460,29 +1367,34 @@ func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object string
}
func (s *S3Proxy) PutBucketPolicy(ctx context.Context, bucket string, policy []byte) error {
return handleError(s.putMetaBucketObj(ctx, bucket, policy, metaPrefixPolicy))
_, err := s.client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
Bucket: &bucket,
Policy: backend.GetPtrFromString(string(policy)),
})
return handleError(err)
}
func (s *S3Proxy) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, error) {
data, err := s.getMetaBucketObjData(ctx, bucket, metaPrefixPolicy)
policy, err := s.client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
Bucket: &bucket,
})
if err != nil {
return nil, handleError(err)
}
return data, nil
result := []byte{}
if policy.Policy != nil {
result = []byte(*policy.Policy)
}
return result, nil
}
func (s *S3Proxy) DeleteBucketPolicy(ctx context.Context, bucket string) error {
key := getMetaKey(bucket, metaPrefixPolicy)
_, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: &s.metaBucket,
Key: &key,
_, err := s.client.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{
Bucket: &bucket,
})
if err != nil && !areErrSame(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
return handleError(err)
}
return nil
return handleError(err)
}
func (s *S3Proxy) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
@@ -1511,11 +1423,10 @@ func (s *S3Proxy) GetObjectLegalHold(ctx context.Context, bucket, object, versio
}
func (s *S3Proxy) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
acll, err := auth.ParseACL(acl)
if err != nil {
return err
var acll auth.ACL
if err := json.Unmarshal(acl, &acll); err != nil {
return fmt.Errorf("unmarshal acl: %w", err)
}
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/change-bucket-owner/?bucket=%v&owner=%v", s.endpoint, bucket, acll.Owner), nil)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
@@ -1591,95 +1502,6 @@ func (s *S3Proxy) ListBucketsAndOwners(ctx context.Context) ([]s3response.Bucket
return buckets, nil
}
func (s *S3Proxy) bucketExists(ctx context.Context, bucket string) bool {
_, err := s.client.HeadBucket(ctx, &s3.HeadBucketInput{
Bucket: &bucket,
})
return err == nil
}
func (s *S3Proxy) putMetaBucketObj(ctx context.Context, bucket string, data []byte, prefix metaPrefix) error {
// if the meta bucket is not provided, return a successful response
if s.metaBucket == "" {
return nil
}
key := getMetaKey(bucket, prefix)
// store the provided bucket acl/policy as an object in the meta bucket
_, err := s.client.PutObject(ctx, &s3.PutObjectInput{
Bucket: &s.metaBucket,
Key: &key,
Body: bytes.NewReader(data),
})
return err
}
func (s *S3Proxy) getMetaBucketObjData(ctx context.Context, bucket string, prefix metaPrefix) ([]byte, error) {
// return the default get bucket policy/acl behavior if the meta bucket is not provided
if s.metaBucket == "" {
switch prefix {
case metaPrefixAcl:
return []byte{}, nil
case metaPrefixPolicy:
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)
}
}
key := getMetaKey(bucket, prefix)
// get meta bucket object
res, err := s.client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &s.metaBucket,
Key: &key,
})
if areErrSame(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
switch prefix {
case metaPrefixAcl:
// If bucket acl is not found, return default acl
return []byte{}, nil
case metaPrefixPolicy:
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)
}
}
if err != nil {
return nil, err
}
data, err := io.ReadAll(res.Body)
if err != nil {
return nil, fmt.Errorf("read meta object data: %w", err)
}
return data, nil
}
// Checks if the provided err is a smithy.APIError
// and if its error code and message match the provided apiErr
func areErrSame(err error, apiErr s3err.APIError) bool {
if err == nil {
return false
}
var ae smithy.APIError
if errors.As(err, &ae) {
if ae.ErrorCode() != apiErr.Code {
return false
}
// 404 errors are not well serialized by aws-sdk-go-v2
if ae.ErrorCode() != "NoSuchKey" && ae.ErrorMessage() != apiErr.Description {
return false
}
return true
}
return false
}
// generates the meta object key from the bucket name and meta prefix
func getMetaKey(bucket string, prefix metaPrefix) string {
return string(prefix) + bucket
}
func handleError(err error) error {
if err == nil {
return nil
@@ -1700,6 +1522,18 @@ func handleError(err error) error {
return err
}
func base64Encode(input []byte) string {
return base64.StdEncoding.EncodeToString(input)
}
func base64Decode(encoded string) ([]byte, error) {
decoded, err := base64.StdEncoding.DecodeString(encoded)
if err != nil {
return nil, err
}
return decoded, nil
}
func convertObjects(objs []types.Object) []s3response.Object {
result := make([]s3response.Object, 0, len(objs))
@@ -1719,24 +1553,3 @@ func convertObjects(objs []types.Object) []s3response.Object {
return result
}
func convertObjectVersions(versions []types.ObjectVersion) []s3response.ObjectVersion {
result := make([]s3response.ObjectVersion, 0, len(versions))
for _, v := range versions {
result = append(result, s3response.ObjectVersion{
ChecksumAlgorithm: v.ChecksumAlgorithm,
ChecksumType: v.ChecksumType,
ETag: v.ETag,
IsLatest: v.IsLatest,
Key: v.Key,
LastModified: v.LastModified,
Owner: v.Owner,
RestoreStatus: v.RestoreStatus,
Size: v.Size,
StorageClass: v.StorageClass,
VersionId: v.VersionId,
})
}
return result
}
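A side note on the tag-based ACL storage in GetBucketAcl/PutBucketAcl above: the ACL bytes round-trip through base64 inside a bucket tag value. Below is a minimal standalone sketch of that round-trip; the "Acl" tag key is a stand-in for whatever aclKey is defined as elsewhere in this package. Note that S3 limits tag values to 256 characters, which bounds the size of ACL this scheme can carry.

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	acl := []byte(`{"owner":"admin"}`) // hypothetical serialized ACL
	key := "Acl"                       // stand-in for aclKey
	val := base64.StdEncoding.EncodeToString(acl)
	tag := types.Tag{Key: &key, Value: &val}

	// decoding mirrors the lookup loop in GetBucketAcl
	decoded, err := base64.StdEncoding.DecodeString(*tag.Value)
	if err != nil {
		panic(err)
	}
	fmt.Printf("tag %s carries: %s\n", *tag.Key, decoded)
}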

View File

@@ -193,25 +193,23 @@ func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s
// CompleteMultipartUpload scoutfs complete upload uses scoutfs move blocks
// ioctl to not have to read and copy the part data to the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
acct, ok := ctx.Value("account").(auth.Account)
if !ok {
acct = auth.Account{}
}
var res s3response.CompleteMultipartUploadResult
if input.Bucket == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidBucketName)
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
if input.Key == nil {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchKey)
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if input.UploadId == nil {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchUpload)
return nil, s3err.GetAPIError(s3err.ErrNoSuchUpload)
}
if input.MultipartUpload == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidRequest)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
bucket := *input.Bucket
@@ -221,22 +219,22 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return res, "", fmt.Errorf("stat bucket: %w", err)
return nil, fmt.Errorf("stat bucket: %w", err)
}
sum, err := s.checkUploadIDExists(bucket, object, uploadID)
if err != nil {
return res, "", err
return nil, err
}
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
checksums, err := s.retrieveChecksums(nil, bucket, filepath.Join(objdir, uploadID))
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get mp checksums: %w", err)
return nil, fmt.Errorf("get mp checksums: %w", err)
}
// ChecksumType should be the same as specified on CreateMultipartUpload
@@ -246,7 +244,7 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
checksumType = types.ChecksumType("null")
}
return res, "", s3err.GetChecksumTypeMismatchOnMpErr(checksumType)
return nil, s3err.GetChecksumTypeMismatchOnMpErr(checksumType)
}
// check all parts ok
@@ -257,13 +255,13 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
var partNumber int32
for i, part := range parts {
if part.PartNumber == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.PartNumber < 1 {
return res, "", s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
return nil, s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
}
if *part.PartNumber <= partNumber {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPartOrder)
return nil, s3err.GetAPIError(s3err.ErrInvalidPartOrder)
}
partNumber = *part.PartNumber
@@ -272,14 +270,14 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
fullPartPath := filepath.Join(bucket, partObjPath)
fi, err := os.Lstat(fullPartPath)
if err != nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
totalsize += fi.Size()
// all parts except the last need to be greater than
// the minimum allowed size (5 MiB)
if i < last && fi.Size() < backend.MinPartSize {
return res, "", s3err.GetAPIError(s3err.ErrEntityTooSmall)
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
}
b, err := s.meta.RetrieveAttribute(nil, bucket, partObjPath, etagkey)
@@ -287,24 +285,24 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
if err != nil {
etag = ""
}
if parts[i].ETag == nil || !backend.AreEtagsSame(etag, *parts[i].ETag) {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
if parts[i].ETag == nil || etag != *parts[i].ETag {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
partChecksum, err := s.retrieveChecksums(nil, bucket, partObjPath)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get part checksum: %w", err)
return nil, fmt.Errorf("get part checksum: %w", err)
}
// If a checksum has been provided on mp initialization
err = validatePartChecksum(partChecksum, part)
if err != nil {
return res, "", err
return nil, err
}
}
if input.MpuObjectSize != nil && totalsize != *input.MpuObjectSize {
return res, "", s3err.GetIncorrectMpObjectSizeErr(totalsize, *input.MpuObjectSize)
return nil, s3err.GetIncorrectMpObjectSizeErr(totalsize, *input.MpuObjectSize)
}
// use totalsize=0 because we won't be writing to the file, only moving
@@ -312,22 +310,22 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
f, err := s.openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, 0, acct)
if err != nil {
if errors.Is(err, syscall.EDQUOT) {
return res, "", s3err.GetAPIError(s3err.ErrQuotaExceeded)
return nil, s3err.GetAPIError(s3err.ErrQuotaExceeded)
}
return res, "", fmt.Errorf("open temp file: %w", err)
return nil, fmt.Errorf("open temp file: %w", err)
}
defer f.cleanup()
for _, part := range parts {
if part.PartNumber == nil || *part.PartNumber < 1 {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
fullPartPath := filepath.Join(bucket, partObjPath)
pf, err := os.Open(fullPartPath)
if err != nil {
return res, "", fmt.Errorf("open part %v: %v", *part.PartNumber, err)
return nil, fmt.Errorf("open part %v: %v", *part.PartNumber, err)
}
// scoutfs move data is a metadata-only operation that moves the data
@@ -336,7 +334,7 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
err = moveData(pf, f.File())
pf.Close()
if err != nil {
return res, "", fmt.Errorf("move blocks part %v: %v", *part.PartNumber, err)
return nil, fmt.Errorf("move blocks part %v: %v", *part.PartNumber, err)
}
}
@@ -345,7 +343,7 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
objMeta := s.loadUserMetaData(bucket, upiddir, userMetaData)
err = s.storeObjectMetadata(f.File(), bucket, object, objMeta)
if err != nil {
return res, "", err
return nil, err
}
objname := filepath.Join(bucket, object)
@@ -354,50 +352,50 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
uid, gid, doChown := s.getChownIDs(acct)
err = backend.MkdirAll(dir, uid, gid, doChown, s.newDirPerm)
if err != nil {
return res, "", err
return nil, err
}
}
for k, v := range userMetaData {
err = s.meta.StoreAttribute(f.File(), bucket, object, fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
if err != nil {
return res, "", fmt.Errorf("set user attr %q: %w", k, err)
return nil, fmt.Errorf("set user attr %q: %w", k, err)
}
}
// load and set tagging
tagging, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, tagHdr)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object tagging: %w", err)
return nil, fmt.Errorf("get object tagging: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, tagHdr, tagging)
if err != nil {
return res, "", fmt.Errorf("set object tagging: %w", err)
return nil, fmt.Errorf("set object tagging: %w", err)
}
}
// load and set legal hold
lHold, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, objectLegalHoldKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object legal hold: %w", err)
return nil, fmt.Errorf("get object legal hold: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, objectLegalHoldKey, lHold)
if err != nil {
return res, "", fmt.Errorf("set object legal hold: %w", err)
return nil, fmt.Errorf("set object legal hold: %w", err)
}
}
// load and set retention
ret, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, objectRetentionKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object retention: %w", err)
return nil, fmt.Errorf("get object retention: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, objectRetentionKey, ret)
if err != nil {
return res, "", fmt.Errorf("set object retention: %w", err)
return nil, fmt.Errorf("set object retention: %w", err)
}
}
@@ -406,12 +404,12 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
err = s.meta.StoreAttribute(f.File(), bucket, object, etagkey, []byte(s3MD5))
if err != nil {
return res, "", fmt.Errorf("set etag attr: %w", err)
return nil, fmt.Errorf("set etag attr: %w", err)
}
err = f.link()
if err != nil {
return res, "", fmt.Errorf("link object in namespace: %w", err)
return nil, fmt.Errorf("link object in namespace: %w", err)
}
// cleanup tmp dirs
@@ -420,11 +418,11 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
// for same object name outstanding
os.Remove(filepath.Join(bucket, objdir))
return s3response.CompleteMultipartUploadResult{
return &s3.CompleteMultipartUploadOutput{
Bucket: &bucket,
ETag: &s3MD5,
Key: &object,
}, "", nil
}, nil
}
func (s *ScoutFS) storeObjectMetadata(f *os.File, bucket, object string, m objectMetadata) error {
@@ -769,13 +767,13 @@ func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (
return s3response.ListObjectsResult{
CommonPrefixes: results.CommonPrefixes,
Contents: results.Objects,
Delimiter: backend.GetPtrFromString(delim),
Marker: backend.GetPtrFromString(marker),
NextMarker: backend.GetPtrFromString(results.NextMarker),
Prefix: backend.GetPtrFromString(prefix),
Delimiter: &delim,
IsTruncated: &results.Truncated,
Marker: &marker,
MaxKeys: &maxkeys,
Name: &bucket,
NextMarker: &results.NextMarker,
Prefix: &prefix,
}, nil
}
@@ -790,11 +788,7 @@ func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Inpu
}
marker := ""
if input.ContinuationToken != nil {
if input.StartAfter != nil {
marker = max(*input.StartAfter, *input.ContinuationToken)
} else {
marker = *input.ContinuationToken
}
marker = *input.ContinuationToken
}
delim := ""
if input.Delimiter != nil {
@@ -820,20 +814,16 @@ func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Inpu
return s3response.ListObjectsV2Result{}, fmt.Errorf("walk %v: %w", bucket, err)
}
count := int32(len(results.Objects))
return s3response.ListObjectsV2Result{
CommonPrefixes: results.CommonPrefixes,
Contents: results.Objects,
Delimiter: &delim,
IsTruncated: &results.Truncated,
ContinuationToken: &marker,
MaxKeys: &maxkeys,
Name: &bucket,
KeyCount: &count,
Delimiter: backend.GetPtrFromString(delim),
ContinuationToken: backend.GetPtrFromString(marker),
NextContinuationToken: backend.GetPtrFromString(results.NextMarker),
Prefix: backend.GetPtrFromString(prefix),
StartAfter: backend.GetPtrFromString(*input.StartAfter),
NextContinuationToken: &results.NextMarker,
Prefix: &prefix,
}, nil
}

View File

@@ -23,7 +23,6 @@ import (
"os"
"path/filepath"
"strconv"
"syscall"
"golang.org/x/sys/unix"
@@ -32,7 +31,6 @@ import (
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
"github.com/versity/versitygw/s3err"
)
func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
@@ -92,9 +90,6 @@ func (s *ScoutFS) openTmpFile(dir, bucket, obj string, size int64, acct auth.Acc
// file descriptor into the namespace.
fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, err
}
@@ -155,20 +150,10 @@ func (tmp *tmpfile) link() error {
}
defer dirf.Close()
for {
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if errors.Is(err, fs.ErrExist) {
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
continue
}
if err != nil {
return fmt.Errorf("link tmpfile: %w", err)
}
break
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if err != nil {
return fmt.Errorf("link tmpfile: %w", err)
}
err = tmp.f.Close()
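For context on the Linkat call above: the gateway writes object data into an anonymous O_TMPFILE inode and only gives it a name once the upload completes, by linking the file descriptor through the /proc/self/fd magic symlink. A minimal standalone sketch of the same Linux-only pattern (paths and names are illustrative):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// tmpfileLink writes data to an invisible O_TMPFILE inode in dir, then
// atomically links it into the namespace as dir/name.
func tmpfileLink(dir, name string, data []byte) error {
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0644)
	if err != nil {
		return err
	}
	defer unix.Close(fd)
	if _, err := unix.Write(fd, data); err != nil {
		return err
	}
	// AT_SYMLINK_FOLLOW makes linkat follow the /proc magic symlink to
	// the unnamed inode, giving it a visible name in one step.
	return unix.Linkat(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd),
		unix.AT_FDCWD, dir+"/"+name, unix.AT_SYMLINK_FOLLOW)
}

func main() {
	if err := tmpfileLink("/tmp", "demo.txt", []byte("hello\n")); err != nil {
		panic(err)
	}
}

If dir/name already exists, Linkat fails with EEXIST, which is what the retry loop removed above handled by unlinking the stale path and trying again.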

View File

@@ -19,6 +19,7 @@ import (
"errors"
"fmt"
"io/fs"
"sort"
"strings"
"syscall"
@@ -37,38 +38,10 @@ type GetObjFunc func(path string, d fs.DirEntry) (s3response.Object, error)
var ErrSkipObj = errors.New("skip this object")
// map to store object common prefixes
type cpMap map[string]int
func (c cpMap) Add(key string) {
_, ok := c[key]
if !ok {
c[key] = len(c)
}
}
// Len returns the length of the map
func (c cpMap) Len() int {
return len(c)
}
// CpArray converts the map into a []types.CommonPrefix array in insertion order
func (c cpMap) CpArray() []types.CommonPrefix {
commonPrefixes := make([]types.CommonPrefix, c.Len())
for cp, i := range c {
pfx := cp
commonPrefixes[i] = types.CommonPrefix{
Prefix: &pfx,
}
}
return commonPrefixes
}
// Walk walks the supplied fs.FS and returns results compatible with list
// objects responses
func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
cpmap := cpMap{}
cpmap := make(map[string]struct{})
var objects []s3response.Object
// if max is 0, it should return an empty, non-truncated result
@@ -147,7 +120,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return fs.SkipAll
}
objects = append(objects, dirobj)
if (len(objects) + cpmap.Len()) == int(max) {
if (len(objects) + len(cpmap)) == int(max) {
newMarker = path
pastMax = true
}
@@ -201,7 +174,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
objects = append(objects, obj)
if (len(objects) + cpmap.Len()) == int(max) {
if (len(objects) + len(cpmap)) == int(max) {
newMarker = path
pastMax = true
}
@@ -245,7 +218,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return fs.SkipAll
}
objects = append(objects, obj)
if (len(objects) + cpmap.Len()) == int(max) {
if (len(objects) + len(cpmap)) == int(max) {
newMarker = path
pastMax = true
}
@@ -271,8 +244,8 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
truncated = true
return fs.SkipAll
}
cpmap.Add(cpref)
if (len(objects) + cpmap.Len()) == int(max) {
cpmap[cpref] = struct{}{}
if (len(objects) + len(cpmap)) == int(max) {
newMarker = cpref
pastMax = true
}
@@ -287,12 +260,25 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return WalkResults{}, err
}
var commonPrefixStrings []string
for k := range cpmap {
commonPrefixStrings = append(commonPrefixStrings, k)
}
sort.Strings(commonPrefixStrings)
commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
for _, cp := range commonPrefixStrings {
pfx := cp
commonPrefixes = append(commonPrefixes, types.CommonPrefix{
Prefix: &pfx,
})
}
if !truncated {
newMarker = ""
}
return WalkResults{
CommonPrefixes: cpmap.CpArray(),
CommonPrefixes: commonPrefixes,
Objects: objects,
Truncated: truncated,
NextMarker: newMarker,
@@ -310,7 +296,7 @@ func contains(a string, strs []string) bool {
type WalkVersioningResults struct {
CommonPrefixes []types.CommonPrefix
ObjectVersions []s3response.ObjectVersion
ObjectVersions []types.ObjectVersion
DelMarkers []types.DeleteMarkerEntry
Truncated bool
NextMarker string
@@ -318,7 +304,7 @@ type WalkVersioningResults struct {
}
type ObjVersionFuncResult struct {
ObjectVersions []s3response.ObjectVersion
ObjectVersions []types.ObjectVersion
DelMarkers []types.DeleteMarkerEntry
NextVersionIdMarker string
Truncated bool
@@ -329,8 +315,8 @@ type GetVersionsFunc func(path, versionIdMarker string, pastVersionIdMarker *boo
// WalkVersions walks the supplied fs.FS and returns results compatible with
// ListObjectVersions action response
func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyMarker, versionIdMarker string, max int, getObj GetVersionsFunc, skipdirs []string) (WalkVersioningResults, error) {
cpmap := cpMap{}
var objects []s3response.ObjectVersion
cpmap := make(map[string]struct{})
var objects []types.ObjectVersion
var delMarkers []types.DeleteMarkerEntry
var pastMarker bool
@@ -385,11 +371,11 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
if delimiter == "/" &&
prefix != path+"/" &&
strings.HasPrefix(path+"/", prefix) {
cpmap.Add(path + "/")
cpmap[path+"/"] = struct{}{}
return fs.SkipDir
}
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
if err == ErrSkipObj {
return nil
}
@@ -416,7 +402,7 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
if delimiter == "" {
// If no delimiter specified, then all files with matching
// prefix are included in results
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
if err == ErrSkipObj {
return nil
}
@@ -459,7 +445,7 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
suffix := strings.TrimPrefix(path, prefix)
before, _, found := strings.Cut(suffix, delimiter)
if !found {
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
if err == ErrSkipObj {
return nil
}
@@ -481,8 +467,8 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
// Common prefixes are a set, so should not have duplicates.
// These are abstractly a "directory", so need to include the
// delimiter at the end.
cpmap.Add(prefix + before + delimiter)
if (len(objects) + cpmap.Len()) == int(max) {
cpmap[prefix+before+delimiter] = struct{}{}
if (len(objects) + len(cpmap)) == int(max) {
nextMarker = path
truncated = true
@@ -495,8 +481,21 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
return WalkVersioningResults{}, err
}
var commonPrefixStrings []string
for k := range cpmap {
commonPrefixStrings = append(commonPrefixStrings, k)
}
sort.Strings(commonPrefixStrings)
commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
for _, cp := range commonPrefixStrings {
pfx := cp
commonPrefixes = append(commonPrefixes, types.CommonPrefix{
Prefix: &pfx,
})
}
return WalkVersioningResults{
CommonPrefixes: cpmap.CpArray(),
CommonPrefixes: commonPrefixes,
ObjectVersions: objects,
DelMarkers: delMarkers,
Truncated: truncated,
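To make the Walk/WalkVersions pagination contract concrete, here is a rough sketch of a caller inside this package (the bucket path and the getObj body are illustrative; s3response.Object's Key is assumed to be a *string, matching its use elsewhere in the gateway, and context, os, and io/fs imports are assumed alongside the package's existing ones):

// sketch: fetch one page of objects under photos/ with a "/" delimiter.
// When the returned Truncated is true, feed NextMarker back in as marker
// to resume the listing where this page stopped.
func listPage(ctx context.Context, marker string) (WalkResults, error) {
	return Walk(ctx, os.DirFS("/mnt/gwroot/mybucket"), "photos/", "/", marker, 1000,
		func(path string, d fs.DirEntry) (s3response.Object, error) {
			if d.IsDir() {
				// directories surface only as common prefixes
				return s3response.Object{}, ErrSkipObj
			}
			key := path
			return s3response.Object{Key: &key}, nil
		}, nil)
}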

View File

@@ -100,11 +100,6 @@ func adminCommand() *cli.Command {
Usage: "secret access key for the new user",
Aliases: []string{"s"},
},
&cli.StringFlag{
Name: "role",
Usage: "the new user role",
Aliases: []string{"r"},
},
&cli.IntFlag{
Name: "user-id",
Usage: "userID for the new user",
@@ -316,14 +311,8 @@ func deleteUser(ctx *cli.Context) error {
}
func updateUser(ctx *cli.Context) error {
access, secret, userId, groupId, role := ctx.String("access"), ctx.String("secret"), ctx.Int("user-id"), ctx.Int("group-id"), auth.Role(ctx.String("role"))
access, secret, userId, groupId := ctx.String("access"), ctx.String("secret"), ctx.Int("user-id"), ctx.Int("group-id")
props := auth.MutableProps{}
if ctx.IsSet("role") {
if !role.IsValid() {
return fmt.Errorf("invalid user role: %v", role)
}
props.Role = role
}
if ctx.IsSet("secret") {
props.Secret = &secret
}

View File

@@ -50,7 +50,6 @@ var (
logWebhookURL, accessLog string
adminLogFile string
healthPath string
virtualDomain string
debug bool
pprof string
quiet bool
@@ -228,13 +227,6 @@ func initFlags() []cli.Flag {
Destination: &quiet,
Aliases: []string{"q"},
},
&cli.StringFlag{
Name: "virtual-domain",
Usage: "enables the virtual host style bucket addressing with the specified arg as the base domain",
EnvVars: []string{"VGW_VIRTUAL_DOMAIN"},
Destination: &virtualDomain,
Aliases: []string{"vd"},
},
&cli.StringFlag{
Name: "access-log",
Usage: "enable server access logging to specified file",
@@ -533,19 +525,19 @@ func initFlags() []cli.Flag {
},
&cli.StringFlag{
Name: "ipa-user",
Usage: "Username used to connect to FreeIPA (requires permissions to read user vault contents)",
Usage: "Username used to connect to FreeIPA. Needs permissions to read user vault contents",
EnvVars: []string{"VGW_IPA_USER"},
Destination: &ipaUser,
},
&cli.StringFlag{
Name: "ipa-password",
Usage: "Password of the user used to connect to FreeIPA",
Usage: "Password of the user used to connect to FreeIPA.",
EnvVars: []string{"VGW_IPA_PASSWORD"},
Destination: &ipaPassword,
},
&cli.BoolFlag{
Name: "ipa-insecure",
Usage: "Disable verify TLS certificate of FreeIPA server",
Usage: "Verify TLS certificate of FreeIPA server. Default is 'true'.",
EnvVars: []string{"VGW_IPA_INSECURE"},
Destination: &ipaInsecure,
},
@@ -611,9 +603,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
if readonly {
opts = append(opts, s3api.WithReadOnly())
}
if virtualDomain != "" {
opts = append(opts, s3api.WithHostStyle(virtualDomain))
}
admApp := fiber.New(fiber.Config{
AppName: "versitygw",

View File

@@ -15,60 +15,50 @@
package main
import (
"errors"
"fmt"
"plugin"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/plugins"
vgwplugin "github.com/versity/versitygw/backend/plugin"
)
var (
pluginPath string
pluginConfig string
)
func pluginCommand() *cli.Command {
return &cli.Command{
Name: "plugin",
Usage: "load a backend from a plugin",
Description: "Runs a s3 gateway and redirects the requests to the backend defined in the plugin",
Action: runPluginBackend,
Usage: "plugin storage backend",
Description: `This tells the gateway to load the backend from a dynamic runtime plugin.`,
Action: runPlugin,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "config",
Usage: "location of the config file",
Aliases: []string{"c"},
Name: "file",
Usage: "path to plugin shared object file",
Value: "",
Required: true,
EnvVars: []string{"VGW_PLUGIN_FILE"},
Destination: &pluginPath,
Aliases: []string{"f"},
},
&cli.StringFlag{
Name: "config",
Usage: "configuration option for the plugin",
Value: "",
Required: true,
EnvVars: []string{"VGW_PLUGIN_CONFIG"},
Destination: &pluginConfig,
Aliases: []string{"c"},
},
},
}
}
func runPluginBackend(ctx *cli.Context) error {
if ctx.NArg() == 0 {
return fmt.Errorf("no plugin file provided to be loaded")
}
pluginPath := ctx.Args().Get(0)
config := ctx.String("config")
p, err := plugin.Open(pluginPath)
func runPlugin(ctx *cli.Context) error {
be, err := vgwplugin.NewPluginBackend(pluginPath, pluginConfig)
if err != nil {
return err
return fmt.Errorf("init plugin backend: %w", err)
}
backendSymbol, err := p.Lookup("Backend")
if err != nil {
return err
}
backendPluginPtr, ok := backendSymbol.(*plugins.BackendPlugin)
if !ok {
return errors.New("plugin is not of type *plugins.BackendPlugin")
}
if backendPluginPtr == nil {
return errors.New("variable Backend is nil")
}
be, err := (*backendPluginPtr).New(config)
if err != nil {
return err
}
return runGateway(ctx.Context, be)
}
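Since the lookup code above resolves a symbol named Backend of type *plugins.BackendPlugin and then calls New(config) on it, a plugin shared object would export a matching package-level variable. A minimal sketch follows, assuming plugins.BackendPlugin is an interface with New(config string) (backend.Backend, error) as the code above implies; the myPlugin type and its New body are placeholders, and the .so must be built with go build -buildmode=plugin using the same Go toolchain and dependency versions as the gateway:

// sketch of a backend plugin's main package; Backend is the symbol the
// gateway resolves via plugin.Lookup("Backend").
package main

import (
	"errors"

	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/plugins"
)

type myPlugin struct{}

// New receives the config flag value and returns the backend implementation.
func (myPlugin) New(config string) (backend.Backend, error) {
	// placeholder: construct and return a real backend.Backend here
	return nil, errors.New("not implemented")
}

var Backend plugins.BackendPlugin = myPlugin{}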

View File

@@ -31,7 +31,6 @@ var (
dirPerms uint
sidecar string
nometa bool
forceNoTmpFile bool
)
func posixCommand() *cli.Command {
@@ -94,12 +93,6 @@ will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
EnvVars: []string{"VGW_META_NONE"},
Destination: &nometa,
},
&cli.BoolFlag{
Name: "disableotmp",
Usage: "disable O_TMPFILE support for new objects",
EnvVars: []string{"VGW_DISABLE_OTMP"},
Destination: &forceNoTmpFile,
},
},
}
}
@@ -120,12 +113,11 @@ func runPosix(ctx *cli.Context) error {
}
opts := posix.PosixOpts{
ChownUID: chownuid,
ChownGID: chowngid,
BucketLinks: bucketlinks,
VersioningDir: versioningDir,
NewDirPerm: fs.FileMode(dirPerms),
ForceNoTmpFile: forceNoTmpFile,
ChownUID: chownuid,
ChownGID: chowngid,
BucketLinks: bucketlinks,
VersioningDir: versioningDir,
NewDirPerm: fs.FileMode(dirPerms),
}
var ms meta.MetadataStorer

View File

@@ -26,10 +26,8 @@ var (
s3proxySecret string
s3proxyEndpoint string
s3proxyRegion string
s3proxyMetaBucket string
s3proxyDisableChecksum bool
s3proxySslSkipVerify bool
s3proxyUsePathStyle bool
s3proxyDebug bool
)
@@ -73,12 +71,6 @@ to an s3 storage backend service.`,
EnvVars: []string{"VGW_S3_REGION"},
Destination: &s3proxyRegion,
},
&cli.StringFlag{
Name: "meta-bucket",
Usage: "s3 service meta bucket to store buckets acl/policy",
EnvVars: []string{"VGW_S3_META_BUCKET"},
Destination: &s3proxyMetaBucket,
},
&cli.BoolFlag{
Name: "disable-checksum",
Usage: "disable gateway to server object checksums",
@@ -93,13 +85,6 @@ to an s3 storage backend service.`,
Value: false,
Destination: &s3proxySslSkipVerify,
},
&cli.BoolFlag{
Name: "use-path-style",
Usage: "use path style addressing for s3 proxy",
EnvVars: []string{"VGW_S3_USE_PATH_STYLE"},
Value: false,
Destination: &s3proxyUsePathStyle,
},
&cli.BoolFlag{
Name: "debug",
Usage: "output extra debug tracing",
@@ -112,8 +97,8 @@ to an s3 storage backend service.`,
}
func runS3(ctx *cli.Context) error {
be, err := s3proxy.New(ctx.Context, s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
s3proxyMetaBucket, s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyUsePathStyle, s3proxyDebug)
be, err := s3proxy.New(s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyDebug)
if err != nil {
return fmt.Errorf("init s3 backend: %w", err)
}

View File

@@ -34,7 +34,7 @@ var (
totalReqs int
upload bool
download bool
hostStyle bool
pathStyle bool
checksumDisable bool
versioningEnabled bool
azureTests bool
@@ -74,12 +74,6 @@ func initTestFlags() []cli.Flag {
Destination: &endpoint,
Aliases: []string{"e"},
},
&cli.BoolFlag{
Name: "host-style",
Usage: "Use host-style bucket addressing",
Value: false,
Destination: &hostStyle,
},
&cli.BoolFlag{
Name: "debug",
Usage: "enable debug mode",
@@ -130,11 +124,6 @@ func initTestCommands() []*cli.Command {
},
},
},
{
Name: "scoutfs",
Usage: "Tests scoutfs full flow",
Action: getAction(integration.TestScoutfs),
},
{
Name: "iam",
Usage: "Tests iam service",
@@ -197,6 +186,12 @@ func initTestCommands() []*cli.Command {
Value: 1,
Destination: &concurrency,
},
&cli.BoolFlag{
Name: "pathStyle",
Usage: "Use Pathstyle bucket addressing",
Value: false,
Destination: &pathStyle,
},
&cli.BoolFlag{
Name: "checksumDis",
Usage: "Disable server checksum",
@@ -228,8 +223,8 @@ func initTestCommands() []*cli.Command {
if debug {
opts = append(opts, integration.WithDebug())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
if pathStyle {
opts = append(opts, integration.WithPathStyle())
}
if checksumDisable {
opts = append(opts, integration.WithDisableChecksum())
@@ -292,9 +287,6 @@ func initTestCommands() []*cli.Command {
if checksumDisable {
opts = append(opts, integration.WithDisableChecksum())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
}
s3conf := integration.NewS3Conf(opts...)
@@ -324,9 +316,6 @@ func getAction(tf testFunc) func(*cli.Context) error {
if azureTests {
opts = append(opts, integration.WithAzureMode())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
}
s := integration.NewS3Conf(opts...)
tf(s)
@@ -362,9 +351,6 @@ func extractIntTests() (commands []*cli.Command) {
if versioningEnabled {
opts = append(opts, integration.WithVersioningEnabled())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
}
s := integration.NewS3Conf(opts...)
err := testFunc(s)

View File

@@ -99,26 +99,6 @@ ROOT_SECRET_ACCESS_KEY=
# endpoint is unauthenticated, and returns a 200 status for GET.
#VGW_HEALTH=
# Enable VGW_READ_ONLY to only allow read operations to the S3 server. No write
# operations will be allowed.
#VGW_READ_ONLY=false
# The VGW_VIRTUAL_DOMAIN option enables the virtual host style bucket
# addressing. The path style addressing is the default, and remains enabled
# even when virtual host style is enabled. The VGW_VIRTUAL_DOMAIN option
# specifies the domain name that will be used for the virtual host style
# addressing. For virtual addressing, access to a bucket is in the request
# form:
# https://<bucket>.<VGW_VIRTUAL_DOMAIN>/
# for example: https://mybucket.example.com/ where
# VGW_VIRTUAL_DOMAIN=example.com
# and all subdomains of VGW_VIRTUAL_DOMAIN should be reserved for buckets.
# This means that virtual host addressing will generally require a DNS
# entry for each bucket that needs to be accessed.
# The default path style request is of the form:
# https://<VGW_ENDPOINT>/<bucket>
#VGW_VIRTUAL_DOMAIN=
###############
# Access Logs #
###############
@@ -260,24 +240,6 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_IAM_LDAP_USER_ID_ATR=
#VGW_IAM_LDAP_GROUP_ID_ATR=
# The FreeIPA options will enable the FreeIPA IAM service with accounts stored
# in an external FreeIPA service. Currently the FreeIPA IAM service only
# supports account retrieval. Creating and modifying accounts must be done
# outside of the versitygw service.
# FreeIPA server url e.g. https://ipa.example.test
#VGW_IPA_HOST=
# The name of the user vault containing the user secret
#VGW_IPA_VAULT_NAME=
# Username used to connect to FreeIPA (requires permissions to read user vault
# contents)
#VGW_IPA_USER=
# Password of the user used to connect to FreeIPA
#VGW_IPA_PASSWORD=
# Disable verify TLS certificate of FreeIPA server
#VGW_IPA_INSECURE=false
# FreeIPA IAM debug output
#VGW_IPA_DEBUG=false
###############
# IAM caching #
###############
@@ -355,40 +317,6 @@ ROOT_SECRET_ACCESS_KEY=
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755
# To enable object versions, the VGW_VERSIONING_DIR option must be set to the
# directory that will be used to store the object versions. The version
# directory must NOT be a subdirectory of the VGW_BACKEND_ARG directory.
#VGW_VERSIONING_DIR=
# The gateway uses xattrs to store metadata for objects by default. For systems
# that do not support xattrs, the VGW_META_SIDECAR option can be set to a
# directory that will be used to store the metadata for objects. This is
# currently experimental, and may have issues for some edge cases.
#VGW_META_SIDECAR=
# The VGW_META_NONE option will disable the metadata functionality for the
# gateway. This will cause the gateway to not store any metadata for objects
# or buckets. This includes bucket ACLs and Policy. This may be useful for
# read-only access to pre-existing data where the gateway should not modify
# the data. It is recommended to enable VGW_READ_ONLY (Global Options) along
# with this.
#VGW_META_NONE=false
# The gateway will use O_TMPFILE for writing objects while uploading and
# link the file to the final object name when the upload is complete if the
# filesystem supports O_TMPFILE. This creates an atomic object creation
# that is not visible to other clients or racing uploads until the upload
# is complete. This will not work if there is a different filesystem mounted
# below the bucket level than where the bucket resides. The VGW_DISABLE_OTMP
# option can be set to true to disable this functionality and force the fallback
# mode when O_TMPFILE is not available. This fallback will create a temporary
# file in the bucket directory and rename it to the final object name when
# the upload is complete if the final location is in the same filesystem, or
# copy the file to the final location if the final location is in a different
# filesystem. This fallback mode is still atomic, but may be less efficient
# than O_TMPFILE when the data needs to be copied into the final location.
#VGW_DISABLE_OTMP=false
###########
# scoutfs #
###########

46
go.mod
View File

@@ -6,31 +6,31 @@ toolchain go1.24.1
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
github.com/DataDog/datadog-go/v5 v5.6.0
github.com/aws/aws-sdk-go-v2 v1.36.3
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.4
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.1
github.com/aws/smithy-go v1.22.3
github.com/go-ldap/ldap/v3 v3.4.11
github.com/gofiber/fiber/v2 v2.52.8
github.com/go-ldap/ldap/v3 v3.4.10
github.com/gofiber/fiber/v2 v2.52.6
github.com/google/go-cmp v0.7.0
github.com/google/uuid v1.6.0
github.com/hashicorp/vault-client-go v0.4.3
github.com/nats-io/nats.go v1.42.0
github.com/oklog/ulid/v2 v2.1.1
github.com/nats-io/nats.go v1.41.0
github.com/oklog/ulid/v2 v2.1.0
github.com/pkg/xattr v0.4.10
github.com/segmentio/kafka-go v0.4.48
github.com/segmentio/kafka-go v0.4.47
github.com/smira/go-statsd v1.3.4
github.com/urfave/cli/v2 v2.27.6
github.com/valyala/fasthttp v1.62.0
github.com/valyala/fasthttp v1.60.0
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
golang.org/x/sync v0.14.0
golang.org/x/sys v0.33.0
golang.org/x/sync v0.13.0
golang.org/x/sys v0.32.0
)
require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
@@ -38,8 +38,8 @@ require (
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.18 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
@@ -47,31 +47,31 @@ require (
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/nats-io/nkeys v0.4.11 // indirect
github.com/nats-io/nkeys v0.4.10 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/crypto v0.37.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/time v0.11.0 // indirect
)
require (
github.com/andybalholm/brotli v1.1.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.14
github.com/aws/aws-sdk-go-v2/credentials v1.17.67
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.76
github.com/aws/aws-sdk-go-v2/config v1.29.13
github.com/aws/aws-sdk-go-v2/credentials v1.17.66
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.71
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect

142
go.sum
View File

@@ -1,15 +1,15 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 h1:j8BorDEigD8UFOSZQiSqAMOOleyQOOQPnUAwV+Ls1gA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0 h1:Bg8m3nq/X1DeePkAbCfb6ml6F3F0IunEhE8TMh+lY48=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
@@ -29,14 +29,14 @@ github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38y
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM=
github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g=
github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM=
github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ=
github.com/aws/aws-sdk-go-v2/config v1.29.13 h1:RgdPqWoE8nPpIekpVpDJsBckbqT4Liiaq9f35pbTh1Y=
github.com/aws/aws-sdk-go-v2/config v1.29.13/go.mod h1:NI28qs/IOUIRhsR7GQ/JdexoqRN9tDxkIrYZq0SOF44=
github.com/aws/aws-sdk-go-v2/credentials v1.17.66 h1:aKpEKaTy6n4CEJeYI1MNj97oSDLi4xro3UzQfwf5RWE=
github.com/aws/aws-sdk-go-v2/credentials v1.17.66/go.mod h1:xQ5SusDmHb/fy55wU0QqTy0yNfLqxzec59YcsRZB+rI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.76 h1:TZEAZHyLeRbSvETr20mAoJDUPhIMuFZ9ZwjkftWongU=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.76/go.mod h1:7h7z0FVKk7IYXuIZ8bWI58Afwc3kPMHqVIdczGgU3wc=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.71 h1:s43gLuY+zGmtpx+KybfFP4IckopmTfDOPdlf/L++N5I=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.71/go.mod h1:KH6wWmY3O3c/jVAjHk0MGzVAFDxkOSt42Eoe4ZO4ge0=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
@@ -47,26 +47,26 @@ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcu
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 h1:BCG7DCXEXpNCcpwCxg1oi9pkJWH2+eZzTn9MY56MbVw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.4 h1:4yxno6bNHkekkfqG/a1nz/gC2gBwhJSojV1+oTE7K+4=
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.4/go.mod h1:qbn305Je/IofWBJ4bJz/Q7pDEtnnoInw/dGt71v6rHE=
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.1 h1:2Ku1xwAohSSXHR1tpAnyVDSQSxoDMA+/NZBytW+f4qg=
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.1/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.18 h1:xz7WvTMfSStb9Y8NpCT82FXLNC3QasqBfuAFHY4Pk5g=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.18/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -74,19 +74,22 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU=
github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM=
github.com/gofiber/fiber/v2 v2.52.8 h1:xl4jJQ0BV5EJTA2aWiKw/VddRpHrKeZLF0QPUxqn0x4=
github.com/gofiber/fiber/v2 v2.52.8/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU=
github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY=
github.com/gofiber/fiber/v2 v2.52.6 h1:Rfp+ILPiYSvvVuIPvxrBns+HJp8qGLDnLJawAu27XVI=
github.com/gofiber/fiber/v2 v2.52.6/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
@@ -97,6 +100,7 @@ github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5O
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/vault-client-go v0.4.3 h1:zG7STGVgn/VK6rnZc0k8PGbfv2x/sJExRKHSUg3ljWc=
@@ -113,8 +117,8 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
@@ -128,14 +132,14 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.42.0 h1:ynIMupIOvf/ZWH/b2qda6WGKGNSjwOUutTpWRvAmhaM=
github.com/nats-io/nats.go v1.42.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko=
github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo=
github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
@@ -147,8 +151,8 @@ github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -156,8 +160,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/segmentio/kafka-go v0.4.48 h1:9jyu9CWK4W5W+SroCe8EffbrRZVqAOkuaLd/ApID4Vs=
github.com/segmentio/kafka-go v0.4.48/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smira/go-statsd v1.3.4 h1:kBYWcLSGT+qC6JVbvfz48kX7mQys32fjDOPrfmsSx2c=
github.com/smira/go-statsd v1.3.4/go.mod h1:RjdsESPgDODtg1VpVVf9MJrEW2Hw0wtRNbmB1CAhu6A=
@@ -166,6 +170,7 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
@@ -175,8 +180,8 @@ github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g=
github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.62.0 h1:8dKRBX/y2rCzyc6903Zu1+3qN0H/d2MsxPPmVNamiH0=
github.com/valyala/fasthttp v1.62.0/go.mod h1:FCINgr4GKdKqV8Q0xv8b+UxPV+H/O5nNFo3D+r54Htg=
github.com/valyala/fasthttp v1.60.0 h1:kBRYS0lOhVJ6V+bYN8PqAHELKHtXqwq9zNMLKx1MBsw=
github.com/valyala/fasthttp v1.60.0/go.mod h1:iY4kDgV3Gc6EqhRZ8icqcmlG6bqhcDXfuHgTO4FXCvc=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44 h1:Wx1o3pNrCzsHIIDyZ2MLRr6tF/1FhAr7HNDn80QqDWE=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
@@ -194,28 +199,46 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -231,14 +254,23 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -246,8 +278,11 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -255,10 +290,13 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -1,35 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package plugins
import "github.com/versity/versitygw/backend"
// BackendPlugin defines an interface for creating backend
// implementation instances.
// Plugins implementing this interface can be built as shared
// libraries using Go's plugin system (built with `go build -buildmode=plugin`).
// The shared library should export an instance of
// this interface in a variable named `Backend`.
type BackendPlugin interface {
// New creates and initializes a new backend.Backend instance.
// The config parameter specifies the path of the file containing
// the configuration for the backend.
//
// Implementations of this method should perform the necessary steps to
// establish a connection to the underlying storage system or service
// (e.g., network storage system, distributed storage system, cloud storage)
// and configure it according to the provided configuration.
New(config string) (backend.Backend, error)
}
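
For illustration, a minimal plugin satisfying this contract might look like the sketch below. The import path github.com/versity/versitygw/plugins and the backend.BackendUnsupported embedding helper (assumed here to provide not-implemented defaults for the full Backend interface) are assumptions for the example, not guarantees from this file.

// example.go, built with: go build -buildmode=plugin -o example.so
package main

import (
	"github.com/versity/versitygw/backend"
)

// exampleBackend is a placeholder backend. Embedding
// backend.BackendUnsupported (assumed to return not-implemented errors
// for every operation) gives a compiling baseline that a real plugin
// would override method by method.
type exampleBackend struct {
	backend.BackendUnsupported
}

// ExamplePlugin implements the BackendPlugin interface above.
type ExamplePlugin struct{}

func (ExamplePlugin) New(config string) (backend.Backend, error) {
	// config is the path to the backend configuration file; a real
	// implementation would parse it and connect to its storage here.
	return &exampleBackend{}, nil
}

// Backend is the exported symbol the gateway looks up in the shared object.
var Backend ExamplePlugin

On the loading side, a hedged sketch of a consumer living in this plugins package (plugin.Open and Lookup are standard library calls; the "Backend" symbol name is the contract documented above; "plugin" and "fmt" imports are assumed):

func loadBackendPlugin(path, config string) (backend.Backend, error) {
	p, err := plugin.Open(path)
	if err != nil {
		return nil, err
	}
	// Lookup returns a pointer to the exported variable; *ExamplePlugin
	// still satisfies the interface because New has a value receiver.
	sym, err := p.Lookup("Backend")
	if err != nil {
		return nil, err
	}
	bp, ok := sym.(BackendPlugin)
	if !ok {
		return nil, fmt.Errorf("plugin symbol Backend does not implement BackendPlugin")
	}
	return bp.New(config)
}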

View File

@@ -100,16 +100,7 @@ func (c AdminController) UpdateUser(ctx *fiber.Ctx) error {
})
}
err := props.Validate()
if err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminInvalidUserRole),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
})
}
err = c.iam.UpdateUserAccount(access, props)
err := c.iam.UpdateUserAccount(access, props)
if err != nil {
if strings.Contains(err.Error(), "user not found") {
err = s3err.GetAPIError(s3err.ErrAdminUserNotFound)

View File

@@ -29,10 +29,10 @@ var _ backend.Backend = &BackendMock{}
// ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, acl []byte) error {
// panic("mock out the ChangeBucketOwner method")
// },
// CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
// CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
// panic("mock out the CompleteMultipartUpload method")
// },
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
// panic("mock out the CopyObject method")
// },
// CreateBucketFunc: func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error {
@@ -199,10 +199,10 @@ type BackendMock struct {
ChangeBucketOwnerFunc func(contextMoqParam context.Context, bucket string, acl []byte) error
// CompleteMultipartUploadFunc mocks the CompleteMultipartUpload method.
CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error)
CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
// CopyObjectFunc mocks the CopyObject method.
CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error)
CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error)
// CreateBucketFunc mocks the CreateBucket method.
CreateBucketFunc func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error
@@ -904,7 +904,7 @@ func (mock *BackendMock) ChangeBucketOwnerCalls() []struct {
}
// CompleteMultipartUpload calls CompleteMultipartUploadFunc.
func (mock *BackendMock) CompleteMultipartUpload(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
func (mock *BackendMock) CompleteMultipartUpload(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
if mock.CompleteMultipartUploadFunc == nil {
panic("BackendMock.CompleteMultipartUploadFunc: method is nil but Backend.CompleteMultipartUpload was just called")
}
@@ -940,7 +940,7 @@ func (mock *BackendMock) CompleteMultipartUploadCalls() []struct {
}
// CopyObject calls CopyObjectFunc.
func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
if mock.CopyObjectFunc == nil {
panic("BackendMock.CopyObjectFunc: method is nil but Backend.CopyObject was just called")
}

View File

@@ -20,7 +20,7 @@ import (
"errors"
"fmt"
"io"
"math"
"log"
"net/http"
"net/url"
"strconv"
@@ -33,7 +33,6 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3event"
@@ -58,10 +57,6 @@ const (
)
func New(be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender, mm *metrics.Manager, debug bool, readonly bool) S3ApiController {
if debug {
debuglogger.SetDebugEnabled()
}
return S3ApiController{
be: be,
iam: iam,
@@ -84,7 +79,7 @@ func (c S3ApiController) ListBuckets(ctx *fiber.Ctx) error {
maxBucketsParsed, err := strconv.ParseInt(maxBucketsStr, 10, 32)
if err != nil || maxBucketsParsed < 0 || maxBucketsParsed > 10000 {
if c.debug {
debuglogger.Logf("error parsing max-buckets %q: %v", maxBucketsStr, err)
log.Printf("error parsing max-buckets %q: %v\n", maxBucketsStr, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxBuckets),
&MetaOpts{
@@ -258,7 +253,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
n, err := strconv.Atoi(partNumberMarker)
if err != nil || n < 0 {
if err != nil && c.debug {
debuglogger.Logf("error parsing part number marker %q: %v",
log.Printf("error parsing part number marker %q: %v",
partNumberMarker, err)
}
return SendResponse(ctx,
@@ -275,7 +270,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
maxParts, err := utils.ParseUint(mxParts)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max parts %q: %v",
log.Printf("error parsing max parts %q: %v",
mxParts, err)
}
return SendResponse(ctx,
@@ -381,10 +376,6 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
partNumberMarker := ctx.Get("X-Amz-Part-Number-Marker")
maxPartsParsed, err := utils.ParseUint(maxParts)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max parts %q: %v",
maxParts, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxParts),
&MetaOpts{
Logger: c.logger,
@@ -395,6 +386,9 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
}
attrs, err := utils.ParseObjectAttributes(ctx)
if err != nil {
if c.debug {
log.Printf("error parsing object attributes: %v", err)
}
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,
@@ -498,7 +492,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
checksumMode := types.ChecksumMode(ctx.Get("x-amz-checksum-mode"))
if checksumMode != "" && checksumMode != types.ChecksumModeEnabled {
if c.debug {
debuglogger.Logf("invalid x-amz-checksum-mode header value: %v", checksumMode)
log.Printf("invalid x-amz-checksum-mode header value: %v\n", checksumMode)
}
return SendResponse(ctx, s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-mode"),
&MetaOpts{
@@ -509,7 +503,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
})
}
ctx.Locals("skip-res-body-log", true)
ctx.Locals("logResBody", false)
res, err := c.be.GetObject(ctx.Context(), &s3.GetObjectInput{
Bucket: &bucket,
Key: &key,
@@ -676,17 +670,6 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
// -1 will stream response body until EOF if content length not set
contentLen := -1
if res.ContentLength != nil {
if *res.ContentLength > int64(math.MaxInt) {
debuglogger.Logf("content length %v int overflow",
*res.ContentLength)
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRange),
&MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
Action: metrics.ActionGetObject,
BucketOwner: parsedAcl.Owner,
})
}
contentLen = int(*res.ContentLength)
}
utils.StreamResponseBody(ctx, res.Body, contentLen)
@@ -941,7 +924,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
maxkeys, err := utils.ParseUint(maxkeysStr)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max keys %q: %v",
log.Printf("error parsing max keys %q: %v",
maxkeysStr, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxKeys),
@@ -1074,7 +1057,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
maxUploads, err := utils.ParseUint(maxUploadsStr)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max uploads %q: %v",
log.Printf("error parsing max uploads %q: %v",
maxUploadsStr, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxUploads),
@@ -1125,7 +1108,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
maxkeys, err := utils.ParseUint(maxkeysStr)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max keys %q: %v",
log.Printf("error parsing max keys %q: %v",
maxkeysStr, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxKeys),
@@ -1179,7 +1162,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
maxkeys, err := utils.ParseUint(maxkeysStr)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing max keys %q: %v",
log.Printf("error parsing max keys %q: %v",
maxkeysStr, err)
}
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidMaxKeys),
@@ -1227,9 +1210,13 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
if ctx.Request().URI().QueryArgs().Has("tagging") {
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
tagging, err := utils.ParseTagging(ctx.Body(), utils.TagLimitBucket)
var bucketTagging s3response.TaggingInput
err := xml.Unmarshal(ctx.Body(), &bucketTagging)
if err != nil {
return SendResponse(ctx, err,
if c.debug {
log.Printf("error unmarshalling bucket tagging: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
&MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
@@ -1238,6 +1225,21 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
})
}
tags := make(map[string]string, len(bucketTagging.TagSet.Tags))
for _, tag := range bucketTagging.TagSet.Tags {
if len(tag.Key) > 128 || len(tag.Value) > 256 {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidTag),
&MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
Action: metrics.ActionPutBucketTagging,
BucketOwner: parsedAcl.Owner,
})
}
tags[tag.Key] = tag.Value
}
err = auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
Readonly: c.readonly,
Acl: parsedAcl,
@@ -1257,7 +1259,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
})
}
err = c.be.PutBucketTagging(ctx.Context(), bucket, tagging)
err = c.be.PutBucketTagging(ctx.Context(), bucket, tags)
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.logger,
@@ -1272,9 +1274,6 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
var ownershipControls s3response.OwnershipControls
if err := xml.Unmarshal(ctx.Body(), &ownershipControls); err != nil {
if c.debug {
debuglogger.Logf("failed to unmarshal request body: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.logger,
@@ -1284,12 +1283,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
})
}
rulesCount := len(ownershipControls.Rules)
isValidOwnership := utils.IsValidOwnership(ownershipControls.Rules[0].ObjectOwnership)
if rulesCount != 1 || !isValidOwnership {
if c.debug && rulesCount != 1 {
debuglogger.Logf("ownership control rules should be 1, got %v", rulesCount)
}
if len(ownershipControls.Rules) != 1 || !utils.IsValidOwnership(ownershipControls.Rules[0].ObjectOwnership) {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.logger,
@@ -1352,7 +1346,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
err = xml.Unmarshal(ctx.Body(), &versioningConf)
if err != nil {
if c.debug {
debuglogger.Logf("error unmarshalling versioning configuration: %v",
log.Printf("error unmarshalling versioning configuration: %v",
err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
@@ -1367,7 +1361,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
if versioningConf.Status != types.BucketVersioningStatusEnabled &&
versioningConf.Status != types.BucketVersioningStatusSuspended {
if c.debug {
debuglogger.Logf("invalid versioning configuration status: %v", versioningConf.Status)
log.Printf("invalid versioning configuration status: %v\n", versioningConf.Status)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
@@ -1524,7 +1518,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
}
if ownership == types.ObjectOwnershipBucketOwnerEnforced {
if c.debug {
debuglogger.Logf("bucket acls are disabled")
log.Println("bucket acls are disabled")
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAclNotSupported),
&MetaOpts{
@@ -1560,7 +1554,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &accessControlPolicy)
if err != nil {
if c.debug {
debuglogger.Logf("error unmarshalling access control policy: %v", err)
log.Printf("error unmarshalling access control policy: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedACL),
&MetaOpts{
@@ -1573,7 +1567,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
err = accessControlPolicy.Validate()
if err != nil {
if c.debug {
debuglogger.Logf("invalid access control policy: %v", err)
log.Printf("invalid access control policy: %v\n", err)
}
return SendResponse(ctx, err,
&MetaOpts{
@@ -1585,7 +1579,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
if *accessControlPolicy.Owner.ID != parsedAcl.Owner {
if c.debug {
debuglogger.Logf("invalid access control policy owner id: %v, expected %v", *accessControlPolicy.Owner.ID, parsedAcl.Owner)
log.Printf("invalid access control policy owner id: %v, expected %v", *accessControlPolicy.Owner.ID, parsedAcl.Owner)
}
return SendResponse(ctx, s3err.APIError{
Code: "InvalidArgument",
@@ -1601,7 +1595,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
if grants+acl != "" {
if c.debug {
debuglogger.Logf("invalid request: %q (grants) %q (acl)",
log.Printf("invalid request: %q (grants) %q (acl)",
grants, acl)
}
return SendResponse(ctx,
@@ -1621,7 +1615,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
} else if acl != "" {
if acl != "private" && acl != "public-read" && acl != "public-read-write" {
if c.debug {
debuglogger.Logf("invalid acl: %q", acl)
log.Printf("invalid acl: %q", acl)
}
return SendResponse(ctx,
s3err.GetAPIError(s3err.ErrInvalidRequest),
@@ -1634,7 +1628,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
}
if grants != "" {
if c.debug {
debuglogger.Logf("invalid request: %q (grants) %q (acl)",
log.Printf("invalid request: %q (grants) %q (acl)",
grants, acl)
}
return SendResponse(ctx,
@@ -1662,7 +1656,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
}
} else {
if c.debug {
debuglogger.Logf("none of the bucket acl options has been specified: canned, req headers, req body")
log.Println("none of the bucket acl options has been specified: canned, req headers, req body")
}
return SendResponse(ctx,
s3err.GetAPIError(s3err.ErrMissingSecurityHeader),
@@ -1704,6 +1698,9 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
})
}
if ok := utils.IsValidOwnership(objectOwnership); !ok {
if c.debug {
log.Printf("invalid bucket object ownership: %v", objectOwnership)
}
return SendResponse(ctx, s3err.APIError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Invalid x-amz-object-ownership header: %v", objectOwnership),
@@ -1719,7 +1716,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
if acl+grants != "" && objectOwnership == types.ObjectOwnershipBucketOwnerEnforced {
if c.debug {
debuglogger.Logf("bucket acls are disabled for %v object ownership", objectOwnership)
log.Printf("bucket acls are disabled for %v object ownership", objectOwnership)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidBucketAclWithObjectOwnership),
&MetaOpts{
@@ -1732,7 +1729,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
if acl != "" && grants != "" {
if c.debug {
debuglogger.Logf("invalid request: %q (grants) %q (acl)", grants, acl)
log.Printf("invalid request: %q (grants) %q (acl)", grants, acl)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrBothCannedAndHeaderGrants),
&MetaOpts{
@@ -1760,9 +1757,6 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
ACL: types.BucketCannedACL(acl),
}, defACL, c.iam, acct.Role == auth.RoleAdmin)
if err != nil {
if c.debug {
debuglogger.Logf("failed to update bucket acl: %v", err)
}
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.logger,
@@ -1854,9 +1848,13 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}
if ctx.Request().URI().QueryArgs().Has("tagging") {
tagging, err := utils.ParseTagging(ctx.Body(), utils.TagLimitObject)
var objTagging s3response.TaggingInput
err := xml.Unmarshal(ctx.Body(), &objTagging)
if err != nil {
return SendResponse(ctx, err,
if c.debug {
log.Printf("error unmarshalling object tagging: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
&MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
@@ -1865,6 +1863,25 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}
tags := make(map[string]string, len(objTagging.TagSet.Tags))
for _, tag := range objTagging.TagSet.Tags {
if len(tag.Key) > 128 || len(tag.Value) > 256 {
if c.debug {
log.Printf("invalid tag key/value len: %q %q",
tag.Key, tag.Value)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidTag),
&MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
Action: metrics.ActionPutObjectTagging,
BucketOwner: parsedAcl.Owner,
})
}
tags[tag.Key] = tag.Value
}
err = auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
Readonly: c.readonly,
Acl: parsedAcl,
@@ -1885,7 +1902,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
})
}
err = c.be.PutObjectTagging(ctx.Context(), bucket, keyStart, tagging)
err = c.be.PutObjectTagging(ctx.Context(), bucket, keyStart, tags)
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.logger,
@@ -1932,9 +1949,6 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
retention, err := auth.ParseObjectLockRetentionInput(ctx.Body())
if err != nil {
if c.debug {
debuglogger.Logf("failed to parse object lock configuration input: %v", err)
}
return SendResponse(ctx, err, &MetaOpts{
Logger: c.logger,
MetricsMng: c.mm,
@@ -1955,9 +1969,6 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
if ctx.Request().URI().QueryArgs().Has("legal-hold") {
var legalHold types.ObjectLockLegalHold
if err := xml.Unmarshal(ctx.Body(), &legalHold); err != nil {
if c.debug {
debuglogger.Logf("failed to parse request body: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.logger,
@@ -1968,9 +1979,6 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}
if legalHold.Status != types.ObjectLockLegalHoldStatusOff && legalHold.Status != types.ObjectLockLegalHoldStatusOn {
if c.debug {
debuglogger.Logf("invalid legal hold status: %v", legalHold.Status)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.logger,
@@ -2016,7 +2024,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
copySource, err := url.QueryUnescape(copySource)
if err != nil {
if c.debug {
debuglogger.Logf("error unescaping copy source %q: %v",
log.Printf("error unescaping copy source %q: %v",
cs, err)
}
return SendXMLResponse(ctx, nil,
@@ -2032,7 +2040,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
partNumber := int32(ctx.QueryInt("partNumber", -1))
if partNumber < 1 || partNumber > 10000 {
if c.debug {
debuglogger.Logf("invalid part number: %d", partNumber)
log.Printf("invalid part number: %d", partNumber)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrInvalidPartNumber),
@@ -2096,7 +2104,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
partNumber := int32(ctx.QueryInt("partNumber", -1))
if partNumber < 1 || partNumber > 10000 {
if c.debug {
debuglogger.Logf("invalid part number: %d", partNumber)
log.Printf("invalid part number: %d", partNumber)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPartNumber),
&MetaOpts{
@@ -2131,7 +2139,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
contentLength, err := strconv.ParseInt(contentLengthStr, 10, 64)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing content length %q: %v",
log.Printf("error parsing content length %q: %v",
contentLengthStr, err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
@@ -2145,9 +2153,6 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
algorithm, checksums, err := utils.ParseChecksumHeaders(ctx)
if err != nil {
if c.debug {
debuglogger.Logf("err parsing checksum headers: %v", err)
}
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.logger,
@@ -2165,6 +2170,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
body = bytes.NewReader([]byte{})
}
ctx.Locals("logReqBody", false)
res, err := c.be.UploadPart(ctx.Context(),
&s3.UploadPartInput{
Bucket: &bucket,
@@ -2234,7 +2240,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
if len(ctx.Body()) > 0 {
if grants+acl != "" {
if c.debug {
debuglogger.Logf("invalid request: %q (grants) %q (acl)",
log.Printf("invalid request: %q (grants) %q (acl)",
grants, acl)
}
return SendResponse(ctx,
@@ -2251,7 +2257,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &accessControlPolicy)
if err != nil {
if c.debug {
debuglogger.Logf("error unmarshalling access control policy: %v",
log.Printf("error unmarshalling access control policy: %v",
err)
}
return SendResponse(ctx,
@@ -2290,7 +2296,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
if acl != "" {
if acl != "private" && acl != "public-read" && acl != "public-read-write" {
if c.debug {
debuglogger.Logf("invalid acl: %q", acl)
log.Printf("invalid acl: %q", acl)
}
return SendResponse(ctx,
s3err.GetAPIError(s3err.ErrInvalidRequest),
@@ -2303,7 +2309,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}
if len(ctx.Body()) > 0 || grants != "" {
if c.debug {
debuglogger.Logf("invalid request: %q (grants) %q (acl) %v (body len)",
log.Printf("invalid request: %q (grants) %q (acl) %v (body len)",
grants, acl, len(ctx.Body()))
}
return SendResponse(ctx,
@@ -2358,7 +2364,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
copySource, err := url.QueryUnescape(copySource)
if err != nil {
if c.debug {
debuglogger.Logf("error unescaping copy source %q: %v",
log.Printf("error unescaping copy source %q: %v",
cs, err)
}
return SendXMLResponse(ctx, nil,
@@ -2397,7 +2403,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
tm, err := time.Parse(iso8601Format, copySrcModifSince)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing copy source modified since %q: %v",
log.Printf("error parsing copy source modified since %q: %v",
copySrcModifSince, err)
}
return SendXMLResponse(ctx, nil,
@@ -2415,7 +2421,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
tm, err := time.Parse(iso8601Format, copySrcUnmodifSince)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing copy source unmodified since %q: %v",
log.Printf("error parsing copy source unmodified since %q: %v",
copySrcUnmodifSince, err)
}
return SendXMLResponse(ctx, nil,
@@ -2433,9 +2439,6 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
metadata := utils.GetUserMetaData(&ctx.Request().Header)
if directive != "" && directive != "COPY" && directive != "REPLACE" {
if c.debug {
debuglogger.Logf("invalid metadata directive: %v", directive)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrInvalidMetadataDirective),
&MetaOpts{
@@ -2453,9 +2456,6 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
tDirective := types.TaggingDirective(ctx.Get("X-Amz-Tagging-Directive"))
if tDirective != "" && tDirective != types.TaggingDirectiveCopy && tDirective != types.TaggingDirectiveReplace {
if c.debug {
debuglogger.Logf("invalid tagging direcrive: %v", tDirective)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrInvalidTaggingDirective),
&MetaOpts{
@@ -2474,6 +2474,9 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
checksumAlgorithm := types.ChecksumAlgorithm(ctx.Get("x-amz-checksum-algorithm"))
err = utils.IsChecksumAlgorithmValid(checksumAlgorithm)
if err != nil {
if c.debug {
log.Printf("invalid checksum algorithm: %v", checksumAlgorithm)
}
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,
@@ -2595,7 +2598,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
contentLength, err := strconv.ParseInt(contentLengthStr, 10, 64)
if err != nil {
if c.debug {
debuglogger.Logf("error parsing content length %q: %v",
log.Printf("error parsing content length %q: %v",
contentLengthStr, err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
@@ -2637,6 +2640,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
body = bytes.NewReader([]byte{})
}
ctx.Locals("logReqBody", false)
res, err := c.be.PutObject(ctx.Context(),
s3response.PutObjectInput{
Bucket: &bucket,
@@ -2912,7 +2916,7 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &dObj)
if err != nil {
if c.debug {
debuglogger.Logf("error unmarshalling delete objects: %v", err)
log.Printf("error unmarshalling delete objects: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest),
&MetaOpts{
@@ -3219,7 +3223,6 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
partNumberQuery := int32(ctx.QueryInt("partNumber", -1))
versionId := ctx.Query("versionId")
objRange := ctx.Get("Range")
key := ctx.Params("key")
keyEnd := ctx.Params("*1")
if keyEnd != "" {
@@ -3234,7 +3237,7 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
if ctx.Request().URI().QueryArgs().Has("partNumber") {
if partNumberQuery < 1 || partNumberQuery > 10000 {
if c.debug {
debuglogger.Logf("invalid part number: %d", partNumberQuery)
log.Printf("invalid part number: %d", partNumberQuery)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPartNumber),
&MetaOpts{
@@ -3272,7 +3275,7 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
checksumMode := types.ChecksumMode(ctx.Get("x-amz-checksum-mode"))
if checksumMode != "" && checksumMode != types.ChecksumModeEnabled {
if c.debug {
debuglogger.Logf("invalid x-amz-checksum-mode header value: %v", checksumMode)
log.Printf("invalid x-amz-checksum-mode header value: %v\n", checksumMode)
}
return SendResponse(ctx, s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-mode"),
&MetaOpts{
@@ -3290,7 +3293,6 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
PartNumber: partNumber,
VersionId: &versionId,
ChecksumMode: checksumMode,
Range: &objRange,
})
if err != nil {
if res != nil {
@@ -3329,18 +3331,6 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
Value: getstring(res.Restore),
},
}
if getstring(res.AcceptRanges) != "" {
headers = append(headers, utils.CustomHeader{
Key: "accept-ranges",
Value: getstring(res.AcceptRanges),
})
}
if getstring(res.ContentRange) != "" {
headers = append(headers, utils.CustomHeader{
Key: "Content-Range",
Value: getstring(res.ContentRange),
})
}
if getstring(res.ContentDisposition) != "" {
headers = append(headers, utils.CustomHeader{
Key: "Content-Disposition",
@@ -3498,9 +3488,6 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
var restoreRequest types.RestoreRequest
if err := xml.Unmarshal(ctx.Body(), &restoreRequest); err != nil {
if !errors.Is(err, io.EOF) {
if c.debug {
debuglogger.Logf("failed to parse the request body: %v", err)
}
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.logger,
@@ -3553,7 +3540,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &payload)
if err != nil {
if c.debug {
debuglogger.Logf("error unmarshalling select object content: %v", err)
log.Printf("error unmarshalling select object content: %v", err)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrMalformedXML),
@@ -3611,7 +3598,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
err := xml.Unmarshal(ctx.Body(), &data)
if err != nil {
if c.debug {
debuglogger.Logf("error unmarshalling complete multipart upload: %v", err)
log.Printf("error unmarshalling complete multipart upload: %v", err)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrMalformedXML),
@@ -3625,7 +3612,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
if len(data.Parts) == 0 {
if c.debug {
debuglogger.Logf("empty parts provided for complete multipart upload")
log.Println("empty parts provided for complete multipart upload")
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrEmptyParts),
@@ -3643,9 +3630,6 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
val, err := strconv.ParseInt(mpuObjSizeHdr, 10, 64)
//TODO: Not sure if invalid request should be returned
if err != nil {
if c.debug {
debuglogger.Logf("invalid value for 'x-amz-mp-objects-size' header: %v", err)
}
return SendXMLResponse(ctx, nil,
s3err.GetAPIError(s3err.ErrInvalidRequest),
&MetaOpts{
@@ -3657,7 +3641,6 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
}
if val < 0 {
debuglogger.Logf("value for 'x-amz-mp-objects-size' header is less than 0: %v", val)
return SendXMLResponse(ctx, nil,
s3err.GetInvalidMpObjectSizeErr(val),
&MetaOpts{
@@ -3694,6 +3677,9 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
_, checksums, err := utils.ParseChecksumHeaders(ctx)
if err != nil {
if c.debug {
log.Printf("err parsing checksum headers: %v", err)
}
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,
@@ -3706,6 +3692,9 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
checksumType := types.ChecksumType(ctx.Get("x-amz-checksum-type"))
err = utils.IsChecksumTypeValid(checksumType)
if err != nil {
if c.debug {
log.Printf("invalid checksum type: %v", err)
}
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,
@@ -3715,7 +3704,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
})
}
res, versid, err := c.be.CompleteMultipartUpload(ctx.Context(),
res, err := c.be.CompleteMultipartUpload(ctx.Context(),
&s3.CompleteMultipartUploadInput{
Bucket: &bucket,
Key: &key,
@@ -3732,11 +3721,11 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
ChecksumType: checksumType,
})
if err == nil {
if versid != "" {
if getstring(res.VersionId) != "" {
utils.SetResponseHeaders(ctx, []utils.CustomHeader{
{
Key: "x-amz-version-id",
Value: versid,
Value: getstring(res.VersionId),
},
})
}
@@ -3749,7 +3738,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
BucketOwner: parsedAcl.Owner,
ObjectETag: res.ETag,
EventName: s3event.EventCompleteMultipartUpload,
VersionId: backend.GetPtrFromString(versid),
VersionId: res.VersionId,
})
}
return SendXMLResponse(ctx, res, err,
@@ -3797,6 +3786,9 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
checksumAlgorithm, checksumType, err := utils.ParseCreateMpChecksumHeaders(ctx)
if err != nil {
if c.debug {
log.Printf("err parsing checksum headers: %v", err)
}
return SendXMLResponse(ctx, nil, err,
&MetaOpts{
Logger: c.logger,
@@ -3882,7 +3874,7 @@ func SendResponse(ctx *fiber.Ctx, err error, l *MetaOpts) error {
return ctx.Send(s3err.GetAPIErrorResponse(apierr, "", "", ""))
}
debuglogger.Logf("Internal Error, %v", err)
log.Printf("Internal Error, %v", err)
ctx.Status(http.StatusInternalServerError)
return ctx.Send(s3err.GetAPIErrorResponse(
s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
@@ -3897,6 +3889,8 @@ func SendResponse(ctx *fiber.Ctx, err error, l *MetaOpts) error {
})
}
utils.LogCtxDetails(ctx, []byte{})
if l.Status == 0 {
l.Status = http.StatusOK
}
@@ -3936,7 +3930,7 @@ func SendXMLResponse(ctx *fiber.Ctx, resp any, err error, l *MetaOpts) error {
return ctx.Send(s3err.GetAPIErrorResponse(serr, "", "", ""))
}
debuglogger.Logf("Internal Error, %v", err)
log.Printf("Internal Error, %v", err)
ctx.Status(http.StatusInternalServerError)
return ctx.Send(s3err.GetAPIErrorResponse(
@@ -3962,6 +3956,7 @@ func SendXMLResponse(ctx *fiber.Ctx, resp any, err error, l *MetaOpts) error {
}
}
utils.LogCtxDetails(ctx, b)
if l.Logger != nil {
l.Logger.Log(ctx, nil, b, s3log.LogMeta{
Action: l.Action,
@@ -3990,7 +3985,7 @@ func SendXMLResponse(ctx *fiber.Ctx, resp any, err error, l *MetaOpts) error {
msglen := len(xmlhdr) + len(b)
if msglen > maxXMLBodyLen {
debuglogger.Logf("XML encoded body len %v exceeds max len %v",
log.Printf("XML encoded body len %v exceeds max len %v",
msglen, maxXMLBodyLen)
ctx.Status(http.StatusInternalServerError)

View File

@@ -974,9 +974,9 @@ func TestS3ApiController_PutActions(t *testing.T) {
PutObjectAclFunc: func(context.Context, *s3.PutObjectAclInput) error {
return nil
},
CopyObjectFunc: func(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
return s3response.CopyObjectOutput{
CopyObjectResult: &s3response.CopyObjectResult{},
CopyObjectFunc: func(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{},
}, nil
},
PutObjectFunc: func(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
@@ -1765,8 +1765,8 @@ func TestS3ApiController_CreateActions(t *testing.T) {
RestoreObjectFunc: func(context.Context, *s3.RestoreObjectInput) error {
return nil
},
CompleteMultipartUploadFunc: func(context.Context, *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
return s3response.CompleteMultipartUploadResult{}, "", nil
CompleteMultipartUploadFunc: func(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return &s3.CompleteMultipartUploadOutput{}, nil
},
CreateMultipartUploadFunc: func(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
return s3response.InitiateMultipartUploadResult{}, nil

View File

@@ -1,226 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package debuglogger
import (
"fmt"
"log"
"net/http"
"strings"
"sync/atomic"
"github.com/gofiber/fiber/v2"
)
type Color string
const (
green Color = "\033[32m"
yellow Color = "\033[33m"
blue Color = "\033[34m"
Purple Color = "\033[0;35m"
reset = "\033[0m"
borderChar = "─"
boxWidth = 120
)
// Logs http request details: headers, body, params, query args
func LogFiberRequestDetails(ctx *fiber.Ctx) {
// Log the full request url
fullURL := ctx.Protocol() + "://" + ctx.Hostname() + ctx.OriginalURL()
fmt.Printf("%s[URL]: %s%s\n", green, fullURL, reset)
// log request headers
wrapInBox(green, "REQUEST HEADERS", boxWidth, func() {
ctx.Request().Header.VisitAll(func(key, value []byte) {
printWrappedLine(yellow, string(key), string(value))
})
})
// skip request body log for PutObject and UploadPart
skipBodyLog := isLargeDataAction(ctx)
if !skipBodyLog {
body := ctx.Request().Body()
if len(body) != 0 {
printBoxTitleLine(blue, "REQUEST BODY", boxWidth, false)
fmt.Printf("%s%s%s\n", blue, body, reset)
printHorizontalBorder(blue, boxWidth, false)
}
}
if ctx.Request().URI().QueryArgs().Len() != 0 {
ctx.Request().URI().QueryArgs().VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
}
}
// Logs http response details: body, headers
func LogFiberResponseDetails(ctx *fiber.Ctx) {
wrapInBox(green, "RESPONSE HEADERS", boxWidth, func() {
ctx.Response().Header.VisitAll(func(key, value []byte) {
printWrappedLine(yellow, string(key), string(value))
})
})
_, ok := ctx.Locals("skip-res-body-log").(bool)
if !ok {
body := ctx.Response().Body()
if len(body) != 0 {
PrintInsideHorizontalBorders(blue, "RESPONSE BODY", string(body), boxWidth)
}
}
}
var debugEnabled atomic.Bool
// SetDebugEnabled sets the debug mode
func SetDebugEnabled() {
debugEnabled.Store(true)
}
// Logf is the same as 'fmt.Printf', but with a debug prefix,
// a color, and a trailing '\n' added
func Logf(format string, v ...any) {
if !debugEnabled.Load() {
return
}
debugPrefix := "[DEBUG]: "
fmt.Printf(string(yellow)+debugPrefix+format+reset+"\n", v...)
}
// Infof prints out green info block with [INFO]: prefix
func Infof(format string, v ...any) {
if !debugEnabled.Load() {
return
}
debugPrefix := "[INFO]: "
fmt.Printf(string(green)+debugPrefix+format+reset+"\n", v...)
}
// PrintInsideHorizontalBorders prints the text between horizontal
// borders, with the title centered in the upper border
func PrintInsideHorizontalBorders(color Color, title, text string, width int) {
if !debugEnabled.Load() {
return
}
printBoxTitleLine(color, title, width, false)
fmt.Printf("%s%s%s\n", color, text, reset)
printHorizontalBorder(color, width, false)
}
// Prints the box title line, with or without the corner characters "┌", "┐"
// e.g ┌────────────────[ RESPONSE HEADERS ]────────────────┐
func printBoxTitleLine(color Color, title string, length int, closing bool) {
leftCorner, rightCorner := "┌", "┐"
if !closing {
leftCorner, rightCorner = borderChar, borderChar
}
// Calculate how many border characters are needed
titleFormatted := fmt.Sprintf("[ %s ]", title)
borderSpace := length - len(titleFormatted) - 2 // 2 for corners
leftLen := borderSpace / 2
rightLen := borderSpace - leftLen
// Build the line
line := leftCorner +
strings.Repeat(borderChar, leftLen) +
titleFormatted +
strings.Repeat(borderChar, rightLen) +
rightCorner
fmt.Println(string(color) + line + reset)
}
// Prints a horizontal border line, with or without the corner characters "└", "┘"
func printHorizontalBorder(color Color, length int, closing bool) {
leftCorner, rightCorner := "└", "┘"
if !closing {
leftCorner, rightCorner = borderChar, borderChar
}
line := leftCorner + strings.Repeat(borderChar, length-2) + rightCorner + reset
fmt.Println(string(color) + line)
}
// wrapInBox wraps the output of a function call (fn) inside a styled box with a title.
func wrapInBox(color Color, title string, length int, fn func()) {
printBoxTitleLine(color, title, length, true)
fn()
printHorizontalBorder(color, length, true)
}
// returns the provided string length,
// with a minimum of 13 for shorter strings
func getLen(str string) int {
if len(str) < 13 {
return 13
}
return len(str)
}
// prints a formatted key-value pair within a box layout,
// wrapping the value text if it exceeds the allowed width.
func printWrappedLine(keyColor Color, key, value string) {
prefix := fmt.Sprintf("%s│%s %s%-13s%s : ", green, reset, keyColor, key, reset)
prefixLen := len(prefix) - len(green) - len(reset) - len(keyColor) - len(reset)
// the actual prefix size without colors
actualPrefixLen := getLen(key) + 5
lineWidth := boxWidth - prefixLen
valueLines := wrapText(value, lineWidth)
for i, line := range valueLines {
if i == 0 {
if len(line) < lineWidth {
line += strings.Repeat(" ", lineWidth-len(line))
}
fmt.Printf("%s%s%s %s│%s\n", prefix, reset, line, green, reset)
} else {
line = strings.Repeat(" ", actualPrefixLen-2) + line
if len(line) < boxWidth-4 {
line += strings.Repeat(" ", boxWidth-len(line)-4)
}
fmt.Printf("%s│ %s%s %s│%s\n", green, reset, line, green, reset)
}
}
}
// wrapText splits the input text into lines of at most `width` characters each.
func wrapText(text string, width int) []string {
var lines []string
for len(text) > width {
lines = append(lines, text[:width])
text = text[width:]
}
if text != "" {
lines = append(lines, text)
}
return lines
}
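// For illustration: wrapText("abcdefgh", 3) returns
// []string{"abc", "def", "gh"}.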
// TODO: remove this and use utils.IsBigDataAction after refactoring
// and creating an 'internal' package
func isLargeDataAction(ctx *fiber.Ctx) bool {
if ctx.Method() == http.MethodPut && len(strings.Split(ctx.Path(), "/")) >= 3 {
if !ctx.Request().URI().QueryArgs().Has("tagging") && ctx.Get("X-Amz-Copy-Source") == "" && !ctx.Request().URI().QueryArgs().Has("acl") {
return true
}
}
return false
}
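// Illustrative cases (hypothetical paths): "PUT /bucket/key" matches (a
// PutObject/UploadPart-style upload), while "PUT /bucket/key?tagging",
// "PUT /bucket/key?acl", and copies carrying "X-Amz-Copy-Source" do not.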

View File

@@ -33,8 +33,7 @@ import (
)
const (
iso8601Format = "20060102T150405Z"
maxObjSizeLimit = 5 * 1024 * 1024 * 1024 // 5 GiB
iso8601Format = "20060102T150405Z"
)
type RootUserConfig struct {
@@ -106,16 +105,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
return sendResponse(ctx, err, logger, mm)
}
var contentLength int64
contentLengthStr := ctx.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
//TODO: not sure if InvalidRequest should be returned in this case
if err != nil {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
}
}
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if utils.IsBigDataAction(ctx) {
// for streaming PUT actions, authorization is deferred
@@ -137,18 +126,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
return ctx.Next()
}
// Content-Length has to be set for data uploads: PutObject, UploadPart
if contentLengthStr == "" {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingContentLength), logger, mm)
}
// the upload limit for big data actions (PutObject, UploadPart)
// is 5 GiB. If the size exceeds the limit, return an 'EntityTooLarge' error
if contentLength > maxObjSizeLimit {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrEntityTooLarge), logger, mm)
}
return ctx.Next()
@@ -165,6 +142,15 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
}
}
var contentLength int64
contentLengthStr := ctx.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
if err != nil {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
}
}
err = utils.CheckValidSignature(ctx, authData, account.Secret, hashPayload, tdate, contentLength, debug)
if err != nil {
return sendResponse(ctx, err, logger, mm)

View File

@@ -1,40 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"fmt"
"strings"
"github.com/gofiber/fiber/v2"
)
// HostStyleParser is a middleware which parses the bucket name
// from the 'Host' header and appends in the request URL path
func HostStyleParser(virtualDomain string) fiber.Handler {
return func(ctx *fiber.Ctx) error {
host := string(ctx.Request().Host())
// the host should match this pattern: '<bucket_name>.<virtual_domain>'
bucket, _, found := strings.Cut(host, "."+virtualDomain)
if !found || bucket == "" {
return ctx.Next()
}
path := ctx.Path()
pathStyleUrl := fmt.Sprintf("/%v%v", bucket, path)
ctx.Path(pathStyleUrl)
return ctx.Next()
}
}
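// A hedged example of the rewrite (hypothetical values): with virtualDomain
// "s3.example.com", a request with Host "photos.s3.example.com" and path
// "/cat.jpg" is rewritten to the path-style URL "/photos/cat.jpg" before
// routing, while a request whose Host is just "s3.example.com" passes
// through unchanged.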

View File

@@ -15,15 +15,30 @@
package middlewares
import (
"fmt"
"log"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/debuglogger"
)
func DebugLogger() fiber.Handler {
func RequestLogger(isDebug bool) fiber.Handler {
return func(ctx *fiber.Ctx) error {
debuglogger.LogFiberRequestDetails(ctx)
err := ctx.Next()
debuglogger.LogFiberResponseDetails(ctx)
return err
ctx.Locals("isDebug", isDebug)
if isDebug {
log.Println("Request headers: ")
ctx.Request().Header.VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
if ctx.Request().URI().QueryArgs().Len() != 0 {
fmt.Println()
log.Println("Request query arguments: ")
ctx.Request().URI().QueryArgs().VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
}
}
return ctx.Next()
}
}
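// Typical wiring (matching the server setup hunk later in this diff):
//
// app.Use(middlewares.RequestLogger(server.debug))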

View File

@@ -16,7 +16,6 @@ package middlewares
import (
"io"
"strconv"
"time"
"github.com/gofiber/fiber/v2"
@@ -55,26 +54,7 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, logger
}
ctx.Locals("account", account)
var contentLength int64
contentLengthStr := ctx.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
//TODO: not sure if InvalidRequest should be returned in this case
if err != nil {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
}
}
if utils.IsBigDataAction(ctx) {
// Content-Length has to be set for data uploads: PutObject, UploadPart
if contentLengthStr == "" {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingContentLength), logger, mm)
}
// the upload limit for big data actions (PutObject, UploadPart)
// is 5 GiB. If the size exceeds the limit, return an 'EntityTooLarge' error
if contentLength > maxObjSizeLimit {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrEntityTooLarge), logger, mm)
}
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
return utils.NewPresignedAuthReader(ctx, r, authData, account.Secret, debug)
})

View File

@@ -26,7 +26,7 @@ import (
func DecodeURL(logger s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
return func(ctx *fiber.Ctx) error {
unescp, err := url.PathUnescape(string(ctx.Request().URI().PathOriginal()))
unescp, err := url.QueryUnescape(string(ctx.Request().URI().PathOriginal()))
if err != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidURI), &controllers.MetaOpts{Logger: logger, MetricsMng: mm})
}
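// Note the behavioral difference behind this change: url.QueryUnescape also
// decodes '+' as a space, while url.PathUnescape does not, e.g.
// url.QueryUnescape("/bucket/my+key") yields "/bucket/my key".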

View File

@@ -42,7 +42,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
app.Patch("/delete-user", middlewares.IsAdmin(logger), adminController.DeleteUser)
// UpdateUser admin api
app.Patch("/update-user", middlewares.IsAdmin(logger), adminController.UpdateUser)
app.Patch("update-user", middlewares.IsAdmin(logger), adminController.UpdateUser)
// ListUsers admin api
app.Patch("/list-users", middlewares.IsAdmin(logger), adminController.ListUsers)

View File

@@ -29,16 +29,15 @@ import (
)
type S3ApiServer struct {
app *fiber.App
backend backend.Backend
router *S3ApiRouter
port string
cert *tls.Certificate
quiet bool
debug bool
readonly bool
health string
virtualDomain string
app *fiber.App
backend backend.Backend
router *S3ApiRouter
port string
cert *tls.Certificate
quiet bool
debug bool
readonly bool
health string
}
func New(
@@ -77,16 +76,7 @@ func New(
})
}
app.Use(middlewares.DecodeURL(l, mm))
// initialize the host-style parser if a virtual domain is specified
if server.virtualDomain != "" {
app.Use(middlewares.HostStyleParser(server.virtualDomain))
}
// initialize the debug logger in debug mode
if server.debug {
app.Use(middlewares.DebugLogger())
}
app.Use(middlewares.RequestLogger(server.debug))
// Authentication middlewares
app.Use(middlewares.VerifyPresignedV4Signature(root, iam, l, mm, region, server.debug))
@@ -131,11 +121,6 @@ func WithReadOnly() Option {
return func(s *S3ApiServer) { s.readonly = true }
}
// WithHostStyle enables host-style bucket addressing on the server
func WithHostStyle(virtualDomain string) Option {
return func(s *S3ApiServer) { s.virtualDomain = virtualDomain }
}
func (sa *S3ApiServer) Serve() (err error) {
if sa.cert != nil {
return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)

View File

@@ -18,19 +18,13 @@ import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3err"
)
const (
maxObjSizeLimit = 5 * 1024 * 1024 * 1024 // 5 GiB
)
type payloadType string
const (
@@ -102,33 +96,18 @@ func IsStreamingPayload(str string) bool {
}
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
decContLengthStr := ctx.Get("X-Amz-Decoded-Content-Length")
if decContLengthStr == "" {
debuglogger.Logf("missing required header 'X-Amz-Decoded-Content-Length'")
return nil, s3err.GetAPIError(s3err.ErrMissingContentLength)
decContLength := ctx.Get("X-Amz-Decoded-Content-Length")
if decContLength == "" {
return nil, s3err.GetAPIError(s3err.ErrMissingDecodedContentLength)
}
decContLength, err := strconv.ParseInt(decContLengthStr, 10, 64)
//TODO: not sure if InvalidRequest should be returned in this case
if err != nil {
debuglogger.Logf("invalid value for 'X-Amz-Decoded-Content-Length': %v", decContLengthStr)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if decContLength > maxObjSizeLimit {
debuglogger.Logf("the object size exceeds the allowed limit: (size): %v, (limit): %v", decContLength, maxObjSizeLimit)
return nil, s3err.GetAPIError(s3err.ErrEntityTooLarge)
}
contentSha256 := payloadType(ctx.Get("X-Amz-Content-Sha256"))
if !contentSha256.isValid() {
//TODO: Add proper APIError
debuglogger.Logf("invalid value for 'X-Amz-Content-Sha256': %v", contentSha256)
return nil, fmt.Errorf("invalid x-amz-content-sha256: %v", string(contentSha256))
}
checksumType := checksumType(strings.ToLower(ctx.Get("X-Amz-Trailer")))
if contentSha256 != payloadTypeStreamingSigned && !checksumType.isValid() {
debuglogger.Logf("invalid value for 'X-Amz-Trailer': %v", checksumType)
return nil, s3err.GetAPIError(s3err.ErrTrailerHeaderNotSupported)
}
@@ -143,7 +122,6 @@ func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secr
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER
default:
debuglogger.Logf("unsupported chunk reader algorithm: %v", contentSha256)
return nil, getPayloadTypeNotSupportedErr(contentSha256)
}
}

s3api/utils/logger.go (new file, 55 lines)
View File

@@ -0,0 +1,55 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"fmt"
"log"
"github.com/gofiber/fiber/v2"
)
func LogCtxDetails(ctx *fiber.Ctx, respBody []byte) {
isDebug, ok := ctx.Locals("isDebug").(bool)
// the presence of these locals marks the corresponding body log as skipped
_, skipReqBody := ctx.Locals("logReqBody").(bool)
_, skipResBody := ctx.Locals("logResBody").(bool)
if ok && isDebug {
// Log request body
if !skipReqBody {
fmt.Println()
log.Printf("Request Body: %s", ctx.Request().Body())
}
// Log path parameters
fmt.Println()
log.Println("Path parameters: ")
for key, val := range ctx.AllParams() {
log.Printf("%s: %s", key, val)
}
// Log response headers
fmt.Println()
log.Println("Response Headers: ")
ctx.Response().Header.VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
// Log response body
if !skipResBody && len(respBody) > 0 {
fmt.Println()
log.Printf("Response body: %s", respBody)
}
}
}

View File

@@ -31,7 +31,6 @@ import (
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3err"
)
@@ -90,17 +89,11 @@ func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string,
if chType != "" {
checksumHasher, err := getHasher(chType)
if err != nil {
debuglogger.Logf("failed to initialize hash calculator: %v", err)
return nil, err
}
chRdr.checksumHash = checksumHasher
}
if chType == "" {
debuglogger.Infof("initializing signed chunk reader")
} else {
debuglogger.Infof("initializing signed chunk reader with '%v' trailing checksum", chType)
}
return chRdr, nil
}
@@ -157,13 +150,11 @@ func (cr *ChunkReader) getStringToSignPrefix(algo string) string {
func (cr *ChunkReader) getChunkStringToSign() string {
prefix := cr.getStringToSignPrefix(streamPayloadAlgo)
chunkHash := cr.chunkHash.Sum(nil)
strToSign := fmt.Sprintf("%s\n%s\n%s\n%s",
return fmt.Sprintf("%s\n%s\n%s\n%s",
prefix,
cr.prevSig,
zeroLenSig,
hex.EncodeToString(chunkHash))
debuglogger.PrintInsideHorizontalBorders(debuglogger.Purple, "STRING TO SIGN", strToSign, 64)
return strToSign
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html#example-signature-calculations-trailing-header
@@ -175,15 +166,11 @@ func (cr *ChunkReader) getTrailerChunkStringToSign() string {
prefix := cr.getStringToSignPrefix(streamPayloadTrailerAlgo)
strToSign := fmt.Sprintf("%s\n%s\n%s",
return fmt.Sprintf("%s\n%s\n%s",
prefix,
cr.prevSig,
sig,
)
debuglogger.PrintInsideHorizontalBorders(debuglogger.Purple, "TRAILER STRING TO SIGN", strToSign, 64)
return strToSign
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html#example-signature-calculations-trailing-header
@@ -193,7 +180,6 @@ func (cr *ChunkReader) verifyTrailerSignature() error {
sig := hex.EncodeToString(hmac256(cr.signingKey, []byte(strToSign)))
if sig != cr.trailerSig {
debuglogger.Logf("incorrect trailing signature: (calculated): %v, (got): %v", sig, cr.trailerSig)
return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
@@ -206,7 +192,6 @@ func (cr *ChunkReader) verifyChecksum() error {
checksum := base64.StdEncoding.EncodeToString(checksumHash)
if checksum != cr.parsedChecksum {
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(cr.trailer), "x-amz-checksum-")))
debuglogger.Logf("incorrect trailing checksum: (calculated): %v, (got): %v", checksum, cr.parsedChecksum)
return s3err.GetChecksumBadDigestErr(algo)
}
@@ -220,7 +205,6 @@ func (cr *ChunkReader) checkSignature() error {
cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))
if cr.prevSig != cr.parsedSig {
debuglogger.Logf("incorrect signature: (calculated): %v, (got) %v", cr.prevSig, cr.parsedSig)
return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
cr.parsedSig = ""
@@ -246,20 +230,18 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
}
}
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n], &n)
if err == errskipHeader {
cr.chunkDataLeft = 0
return 0, nil
}
if err != nil {
debuglogger.Logf("failed to parse chunk headers: %v", err)
return 0, err
}
cr.parsedSig = sig
// If we hit the final chunk, calculate and validate the final
// chunk signature and finish reading
if chunkSize == 0 {
debuglogger.Infof("final chunk parsed:\nchunk size: %v\nsignature: %v\nbuffer offset: %v", chunkSize, sig, bufOffset)
cr.chunkHash.Reset()
err := cr.checkSignature()
if err != nil {
@@ -267,7 +249,6 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
}
if cr.trailer != "" {
debuglogger.Infof("final chunk trailers parsed:\nchecksum: %v\ntrailing signature: %v", cr.parsedChecksum, cr.trailerSig)
err := cr.verifyChecksum()
if err != nil {
return 0, err
@@ -280,7 +261,6 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
return 0, io.EOF
}
debuglogger.Infof("chunk headers parsed:\nchunk size: %v\nsignature: %v\nbuffer offset: %v", chunkSize, sig, bufOffset)
// move data up to remove chunk header
copy(p, p[bufOffset:n])
@@ -296,7 +276,6 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
if (chunkSize + int64(n)) > math.MaxInt {
debuglogger.Logf("exceeding the limit of maximum integer allowed: (value): %v, (limit): %v", chunkSize+int64(n), math.MaxInt)
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
return n + int(chunkSize), err
@@ -319,7 +298,6 @@ func getSigningKey(secret, region string, date time.Time) []byte {
dateRegionKey := hmac256(dateKey, []byte(region))
dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
debuglogger.Infof("signing key: %s", hex.EncodeToString(signingKey))
return signingKey
}
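// For reference, the full SigV4 key-derivation chain this implements, per
// the AWS SigV4 documentation (notation: HMAC-SHA256(key, data)):
//
// dateKey              = HMAC-SHA256("AWS4"+secret, yyyymmdd)
// dateRegionKey        = HMAC-SHA256(dateKey, region)
// dateRegionServiceKey = HMAC-SHA256(dateRegionKey, "s3")
// signingKey           = HMAC-SHA256(dateRegionServiceKey, "aws4_request")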
@@ -341,14 +319,12 @@ const (
// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte, l *int) (int64, string, int, error) {
stashLen := len(cr.stash)
if stashLen > maxHeaderSize {
debuglogger.Logf("the stash length exceeds the maximum allowed chunk header size: (stash len): %v, (header limit): %v", stashLen, maxHeaderSize)
return 0, "", 0, errInvalidChunkFormat
}
if cr.stash != nil {
debuglogger.Logf("recovering the stash: (stash len): %v", stashLen)
tmp := make([]byte, stashLen+len(header))
copy(tmp, cr.stash)
copy(tmp[len(cr.stash):], header)
@@ -360,35 +336,33 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
// After the first chunk each chunk header should start
// with "\n\r\n"
if !cr.isFirstHeader {
if !cr.isFirstHeader && stashLen == 0 {
err := readAndSkip(rdr, '\r', '\n')
if err != nil {
debuglogger.Logf("failed to read chunk header first 2 bytes: (should be): \\r\\n, (got): %q", header[:2])
return cr.handleRdrErr(err, header)
}
copy(header, header[2:])
*l = *l - 2
}
// read and parse the chunk size
chunkSizeStr, err := readAndTrim(rdr, ';')
if err != nil {
debuglogger.Logf("failed to read chunk size: %v", err)
return cr.handleRdrErr(err, header)
}
chunkSize, err := strconv.ParseInt(chunkSizeStr, 16, 64)
if err != nil {
debuglogger.Logf("failed to parse chunk size: (size): %v, (err): %v", chunkSizeStr, err)
return 0, "", 0, errInvalidChunkFormat
}
// read the chunk signature
err = readAndSkip(rdr, 'c', 'h', 'u', 'n', 'k', '-', 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', '=')
if err != nil {
debuglogger.Logf("failed to read 'chunk-signature=': %v", err)
return cr.handleRdrErr(err, header)
}
sig, err := readAndTrim(rdr, '\r')
if err != nil {
debuglogger.Logf("failed to read '\\r', after chunk signature: %v", err)
return cr.handleRdrErr(err, header)
}
@@ -397,17 +371,14 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
if cr.trailer != "" {
err = readAndSkip(rdr, '\n')
if err != nil {
debuglogger.Logf("failed to read \\n before the trailer: %v", err)
return cr.handleRdrErr(err, header)
}
// parse and validate the trailing header
trailer, err := readAndTrim(rdr, ':')
if err != nil {
debuglogger.Logf("failed to read trailer prefix: %v", err)
return cr.handleRdrErr(err, header)
}
if trailer != string(cr.trailer) {
debuglogger.Logf("incorrect trailer prefix: (expected): %v, (got): %v", cr.trailer, trailer)
return 0, "", 0, errInvalidChunkFormat
}
@@ -416,36 +387,30 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
// parse the checksum
checksum, err := readAndTrim(rdr, '\r')
if err != nil {
debuglogger.Logf("failed to read checksum value: %v", err)
return cr.handleRdrErr(err, header)
}
if !IsValidChecksum(checksum, algo) {
debuglogger.Logf("invalid checksum value: %v", checksum)
return 0, "", 0, s3err.GetInvalidTrailingChecksumHeaderErr(trailer)
}
err = readAndSkip(rdr, '\n')
if err != nil {
debuglogger.Logf("failed to read \\n after checksum: %v", err)
return cr.handleRdrErr(err, header)
}
// parse the trailing signature
trailerSigPrefix, err := readAndTrim(rdr, ':')
if err != nil {
debuglogger.Logf("failed to read trailing signature prefix: %v", err)
return cr.handleRdrErr(err, header)
}
if trailerSigPrefix != trailerSignatureHeader {
debuglogger.Logf("invalid trailing signature prefix: (expected): %v, (got): %v", trailerSignatureHeader, trailerSigPrefix)
return 0, "", 0, errInvalidChunkFormat
}
trailerSig, err := readAndTrim(rdr, '\r')
if err != nil {
debuglogger.Logf("failed to read trailing signature: %v", err)
return cr.handleRdrErr(err, header)
}
@@ -456,7 +421,6 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
// "\r\n\r\n" is followed after the last chunk
err = readAndSkip(rdr, '\n', '\r', '\n')
if err != nil {
debuglogger.Logf("failed to read \\n\\r\\n at the end of chunk header: %v", err)
return cr.handleRdrErr(err, header)
}
@@ -465,30 +429,19 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
err = readAndSkip(rdr, '\n')
if err != nil {
debuglogger.Logf("failed to read \\n at the end of chunk header: %v", err)
return cr.handleRdrErr(err, header)
}
// find the index of chunk ending: '\r\n'
// skip the first 2 bytes as they are the starting '\r\n'
// the first chunk doesn't contain the starting '\r\n', but
// either way, trimming the first 2 bytes doesn't affect the logic.
ind := bytes.Index(header[2:], []byte{'\r', '\n'})
ind := bytes.Index(header, []byte{'\r', '\n'})
cr.isFirstHeader = false
// the offset is the found index + 4 - the stash length
// where:
// ind is the index of '\r\n'
// 4 specifies the trimmed 2 bytes plus 2 to shift the index at the end of '\r\n'
offset := ind + 4 - stashLen
return chunkSize, sig, offset, nil
return chunkSize, sig, ind + len(chunkHdrDelim) - stashLen, nil
}
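// A hedged illustration of the wire format parsed above (sizes and
// signatures are made up; framing per the AWS streaming SigV4 docs, with
// the trailer lines present only when X-Amz-Trailer is set):
//
// 400;chunk-signature=ad80c7...\r\n
// <0x400 bytes of chunk data>\r\n
// 0;chunk-signature=b6c6ea...\r\n
// x-amz-checksum-crc32c:sOO8/Q==\r\n
// x-amz-trailer-signature:ce30...\r\n
// \r\n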
// Stashes the header in cr.stash and returns "errskipHeader"
func (cr *ChunkReader) stashAndSkipHeader(header []byte) (int64, string, int, error) {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
debuglogger.Logf("stashing the header: (header length): %v", len(header))
return 0, "", 0, errskipHeader
}
@@ -498,7 +451,6 @@ func (cr *ChunkReader) stashAndSkipHeader(header []byte) (int64, string, int, er
func (cr *ChunkReader) handleRdrErr(err error, header []byte) (int64, string, int, error) {
if err == io.EOF {
if cr.isEOF {
debuglogger.Logf("incomplete chunk encoding, EOF reached")
return 0, "", 0, errInvalidChunkFormat
}
return cr.stashAndSkipHeader(header)

View File

@@ -29,8 +29,6 @@ import (
"math/bits"
"strconv"
"strings"
"github.com/versity/versitygw/s3api/debuglogger"
)
var (
@@ -44,28 +42,27 @@ type UnsignedChunkReader struct {
expectedChecksum string
hasher hash.Hash
stash []byte
chunkCounter int
offset int
}
func NewUnsignedChunkReader(r io.Reader, ct checksumType) (*UnsignedChunkReader, error) {
hasher, err := getHasher(ct)
if err != nil {
debuglogger.Logf("failed to initialize hash calculator: %v", err)
return nil, err
}
debuglogger.Infof("initializing unsigned chunk reader")
return &UnsignedChunkReader{
reader: bufio.NewReader(r),
checksumType: ct,
stash: make([]byte, 0),
hasher: hasher,
chunkCounter: 1,
}, nil
}
func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
// First read any stashed data
if len(ucr.stash) != 0 {
debuglogger.Infof("recovering the stash: (stash length): %v", len(ucr.stash))
n := copy(p, ucr.stash)
ucr.offset += n
@@ -92,24 +89,22 @@ func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
// Read and cache the payload
_, err = io.ReadFull(rdr, payload)
if err != nil {
debuglogger.Logf("failed to read chunk data: %v", err)
return 0, err
}
// Skip the trailing "\r\n"
if err := ucr.readAndSkip('\r', '\n'); err != nil {
debuglogger.Logf("failed to read trailing \\r\\n after chunk data: %v", err)
return 0, err
}
// Copy the payload into the io.Reader buffer
n := copy(p[ucr.offset:], payload)
ucr.offset += n
ucr.chunkCounter++
if int64(n) < chunkSize {
// stash the remaining data
ucr.stash = payload[n:]
debuglogger.Infof("stashing the remaining data: (stash length): %v", len(ucr.stash))
dataRead := ucr.offset
ucr.offset = 0
return dataRead, nil
@@ -118,7 +113,6 @@ func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
// Read and validate trailers
if err := ucr.readTrailer(); err != nil {
debuglogger.Logf("failed to read trailer: %v", err)
return 0, err
}
@@ -148,19 +142,15 @@ func (ucr *UnsignedChunkReader) readAndSkip(data ...byte) error {
func (ucr *UnsignedChunkReader) extractChunkSize() (int64, error) {
line, err := ucr.reader.ReadString('\n')
if err != nil {
debuglogger.Logf("failed to parse chunk size: %v", err)
return 0, errMalformedEncoding
}
line = strings.TrimSpace(line)
chunkSize, err := strconv.ParseInt(line, 16, 64)
if err != nil {
debuglogger.Logf("failed to convert chunk size: %v", err)
return 0, errMalformedEncoding
}
debuglogger.Infof("chunk size extracted: %v", chunkSize)
return chunkSize, nil
}
@@ -171,7 +161,6 @@ func (ucr *UnsignedChunkReader) readTrailer() error {
for {
v, err := ucr.reader.ReadByte()
if err != nil {
debuglogger.Logf("failed to read byte: %v", err)
if err == io.EOF {
return io.ErrUnexpectedEOF
}
@@ -184,14 +173,12 @@ func (ucr *UnsignedChunkReader) readTrailer() error {
var tmp [3]byte
_, err = io.ReadFull(ucr.reader, tmp[:])
if err != nil {
debuglogger.Logf("failed to read chunk ending: \\n\\r\\n: %v", err)
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if !bytes.Equal(tmp[:], trailerDelim) {
debuglogger.Logf("incorrect trailer delimiter: (expected): \\n\\r\\n, (got): %q", tmp[:])
return errMalformedEncoding
}
break
@@ -202,18 +189,15 @@ func (ucr *UnsignedChunkReader) readTrailer() error {
trailerHeader = strings.TrimSpace(trailerHeader)
trailerHeaderParts := strings.Split(trailerHeader, ":")
if len(trailerHeaderParts) != 2 {
debuglogger.Logf("invalid trailer header parts: %v", trailerHeaderParts)
return errMalformedEncoding
}
if trailerHeaderParts[0] != string(ucr.checksumType) {
debuglogger.Logf("invalid checksum type: %v", trailerHeaderParts[0])
//TODO: handle the error
return errMalformedEncoding
}
ucr.expectedChecksum = trailerHeaderParts[1]
debuglogger.Infof("parsed the trailing header:\n%v:%v", trailerHeaderParts[0], trailerHeaderParts[1])
// Validate checksum
return ucr.validateChecksum()
@@ -225,7 +209,6 @@ func (ucr *UnsignedChunkReader) validateChecksum() error {
checksum := base64.StdEncoding.EncodeToString(csum)
if checksum != ucr.expectedChecksum {
debuglogger.Logf("incorrect checksum: (expected): %v, (got): %v", ucr.expectedChecksum, checksum)
return fmt.Errorf("actual checksum: %v, expected checksum: %v", checksum, ucr.expectedChecksum)
}

View File

@@ -17,7 +17,6 @@ package utils
import (
"bytes"
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
@@ -29,9 +28,9 @@ import (
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/encoding/httpbinding"
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
@@ -41,6 +40,10 @@ var (
bucketNameIpRegexp = regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`)
)
const (
upperhex = "0123456789ABCDEF"
)
func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
metadata = make(map[string]string)
headers.DisableNormalizing()
@@ -66,9 +69,9 @@ func createHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength
body = bytes.NewReader(req.Body())
}
uri := ctx.OriginalURL()
escapedURI := escapeOriginalURI(ctx)
httpReq, err := http.NewRequest(string(req.Header.Method()), uri, body)
httpReq, err := http.NewRequest(string(req.Header.Method()), escapedURI, body)
if err != nil {
return nil, errors.New("error in creating an http request")
}
@@ -121,7 +124,8 @@ func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, cont
body = bytes.NewReader(req.Body())
}
uri, _, _ := strings.Cut(ctx.OriginalURL(), "?")
uri := string(ctx.Request().URI().Path())
uri = httpbinding.EscapePath(uri, false)
isFirst := true
ctx.Request().URI().QueryArgs().VisitAll(func(key, value []byte) {
@@ -177,11 +181,9 @@ func ParseUint(str string) (int32, error) {
}
num, err := strconv.ParseInt(str, 10, 32)
if err != nil {
debuglogger.Logf("invalid intager provided: %v\n", err)
return 1000, fmt.Errorf("invalid int: %w", err)
}
if num < 0 {
debuglogger.Logf("negative intager provided: %v\n", num)
return 1000, fmt.Errorf("negative uint: %v", num)
}
if num > 1000 {
@@ -210,18 +212,15 @@ func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser, bodysize int) {
func IsValidBucketName(bucket string) bool {
if len(bucket) < 3 || len(bucket) > 63 {
debuglogger.Logf("bucket name length should be in 3-63 range, got: %v\n", len(bucket))
return false
}
// Checks that it contains only digits, lowercase letters, dots, and hyphens,
// and that it starts and ends with a digit or lowercase letter.
if !bucketNameRegexp.MatchString(bucket) {
debuglogger.Logf("invalid bucket name: %v\n", bucket)
return false
}
// Checks not to be a valid IP address
if bucketNameIpRegexp.MatchString(bucket) {
debuglogger.Logf("bucket name is an ip address: %v\n", bucket)
return false
}
return true
@@ -306,7 +305,6 @@ func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]stru
for _, a := range oattrs {
attr := s3response.ObjectAttributes(a)
if !attr.IsValid() {
debuglogger.Logf("invalid object attribute: %v\n", attr)
err = s3err.GetAPIError(s3err.ErrInvalidObjectAttributes)
break
}
@@ -320,7 +318,6 @@ func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]stru
}
if len(attrs) == 0 {
debuglogger.Logf("empty get object attributes")
return nil, s3err.GetAPIError(s3err.ErrObjectAttributesInvalidHeader)
}
@@ -339,7 +336,6 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
objLockDate := ctx.Get("X-Amz-Object-Lock-Retain-Until-Date")
if (objLockDate != "" && objLockModeHdr == "") || (objLockDate == "" && objLockModeHdr != "") {
debuglogger.Logf("one of 2 required params is missing: (lock date): %v, (lock mode): %v\n", objLockDate, objLockModeHdr)
return nil, s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders)
}
@@ -347,11 +343,9 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
if objLockDate != "" {
rDate, err := time.Parse(time.RFC3339, objLockDate)
if err != nil {
debuglogger.Logf("failed to parse retain until date: %v\n", err)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if rDate.Before(time.Now()) {
debuglogger.Logf("expired retain until date: %v\n", rDate.Format(time.RFC3339))
return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
}
retainUntilDate = rDate
@@ -362,14 +356,12 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
if objLockMode != "" &&
objLockMode != types.ObjectLockModeCompliance &&
objLockMode != types.ObjectLockModeGovernance {
debuglogger.Logf("invalid object lock mode: %v\n", objLockMode)
return nil, s3err.GetAPIError(s3err.ErrInvalidObjectLockMode)
}
legalHold := types.ObjectLockLegalHoldStatus(legalHoldHdr)
if legalHold != "" && legalHold != types.ObjectLockLegalHoldStatusOff && legalHold != types.ObjectLockLegalHoldStatusOn {
debuglogger.Logf("invalid object lock legal hold status: %v\n", legalHold)
return nil, s3err.GetAPIError(s3err.ErrInvalidLegalHoldStatus)
}
@@ -389,40 +381,90 @@ func IsValidOwnership(val types.ObjectOwnership) bool {
case types.ObjectOwnershipObjectWriter:
return true
default:
debuglogger.Logf("invalid object ownership: %v\n", val)
return false
}
}
type ChecksumValues map[types.ChecksumAlgorithm]string
func escapeOriginalURI(ctx *fiber.Ctx) string {
path := ctx.Path()
// Headers concatenates the checksum algorithms, prefixing each
// with 'x-amz-checksum-'
// e.g.
// "x-amz-checksum-crc64nvme, x-amz-checksum-sha1"
func (cv ChecksumValues) Headers() string {
result := ""
isFirst := true
// Escape the original URI path
escapedURI := escapePath(path)
for key := range cv {
if !isFirst {
result += ", "
}
isFirst = false
result += fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(key)))
// Add the URI query params
query := string(ctx.Request().URI().QueryArgs().QueryString())
if query != "" {
escapedURI = escapedURI + "?" + query
}
return result
return escapedURI
}
func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, ChecksumValues, error) {
// Escapes the path string.
// Most of this is adapted from the standard library's net/url.
func escapePath(s string) string {
hexCount := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
hexCount++
}
}
if hexCount == 0 {
return s
}
var buf [64]byte
var t []byte
required := len(s) + 2*hexCount
if required <= len(buf) {
t = buf[:required]
} else {
t = make([]byte, required)
}
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case shouldEscape(c):
t[j] = '%'
t[j+1] = upperhex[c>>4]
t[j+2] = upperhex[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
// Checks if the character needs to be escaped
func shouldEscape(c byte) bool {
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '_', '.', '~', '/':
return false
}
return true
}
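// For illustration: escapePath("/my bucket/key*") yields
// "/my%20bucket/key%2A", while '/', '-', '_', '.', and '~' pass through
// unescaped.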
func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, map[types.ChecksumAlgorithm]string, error) {
sdkAlgorithm := types.ChecksumAlgorithm(strings.ToUpper(ctx.Get("X-Amz-Sdk-Checksum-Algorithm")))
err := IsChecksumAlgorithmValid(sdkAlgorithm)
if err != nil {
debuglogger.Logf("invalid checksum algorithm: %v\n", sdkAlgorithm)
return "", nil, err
}
checksums := ChecksumValues{}
checksums := map[types.ChecksumAlgorithm]string{}
var hdrErr error
// Parse and validate checksum headers
@@ -435,7 +477,6 @@ func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, ChecksumValu
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(key), "X-Amz-Checksum-")))
err := IsChecksumAlgorithmValid(algo)
if err != nil {
debuglogger.Logf("invalid checksum header: %s\n", key)
hdrErr = s3err.GetAPIError(s3err.ErrInvalidChecksumHeader)
return
}
@@ -448,7 +489,6 @@ func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, ChecksumValu
}
if len(checksums) > 1 {
debuglogger.Logf("multiple checksum headers provided: %v\n", checksums.Headers())
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
}
@@ -478,22 +518,15 @@ var checksumLengths = map[types.ChecksumAlgorithm]int{
func IsValidChecksum(checksum string, algorithm types.ChecksumAlgorithm) bool {
decoded, err := base64.StdEncoding.DecodeString(checksum)
if err != nil {
debuglogger.Logf("failed to parse checksum base64: %v\n", err)
return false
}
expectedLength, exists := checksumLengths[algorithm]
if !exists {
debuglogger.Logf("unknown checksum algorithm: %v\n", algorithm)
return false
}
isValid := len(decoded) == expectedLength
if !isValid {
debuglogger.Logf("decoded checksum length: (expected): %v, (got): %v\n", expectedLength, len(decoded))
}
return isValid
return len(decoded) == expectedLength
}
func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
@@ -504,7 +537,6 @@ func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
alg != types.ChecksumAlgorithmSha1 &&
alg != types.ChecksumAlgorithmSha256 &&
alg != types.ChecksumAlgorithmCrc64nvme {
debuglogger.Logf("invalid checksum algorithm: %v\n", alg)
return s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm)
}
@@ -516,7 +548,6 @@ func IsChecksumTypeValid(t types.ChecksumType) error {
if t != "" &&
t != types.ChecksumTypeComposite &&
t != types.ChecksumTypeFullObject {
debuglogger.Logf("invalid checksum type: %v\n", t)
return s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type")
}
return nil
@@ -560,7 +591,6 @@ func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType
typeSchema := checksumMap[algo]
_, ok := typeSchema[t]
if !ok {
debuglogger.Logf("checksum type and algorithm mismatch: (type): %v, (algorithm): %v\n", t, algo)
return s3err.GetChecksumSchemaMismatchErr(algo, t)
}
@@ -582,7 +612,6 @@ func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, type
// Verify if checksum algorithm is provided, if
// checksum type is specified
if chType != "" && algo == "" {
debuglogger.Logf("checksum type can only be used with checksum algorithm: (type): %v\n", chType)
return algo, chType, s3err.GetAPIError(s3err.ErrChecksumTypeWithAlgo)
}
@@ -605,62 +634,3 @@ func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, type
return algo, chType, nil
}
// TagLimit specifies the allowed tag count in a tag set
type TagLimit int
const (
// Tag limit for bucket tagging
TagLimitBucket TagLimit = 50
// Tag limit for object tagging
TagLimitObject TagLimit = 10
)
// Parses and validates tagging
func ParseTagging(data []byte, limit TagLimit) (map[string]string, error) {
var tagging s3response.TaggingInput
err := xml.Unmarshal(data, &tagging)
if err != nil {
debuglogger.Logf("invalid taggging: %s", data)
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
}
tLen := len(tagging.TagSet.Tags)
if tLen > int(limit) {
switch limit {
case TagLimitObject:
debuglogger.Logf("bucket tagging length exceeds %v: %v", limit, tLen)
return nil, s3err.GetAPIError(s3err.ErrObjectTaggingLimited)
case TagLimitBucket:
debuglogger.Logf("object tagging length exceeds %v: %v", limit, tLen)
return nil, s3err.GetAPIError(s3err.ErrBucketTaggingLimited)
}
}
tagSet := make(map[string]string, tLen)
for _, tag := range tagging.TagSet.Tags {
// validate tag key
if len(tag.Key) == 0 || len(tag.Key) > 128 {
debuglogger.Logf("tag key should 0 < tag.Key <= 128, key: %v", tag.Key)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}
// validate tag value
if len(tag.Value) > 256 {
debuglogger.Logf("invalid long tag value: (length): %v, (value): %v", len(tag.Value), tag.Value)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}
// make sure there are no duplicate keys
_, ok := tagSet[tag.Key]
if ok {
debuglogger.Logf("duplicate tag key: %v", tag.Key)
return nil, s3err.GetAPIError(s3err.ErrDuplicateTagKey)
}
tagSet[tag.Key] = tag.Value
}
return tagSet, nil
}
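// A minimal Tagging document accepted by ParseTagging (illustrative; field
// names per s3response.TaggingInput):
//
// <Tagging>
//   <TagSet>
//     <Tag><Key>env</Key><Value>prod</Value></Tag>
//   </TagSet>
// </Tagging>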

View File

@@ -16,9 +16,6 @@ package utils
import (
"bytes"
"encoding/xml"
"errors"
"math/rand"
"net/http"
"reflect"
"testing"
@@ -28,7 +25,6 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
@@ -418,6 +414,128 @@ func TestIsValidOwnership(t *testing.T) {
}
}
func Test_shouldEscape(t *testing.T) {
type args struct {
c byte
}
tests := []struct {
name string
args args
want bool
}{
{
name: "shouldn't-escape-alphanum",
args: args{
c: 'h',
},
want: false,
},
{
name: "shouldn't-escape-unreserved-char",
args: args{
c: '_',
},
want: false,
},
{
name: "shouldn't-escape-unreserved-number",
args: args{
c: '0',
},
want: false,
},
{
name: "shouldn't-escape-path-separator",
args: args{
c: '/',
},
want: false,
},
{
name: "should-escape-special-char-1",
args: args{
c: '&',
},
want: true,
},
{
name: "should-escape-special-char-2",
args: args{
c: '*',
},
want: true,
},
{
name: "should-escape-special-char-3",
args: args{
c: '(',
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := shouldEscape(tt.args.c); got != tt.want {
t.Errorf("shouldEscape() = %v, want %v", got, tt.want)
}
})
}
}
func Test_escapePath(t *testing.T) {
type args struct {
s string
}
tests := []struct {
name string
args args
want string
}{
{
name: "empty-string",
args: args{
s: "",
},
want: "",
},
{
name: "alphanum-path",
args: args{
s: "/test-bucket/test-key",
},
want: "/test-bucket/test-key",
},
{
name: "path-with-unescapable-chars",
args: args{
s: "/test~bucket/test.key",
},
want: "/test~bucket/test.key",
},
{
name: "path-with-escapable-chars",
args: args{
s: "/bucket-*(/test=key&",
},
want: "/bucket-%2A%28/test%3Dkey%26",
},
{
name: "path-with-space",
args: args{
s: "/test-bucket/my key",
},
want: "/test-bucket/my%20key",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := escapePath(tt.args.s); got != tt.want {
t.Errorf("escapePath() = %v, want %v", got, tt.want)
}
})
}
}
func TestIsChecksumAlgorithmValid(t *testing.T) {
type args struct {
alg types.ChecksumAlgorithm
@@ -739,162 +857,3 @@ func Test_checkChecksumTypeAndAlgo(t *testing.T) {
})
}
}
func TestParseTagging(t *testing.T) {
genRandStr := func(lgth int) string {
b := make([]byte, lgth)
for i := range b {
b[i] = byte(rand.Intn(95) + 32) // 126 - 32 + 1 = 95 printable characters
}
return string(b)
}
getTagSet := func(lgth int) s3response.TaggingInput {
res := s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{},
},
}
for i := 0; i < lgth; i++ {
res.TagSet.Tags = append(res.TagSet.Tags, s3response.Tag{
Key: genRandStr(10),
Value: genRandStr(20),
})
}
return res
}
type args struct {
data s3response.TaggingInput
overrideXML []byte
limit TagLimit
}
tests := []struct {
name string
args args
want map[string]string
wantErr error
}{
{
name: "valid tags within limit",
args: args{
data: s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{
{Key: "key1", Value: "value1"},
{Key: "key2", Value: "value2"},
},
},
},
limit: TagLimitObject,
},
want: map[string]string{"key1": "value1", "key2": "value2"},
wantErr: nil,
},
{
name: "malformed XML",
args: args{
overrideXML: []byte("invalid xml"),
limit: TagLimitObject,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrMalformedXML),
},
{
name: "exceeds bucket tag limit",
args: args{
data: getTagSet(51),
limit: TagLimitBucket,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrBucketTaggingLimited),
},
{
name: "exceeds object tag limit",
args: args{
data: getTagSet(11),
limit: TagLimitObject,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrObjectTaggingLimited),
},
{
name: "invalid 0 length tag key",
args: args{
data: s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{{Key: "", Value: "value1"}},
},
},
limit: TagLimitObject,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrInvalidTagKey),
},
{
name: "invalid long tag key",
args: args{
data: s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{{Key: genRandStr(130), Value: "value1"}},
},
},
limit: TagLimitObject,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrInvalidTagKey),
},
{
name: "invalid long tag value",
args: args{
data: s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{{Key: "key", Value: genRandStr(257)}},
},
},
limit: TagLimitBucket,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrInvalidTagValue),
},
{
name: "duplicate tag key",
args: args{
data: s3response.TaggingInput{
TagSet: s3response.TagSet{
Tags: []s3response.Tag{
{Key: "key", Value: "value1"},
{Key: "key", Value: "value2"},
},
},
},
limit: TagLimitObject,
},
want: nil,
wantErr: s3err.GetAPIError(s3err.ErrDuplicateTagKey),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var data []byte
if tt.args.overrideXML != nil {
data = tt.args.overrideXML
} else {
var err error
data, err = xml.Marshal(tt.args.data)
if err != nil {
t.Fatalf("error marshalling input: %v", err)
}
}
got, err := ParseTagging(data, tt.args.limit)
if !errors.Is(err, tt.wantErr) {
t.Errorf("expected error %v, got %v", tt.wantErr, err)
}
if err == nil && !reflect.DeepEqual(got, tt.want) {
t.Errorf("expected result %v, got %v", tt.want, got)
}
})
}
}

View File

@@ -84,12 +84,7 @@ const (
ErrInvalidCopyDest
ErrInvalidCopySource
ErrInvalidCopySourceRange
ErrInvalidTagKey
ErrInvalidTagValue
ErrDuplicateTagKey
ErrBucketTaggingLimited
ErrObjectTaggingLimited
ErrInvalidURLEncodedTagging
ErrInvalidTag
ErrAuthHeaderEmpty
ErrSignatureVersionNotSupported
ErrMalformedPOSTRequest
@@ -118,7 +113,7 @@ const (
ErrSignatureTerminationStr
ErrSignatureIncorrService
ErrContentSHA256Mismatch
ErrMissingContentLength
ErrMissingDecodedContentLength
ErrInvalidAccessKeyID
ErrRequestNotReadyYet
ErrMissingDateHeader
@@ -311,34 +306,9 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTagKey: {
Code: "InvalidTag",
Description: "The TagKey you have provided is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTagValue: {
Code: "InvalidTag",
Description: "The TagValue you have provided is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrDuplicateTagKey: {
Code: "InvalidTag",
Description: "Cannot provide multiple Tags with the same key",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketTaggingLimited: {
Code: "BadRequest",
Description: "Bucket tag count cannot be greater than 50",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectTaggingLimited: {
Code: "BadRequest",
Description: "Object tags cannot be greater than 10",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidURLEncodedTagging: {
ErrInvalidTag: {
Code: "InvalidArgument",
Description: "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates.",
Description: "The Tag value you have provided is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMalformedXML: {
@@ -486,7 +456,7 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The provided 'x-amz-content-sha256' header does not match what was computed.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingContentLength: {
ErrMissingDecodedContentLength: {
Code: "MissingContentLength",
Description: "You must provide the Content-Length HTTP header.",
HTTPStatusCode: http.StatusLengthRequired,
@@ -523,7 +493,7 @@ var errorCodeResponse = map[ErrorCode]APIError{
},
ErrInvalidRange: {
Code: "InvalidRange",
Description: "The requested range is not satisfiable",
Description: "The requested range is not valid for the request. Try another range.",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrInvalidURI: {

View File

@@ -141,12 +141,7 @@ func InitEventSender(cfg *EventConfig) (S3EventSender, error) {
func createEventSchema(ctx *fiber.Ctx, meta EventMeta, configId ConfigurationId) EventSchema {
path := strings.Split(ctx.Path(), "/")
var bucket, object string
if len(path) > 1 {
bucket, object = path[1], strings.Join(path[2:], "/")
}
bucket, object := path[1], strings.Join(path[2:], "/")
acc := ctx.Locals("account").(auth.Account)
return EventSchema{

View File

@@ -68,10 +68,7 @@ func (f *FileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
access := "-"
reqURI := ctx.OriginalURL()
path := strings.Split(ctx.Path(), "/")
var bucket, object string
if len(path) > 1 {
bucket, object = path[1], strings.Join(path[2:], "/")
}
bucket, object := path[1], strings.Join(path[2:], "/")
errorCode := ""
httpStatus := 200
startTime := ctx.Locals("startTime").(time.Time)

View File

@@ -65,10 +65,7 @@ func (wl *WebhookLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMet
access := "-"
reqURI := ctx.OriginalURL()
path := strings.Split(ctx.Path(), "/")
var bucket, object string
if len(path) > 1 {
bucket, object = path[1], strings.Join(path[2:], "/")
}
bucket, object := path[1], strings.Join(path[2:], "/")
errorCode := ""
httpStatus := 200
startTime := ctx.Locals("startTime").(time.Time)

View File

@@ -62,7 +62,7 @@ func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
Alias: (*Alias)(&p),
}
aux.LastModified = p.LastModified.UTC().Format(time.RFC3339)
aux.LastModified = p.LastModified.UTC().Format(iso8601TimeFormat)
return e.EncodeElement(aux, start)
}
@@ -172,7 +172,7 @@ type ListObjectsV2Result struct {
Name *string
Prefix *string
StartAfter *string
ContinuationToken *string `xml:"ContinuationToken,omitempty"`
ContinuationToken *string
NextContinuationToken *string
KeyCount *int32
MaxKeys *int32
@@ -198,14 +198,15 @@ type Object struct {
func (o Object) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias Object
aux := &struct {
LastModified string `xml:"LastModified,omitempty"`
LastModified *string `xml:"LastModified,omitempty"`
*Alias
}{
Alias: (*Alias)(&o),
}
if o.LastModified != nil {
aux.LastModified = o.LastModified.UTC().Format(time.RFC3339)
formattedTime := o.LastModified.UTC().Format(iso8601TimeFormat)
aux.LastModified = &formattedTime
}
return e.EncodeElement(aux, start)
@@ -232,7 +233,7 @@ func (u Upload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
Alias: (*Alias)(&u),
}
aux.Initiated = u.Initiated.UTC().Format(time.RFC3339)
aux.Initiated = u.Initiated.UTC().Format(iso8601TimeFormat)
return e.EncodeElement(aux, start)
}
@@ -329,7 +330,7 @@ func (r ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement
Alias: (*Alias)(&r),
}
aux.CreationDate = r.CreationDate.UTC().Format(time.RFC3339)
aux.CreationDate = r.CreationDate.UTC().Format(iso8601TimeFormat)
return e.EncodeElement(aux, start)
}
@@ -343,44 +344,11 @@ type CanonicalUser struct {
DisplayName string
}
type CopyObjectOutput struct {
BucketKeyEnabled *bool
CopyObjectResult *CopyObjectResult
CopySourceVersionId *string
Expiration *string
SSECustomerAlgorithm *string
SSECustomerKeyMD5 *string
SSEKMSEncryptionContext *string
SSEKMSKeyId *string
ServerSideEncryption types.ServerSideEncryption
VersionId *string
}
type CopyObjectResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumCRC64NVME *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumType types.ChecksumType
ETag *string
LastModified *time.Time
}
func (r CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias CopyObjectResult
aux := &struct {
LastModified string `xml:"LastModified,omitempty"`
*Alias
}{
Alias: (*Alias)(&r),
}
if r.LastModified != nil {
aux.LastModified = r.LastModified.UTC().Format(time.RFC3339)
}
return e.EncodeElement(aux, start)
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
LastModified time.Time
ETag string
CopySourceVersionId string `xml:"-"`
}
type CopyPartResult struct {
@@ -397,35 +365,20 @@ type CopyPartResult struct {
CopySourceVersionId string `xml:"-"`
}
func (r CopyPartResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias CopyPartResult
func (r CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias CopyObjectResult
aux := &struct {
LastModified string `xml:"LastModified,omitempty"`
LastModified string `xml:"LastModified"`
*Alias
}{
Alias: (*Alias)(&r),
}
if !r.LastModified.IsZero() {
aux.LastModified = r.LastModified.UTC().Format(time.RFC3339)
}
aux.LastModified = r.LastModified.UTC().Format(iso8601TimeFormat)
return e.EncodeElement(aux, start)
}
type CompleteMultipartUploadResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"`
Location *string
Bucket *string
Key *string
ETag *string
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumCRC64NVME *string
ChecksumType *types.ChecksumType
}
type AccessControlPolicy struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlPolicy" json:"-"`
Owner CanonicalUser
@@ -480,37 +433,7 @@ type ListVersionsResult struct {
NextVersionIdMarker *string
Prefix *string
VersionIdMarker *string
Versions []ObjectVersion `xml:"Version"`
}
type ObjectVersion struct {
ChecksumAlgorithm []types.ChecksumAlgorithm
ChecksumType types.ChecksumType
ETag *string
IsLatest *bool
Key *string
LastModified *time.Time
Owner *types.Owner
RestoreStatus *types.RestoreStatus
Size *int64
StorageClass types.ObjectVersionStorageClass
VersionId *string
}
func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias ObjectVersion
aux := &struct {
LastModified string `xml:"LastModified"`
*Alias
}{
Alias: (*Alias)(&o),
}
if o.LastModified != nil {
aux.LastModified = o.LastModified.UTC().Format(time.RFC3339)
}
return e.EncodeElement(aux, start)
Versions []types.ObjectVersion `xml:"Version"`
}
type GetBucketVersioningOutput struct {

View File

@@ -25,8 +25,5 @@ USERNAME_TWO=HIJKLMN
PASSWORD_TWO=OPQRSTU
TEST_FILE_FOLDER=$PWD/versity-gwtest-files
RECREATE_BUCKETS=true
DELETE_BUCKETS_AFTER_TEST=true
REMOVE_TEST_FILE_FOLDER=true
AUTOGENERATE_USERS=true
USER_AUTOGENERATION_PREFIX=versitygw-docker-
VERSIONING_DIR=/tmp/versioning

View File

@@ -21,7 +21,6 @@ RUN apt-get update && \
jq \
bc \
libxml2-utils \
xmlstarlet \
ca-certificates && \
update-ca-certificates && \
rm -rf /var/lib/apt/lists/*

View File

@@ -110,11 +110,6 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up
**ACL_AWS_ACCESS_KEY_ID**, **ACL_AWS_ACCESS_SECRET_KEY**: for direct mode, the ID and key for the S3 user in the **ACL_AWS_CANONICAL_ID** account.
**USER_ID_{role}_{id}**, **USERNAME_{role}_{id}**, **PASSWORD_{role}_{id}**: for setup_user_v2 non-autocreated users, the format for the user.
* example: USER_ID_USER_1={name}: user ID corresponding to the first user with **user** permissions in the test.
### Non-Secret
**VERSITY_EXE**: location of the versity executable relative to test folder.
@@ -155,7 +150,7 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up
**COVERAGE_DB**: database to store client command coverage info and usage counts, if using.
**USERNAME_ONE**, **PASSWORD_ONE**, **USERNAME_TWO**, **PASSWORD_TWO**: credentials for users created and tested for non-root user **versitygw** operations (non-setup_user_v2).
**USERNAME_ONE**, **PASSWORD_ONE**, **USERNAME_TWO**, **PASSWORD_TWO**: credentials for users created and tested for non-root user **versitygw** operations.
**TEST_FILE_FOLDER**: where to put temporary test files.
@@ -171,10 +166,6 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up
**DELETE_BUCKETS_AFTER_TEST**: whether or not to delete buckets after individual tests, useful for debugging if the post-test bucket state needs to be checked
**AUTOCREATE_USERS**: setup_user_v2, whether or not to autocreate users for tests. If set to **false**, users must be pre-created (see `Secret` section above).
**USER_AUTOCREATION_PREFIX**: setup_user_v2, if **AUTOCREATE_USERS** is set to **true**, the prefix for the autocreated username.
## REST Scripts
REST scripts are included for calls to S3's REST API in the `./tests/rest_scripts/` folder. To call a script, the following parameters are needed:

View File

@@ -20,14 +20,15 @@ source ./tests/report.sh
# param: bucket name
# return 0 for success, 1 for failure
create_bucket() {
log 6 "create_bucket"
if ! check_param_count "create_bucket" "command type, bucket" 2 $#; then
if [ $# -ne 2 ]; then
log 2 "create bucket missing command type, bucket name"
return 1
fi
record_command "create-bucket" "client:$1"
local exit_code=0
local error
log 6 "create bucket"
if [[ $1 == 's3' ]]; then
error=$(send_command aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]]; then
@@ -49,8 +50,8 @@ create_bucket() {
}
create_bucket_with_user() {
log 6 "create_bucket_with_user"
if ! check_param_count "create_bucket_with_user" "command type, bucket, access ID, secret key" 4 $#; then
if [ $# -ne 4 ]; then
log 2 "create bucket missing command type, bucket name, access, secret"
return 1
fi
local exit_code=0
@@ -72,9 +73,9 @@ create_bucket_with_user() {
}
create_bucket_object_lock_enabled() {
log 6 "create_bucket_object_lock_enabled"
record_command "create-bucket" "client:s3api"
if ! check_param_count "create_bucket_object_lock_enabled" "bucket" 1 $#; then
if [ $# -ne 1 ]; then
log 2 "create bucket missing bucket name"
return 1
fi

View File

@@ -122,11 +122,5 @@ create_multipart_upload_rest() {
log 2 "put-object-retention returned code $result: $(cat "$TEST_FILE_FOLDER/output.txt")"
return 1
fi
log 5 "result: $(cat "$TEST_FILE_FOLDER/output.txt")"
if ! upload_id=$(get_element_text "$TEST_FILE_FOLDER/output.txt" "InitiateMultipartUploadResult" "UploadId"); then
log 2 "error getting upload ID: $upload_id"
return 1
fi
echo "$upload_id"
return 0
}

View File

@@ -18,7 +18,8 @@
delete_object() {
log 6 "delete_object"
record_command "delete-object" "client:$1"
if ! check_param_count "delete_object" "command type, bucket, key" 3 $#; then
if [ $# -ne 3 ]; then
log 2 "delete object command requires command type, bucket, key"
return 1
fi
local exit_code=0
@@ -46,7 +47,8 @@ delete_object() {
}
delete_object_bypass_retention() {
if ! check_param_count "delete_object_bypass_retention" "bucket, key, user, password" 4 $#; then
if [[ $# -ne 4 ]]; then
log 2 "'delete-object with bypass retention' requires bucket, key, user, password"
return 1
fi
if ! delete_object_error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" send_command aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --bypass-governance-retention 2>&1); then
@@ -57,7 +59,8 @@ delete_object_bypass_retention() {
}
delete_object_version() {
if ! check_param_count "delete_object_version" "bucket, key, version ID" 3 $#; then
if [[ $# -ne 3 ]]; then
log 2 "'delete_object_version' requires bucket, key, version ID"
return 1
fi
if ! delete_object_error=$(send_command aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --version-id "$3" 2>&1); then
@@ -68,7 +71,8 @@ delete_object_version() {
}
delete_object_version_bypass_retention() {
if ! check_param_count "delete_object_version_bypass_retention" "bucket, key, version ID" 3 $#; then
if [[ $# -ne 3 ]]; then
log 2 "'delete_object_version_bypass_retention' requires bucket, key, version ID"
return 1
fi
if ! delete_object_error=$(send_command aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --version-id "$3" --bypass-governance-retention 2>&1); then
@@ -80,14 +84,15 @@ delete_object_version_bypass_retention() {
delete_object_with_user() {
record_command "delete-object" "client:$1"
if ! check_param_count "delete_object_version_bypass_retention" "command type, bucket, key, access ID, secret key" 5 $#; then
if [ $# -ne 5 ]; then
log 2 "delete object with user command requires command type, bucket, key, access ID, secret key"
return 1
fi
local exit_code=0
if [[ $1 == 's3' ]]; then
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" send_command aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" send_command aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" send_command aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" --bypass-governance-retention 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
delete_object_error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm --access_key="$4" --secret_key="$5" "s3://$2/$3" 2>&1) || exit_code=$?
else
@@ -96,22 +101,47 @@ delete_object_with_user() {
fi
if [ $exit_code -ne 0 ]; then
log 2 "error deleting object: $delete_object_error"
export delete_object_error
return 1
fi
return 0
}
delete_object_rest() {
if ! check_param_count "delete_object_rest" "bucket, key" 2 $#; then
if [ $# -ne 2 ]; then
log 2 "'delete_object_rest' requires bucket name, object name"
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/delete_object.sh 2>&1); then
log 2 "error deleting object: $result"
generate_hash_for_payload ""
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
# shellcheck disable=SC2154
canonical_request="DELETE
/$1/$2
host:$aws_endpoint_url_address
x-amz-content-sha256:UNSIGNED-PAYLOAD
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-date
UNSIGNED-PAYLOAD"
if ! generate_sts_string "$current_date_time" "$canonical_request"; then
log 2 "error generating sts string"
return 1
fi
if [ "$result" != "204" ]; then
delete_object_error=$(cat "$TEST_FILE_FOLDER/result.txt")
log 2 "expected '204', was '$result' ($delete_object_error)"
get_signature
# shellcheck disable=SC2154
reply=$(send_command curl -ks -w "%{http_code}" -X DELETE "$header://$aws_endpoint_url_address/$1/$2" \
-H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-H "x-amz-content-sha256: UNSIGNED-PAYLOAD" \
-H "x-amz-date: $current_date_time" \
-o "$TEST_FILE_FOLDER"/delete_object_error.txt 2>&1)
if [[ "$reply" != "204" ]]; then
log 2 "delete object command returned error: $(cat "$TEST_FILE_FOLDER"/delete_object_error.txt)"
return 1
fi
return 0

View File

@@ -15,20 +15,18 @@
# under the License.
get_bucket_policy() {
log 6 "get_bucket_policy '$1' '$2'"
record_command "get-bucket-policy" "client:$1"
if ! check_param_count "get_bucket_policy" "command type, bucket" 2 $#; then
if [[ $# -ne 2 ]]; then
log 2 "get bucket policy command requires command type, bucket"
return 1
fi
local get_bucket_policy_result=0
if [[ $1 == 's3api' ]]; then
get_bucket_policy_s3api "$2" || get_bucket_policy_result=$?
get_bucket_policy_aws "$2" || get_bucket_policy_result=$?
elif [[ $1 == 's3cmd' ]]; then
get_bucket_policy_s3cmd "$2" || get_bucket_policy_result=$?
elif [[ $1 == 'mc' ]]; then
get_bucket_policy_mc "$2" || get_bucket_policy_result=$?
elif [ "$1" == 'rest' ]; then
get_bucket_policy_rest "$2" || get_bucket_policy_result=$?
else
log 2 "command 'get bucket policy' not implemented for '$1'"
return 1
@@ -40,10 +38,10 @@ get_bucket_policy() {
return 0
}
get_bucket_policy_s3api() {
log 6 "get_bucket_policy_s3api '$1'"
get_bucket_policy_aws() {
record_command "get-bucket-policy" "client:s3api"
if ! check_param_count "get_bucket_policy_s3api" "bucket" 1 $#; then
if [[ $# -ne 1 ]]; then
log 2 "aws 'get bucket policy' command requires bucket"
return 1
fi
policy_json=$(send_command aws --no-verify-ssl s3api get-bucket-policy --bucket "$1" 2>&1) || local get_result=$?
@@ -64,7 +62,8 @@ get_bucket_policy_s3api() {
get_bucket_policy_with_user() {
record_command "get-bucket-policy" "client:s3api"
if ! check_param_count "get_bucket_policy_with_user" "bucket, username, password" 3 $#; then
if [[ $# -ne 3 ]]; then
log 2 "'get bucket policy with user' command requires bucket, username, password"
return 1
fi
if policy_json=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" send_command aws --no-verify-ssl s3api get-bucket-policy --bucket "$1" 2>&1); then
@@ -83,7 +82,8 @@ get_bucket_policy_with_user() {
get_bucket_policy_s3cmd() {
record_command "get-bucket-policy" "client:s3cmd"
if ! check_param_count "get_bucket_policy_s3cmd" "bucket" 1 $#; then
if [[ $# -ne 1 ]]; then
log 2 "s3cmd 'get bucket policy' command requires bucket"
return 1
fi
@@ -106,7 +106,8 @@ get_bucket_policy_s3cmd() {
}
get_bucket_policy_rest() {
if ! check_param_count "get_bucket_policy_rest" "bucket" 1 $#; then
if [[ $# -ne 1 ]]; then
log 2 "s3cmd 'get bucket policy' command requires bucket name"
return 1
fi
if ! get_bucket_policy_rest_expect_code "$1" "200"; then
@@ -117,7 +118,8 @@ get_bucket_policy_rest() {
}
get_bucket_policy_rest_expect_code() {
if ! check_param_count "get_bucket_policy_rest_expect_code" "bucket, code" 2 $#; then
if [[ $# -ne 2 ]]; then
log 2 "s3cmd 'get bucket policy' command requires bucket name, expected code"
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/policy.txt" ./tests/rest_scripts/get_bucket_policy.sh); then
@@ -167,7 +169,8 @@ search_for_first_policy_line_or_full_policy() {
get_bucket_policy_mc() {
record_command "get-bucket-policy" "client:mc"
if ! check_param_count "get_bucket_policy_mc" "bucket" 1 $#; then
if [[ $# -ne 1 ]]; then
log 2 "aws 'get bucket policy' command requires bucket"
return 1
fi
bucket_policy=$(send_command mc --insecure anonymous get-json "$MC_ALIAS/$1") || get_result=$?

View File

@@ -14,26 +14,17 @@
# specific language governing permissions and limitations
# under the License.
source ./tests/drivers/drivers.sh
get_object_lock_configuration() {
record_command "get-object-lock-configuration" "client:s3api"
if ! check_param_count "get_object_lock_configuration" "client, bucket name" 2 $#; then
if [[ $# -ne 1 ]]; then
log 2 "'get object lock configuration' command missing bucket name"
return 1
fi
if [ "$1" == 'rest' ]; then
if ! get_object_lock_configuration_rest "$2"; then
log 2 "error getting REST object lock configuration"
get_object_lock_config_err=$(cat "$TEST_FILE_FOLDER/object-lock-config.txt")
return 1
fi
else
if ! lock_config=$(send_command aws --no-verify-ssl s3api get-object-lock-configuration --bucket "$2" 2>&1); then
log 2 "error obtaining lock config: $lock_config"
# shellcheck disable=SC2034
get_object_lock_config_err=$lock_config
return 1
fi
if ! lock_config=$(send_command aws --no-verify-ssl s3api get-object-lock-configuration --bucket "$1" 2>&1); then
log 2 "error obtaining lock config: $lock_config"
# shellcheck disable=SC2034
get_object_lock_config_err=$lock_config
return 1
fi
lock_config=$(echo "$lock_config" | grep -v "InsecureRequestWarning")
return 0
@@ -41,7 +32,8 @@ get_object_lock_configuration() {
get_object_lock_configuration_rest() {
log 6 "get_object_lock_configuration_rest"
if ! check_param_count "get_object_lock_configuration_rest" "bucket name" 1 $#; then
if [ $# -ne 1 ]; then
log 2 "'get_object_lock_configuration_rest' requires bucket name"
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/object-lock-config.txt" ./tests/rest_scripts/get_object_lock_config.sh); then
@@ -52,6 +44,5 @@ get_object_lock_configuration_rest() {
log 2 "expected '200', returned '$result': $(cat "$TEST_FILE_FOLDER/object-lock-config.txt")"
return 1
fi
lock_config="$(cat "$TEST_FILE_FOLDER/object-lock-config.txt")"
return 0
}

View File

@@ -22,9 +22,10 @@ source ./tests/report.sh
# 1 - bucket does not exist
# 2 - misc error
head_bucket() {
log 6 "head_bucket '$1' '$2'"
log 6 "head_bucket"
record_command "head-bucket" "client:$1"
if ! check_param_count "head_bucket" "client, bucket name" 2 $#; then
if [ $# -ne 2 ]; then
log 2 "'head_bucket' command requires client, bucket name"
return 1
fi
local exit_code=0
@@ -36,7 +37,6 @@ head_bucket() {
bucket_info=$(send_command mc --insecure stat "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
elif [[ $1 == 'rest' ]]; then
bucket_info=$(head_bucket_rest "$2") || exit_code=$?
log 5 "head bucket rest exit code: $exit_code"
return $exit_code
else
log 2 "invalid command type $1"
@@ -54,21 +54,19 @@ head_bucket() {
}
head_bucket_rest() {
log 6 "head_bucket_rest '$1'"
if ! check_param_count "head_bucket_rest" "bucket" 1 $#; then
if [ $# -ne 1 ]; then
log 2 "'head_bucket_rest' requires bucket name"
return 2
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/head_bucket.sh 2>&1); then
log 2 "error getting head bucket: $result"
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$BUCKET_ONE_NAME" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/head_bucket.sh 2>&1); then
log 2 "error getting head bucket"
return 2
fi
if [ "$result" == "200" ]; then
bucket_info="$(cat "$TEST_FILE_FOLDER/result.txt")"
echo "$bucket_info"
log 5 "bucket info: $bucket_info"
return 0
elif [ "$result" == "404" ]; then
log 5 "bucket '$1' not found"
return 1
fi
log 2 "unexpected response code '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"

View File

@@ -22,24 +22,25 @@ source ./tests/commands/command.sh
list_objects() {
log 6 "list_objects"
record_command "list-objects" "client:$1"
if ! check_param_count "list_object" "client, bucket" 2 $#; then
if [ $# -ne 2 ]; then
log 2 "'list_objects' command requires client, bucket"
return 1
fi
local output
local list_objects_result=0
local result=0
if [[ $1 == 's3' ]]; then
output=$(send_command aws --no-verify-ssl s3 ls s3://"$2" 2>&1) || list_objects_result=$?
output=$(send_command aws --no-verify-ssl s3 ls s3://"$2" 2>&1) || result=$?
elif [[ $1 == 's3api' ]]; then
list_objects_s3api "$2" || list_objects_result=$?
return $list_objects_result
list_objects_s3api "$2" || result=$?
return $result
elif [[ $1 == 's3cmd' ]]; then
output=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3://"$2" 2>&1) || list_objects_result=$?
output=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3://"$2" 2>&1) || result=$?
elif [[ $1 == 'mc' ]]; then
output=$(send_command mc --insecure ls "$MC_ALIAS"/"$2" 2>&1) || list_objects_result=$?
output=$(send_command mc --insecure ls "$MC_ALIAS"/"$2" 2>&1) || result=$?
elif [[ $1 == 'rest' ]]; then
list_objects_rest "$2" || list_objects_result=$?
return $list_objects_result
list_objects_rest "$2" || result=$?
return $result
else
fail "invalid command type $1"
return 1
@@ -62,7 +63,8 @@ list_objects() {
# fail if unable to list
list_objects_s3api() {
log 6 "list_objects_s3api"
if ! check_param_count "list_objects_s3api" "bucket" 1 $#; then
if [ $# -ne 1 ]; then
log 2 "'list_objects_s3api' requires bucket"
return 1
fi
if ! output=$(send_command aws --no-verify-ssl s3api list-objects --bucket "$1" 2>&1); then
@@ -105,7 +107,8 @@ list_objects_s3api_v1() {
}
list_objects_with_prefix() {
if ! check_param_count "list_objects_with_prefix" "client, bucket, prefix" 3 $#; then
if [ $# -ne 3 ]; then
log 2 "'list_objects_with_prefix' command requires, client, bucket, prefix"
return 1
fi
local result=0
@@ -131,7 +134,8 @@ list_objects_with_prefix() {
}
list_objects_rest() {
if ! check_param_count "list_objects_rest" "bucket" 1 $#; then
if [ $# -ne 1 ]; then
log 2 "'list_objects_rest' requires bucket name"
return 1
fi
log 5 "bucket name: $1"

View File

@@ -19,11 +19,11 @@ list_parts() {
log 2 "'list-parts' command requires bucket, key, upload ID"
return 1
fi
if ! list_parts_with_user "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$1" "$2" "$3"; then
log 2 "error listing parts with user"
record_command "list-parts" "client:s3api"
if ! listed_parts=$(send_command aws --no-verify-ssl s3api list-parts --bucket "$1" --key "$2" --upload-id "$3" 2>&1); then
log 2 "Error listing multipart upload parts: $listed_parts"
return 1
fi
return 0
}
list_parts_with_user() {
@@ -36,7 +36,4 @@ list_parts_with_user() {
log 2 "Error listing multipart upload parts: $listed_parts"
return 1
fi
listed_parts="$(echo -n "$listed_parts" | grep -v "InsecureRequestWarning")"
log 5 "listed parts: $listed_parts"
return 0
}

View File

@@ -14,12 +14,10 @@
# specific language governing permissions and limitations
# under the License.
source ./tests/drivers/drivers.sh
put_bucket_policy() {
log 6 "put_bucket_policy '$1' '$2' '$3'"
record_command "put-bucket-policy" "client:$1"
if ! check_param_count "put_bucket_policy" "command type, bucket, policy file" 3 $#; then
if [[ $# -ne 3 ]]; then
log 2 "'put bucket policy' command requires command type, bucket, policy file"
return 1
fi
local put_policy_result=0
@@ -29,9 +27,6 @@ put_bucket_policy() {
policy=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate setpolicy "$3" "s3://$2" 2>&1) || put_policy_result=$?
elif [[ $1 == 'mc' ]]; then
policy=$(send_command mc --insecure anonymous set-json "$3" "$MC_ALIAS/$2" 2>&1) || put_policy_result=$?
elif [ "$1" == 'rest' ]; then
put_bucket_policy_rest "$2" "$3" || put_policy_result=$?
return $put_policy_result
else
log 2 "command 'put bucket policy' not implemented for '$1'"
return 1
@@ -42,16 +37,13 @@ put_bucket_policy() {
export put_bucket_policy_error
return 1
fi
# direct can take some time to take effect
if [ "$DIRECT" == "true" ]; then
sleep 10
fi
return 0
}
put_bucket_policy_with_user() {
record_command "put-bucket-policy" "client:s3api"
if ! check_param_count "put_bucket_policy_with_user" "bucket, policy file, username, password" 4 $#; then
if [[ $# -ne 4 ]]; then
log 2 "'put bucket policy with user' command requires bucket, policy file, username, password"
return 1
fi
if ! policy=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" send_command aws --no-verify-ssl s3api put-bucket-policy --bucket "$1" --policy "file://$2" 2>&1); then
@@ -62,18 +54,3 @@ put_bucket_policy_with_user() {
fi
return 0
}
put_bucket_policy_rest() {
if ! check_param_count "put_bucket_policy_rest" "bucket, policy file" 2 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" POLICY_FILE="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/put_bucket_policy.sh); then
log 2 "error putting bucket policy: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "expected '200', was '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
return 1
fi
return 0
}

View File

@@ -141,32 +141,3 @@ put_object_rest_user_bad_signature() {
fi
return 0
}
put_object_multiple() {
if [ $# -ne 3 ]; then
log 2 "put object command requires command type, source, destination"
return 1
fi
local exit_code=0
local error
if [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
# shellcheck disable=SC2086
error=$(aws --no-verify-ssl s3 cp "$(dirname "$2")" s3://"$3" --recursive --exclude="*" --include="$2" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
# shellcheck disable=SC2086
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate put $2 "s3://$3/" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
# shellcheck disable=SC2086
error=$(mc --insecure cp $2 "$MC_ALIAS"/"$3" 2>&1) || exit_code=$?
else
log 2 "invalid command type $1"
return 1
fi
if [ $exit_code -ne 0 ]; then
log 2 "error copying object to bucket: $error"
return 1
else
log 5 "$error"
fi
return 0
}
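# Usage sketch (hypothetical caller; the file glob and bucket are illustrative):
put_object_multiple_example() {
  # uploads every local file matching the glob to bucket one via s3api
  if ! put_object_multiple "s3api" "$TEST_FILE_FOLDER/test-file-*" "$BUCKET_ONE_NAME"; then
    log 2 "error uploading test files"
    return 1
  fi
  return 0
}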

View File

@@ -16,33 +16,12 @@
put_object_legal_hold() {
record_command "put-object-legal-hold" "client:s3api"
if ! check_param_count "put_object_legal_hold" "client, bucket, key, hold status ('ON' or 'OFF')" 4 $#; then
if [[ $# -ne 3 ]]; then
log 2 "'put object legal hold' command requires bucket, key, hold status ('ON' or 'OFF')"
return 1
fi
if [ "$1" == "rest" ]; then
if ! put_object_legal_hold_rest "$2" "$3" "$4"; then
log 2 "error updating legal hold status w/REST"
return 1
fi
else
if ! error=$(send_command aws --no-verify-ssl s3api put-object-legal-hold --bucket "$2" --key "$3" --legal-hold "{\"Status\": \"$4\"}" 2>&1); then
log 2 "error putting object legal hold: $error"
return 1
fi
fi
return 0
}
put_object_legal_hold_rest() {
if ! check_param_count "put_object_legal_hold_rest" "bucket, key, hold status ('ON' or 'OFF')" 3 $#; then
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" STATUS="$3" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/put_object_legal_hold.sh 2>&1); then
log 2 "error putting object legal hold: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "expected '200', was '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
if ! error=$(send_command aws --no-verify-ssl s3api put-object-legal-hold --bucket "$1" --key "$2" --legal-hold "{\"Status\": \"$3\"}" 2>&1); then
log 2 "error putting object legal hold: $error"
return 1
fi
return 0
@@ -50,7 +29,8 @@ put_object_legal_hold_rest() {
put_object_legal_hold_version_id() {
record_command "put-object-legal-hold" "client:s3api"
if ! check_param_count "put_object_legal_hold_version_id" "bucket, key, version ID, hold status ('ON' or 'OFF')" 4 $#; then
if [[ $# -ne 4 ]]; then
log 2 "'put_object_legal_hold_version_id' command requires bucket, key, version ID, hold status ('ON' or 'OFF')"
return 1
fi
local error=""

View File

@@ -19,21 +19,9 @@ upload_part() {
log 2 "upload multipart part function must have bucket, key, upload ID, file name, part number"
return 1
fi
if ! upload_part_with_user "$1" "$2" "$3" "$4" "$5" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"; then
log 2 "error uploading part with user"
return 1
fi
return 0
}
upload_part_with_user() {
if [ $# -ne 7 ]; then
log 2 "upload multipart part function must have bucket, key, upload ID, file name, part number, username, password"
return 1
fi
local etag_json
record_command "upload-part" "client:s3api"
if ! etag_json=$(AWS_ACCESS_KEY_ID="$6" AWS_SECRET_ACCESS_KEY="$7" send_command aws --no-verify-ssl s3api upload-part --bucket "$1" --key "$2" --upload-id "$3" --part-number "$5" --body "$4-$(($5-1))" 2>&1); then
if ! etag_json=$(send_command aws --no-verify-ssl s3api upload-part --bucket "$1" --key "$2" --upload-id "$3" --part-number "$5" --body "$4-$(($5-1))" 2>&1); then
log 2 "Error uploading part $5: $etag_json"
return 1
fi

View File

@@ -1,76 +0,0 @@
#!/usr/bin/env bash
# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
attempt_copy_object_to_directory_with_same_name() {
if [ $# -ne 3 ]; then
log 2 "'attempt_copy_object_to_directory_with_same_name' requires bucket name, key name, copy source"
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2/" COPY_SOURCE="$3" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/copy_object.sh); then
log 2 "error copying object: $result"
return 1
fi
if [ "$result" != "409" ]; then
log 2 "expected '409', was '$result'"
return 1
fi
if ! check_xml_error_contains "$TEST_FILE_FOLDER/result.txt" "ObjectParentIsFile" "Object parent already exists as a file"; then
log 2 "error checking XML"
return 1
fi
return 0
}
copy_object_invalid_copy_source() {
if [ $# -ne 1 ]; then
log 2 "'copy_object_invalid_copy_source' requires bucket name"
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$BUCKET_ONE_NAME" OBJECT_KEY="dummy-copy" COPY_SOURCE="dummy" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/copy_object.sh); then
log 2 "error copying object: $result"
return 1
fi
if [ "$result" != "400" ]; then
log 2 "expected '400', was '$result' $(cat "$TEST_FILE_FOLDER/result.txt")"
return 1
fi
if ! check_xml_element_contains "$TEST_FILE_FOLDER/result.txt" "InvalidArgument" "Error" "Code"; then
log 2 "error checking XML error code"
return 1
fi
return 0
}
copy_object_copy_source_and_payload() {
if [ $# -ne 3 ]; then
log 2 "'copy_object_copy_source_and_payload' requires bucket name, source key, and local data file"
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="${2}-copy" COPY_SOURCE="$1/$2" DATA_FILE="$3" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/copy_object.sh); then
log 2 "error copying object: $result"
return 1
fi
if [ "$result" != "400" ]; then
log 2 "expected '400', was '$result' $(cat "$TEST_FILE_FOLDER/result.txt")"
return 1
fi
log 5 "result: $(cat "$TEST_FILE_FOLDER/result.txt")"
if ! check_xml_element_contains "$TEST_FILE_FOLDER/result.txt" "InvalidRequest" "Error" "Code"; then
log 2 "error checking XML error code"
return 1
fi
return 0
}

View File

@@ -1,39 +0,0 @@
#!/usr/bin/env bash
# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
check_param_count() {
if [ $# -ne 4 ]; then
log 2 "'check_param_count' requires function name, params list, expected, actual"
return 1
fi
if [ "$3" -ne "$4" ]; then
log 2 "function $1 requires $2"
return 1
fi
return 0
}
check_param_count_gt() {
if [ $# -ne 4 ]; then
log 2 "'check_param_count_gt' requires function name, params list, expected minimum, actual"
return 1
fi
if [ "$3" -gt "$4" ]; then
log 2 "function $1 requires $2"
return 1
fi
return 0
}
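# Usage sketch (the wrapper functions below are hypothetical callers):
example_command() {
  # exact-count check: logs and fails unless exactly two arguments were passed
  if ! check_param_count "example_command" "client, bucket" 2 $#; then
    return 1
  fi
  return 0
}
example_variadic_command() {
  # minimum-count check: logs and fails when fewer than two arguments were passed
  if ! check_param_count_gt "example_variadic_command" "client, bucket, plus optional args" 2 $#; then
    return 1
  fi
  return 0
}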

View File

@@ -97,12 +97,16 @@ check_bucket_vars() {
log 1 "BUCKET_TWO_NAME missing"
exit 1
fi
if [ -z "$RECREATE_BUCKETS" ]; then
log 1 "RECREATE_BUCKETS missing"
exit 1
fi
if [ "$RECREATE_BUCKETS" != "true" ] && [ "$RECREATE_BUCKETS" != "false" ]; then
log 1 "RECREATE_BUCKETS must be 'true' or 'false'"
exit 1
fi
if [ "$DELETE_BUCKETS_AFTER_TEST" != "true" ] && [ "$DELETE_BUCKETS_AFTER_TEST" != "false" ]; then
log 1 "DELETE_BUCKETS_AFTER_TEST must be 'true' or 'false'"
if [ "$RECREATE_BUCKETS" != "true" ] && [ "$RECREATE_BUCKETS" != "false" ]; then
log 1 "RECREATE_BUCKETS must be 'true' or 'false'"
exit 1
fi
if [ "$RECREATE_BUCKETS" == "false" ] && [ "$DELETE_BUCKETS_AFTER_TEST" == "true" ]; then
@@ -234,18 +238,6 @@ check_user_vars() {
log 1 "PASSWORD_TWO missing"
exit 1
fi
if [ "$AUTOGENERATE_USERS" != "true" ] && [ "$AUTOGENERATE_USERS" != "false" ]; then
log 1 "AUTOGENERATE_USERS must be 'true' or 'false'"
return 1
fi
if [ "$AUTOGENERATE_USERS" == "true" ] && [ "$USER_AUTOGENERATION_PREFIX" == "" ]; then
log 1 "USER_AUTOGENERATION_PREFIX is required if AUTOGENERATE_USERS is 'true'"
return 1
fi
if [ "$AUTOGENERATE_USERS" == "false" ] && [ "$CREATE_STATIC_USERS_IF_NONEXISTENT" != "true" ] && [ "$CREATE_STATIC_USERS_IF_NONEXISTENT" != "false" ]; then
log 1 "If AUTOGENERATE_USERS is 'false', 'CREATE_STATIC_USERS_IF_NONEXISTENT' must be true or false"
return 1
fi
if [[ -z "$IAM_TYPE" ]]; then
export IAM_TYPE="folder"

View File

@@ -1,27 +0,0 @@
FROM golang:latest
WORKDIR /app
COPY go.mod ./
RUN go mod download
COPY ./ ./
WORKDIR /app/cmd/versitygw
ENV CGO_ENABLED=0
RUN go build -o versitygw
FROM alpine:latest
# These arguments can be overridden when building the image
ARG IAM_DIR=/tmp/vgw
ARG SETUP_DIR=/tmp/vgw
RUN mkdir -p $IAM_DIR
RUN mkdir -p $SETUP_DIR
COPY --from=0 /app/cmd/versitygw/versitygw /app/versitygw
WORKDIR /app
ENTRYPOINT [ "/app/versitygw" ]

View File

@@ -1,2 +0,0 @@
address=/.dev/172.20.0.10
no-resolv

View File

@@ -1,62 +0,0 @@
services:
dnsmasq:
image: strm/dnsmasq
container_name: dns-resolver
restart: on-failure
volumes:
- './dnsmasq.conf:/etc/dnsmasq.conf'
ports:
- "53/udp"
cap_add:
- NET_ADMIN
healthcheck:
test: 'if [ -z "$(netstat -nltu |grep \:53)" ]; then exit 1;else exit 0;fi'
interval: 2s
timeout: 2s
retries: 20
networks:
devnet:
ipv4_address: 172.20.0.53
server:
build:
context: ../..
dockerfile: tests/host-style-tests/Dockerfile
depends_on:
dnsmasq:
condition: service_healthy
command: ["-a", "user", "-s", "pass", "--virtual-domain", "dev:7070", "--health", "/health", "--iam-dir", "/tmp/vgw", "posix", "/tmp/vgw"]
dns:
- 172.20.0.53
healthcheck:
test: ["CMD", "wget", "-qO-", "http://127.0.0.1:7070/health"]
interval: 2s
timeout: 2s
retries: 20
networks:
devnet:
ipv4_address: 172.20.0.10
ports:
- "7070:7070" # Optional: if you want to reach it from host
test:
build:
context: ../..
dockerfile: tests/host-style-tests/Dockerfile
depends_on:
server:
condition: service_healthy
dnsmasq:
condition: service_healthy
command: ["test", "-a", "user", "-s", "pass", "--host-style", "-e", "http://dev:7070", "full-flow"]
dns:
- 172.20.0.53
networks:
devnet:
networks:
devnet:
driver: bridge
ipam:
config:
- subnet: 172.20.0.0/16
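# Usage sketch (assuming this compose file lives in tests/host-style-tests/,
# per the dockerfile paths above): from that directory, something like
#   docker compose up --build --abort-on-container-exit --exit-code-from test
# builds the gateway image, starts dnsmasq and the server, runs the
# host-style test container, and propagates its exit code.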

View File

@@ -118,8 +118,6 @@ func TestDeleteBucketOwnershipControls(s *S3Conf) {
func TestPutBucketTagging(s *S3Conf) {
PutBucketTagging_non_existing_bucket(s)
PutBucketTagging_long_tags(s)
PutBucketTagging_duplicate_keys(s)
PutBucketTagging_tag_count_limit(s)
PutBucketTagging_success(s)
PutBucketTagging_success_status(s)
}
@@ -139,7 +137,7 @@ func TestDeleteBucketTagging(s *S3Conf) {
func TestPutObject(s *S3Conf) {
PutObject_non_existing_bucket(s)
PutObject_special_chars(s)
PutObject_tagging(s)
PutObject_invalid_long_tags(s)
PutObject_missing_object_lock_retention_config(s)
PutObject_with_object_lock(s)
PutObject_invalid_legal_hold(s)
@@ -168,7 +166,6 @@ func TestHeadObject(s *S3Conf) {
HeadObject_directory_object_noslash(s)
HeadObject_non_existing_dir_object(s)
HeadObject_invalid_parent_dir(s)
HeadObject_with_range(s)
//TODO: remove the condition after implementing checksums in azure
if !s.azureTests {
HeadObject_not_enabled_checksum_mode(s)
@@ -194,7 +191,8 @@ func TestGetObjectAttributes(s *S3Conf) {
func TestGetObject(s *S3Conf) {
GetObject_non_existing_key(s)
GetObject_directory_object_noslash(s)
GetObject_with_range(s)
GetObject_should_succeed_for_invalid_ranges(s)
GetObject_content_ranges(s)
GetObject_invalid_parent(s)
GetObject_large_object(s)
//TODO: remove the condition after implementing checksums in azure
@@ -203,6 +201,7 @@ func TestGetObject(s *S3Conf) {
}
GetObject_success(s)
GetObject_directory_success(s)
GetObject_by_range_success(s)
GetObject_by_range_resp_status(s)
GetObject_non_existing_dir_object(s)
}
@@ -273,7 +272,6 @@ func TestCopyObject(s *S3Conf) {
CopyObject_not_owned_source_bucket(s)
CopyObject_copy_to_itself(s)
CopyObject_copy_to_itself_invalid_directive(s)
CopyObject_should_replace_tagging(s)
CopyObject_should_copy_tagging(s)
CopyObject_invalid_tagging_directive(s)
CopyObject_to_itself_with_new_metadata(s)
@@ -299,8 +297,6 @@ func TestCopyObject(s *S3Conf) {
func TestPutObjectTagging(s *S3Conf) {
PutObjectTagging_non_existing_object(s)
PutObjectTagging_long_tags(s)
PutObjectTagging_duplicate_keys(s)
PutObjectTagging_tag_count_limit(s)
PutObjectTagging_success(s)
}
@@ -320,6 +316,7 @@ func TestDeleteObjectTagging(s *S3Conf) {
func TestCreateMultipartUpload(s *S3Conf) {
CreateMultipartUpload_non_existing_bucket(s)
CreateMultipartUpload_with_metadata(s)
CreateMultipartUpload_with_invalid_tagging(s)
CreateMultipartUpload_with_tagging(s)
CreateMultipartUpload_with_object_lock(s)
CreateMultipartUpload_with_object_lock_not_enabled(s)
@@ -386,7 +383,6 @@ func TestListParts(s *S3Conf) {
//TODO: remove the condition after implementing checksums in azure
if !s.azureTests {
ListParts_with_checksums(s)
ListParts_null_checksums(s)
}
ListParts_success(s)
}
@@ -659,99 +655,6 @@ func TestPosix(s *S3Conf) {
}
}
func TestScoutfs(s *S3Conf) {
TestAuthentication(s)
TestPresignedAuthentication(s)
TestCreateBucket(s)
TestHeadBucket(s)
TestListBuckets(s)
TestDeleteBucket(s)
TestPutBucketOwnershipControls(s)
TestGetBucketOwnershipControls(s)
TestDeleteBucketOwnershipControls(s)
TestPutBucketTagging(s)
TestGetBucketTagging(s)
TestDeleteBucketTagging(s)
TestPutObject(s)
TestHeadObject(s)
TestGetObjectAttributes(s)
TestGetObject(s)
TestListObjects(s)
TestListObjectsV2(s)
TestListObjectVersions_VD(s)
TestDeleteObject(s)
TestDeleteObjects(s)
TestCopyObject(s)
TestPutObjectTagging(s)
TestDeleteObjectTagging(s)
TestUploadPart(s)
TestUploadPartCopy(s)
TestListParts(s)
TestListMultipartUploads(s)
TestAbortMultipartUpload(s)
TestPutBucketAcl(s)
TestGetBucketAcl(s)
TestPutBucketPolicy(s)
TestGetBucketPolicy(s)
TestDeleteBucketPolicy(s)
TestPutObjectLockConfiguration(s)
TestGetObjectLockConfiguration(s)
TestPutObjectRetention(s)
TestGetObjectRetention(s)
TestPutObjectLegalHold(s)
TestGetObjectLegalHold(s)
TestWORMProtection(s)
TestAccessControl(s)
CreateMultipartUpload_non_existing_bucket(s)
CreateMultipartUpload_with_tagging(s)
CreateMultipartUpload_with_object_lock(s)
CreateMultipartUpload_with_object_lock_not_enabled(s)
CreateMultipartUpload_with_object_lock_invalid_retention(s)
CreateMultipartUpload_past_retain_until_date(s)
CreateMultipartUpload_invalid_legal_hold(s)
CreateMultipartUpload_invalid_object_lock_mode(s)
CreateMultipartUpload_invalid_checksum_algorithm(s)
CreateMultipartUpload_empty_checksum_algorithm_with_checksum_type(s)
CreateMultipartUpload_invalid_checksum_type(s)
CreateMultipartUpload_valid_checksum_algorithm(s)
CreateMultipartUpload_success(s)
CompletedMultipartUpload_non_existing_bucket(s)
CompleteMultipartUpload_incorrect_part_number(s)
CompleteMultipartUpload_invalid_part_number(s)
CompleteMultipartUpload_invalid_ETag(s)
CompleteMultipartUpload_small_upload_size(s)
CompleteMultipartUpload_empty_parts(s)
CompleteMultipartUpload_incorrect_parts_order(s)
CompleteMultipartUpload_mpu_object_size(s)
CompleteMultipartUpload_invalid_checksum_type(s)
CompleteMultipartUpload_invalid_checksum_part(s)
CompleteMultipartUpload_multiple_checksum_part(s)
CompleteMultipartUpload_incorrect_checksum_part(s)
CompleteMultipartUpload_different_checksum_part(s)
CompleteMultipartUpload_missing_part_checksum(s)
CompleteMultipartUpload_multiple_final_checksums(s)
CompleteMultipartUpload_invalid_final_checksums(s)
CompleteMultipartUpload_checksum_type_mismatch(s)
CompleteMultipartUpload_should_ignore_the_final_checksum(s)
CompleteMultipartUpload_success(s)
CompleteMultipartUpload_racey_success(s)
// posix/scoutfs specific tests
PutObject_overwrite_dir_obj(s)
PutObject_overwrite_file_obj(s)
PutObject_overwrite_file_obj_with_nested_obj(s)
PutObject_dir_obj_with_data(s)
CreateMultipartUpload_dir_obj(s)
PutObject_name_too_long(s)
HeadObject_name_too_long(s)
DeleteObject_name_too_long(s)
CopyObject_overwrite_same_dir_object(s)
CopyObject_overwrite_same_file_object(s)
DeleteObject_directory_not_empty(s)
}
func TestIAM(s *S3Conf) {
IAM_user_access_denied(s)
IAM_userplus_access_denied(s)
@@ -947,8 +850,6 @@ func GetIntTests() IntTests {
"DeleteBucketOwnershipControls_success": DeleteBucketOwnershipControls_success,
"PutBucketTagging_non_existing_bucket": PutBucketTagging_non_existing_bucket,
"PutBucketTagging_long_tags": PutBucketTagging_long_tags,
"PutBucketTagging_duplicate_keys": PutBucketTagging_duplicate_keys,
"PutBucketTagging_tag_count_limit": PutBucketTagging_tag_count_limit,
"PutBucketTagging_success": PutBucketTagging_success,
"PutBucketTagging_success_status": PutBucketTagging_success_status,
"GetBucketTagging_non_existing_bucket": GetBucketTagging_non_existing_bucket,
@@ -959,7 +860,7 @@ func GetIntTests() IntTests {
"DeleteBucketTagging_success": DeleteBucketTagging_success,
"PutObject_non_existing_bucket": PutObject_non_existing_bucket,
"PutObject_special_chars": PutObject_special_chars,
"PutObject_tagging": PutObject_tagging,
"PutObject_invalid_long_tags": PutObject_invalid_long_tags,
"PutObject_success": PutObject_success,
"PutObject_racey_success": PutObject_racey_success,
"HeadObject_non_existing_object": HeadObject_non_existing_object,
@@ -970,7 +871,6 @@ func GetIntTests() IntTests {
"HeadObject_non_existing_dir_object": HeadObject_non_existing_dir_object,
"HeadObject_name_too_long": HeadObject_name_too_long,
"HeadObject_invalid_parent_dir": HeadObject_invalid_parent_dir,
"HeadObject_with_range": HeadObject_with_range,
"HeadObject_not_enabled_checksum_mode": HeadObject_not_enabled_checksum_mode,
"HeadObject_checksums": HeadObject_checksums,
"HeadObject_success": HeadObject_success,
@@ -984,12 +884,14 @@ func GetIntTests() IntTests {
"GetObjectAttributes_checksums": GetObjectAttributes_checksums,
"GetObject_non_existing_key": GetObject_non_existing_key,
"GetObject_directory_object_noslash": GetObject_directory_object_noslash,
"GetObject_with_range": GetObject_with_range,
"GetObject_should_succeed_for_invalid_ranges": GetObject_should_succeed_for_invalid_ranges,
"GetObject_content_ranges": GetObject_content_ranges,
"GetObject_invalid_parent": GetObject_invalid_parent,
"GetObject_large_object": GetObject_large_object,
"GetObject_checksums": GetObject_checksums,
"GetObject_success": GetObject_success,
"GetObject_directory_success": GetObject_directory_success,
"GetObject_by_range_success": GetObject_by_range_success,
"GetObject_by_range_resp_status": GetObject_by_range_resp_status,
"GetObject_non_existing_dir_object": GetObject_non_existing_dir_object,
"ListObjects_non_existing_bucket": ListObjects_non_existing_bucket,
@@ -1035,7 +937,6 @@ func GetIntTests() IntTests {
"CopyObject_not_owned_source_bucket": CopyObject_not_owned_source_bucket,
"CopyObject_copy_to_itself": CopyObject_copy_to_itself,
"CopyObject_copy_to_itself_invalid_directive": CopyObject_copy_to_itself_invalid_directive,
"CopyObject_should_replace_tagging": CopyObject_should_replace_tagging,
"CopyObject_should_copy_tagging": CopyObject_should_copy_tagging,
"CopyObject_invalid_tagging_directive": CopyObject_invalid_tagging_directive,
"CopyObject_to_itself_with_new_metadata": CopyObject_to_itself_with_new_metadata,
@@ -1055,8 +956,6 @@ func GetIntTests() IntTests {
"CopyObject_success": CopyObject_success,
"PutObjectTagging_non_existing_object": PutObjectTagging_non_existing_object,
"PutObjectTagging_long_tags": PutObjectTagging_long_tags,
"PutObjectTagging_duplicate_keys": PutObjectTagging_duplicate_keys,
"PutObjectTagging_tag_count_limit": PutObjectTagging_tag_count_limit,
"PutObjectTagging_success": PutObjectTagging_success,
"GetObjectTagging_non_existing_object": GetObjectTagging_non_existing_object,
"GetObjectTagging_unset_tags": GetObjectTagging_unset_tags,
@@ -1067,6 +966,7 @@ func GetIntTests() IntTests {
"DeleteObjectTagging_success": DeleteObjectTagging_success,
"CreateMultipartUpload_non_existing_bucket": CreateMultipartUpload_non_existing_bucket,
"CreateMultipartUpload_with_metadata": CreateMultipartUpload_with_metadata,
"CreateMultipartUpload_with_invalid_tagging": CreateMultipartUpload_with_invalid_tagging,
"CreateMultipartUpload_with_tagging": CreateMultipartUpload_with_tagging,
"CreateMultipartUpload_with_object_lock": CreateMultipartUpload_with_object_lock,
"CreateMultipartUpload_with_object_lock_not_enabled": CreateMultipartUpload_with_object_lock_not_enabled,
@@ -1112,7 +1012,6 @@ func GetIntTests() IntTests {
"ListParts_default_max_parts": ListParts_default_max_parts,
"ListParts_truncated": ListParts_truncated,
"ListParts_with_checksums": ListParts_with_checksums,
"ListParts_null_checksums": ListParts_null_checksums,
"ListParts_success": ListParts_success,
"ListMultipartUploads_non_existing_bucket": ListMultipartUploads_non_existing_bucket,
"ListMultipartUploads_empty_result": ListMultipartUploads_empty_result,

View File

@@ -36,8 +36,8 @@ type S3Conf struct {
awsSecret string
awsRegion string
endpoint string
hostStyle bool
checksumDisable bool
pathStyle bool
PartSize int64
Concurrency int
debug bool
@@ -87,8 +87,8 @@ func WithEndpoint(e string) Option {
func WithDisableChecksum() Option {
return func(s *S3Conf) { s.checksumDisable = true }
}
func WithHostStyle() Option {
return func(s *S3Conf) { s.hostStyle = true }
func WithPathStyle() Option {
return func(s *S3Conf) { s.pathStyle = true }
}
func WithPartSize(p int64) Option {
return func(s *S3Conf) { s.PartSize = p }
@@ -122,12 +122,7 @@ func (c *S3Conf) getCreds() credentials.StaticCredentialsProvider {
}
func (c *S3Conf) GetClient() *s3.Client {
return s3.NewFromConfig(c.Config(), func(o *s3.Options) {
if c.hostStyle {
o.BaseEndpoint = &c.endpoint
o.UsePathStyle = false
}
})
return s3.NewFromConfig(c.Config())
}
func (c *S3Conf) Config() aws.Config {

File diff suppressed because it is too large

View File

@@ -35,7 +35,6 @@ import (
"net/http"
"net/url"
"os/exec"
"slices"
"sort"
"strings"
"time"
@@ -59,7 +58,7 @@ func getBucketName() string {
}
func setup(s *S3Conf, bucket string, opts ...setupOpt) error {
s3client := s.GetClient()
s3client := s3.NewFromConfig(s.Config())
cfg := new(setupCfg)
for _, opt := range opts {
@@ -95,7 +94,7 @@ func setup(s *S3Conf, bucket string, opts ...setupOpt) error {
}
func teardown(s *S3Conf, bucket string) error {
s3client := s.GetClient()
s3client := s3.NewFromConfig(s.Config())
deleteObject := func(bucket, key, versionId *string) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
@@ -200,7 +199,7 @@ func actionHandler(s *S3Conf, testName string, handler func(s3client *s3.Client,
failF("%v: failed to create a bucket: %v", testName, err)
return fmt.Errorf("%v: failed to create a bucket: %w", testName, err)
}
client := s.GetClient()
client := s3.NewFromConfig(s.Config())
handlerErr := handler(client, bucketName)
if handlerErr != nil {
failF("%v: %v", testName, handlerErr)
@@ -222,7 +221,7 @@ func actionHandler(s *S3Conf, testName string, handler func(s3client *s3.Client,
func actionHandlerNoSetup(s *S3Conf, testName string, handler func(s3client *s3.Client, bucket string) error, _ ...setupOpt) error {
runF(testName)
client := s.GetClient()
client := s3.NewFromConfig(s.Config())
handlerErr := handler(client, "")
if handlerErr != nil {
failF("%v: %v", testName, handlerErr)
@@ -263,7 +262,7 @@ func authHandler(s *S3Conf, cfg *authConfig, handler func(req *http.Request) err
func presignedAuthHandler(s *S3Conf, testName string, handler func(client *s3.PresignClient) error) error {
runF(testName)
clt := s3.NewPresignClient(s.GetClient())
clt := s3.NewPresignClient(s3.NewFromConfig(s.Config()))
err := handler(clt)
if err != nil {
@@ -421,7 +420,7 @@ func hasObjNames(objs []types.Object, names []string) bool {
}
for _, obj := range objs {
if slices.Contains(names, *obj.Key) {
if contains(names, *obj.Key) {
continue
}
return false
@@ -436,7 +435,7 @@ func hasPrefixName(prefixes []types.CommonPrefix, names []string) bool {
}
for _, prefix := range prefixes {
if slices.Contains(names, *prefix.Prefix) {
if contains(names, *prefix.Prefix) {
continue
}
return false
@@ -445,6 +444,15 @@ func hasPrefixName(prefixes []types.CommonPrefix, names []string) bool {
return true
}
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
type putObjectOutput struct {
csum [32]byte
data []byte
@@ -502,8 +510,18 @@ func createMp(s3client *s3.Client, bucket, key string, opts ...mpOpt) (*s3.Creat
return out, err
}
func isSameData(a, b []byte) bool {
return bytes.Equal(a, b)
func isEqual(a, b []byte) bool {
if len(a) != len(b) {
return false
}
for i, d := range a {
if d != b[i] {
return false
}
}
return true
}
func compareMultipartUploads(list1, list2 []types.MultipartUpload) bool {
@@ -989,7 +1007,7 @@ func getUserS3Client(usr user, cfg *S3Conf) *s3.Client {
config.awsID = usr.access
config.awsSecret = usr.secret
return config.GetClient()
return s3.NewFromConfig(config.Config())
}
// if true enables, otherwise disables
@@ -1115,7 +1133,7 @@ func createObjVersions(client *s3.Client, bucket, object string, count int, opts
}
versions := []types.ObjectVersion{}
for i := range count {
for i := 0; i < count; i++ {
rNumber, err := rand.Int(rand.Reader, big.NewInt(100000))
dataLength := rNumber.Int64()
if err != nil {

View File

@@ -24,27 +24,28 @@ bucket_name="$BUCKET_NAME"
key="$OBJECT_KEY"
# shellcheck disable=SC2153
copy_source="$COPY_SOURCE"
# for testing only
# shellcheck disable=SC2153
data_file="$DATA_FILE"
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
cr_data=("PUT" "/$bucket_name/$key" "")
cr_data+=("host:$host" "x-amz-content-sha256:UNSIGNED-PAYLOAD")
cr_data+=("x-amz-copy-source:$copy_source")
cr_data+=("x-amz-date:$current_date_time")
build_canonical_request "${cr_data[@]}"
canonical_request="PUT
/$bucket_name/$key
host:$host
x-amz-content-sha256:UNSIGNED-PAYLOAD
x-amz-copy-source:$copy_source
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-copy-source;x-amz-date
UNSIGNED-PAYLOAD"
# shellcheck disable=SC2119
create_canonical_hash_sts_and_signature
curl_command+=(curl -ks -w "\"%{http_code}\"" -X PUT "$AWS_ENDPOINT_URL/$bucket_name/$key")
curl_command+=(-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=$param_list,Signature=$signature\"")
curl_command+=("${header_fields[@]}")
curl_command+=(-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-copy-source;x-amz-date,Signature=$signature\"")
curl_command+=(-H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
-H "\"x-amz-copy-source: $copy_source\""
-H "\"x-amz-date: $current_date_time\"")
curl_command+=(-o "$OUTPUT_FILE")
if [ "$data_file" != "" ]; then
curl_command+=(-T "$data_file")
fi
# shellcheck disable=SC2154
eval "${curl_command[*]}" 2>&1
eval "${curl_command[*]}" 2>&1

View File

@@ -1,42 +0,0 @@
#!/usr/bin/env bash
# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
source ./tests/rest_scripts/rest.sh
# Fields
# shellcheck disable=SC2153
bucket_name="$BUCKET_NAME"
# shellcheck disable=SC2154
key=$(echo -n "$OBJECT_KEY" | jq -sRr 'split("/") | map(@uri) | join("/")')
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
#x-amz-object-attributes:ETag
canonical_request_data+=("DELETE" "/$bucket_name/$key" "" "host:$host")
canonical_request_data+=("x-amz-content-sha256:UNSIGNED-PAYLOAD" "x-amz-date:$current_date_time")
build_canonical_request "${canonical_request_data[@]}"
# shellcheck disable=SC2119
create_canonical_hash_sts_and_signature
curl_command+=(curl -ks -w "\"%{http_code}\"" -X DELETE "$AWS_ENDPOINT_URL/$bucket_name/$key"
-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=$param_list,Signature=$signature\"")
curl_command+=("${header_fields[@]}")
curl_command+=(-o "$OUTPUT_FILE")
# shellcheck disable=SC2154
eval "${curl_command[*]}" 2>&1

View File

@@ -21,7 +21,7 @@ source ./tests/rest_scripts/rest.sh
# shellcheck disable=SC2153
bucket_name="$BUCKET_NAME"
# shellcheck disable=SC2153
key="$(echo -n "$OBJECT_KEY" | jq -sRr @uri)"
key="$OBJECT_KEY"
# shellcheck disable=SC2153
status="$STATUS"
# shellcheck disable=SC2153

View File

@@ -101,22 +101,6 @@ log_rest() {
fi
}
add_cr_parameters_and_header_fields() {
canonical_request+="$line
"
if [[ "$line" == *":"* ]]; then
local key="${line%%:*}"
local value="${line#*:}"
if [ "$key" == "x-amz-content-sha256" ]; then
payload="$value"
fi
if [[ "$value" != "" ]]; then
param_list=$(add_parameter "$param_list" "$key" ";")
header_fields+=(-H "\"$key: $value\"")
fi
fi
}
build_canonical_request() {
if [ $# -lt 1 ]; then
log_rest 2 "'build_canonical_request' requires parameters"
@@ -127,7 +111,19 @@ build_canonical_request() {
local payload=""
header_fields=()
for line in "$@"; do
add_cr_parameters_and_header_fields
canonical_request+="$line
"
if [[ "$line" == *":"* ]]; then
local key="${line%%:*}"
local value="${line#*:}"
if [ "$key" == "x-amz-content-sha256" ]; then
payload="$value"
fi
if [[ "$value" != "" ]]; then
param_list=$(add_parameter "$param_list" "$key" ";")
header_fields+=(-H "\"$key: $value\"")
fi
fi
done
canonical_request+="
$param_list
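
# Usage sketch (bucket/key values are illustrative; host and current_date_time
# are set earlier by the callers, as in the rest scripts above):
#   cr_data=("GET" "/my-bucket/my-key" "")
#   cr_data+=("host:$host" "x-amz-content-sha256:UNSIGNED-PAYLOAD" "x-amz-date:$current_date_time")
#   build_canonical_request "${cr_data[@]}"   # fills canonical_request, param_list, header_fields
#   create_canonical_hash_sts_and_signature   # derives the SigV4 signature from them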

View File

@@ -27,45 +27,31 @@ upload_id="$UPLOAD_ID"
# shellcheck disable=SC2153
data=$DATA_FILE
if [ "$data" != "" ]; then
payload_hash="$(sha256sum "$data" | awk '{print $1}')"
else
payload_hash="$(echo -n "" | sha256sum | awk '{print $1}')"
fi
payload_hash="$(sha256sum "$data" | awk '{print $1}')"
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
# shellcheck disable=SC2034
header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
# shellcheck disable=SC2154
cr_data=("PUT" "/$bucket_name/$key")
query_params=""
if [ "$part_number" != "" ]; then
query_params=$(add_parameter "$query_params" "partNumber=$part_number")
fi
if [ "$upload_id" != "" ]; then
query_params=$(add_parameter "$query_params" "uploadId=$upload_id")
fi
cr_data+=("$query_params")
cr_data+=("host:$host" "x-amz-content-sha256:$payload_hash" "x-amz-date:$current_date_time")
build_canonical_request "${cr_data[@]}"
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
# shellcheck disable=SC2034
header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
# shellcheck disable=SC2154
canonical_request="PUT
/$bucket_name/$key
partNumber=$part_number&uploadId=$upload_id
host:$aws_endpoint_url_address
x-amz-content-sha256:$payload_hash
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-date
$payload_hash"
# shellcheck disable=SC2119
create_canonical_hash_sts_and_signature
url="'$AWS_ENDPOINT_URL/$bucket_name/$key"
if [ "$query_params" != "" ]; then
url+="?$query_params"
fi
url+="'"
curl_command+=(curl -isk -w "\"%{http_code}\"" -X PUT "$url"
-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=$param_list,Signature=$signature\"")
if [ "$data" == "" ]; then
curl_command+=(-H "\"Content-Length: 0\"")
fi
curl_command+=("${header_fields[@]}")
curl_command+=(-o "$OUTPUT_FILE")
if [ "$data" != "" ]; then
curl_command+=(-T "$data")
fi
curl_command+=(curl -isk -w "\"%{http_code}\"" "\"$AWS_ENDPOINT_URL/$bucket_name/$key?partNumber=$part_number&uploadId=$upload_id\""
-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
-H "\"x-amz-content-sha256: $payload_hash\""
-H "\"x-amz-date: $current_date_time\""
-o "\"$OUTPUT_FILE\""
-T "\"$data\"")
# shellcheck disable=SC2154
eval "${curl_command[*]}" 2>&1

View File

@@ -28,34 +28,29 @@ upload_id="$UPLOAD_ID"
part_location=$PART_LOCATION
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
# shellcheck disable=SC2034
header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
# shellcheck disable=SC2154
cr_data=("PUT" "/$bucket_name/$key")
query_params=""
if [ "$part_number" != "" ]; then
query_params=$(add_parameter "$query_params" "partNumber=$part_number")
fi
if [ "$upload_id" != "" ]; then
query_params=$(add_parameter "$query_params" "uploadId=$upload_id")
fi
cr_data+=("$query_params")
cr_data+=("host:$host" "x-amz-content-sha256:UNSIGNED-PAYLOAD")
cr_data+=("x-amz-copy-source:$part_location")
cr_data+=("x-amz-date:$current_date_time")
build_canonical_request "${cr_data[@]}"
canonical_request="PUT
/$bucket_name/$key
partNumber=$part_number&uploadId=$upload_id
host:$aws_endpoint_url_address
x-amz-content-sha256:UNSIGNED-PAYLOAD
x-amz-copy-source:$part_location
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-copy-source;x-amz-date
UNSIGNED-PAYLOAD"
# shellcheck disable=SC2119
create_canonical_hash_sts_and_signature
url="'$AWS_ENDPOINT_URL/$bucket_name/$key"
if [ "$query_params" != "" ]; then
url+="?$query_params"
fi
url+="'"
curl_command+=(curl -ks -w "\"%{http_code}\"" -X PUT "$url"
-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=$param_list,Signature=$signature\"")
curl_command+=("${header_fields[@]}")
curl_command+=(-o "$OUTPUT_FILE")
curl_command+=(curl -ks -w "\"%{http_code}\"" -X PUT "\"$AWS_ENDPOINT_URL/$bucket_name/$key?partNumber=$part_number&uploadId=$upload_id\""
-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-copy-source;x-amz-date,Signature=$signature\""
-H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
-H "\"x-amz-copy-source: $part_location\""
-H "\"x-amz-date: $current_date_time\""
-o "\"$OUTPUT_FILE\"")
# shellcheck disable=SC2154
eval "${curl_command[*]}" 2>&1

View File

@@ -20,41 +20,6 @@ source ./tests/setup_mc.sh
source ./tests/util/util_object.sh
source ./tests/versity.sh
check_secrets_line() {
if [[ $secrets_line =~ ^(USER_ID_(ADMIN|USERPLUS|USER)_[0-9])= ]]; then
match=${BASH_REMATCH[1]}
role=$(echo -n "${BASH_REMATCH[2]}" | tr '[:upper:]' '[:lower:]')
if [ -z "${!match}" ]; then
log 2 "$match secrets parameter missing"
return 1
fi
username_env="${match/USER_ID/USERNAME}"
password_env="${match/USER_ID/PASSWORD}"
if [ -z "${!username_env}" ]; then
log 2 "$username_env secrets parameter missing"
return 1
fi
if [ -z "${!password_env}" ]; then
log 2 "$password_env secrets parameter missing"
return 1
fi
if ! user_exists "${!username_env}" && ! create_user_versitygw "${!username_env}" "${!password_env}" "$role"; then
log 2 "error creating user"
return 1
fi
fi
return 0
}
static_user_versitygw_setup() {
while read -r secrets_line || [ -n "$secrets_line" ]; do
if ! check_secrets_line; then
return 1
fi
done < "$SECRETS_FILE"
}
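# Example SECRETS_FILE entries matched by check_secrets_line (placeholder values):
#   USER_ID_ADMIN_1=admin-user-id
#   USERNAME_ADMIN_1=admin-user-name
#   PASSWORD_ADMIN_1=admin-user-password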
# bats setup function
setup() {
base_setup
@@ -67,13 +32,6 @@ setup() {
export TEST_LOG_FILE
fi
if [ "$DIRECT" != "true" ] && [ "$CREATE_STATIC_USERS_IF_NONEXISTENT" == "true" ]; then
if ! static_user_versitygw_setup; then
log 2 "error setting up static versitygw users"
return 1
fi
fi
log 4 "Running test $BATS_TEST_NAME"
if [[ $LOG_LEVEL -ge 5 ]] || [[ -n "$TIME_LOG" ]]; then
start_time=$(date +%s)
@@ -106,7 +64,32 @@ delete_temp_log_if_exists() {
return 0
}
post_versity_cleanup() {
# bats teardown function
teardown() {
# shellcheck disable=SC2154
log 4 "********** BEGIN TEARDOWN **********"
if [ "$DELETE_BUCKETS_AFTER_TEST" != "false" ]; then
log 5 "deleting or clearing buckets"
if ! bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"; then
log 3 "error deleting bucket $BUCKET_ONE_NAME or contents"
fi
if ! bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_TWO_NAME"; then
log 3 "error deleting bucket $BUCKET_TWO_NAME or contents"
fi
fi
if user_exists "$USERNAME_ONE" && ! delete_user "$USERNAME_ONE"; then
log 3 "error deleting user $USERNAME_ONE"
fi
if user_exists "$USERNAME_TWO" && ! delete_user "$USERNAME_TWO"; then
log 3 "error deleting user $USERNAME_TWO"
fi
if [ "$REMOVE_TEST_FILE_FOLDER" == "true" ]; then
log 6 "removing test file folder"
if ! error=$(rm -rf "${TEST_FILE_FOLDER:?}" 2>&1); then
log 3 "unable to remove test file folder: $error"
fi
fi
stop_versity
if [[ $LOG_LEVEL -ge 5 ]] || [[ -n "$TIME_LOG" ]]; then
end_time=$(date +%s)
total_time=$((end_time - start_time))
@@ -134,42 +117,10 @@ post_versity_cleanup() {
fi
if [ -e "$TEST_LOG_FILE.tmp" ]; then
if ! error=$(cat "$TEST_LOG_FILE.tmp" >> "$TEST_LOG_FILE" 2>&1); then
log 3 "error appending temp log to main log: $error"
log 2 "error appending temp log to main log: $error"
fi
if ! delete_temp_log_if_exists; then
log 3 "error deleting temp log"
log 2 "error deleting temp log"
fi
fi
}
# bats teardown function
teardown() {
# shellcheck disable=SC2154
log 4 "********** BEGIN TEARDOWN **********"
if [ "$DELETE_BUCKETS_AFTER_TEST" != "false" ]; then
log 5 "deleting or clearing buckets"
if ! bucket_cleanup_if_bucket_exists "$BUCKET_ONE_NAME"; then
log 3 "error deleting bucket $BUCKET_ONE_NAME or contents"
fi
if ! bucket_cleanup_if_bucket_exists "$BUCKET_TWO_NAME"; then
log 3 "error deleting bucket $BUCKET_TWO_NAME or contents"
fi
fi
if user_exists "$USERNAME_ONE" && ! delete_user "$USERNAME_ONE"; then
log 3 "error deleting user $USERNAME_ONE"
fi
if user_exists "$USERNAME_TWO" && ! delete_user "$USERNAME_TWO"; then
log 3 "error deleting user $USERNAME_TWO"
fi
if [ "$AUTOGENERATE_USERS" == "true" ] && ! delete_autogenerated_users; then
log 3 "error deleting autocreated users"
fi
if [ "$REMOVE_TEST_FILE_FOLDER" == "true" ]; then
log 6 "removing test file folder"
if ! error=$(rm -rf "${TEST_FILE_FOLDER:?}" 2>&1); then
log 3 "unable to remove test file folder: $error"
fi
fi
stop_versity
post_versity_cleanup
}

View File

@@ -14,36 +14,36 @@
# specific language governing permissions and limitations
# under the License.
source ./tests/drivers/drivers.sh
source ./tests/env.sh
source ./tests/util/util_object.sh
source ./tests/commands/create_bucket.sh
create_bucket_if_not_exists() {
if ! check_param_count "create_bucket_if_not_exists" "bucket name" 1 $#; then
if [[ $# -ne 2 ]]; then
log 2 "create_bucket_if_not_exists command missing command type, name"
return 1
fi
bucket_exists "$1" || local exists_result=$?
bucket_exists "$1" "$2" || local exists_result=$?
if [[ $exists_result -eq 2 ]]; then
log 2 "error checking if bucket exists"
return 1
fi
if [[ $exists_result -eq 0 ]]; then
echo "bucket '$1' already exists, skipping"
echo "bucket '$2' already exists, skipping"
return 0
fi
if ! create_bucket_object_lock_enabled "$1"; then
if ! create_bucket_object_lock_enabled "$2"; then
log 2 "error creating bucket"
return 1
fi
echo "bucket '$1' successfully created"
echo "bucket '$2' successfully created"
return 0
}
base_setup
if ! create_bucket_if_not_exists "$BUCKET_ONE_NAME"; then
if ! create_bucket_if_not_exists "s3api" "$BUCKET_ONE_NAME"; then
log 2 "error creating static bucket one"
elif ! create_bucket_if_not_exists "$BUCKET_TWO_NAME"; then
elif ! create_bucket_if_not_exists "s3api" "$BUCKET_TWO_NAME"; then
log 2 "error creating static bucket two"
fi
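
Editor's note: create_bucket_if_not_exists above relies on a three-way return code from bucket_exists. The contract isn't shown in this diff, but inferred from the branches it would be: 0 = bucket exists, 1 = bucket does not exist, 2 = error while checking. A sketch under that assumption:

# assumed contract, inferred from the exists_result branches above
bucket_exists "s3api" "my-bucket"
case $? in
  0) echo "bucket exists" ;;
  1) echo "bucket does not exist" ;;
  2) echo "error checking bucket" >&2 ;;
esac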

View File

@@ -37,13 +37,11 @@ source ./tests/commands/put_bucket_tagging.sh
source ./tests/commands/put_object_tagging.sh
source ./tests/commands/put_object.sh
source ./tests/commands/put_public_access_block.sh
source ./tests/drivers/drivers.sh
# param: command type
# fail on test failure
test_common_multipart_upload() {
run check_param_count "test_common_multipart_upload" "client type" 1 "$#"
assert_success
assert [ $# -eq 1 ]
bucket_file="largefile"
run setup_bucket_and_large_file "$BUCKET_ONE_NAME" "$bucket_file"
@@ -64,6 +62,9 @@ test_common_multipart_upload() {
run download_and_compare_file "$1" "$TEST_FILE_FOLDER/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER/$bucket_file-copy"
assert_success
bucket_cleanup "$1" "$BUCKET_ONE_NAME"
delete_test_files $bucket_file
}
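
Editor's note: several hunks in this file swap inline assert [ $# -eq N ] guards for calls to a check_param_count helper (or back). The helper itself is not part of this diff; judging only from its call sites, a minimal version might look like the following sketch (the log message wording is assumed):

# hypothetical sketch consistent with the call sites, not the repo's actual helper
# usage: check_param_count <function name> <param description> <expected> <actual>
check_param_count() {
  if [ "$3" -ne "$4" ]; then
    log 2 "$1 requires $3 parameter(s) ($2), received $4"
    return 1
  fi
  return 0
}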
# common test for creating, deleting buckets
@@ -74,31 +75,28 @@ test_common_create_delete_bucket() {
return
fi
run check_param_count "test_common_create_delete_bucket" "client type" 1 "$#"
assert [ $# -eq 1 ]
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success
run bucket_cleanup_if_bucket_exists "$BUCKET_ONE_NAME"
run bucket_exists "$1" "$BUCKET_ONE_NAME"
assert_success
run create_bucket "$1" "$BUCKET_ONE_NAME"
assert_success
run bucket_exists "$BUCKET_ONE_NAME"
assert_success
run delete_bucket "$1" "$BUCKET_ONE_NAME"
run bucket_cleanup "$1" "$BUCKET_ONE_NAME"
assert_success
}
test_common_copy_object() {
run check_param_count "test_common_copy_object" "client type" 1 "$#"
assert_success
if [[ $# -ne 1 ]]; then
fail "copy object test requires command type"
fi
local object_name="test-object"
run create_test_file "$object_name"
assert_success
run setup_buckets "$BUCKET_ONE_NAME" "$BUCKET_TWO_NAME"
run setup_buckets "$1" "$BUCKET_ONE_NAME" "$BUCKET_TWO_NAME"
assert_success
if [[ $1 == 's3' ]]; then
@@ -122,8 +120,7 @@ test_common_copy_object() {
# param: client
# fail on error
test_common_put_object_with_data() {
run check_param_count "test_common_put_object_with_data" "client type" 1 "$#"
assert_success
assert [ $# -eq 1 ]
local object_name="test-object"
run create_test_file "$object_name"
@@ -135,8 +132,7 @@ test_common_put_object_with_data() {
# param: client
# fail on error
test_common_put_object_no_data() {
run check_param_count "test_common_put_object_no_data" "client type" 1 "$#"
assert_success
assert [ $# -eq 1 ]
local object_name="test-object"
run create_test_file "$object_name" 0
@@ -148,10 +144,9 @@ test_common_put_object_no_data() {
# params: client, filename
# fail on test failure
test_common_put_object() {
run check_param_count "test_common_put_object" "client type, file" 2 "$#"
assert_success
assert [ $# -eq 2 ]
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success
# s3 erases file locally, so we need to copy it first
@@ -179,8 +174,9 @@ test_common_put_object() {
}
test_common_put_get_object() {
run check_param_count "test_common_put_get_object" "client type" 1 "$#"
assert_success
if [[ $# -ne 1 ]]; then
fail "put, get object test requires client"
fi
local object_name="test-object"
run setup_bucket_and_file "$BUCKET_ONE_NAME" "$object_name"
@@ -204,10 +200,11 @@ test_common_put_get_object() {
# param: "aws" or "s3cmd"
# pass if buckets are properly listed, fail if not
test_common_list_buckets() {
run check_param_count "test_common_list_buckets" "client type" 1 "$#"
assert_success
if [[ $# -ne 1 ]]; then
fail "List buckets test requires one argument"
fi
run setup_buckets "$BUCKET_ONE_NAME" "$BUCKET_TWO_NAME"
run setup_buckets "$1" "$BUCKET_ONE_NAME" "$BUCKET_TWO_NAME"
assert_success
run list_and_check_buckets "$1" "$BUCKET_ONE_NAME" "$BUCKET_TWO_NAME"
@@ -215,8 +212,10 @@ test_common_list_buckets() {
}
test_common_list_objects() {
run check_param_count "test_common_list_objects" "client type" 1 "$#"
assert_success
if [[ $# -ne 1 ]]; then
log 2 "common test function for listing objects requires command type"
return 1
fi
object_one="test-file-one"
object_two="test-file-two"
@@ -234,13 +233,12 @@ test_common_list_objects() {
}
test_common_set_get_delete_bucket_tags() {
run check_param_count "test_common_set_get_delete_bucket_tags" "client type" 1 "$#"
assert_success
assert [ $# -eq 1 ]
local key="test_key"
local value="test_value"
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success
run verify_no_bucket_tags "$1" "$BUCKET_ONE_NAME"
@@ -260,8 +258,7 @@ test_common_set_get_delete_bucket_tags() {
}
test_common_set_get_object_tags() {
run check_param_count "test_common_set_get_object_tags" "client type" 1 "$#"
assert_success
assert [ $# -eq 1 ]
local bucket_file="bucket-file"
local key="test_key"
@@ -284,8 +281,10 @@ test_common_set_get_object_tags() {
}
test_common_presigned_url_utf8_chars() {
run check_param_count "test_common_presigned_url_utf8_chars" "client type" 1 "$#"
assert_success
if [[ $# -ne 1 ]]; then
log 2 "presigned url command missing command type"
return 1
fi
local bucket_file="my-$%^&*;"
local bucket_file_copy="bucket-file-copy"
@@ -295,7 +294,7 @@ test_common_presigned_url_utf8_chars() {
run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1
assert_success
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success
run put_object "$1" "$TEST_FILE_FOLDER"/"$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file"
@@ -309,13 +308,12 @@ test_common_presigned_url_utf8_chars() {
}
test_common_list_objects_file_count() {
run check_param_count "test_common_list_objects_file_count" "client type" 1 "$#"
assert_success
assert [ $# -eq 1 ]
run create_test_file_count 1001
assert_success
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success
run put_object_multiple "$1" "$TEST_FILE_FOLDER/file_*" "$BUCKET_ONE_NAME"
@@ -326,8 +324,7 @@ test_common_list_objects_file_count() {
}
test_common_delete_object_tagging() {
run check_param_count "test_common_delete_object_tagging" "client type" 1 "$#"
assert_success
[[ $# -eq 1 ]] || fail "test common delete object tagging requires command type"
bucket_file="bucket_file"
tag_key="key"
@@ -353,10 +350,9 @@ test_common_delete_object_tagging() {
}
test_common_get_bucket_location() {
run check_param_count "test_common_get_bucket_location" "client type" 1 "$#"
assert_success
assert [ $# -eq 1 ]
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success
run get_check_bucket_location "$1" "$BUCKET_ONE_NAME"
@@ -364,8 +360,7 @@ test_common_get_bucket_location() {
}
test_common_get_put_delete_bucket_policy() {
run check_param_count "test_common_get_put_delete_bucket_policy" "client type" 1 "$#"
assert_success
assert [ $# -eq 1 ]
policy_file="policy_file"
@@ -381,7 +376,7 @@ test_common_get_put_delete_bucket_policy() {
assert_success
log 5 "POLICY: $(cat "$TEST_FILE_FOLDER/$policy_file")"
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success
run check_for_empty_policy "$1" "$BUCKET_ONE_NAME"
@@ -401,15 +396,12 @@ test_common_get_put_delete_bucket_policy() {
}
test_common_ls_directory_object() {
run check_param_count "test_common_ls_directory_object" "client type" 1 "$#"
assert_success
test_file="a"
run create_test_file "$test_file" 0
assert_success
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "$1" "$BUCKET_ONE_NAME"
assert_success
if [ "$1" == 's3cmd' ]; then

View File

@@ -14,17 +14,12 @@
# specific language governing permissions and limitations
# under the License.
if [ "$SKIP_ACL_TESTING" == "true" ]; then
skip "Skipping ACL tests"
exit 0
fi
test_put_bucket_acl_s3cmd() {
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/963"
fi
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "s3cmd" "$BUCKET_ONE_NAME"
assert_success
username=$USERNAME_ONE
@@ -48,19 +43,8 @@ test_put_bucket_acl_s3cmd() {
assert_success
}
get_grantee_type_and_id() {
if [[ $DIRECT == "true" ]]; then
grantee_type="Group"
grantee_id="http://acs.amazonaws.com/groups/global/AllUsers"
else
grantee_type="CanonicalUser"
grantee_id="$username"
fi
}
test_common_put_bucket_acl() {
run check_param_count "test_common_put_bucket_acl" "client type" 1 "$#"
assert_success
assert [ $# -eq 1 ]
run setup_bucket_and_user "$BUCKET_ONE_NAME" "$USERNAME_ONE" "$PASSWORD_ONE" "user"
assert_success
@@ -77,7 +61,13 @@ test_common_put_bucket_acl() {
run create_test_files "$acl_file"
assert_success
get_grantee_type_and_id
if [[ $DIRECT == "true" ]]; then
grantee_type="Group"
grantee_id="http://acs.amazonaws.com/groups/global/AllUsers"
else
grantee_type="CanonicalUser"
grantee_id="$username"
fi
run setup_acl_json "$TEST_FILE_FOLDER/$acl_file" "$grantee_type" "$grantee_id" "READ" "$AWS_ACCESS_KEY_ID"
assert_success
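
Editor's note: setup_acl_json itself is not shown in this diff; given its arguments (file, grantee type, grantee id, permission, owner), the document it writes plausibly follows the aws s3api access-control-policy shape. A hedged sketch for the DIRECT=true branch (field values illustrative):

# hypothetical ACL payload; Group grantees use URI, CanonicalUser grantees use ID
cat > acl.json <<'EOF'
{
  "Owner": { "ID": "OWNER_CANONICAL_ID" },
  "Grants": [
    {
      "Grantee": {
        "Type": "Group",
        "URI": "http://acs.amazonaws.com/groups/global/AllUsers"
      },
      "Permission": "READ"
    }
  ]
}
EOF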

View File

@@ -48,7 +48,7 @@ export RUN_MC=true
if [[ $RECREATE_BUCKETS == "false" ]]; then
skip "will not test bucket deletion in static bucket test config"
fi
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "mc" "$BUCKET_ONE_NAME"
assert_success
run delete_bucket "mc" "$BUCKET_ONE_NAME"
@@ -125,7 +125,7 @@ export RUN_MC=true
}
@test "test_get_bucket_info_mc" {
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "mc" "$BUCKET_ONE_NAME"
assert_success
run bucket_info_contains_bucket "mc" "$BUCKET_ONE_NAME"
@@ -133,7 +133,7 @@ export RUN_MC=true
}
@test "test_get_bucket_info_doesnt_exist_mc" {
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "mc" "$BUCKET_ONE_NAME"
assert_success
run head_bucket "mc" "$BUCKET_ONE_NAME"a

View File

@@ -29,7 +29,6 @@ source ./tests/commands/put_bucket_versioning.sh
source ./tests/commands/put_object.sh
source ./tests/commands/put_object_retention.sh
source ./tests/commands/put_object_tagging.sh
source ./tests/drivers/copy_object/copy_object_rest.sh
source ./tests/logger.sh
source ./tests/setup.sh
source ./tests/util/util_acl.sh
@@ -74,7 +73,10 @@ test_file="test_file"
run put_object "rest" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
assert_success
run download_and_compare_file "rest" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy"
run get_object "rest" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy"
assert_success
run compare_files "$TEST_FILE_FOLDER/$test_file" "$TEST_FILE_FOLDER/$test_file-copy"
assert_success
run delete_object "rest" "$BUCKET_ONE_NAME" "$test_file"
@@ -111,10 +113,10 @@ test_file="test_file"
test_key="TestKey"
test_value="TestValue"
run bucket_cleanup_if_bucket_exists "$BUCKET_ONE_NAME"
run bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"
assert_success
# in static bucket config, bucket will still exist
if ! bucket_exists "$BUCKET_ONE_NAME"; then
if ! bucket_exists "rest" "$BUCKET_ONE_NAME"; then
run create_bucket_object_lock_enabled "$BUCKET_ONE_NAME"
assert_success
fi
@@ -146,7 +148,7 @@ test_file="test_file"
}
@test "REST - multipart upload create then abort" {
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success
run create_abort_multipart_upload_rest "$BUCKET_ONE_NAME" "$test_file"
@@ -164,13 +166,16 @@ test_file="test_file"
"$TEST_FILE_FOLDER/$test_file-0" "$TEST_FILE_FOLDER/$test_file-1" "$TEST_FILE_FOLDER/$test_file-2" "$TEST_FILE_FOLDER/$test_file-3"
assert_success
run download_and_compare_file "rest" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy"
run get_object "rest" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy"
assert_success
run compare_files "$TEST_FILE_FOLDER/$test_file" "$TEST_FILE_FOLDER/$test_file-copy"
assert_success
}
@test "REST - get object attributes" {
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/1001"
skip "https://github.com/versity/versitygw/issues/1000"
fi
run setup_bucket_and_large_file "$BUCKET_ONE_NAME" "$test_file"
assert_success
@@ -212,7 +217,7 @@ test_file="test_file"
skip "https://github.com/versity/versitygw/issues/959"
fi
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success
run get_and_check_no_policy_error "$BUCKET_ONE_NAME"
@@ -220,13 +225,13 @@ test_file="test_file"
}
@test "REST - put policy" {
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success
run setup_user_versitygw_or_direct "$USERNAME_ONE" "$PASSWORD_ONE" "user" "$BUCKET_ONE_NAME"
assert_success
log 5 "username: ${lines[1]}"
log 5 "password: ${lines[2]}"
log 5 "username: ${lines[0]}"
log 5 "password: ${lines[1]}"
sleep 5
@@ -269,7 +274,7 @@ test_file="test_file"
skip "https://github.com/versity/versitygw/issues/999"
fi
test_file_two="test_file_2"
run setup_bucket_and_files "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_two"
run setup_bucket "s3api" "$BUCKET_ONE_NAME" "$test_file" "$test_file_two"
assert_success
run put_object "rest" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
@@ -293,7 +298,7 @@ test_file="test_file"
assert_success
}
@test "REST - upload part copy (UploadPartCopy)" {
@test "REST - upload part copy" {
run setup_bucket_and_large_file "$BUCKET_ONE_NAME" "$test_file"
assert_success
@@ -342,7 +347,7 @@ test_file="test_file"
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/1040"
fi
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success
run delete_objects_no_content_md5_header "$BUCKET_ONE_NAME"
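
Editor's note: DeleteObjects is notable as the S3 operation that requires a Content-MD5 header over its XML payload, which is what the negative test above exercises. Rough shape of the offending request (signing omitted; endpoint and bucket names illustrative):

# POST ?delete without the required Content-MD5 header
curl -i -X POST \
  -H "Content-Type: application/xml" \
  --data '<Delete><Object><Key>test_file</Key></Object></Delete>' \
  "https://s3.example.com/my-bucket?delete"
# AWS rejects this with a 400 error noting the missing Content-MD5 header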
@@ -377,6 +382,9 @@ test_file="test_file"
}
@test "REST - put object w/STREAMING-AWS4-HMAC-SHA256-PAYLOAD without content length" {
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/1043"
fi
run setup_bucket_and_file "$BUCKET_ONE_NAME" "$test_file"
assert_success
@@ -386,13 +394,13 @@ test_file="test_file"
@test "REST - HeadObject does not return 405 with versioning, after file deleted" {
if [ "$RECREATE_BUCKETS" == "false" ] || [[ ( -z "$VERSIONING_DIR" ) && ( "$DIRECT" != "true" ) ]]; then
skip "test isn't valid for this configuration"
skip
fi
run bucket_cleanup_if_bucket_exists "$BUCKET_ONE_NAME"
run bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"
assert_success
# in static bucket config, bucket will still exist
if ! bucket_exists "$BUCKET_ONE_NAME"; then
if ! bucket_exists "rest" "$BUCKET_ONE_NAME"; then
run create_bucket_object_lock_enabled "$BUCKET_ONE_NAME"
assert_success
fi
@@ -412,13 +420,13 @@ test_file="test_file"
@test "REST - HeadObject returns 405 when querying DeleteMarker" {
if [ "$RECREATE_BUCKETS" == "false" ] || [[ ( -z "$VERSIONING_DIR" ) && ( "$DIRECT" != "true" ) ]]; then
skip "test isn't valid for this configuration"
skip
fi
run bucket_cleanup_if_bucket_exists "$BUCKET_ONE_NAME"
run bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"
assert_success
# in static bucket config, bucket will still exist
if ! bucket_exists "$BUCKET_ONE_NAME"; then
if ! bucket_exists "rest" "$BUCKET_ONE_NAME"; then
run create_bucket_object_lock_enabled "$BUCKET_ONE_NAME"
assert_success
fi
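
Editor's note: the S3 semantics these two delete-marker tests pin down: a plain HeadObject after the delete returns 404 with an x-amz-delete-marker: true header, while a HeadObject that names the delete marker's own versionId returns 405 Method Not Allowed. Roughly (signing omitted; endpoint illustrative):

# latest version is a delete marker: 404 + x-amz-delete-marker: true
curl -I "https://s3.example.com/my-bucket/test_file"
# addressing the delete marker's versionId directly: 405 Method Not Allowed
curl -I "https://s3.example.com/my-bucket/test_file?versionId=DELETE_MARKER_ID"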
@@ -525,107 +533,3 @@ test_file="test_file"
run rest_check_legal_hold "$BUCKET_ONE_NAME" "$test_file"
assert_success
}
@test "REST - UploadPartCopy w/o upload ID" {
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/1226"
fi
run upload_part_copy_without_upload_id_or_part_number "$BUCKET_ONE_NAME" "$test_file" "1" "" \
400 "InvalidArgument" "This operation does not accept partNumber without uploadId"
assert_success
}
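
Editor's note: the expected failure here matches AWS's own validation for UploadPartCopy: a partNumber query parameter without a matching uploadId is rejected before the copy source is even considered. Rough shape of the request (signing omitted; names illustrative):

# PUT with partNumber but no uploadId
curl -i -X PUT \
  -H "x-amz-copy-source: my-bucket/source-object" \
  "https://s3.example.com/my-bucket/target-object?partNumber=1"
# expected per the assertion above: 400 InvalidArgument,
# "This operation does not accept partNumber without uploadId"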
@test "REST - UploadPartCopy w/o part number" {
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/1229"
fi
run upload_part_copy_without_upload_id_or_part_number "$BUCKET_ONE_NAME" "$test_file" "" "dummy" \
405 "MethodNotAllowed" "The specified method is not allowed against this resource"
assert_success
}
@test "REST - UploadPartCopy - ETag is quoted" {
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/1235"
fi
run setup_bucket_and_file "$BUCKET_ONE_NAME" "$test_file"
assert_success
run put_object "rest" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
assert_success
run upload_part_copy_check_etag_header "$BUCKET_ONE_NAME" "$test_file"-mp "$BUCKET_ONE_NAME/$test_file"
assert_success
}
@test "REST - UploadPart - ETag is quoted" {
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/1233"
fi
run setup_bucket_and_large_file "$BUCKET_ONE_NAME" "$test_file"
assert_success
run split_file "$TEST_FILE_FOLDER/$test_file" 4
assert_success
run create_multipart_upload_rest "$BUCKET_ONE_NAME" "$test_file"
assert_success
# shellcheck disable=SC2030
upload_id=$output
run upload_part_check_etag_header "$BUCKET_ONE_NAME" "$test_file" "$upload_id"
assert_success
}
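
Editor's note: both ETag tests above check that the header value arrives wrapped in double quotes, which is how RFC 9110 defines entity tags; an unquoted value is the bug being guarded against:

# expected header shape in the UploadPart / UploadPartCopy response
#   ETag: "d41d8cd98f00b204e9800998ecf8427e"
# not:
#   ETag: d41d8cd98f00b204e9800998ecf8427e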
@test "REST - UploadPart w/o part number" {
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/1236"
fi
run setup_bucket_and_large_file "$BUCKET_ONE_NAME" "$test_file"
assert_success
run split_file "$TEST_FILE_FOLDER/$test_file" 4
assert_success
run upload_part_without_upload_id "$BUCKET_ONE_NAME" "$test_file"
assert_success
}
@test "REST - UploadPart w/o upload ID" {
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/1237"
fi
run setup_bucket_and_large_file "$BUCKET_ONE_NAME" "$test_file"
assert_success
run split_file "$TEST_FILE_FOLDER/$test_file" 4
assert_success
run upload_part_without_upload_id "$BUCKET_ONE_NAME" "$test_file"
assert_success
}
@test "REST - copy object w/invalid copy source" {
run setup_bucket_and_file "$BUCKET_ONE_NAME" "$test_file"
assert_success
run put_object "rest" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
assert_success
run copy_object_invalid_copy_source "$BUCKET_ONE_NAME"
assert_success
}
@test "REST - copy object w/copy source and payload" {
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/1242"
fi
run setup_bucket_and_file "$BUCKET_ONE_NAME" "$test_file"
assert_success
run put_object "rest" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
assert_success
run copy_object_copy_source_and_payload "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file"
assert_success
}

View File

@@ -26,13 +26,8 @@ source ./tests/util/util_setup.sh
export RUN_USERS=true
if [ "$SKIP_ACL_TESTING" == "true" ]; then
skip "Skipping ACL tests"
exit 0
fi
@test "REST - get ACL" {
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success
run get_and_check_acl_rest "$BUCKET_ONE_NAME"
@@ -110,7 +105,7 @@ fi
if [ "$DIRECT" != "true" ]; then
skip "https://github.com/versity/versitygw/issues/986"
fi
run setup_bucket "$BUCKET_ONE_NAME"
run setup_bucket "s3api" "$BUCKET_ONE_NAME"
assert_success
run put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred"

Some files were not shown because too many files have changed in this diff.