Compare commits


4 Commits

Author SHA1 Message Date
Ben McClelland
7086579590 fix: list objects trim common prefixes that match marker prefix
This checks whether the common prefix sorts before the marker and
thus would have been returned by an earlier list objects request.

The error case was the AWS CLI listing multiple entries for the same
common prefix when the listing required multiple pagination
requests.

Fixes #778
2024-09-16 14:55:55 -07:00
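For illustration, the guard described in the commit message above looks roughly like this inside a list-objects paging loop (a minimal sketch; the same pattern appears in the azure.go hunk later in this diff, and the surrounding variable names are assumptions):

    // A common prefix whose stem is a prefix of the current marker was
    // already returned by an earlier paginated request, so skip it
    // rather than listing it twice.
    pfx := strings.TrimSuffix(*v.Name, getString(input.Delimiter))
    if marker != "" && strings.HasPrefix(marker, pfx) {
        continue
    }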
jonaustin09
dea5e0c0b2 feat: Implemented object versioning for multipart uploads, along with integration tests for the multipart versioning implementation 2024-09-16 14:51:22 -07:00
jonaustin09
16995acc17 feat: Added integration tests for bucket object versioning. Made a couple of bug fixes in the versioning implementation 2024-09-16 14:51:20 -07:00
jonaustin09
1f41f91f2d feat: basic logic implementation of bucket object versioning in posix backend
The new posix backend option --versioning-dir enables storing object versions
in the specified directory.
2024-09-16 14:42:24 -07:00
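As a usage sketch (only the --versioning-dir flag name comes from the commit message above; the paths and the overall CLI shape are placeholders/assumptions):

    ./versitygw posix --versioning-dir /tmp/versioning /tmp/gw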
240 changed files with 7828 additions and 25883 deletions


@@ -1,37 +0,0 @@
name: azurite functional tests
on: pull_request
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 'stable'
id: go
- name: Set up Docker Compose
run: |
docker compose -f tests/docker-compose.yml --env-file .env.dev --project-directory . up -d azurite azuritegw
- name: Wait for Azurite to be ready
run: sleep 40
- name: Get Dependencies
run: |
go mod download
- name: Build and Run
run: |
make
./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow --azure
- name: Shut down services
run: |
docker compose -f tests/docker-compose.yml --env-file .env.dev --project-directory . down azurite azuritegw

.github/workflows/docker-bats.yaml (new file)

@@ -0,0 +1,28 @@
name: docker bats tests
on: pull_request
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker Image
run: |
mv tests/.env.docker.default tests/.env.docker
mv tests/.secrets.default tests/.secrets
docker build --build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
--build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" --build-arg="MC_FOLDER=linux-amd64" \
--progress=plain -f tests/Dockerfile_test_bats -t bats_test .
- name: Set up Docker Compose
run: sudo apt-get install -y docker-compose
- name: Run Docker Container
run: docker-compose -f tests/docker-compose-bats.yml up --exit-code-from posix_backend posix_backend


@@ -1,29 +0,0 @@
name: docker bats tests
on: pull_request
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build Docker Image
run: |
cp tests/.env.docker.default tests/.env.docker
cp tests/.secrets.default tests/.secrets
# see https://github.com/versity/versitygw/issues/1034
docker build \
--build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
--build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" \
--build-arg="MC_FOLDER=linux-amd64" \
--progress=plain \
-f tests/Dockerfile_test_bats \
-t bats_test .
- name: Run Docker Container
run: |
docker compose -f tests/docker-compose-bats.yml --project-directory . \
up --exit-code-from s3api_np_only s3api_np_only


@@ -1,8 +1,7 @@
name: functional tests
on: pull_request
jobs:
build:
name: RunTests
runs-on: ubuntu-latest
@@ -19,7 +18,7 @@ jobs:
- name: Get Dependencies
run: |
go mod download
go get -v -t -d ./...
- name: Build and Run
run: |


@@ -8,96 +8,115 @@ jobs:
fail-fast: false
matrix:
include:
- set: "mc, posix, non-file count, non-static, folder IAM"
- set: "s3cmd, posix"
LOCAL_FOLDER: /tmp/gw1
BUCKET_ONE_NAME: versity-gwtest-bucket-one-1
BUCKET_TWO_NAME: versity-gwtest-bucket-two-1
IAM_TYPE: folder
RUN_SET: "mc-non-file-count"
USERS_FOLDER: /tmp/iam1
AWS_ENDPOINT_URL: https://127.0.0.1:7070
RUN_SET: "s3cmd"
RECREATE_BUCKETS: "true"
PORT: 7070
BACKEND: "posix"
- set: "mc, posix, file count, non-static, folder IAM"
- set: "s3, posix"
LOCAL_FOLDER: /tmp/gw2
BUCKET_ONE_NAME: versity-gwtest-bucket-one-2
BUCKET_TWO_NAME: versity-gwtest-bucket-two-2
IAM_TYPE: folder
RUN_SET: "mc-file-count"
USERS_FOLDER: /tmp/iam2
AWS_ENDPOINT_URL: https://127.0.0.1:7071
RUN_SET: "s3"
RECREATE_BUCKETS: "true"
PORT: 7071
BACKEND: "posix"
- set: "REST, posix, non-static, all, folder IAM"
- set: "s3api, posix"
LOCAL_FOLDER: /tmp/gw3
BUCKET_ONE_NAME: versity-gwtest-bucket-one-3
BUCKET_TWO_NAME: versity-gwtest-bucket-two-3
IAM_TYPE: folder
USERS_FOLDER: /tmp/iam3
AWS_ENDPOINT_URL: https://127.0.0.1:7072
RUN_SET: "s3api"
RECREATE_BUCKETS: "true"
PORT: 7072
BACKEND: "posix"
- set: "mc, posix"
LOCAL_FOLDER: /tmp/gw4
BUCKET_ONE_NAME: versity-gwtest-bucket-one-4
BUCKET_TWO_NAME: versity-gwtest-bucket-two-4
IAM_TYPE: folder
USERS_FOLDER: /tmp/iam4
AWS_ENDPOINT_URL: https://127.0.0.1:7073
RUN_SET: "mc"
RECREATE_BUCKETS: "true"
PORT: 7073
BACKEND: "posix"
- set: "s3api-user, posix, s3 IAM"
LOCAL_FOLDER: /tmp/gw5
BUCKET_ONE_NAME: versity-gwtest-bucket-one-5
BUCKET_TWO_NAME: versity-gwtest-bucket-two-5
IAM_TYPE: s3
USERS_BUCKET: versity-gwtest-iam
AWS_ENDPOINT_URL: https://127.0.0.1:7074
RUN_SET: "s3api-user"
RECREATE_BUCKETS: "true"
PORT: 7074
BACKEND: "posix"
- set: "s3api non-policy, static buckets"
LOCAL_FOLDER: /tmp/gw6
BUCKET_ONE_NAME: versity-gwtest-bucket-one-6
BUCKET_TWO_NAME: versity-gwtest-bucket-two-6
IAM_TYPE: folder
USERS_FOLDER: /tmp/iam6
AWS_ENDPOINT_URL: https://127.0.0.1:7075
RUN_SET: "s3api-non-policy"
RECREATE_BUCKETS: "false"
PORT: 7075
BACKEND: "posix"
- set: "s3api, s3 backend"
LOCAL_FOLDER: /tmp/gw7
BUCKET_ONE_NAME: versity-gwtest-bucket-one-7
BUCKET_TWO_NAME: versity-gwtest-bucket-two-7
IAM_TYPE: folder
USERS_FOLDER: /tmp/iam7
AWS_ENDPOINT_URL: https://127.0.0.1:7076
RUN_SET: "s3api"
RECREATE_BUCKETS: "true"
PORT: 7076
BACKEND: "s3"
- set: "REST, posix"
LOCAL_FOLDER: /tmp/gw8
BUCKET_ONE_NAME: versity-gwtest-bucket-one-7
BUCKET_TWO_NAME: versity-gwtest-bucket-two-7
IAM_TYPE: folder
USERS_FOLDER: /tmp/iam8
AWS_ENDPOINT_URL: https://127.0.0.1:7077
RUN_SET: "rest"
RECREATE_BUCKETS: "true"
PORT: 7077
BACKEND: "posix"
- set: "s3, posix, non-file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3-non-file-count"
RECREATE_BUCKETS: "true"
BACKEND: "posix"
- set: "s3, posix, file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3-file-count"
RECREATE_BUCKETS: "true"
BACKEND: "posix"
- set: "s3api, posix, bucket|object|multipart, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
RECREATE_BUCKETS: "true"
BACKEND: "posix"
- set: "s3api, posix, policy, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-policy"
RECREATE_BUCKETS: "true"
BACKEND: "posix"
- set: "s3api, posix, user, non-static, s3 IAM"
IAM_TYPE: s3
RUN_SET: "s3api-user"
RECREATE_BUCKETS: "true"
BACKEND: "posix"
- set: "s3api, posix, bucket, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-bucket"
RECREATE_BUCKETS: "false"
BACKEND: "posix"
- set: "s3api, posix, multipart, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-multipart"
RECREATE_BUCKETS: "false"
BACKEND: "posix"
- set: "s3api, posix, object, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-object"
RECREATE_BUCKETS: "false"
BACKEND: "posix"
- set: "s3api, posix, policy, static, folder IAM"
- set: "s3api policy, static buckets"
LOCAL_FOLDER: /tmp/gw9
BUCKET_ONE_NAME: versity-gwtest-bucket-one-8
BUCKET_TWO_NAME: versity-gwtest-bucket-two-8
IAM_TYPE: folder
USERS_FOLDER: /tmp/iam9
AWS_ENDPOINT_URL: https://127.0.0.1:7078
RUN_SET: "s3api-policy"
RECREATE_BUCKETS: "false"
PORT: 7078
BACKEND: "posix"
- set: "s3api, posix, user, static, folder IAM"
- set: "s3api user, static buckets"
LOCAL_FOLDER: /tmp/gw10
BUCKET_ONE_NAME: versity-gwtest-bucket-one-9
BUCKET_TWO_NAME: versity-gwtest-bucket-two-9
IAM_TYPE: folder
USERS_FOLDER: /tmp/iam10
AWS_ENDPOINT_URL: https://127.0.0.1:7079
RUN_SET: "s3api-user"
RECREATE_BUCKETS: "false"
BACKEND: "posix"
# TODO fix/debug s3 gateway
#- set: "s3api, s3, multipart|object, non-static, folder IAM"
# IAM_TYPE: folder
# RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
# RECREATE_BUCKETS: "true"
# BACKEND: "s3"
#- set: "s3api, s3, policy|user, non-static, folder IAM"
# IAM_TYPE: folder
# RUN_SET: "s3api-policy,s3api-user"
# RECREATE_BUCKETS: "true"
# BACKEND: "s3"
- set: "s3cmd, posix, file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-file-count"
RECREATE_BUCKETS: "true"
BACKEND: "posix"
- set: "s3cmd, posix, non-user, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-non-user"
RECREATE_BUCKETS: "true"
BACKEND: "posix"
- set: "s3cmd, posix, user, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-user"
RECREATE_BUCKETS: "true"
PORT: 7079
BACKEND: "posix"
steps:
- name: Check out code into the Go module directory
@@ -133,18 +152,17 @@ jobs:
run: |
sudo apt-get install libxml2-utils
# see https://github.com/versity/versitygw/issues/1034
- name: Install AWS cli
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.22.35.zip" -o "awscliv2.zip"
unzip -o awscliv2.zip
./aws/install -i ${{ github.workspace }}/aws-cli -b ${{ github.workspace }}/bin
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Build and run
- name: Build and run, posix backend
env:
LOCAL_FOLDER: ${{ matrix.LOCAL_FOLDER }}
BUCKET_ONE_NAME: ${{ matrix.BUCKET_ONE_NAME }}
BUCKET_TWO_NAME: ${{ matrix.BUCKET_TWO_NAME }}
USERS_FOLDER: ${{ matrix.USERS_FOLDER }}
USERS_BUCKET: ${{ matrix.USERS_BUCKET }}
IAM_TYPE: ${{ matrix.IAM_TYPE }}
AWS_ENDPOINT_URL: ${{ matrix.AWS_ENDPOINT_URL }}
RUN_SET: ${{ matrix.RUN_SET }}
PORT: ${{ matrix.PORT }}
AWS_PROFILE: versity
VERSITY_EXE: ${{ github.workspace }}/versitygw
RUN_VERSITYGW: true
@@ -152,13 +170,6 @@ jobs:
RECREATE_BUCKETS: ${{ matrix.RECREATE_BUCKETS }}
CERT: ${{ github.workspace }}/cert.pem
KEY: ${{ github.workspace }}/versitygw.pem
LOCAL_FOLDER: /tmp/gw
BUCKET_ONE_NAME: versity-gwtest-bucket-one
BUCKET_TWO_NAME: versity-gwtest-bucket-two
USERS_FOLDER: /tmp/iam
USERS_BUCKET: versity-gwtest-iam
AWS_ENDPOINT_URL: https://127.0.0.1:7070
PORT: 7070
S3CMD_CONFIG: tests/s3cfg.local.default
MC_ALIAS: versity
LOG_LEVEL: 4
@@ -168,11 +179,6 @@ jobs:
USERNAME_TWO: HIJKLMN
PASSWORD_TWO: 8901234
TEST_FILE_FOLDER: ${{ github.workspace }}/versity-gwtest-files
REMOVE_TEST_FILE_FOLDER: true
VERSIONING_DIR: ${{ github.workspace }}/versioning
COMMAND_LOG: command.log
TIME_LOG: time.log
PYTHON_ENV_FOLDER: ${{ github.workspace }}/env
run: |
make testbin
export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST
@@ -180,7 +186,6 @@ jobs:
export AWS_REGION=us-east-1
export AWS_ACCESS_KEY_ID_TWO=user
export AWS_SECRET_ACCESS_KEY_TWO=pass
export AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile versity
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile versity
aws configure set aws_region $AWS_REGION --profile versity
@@ -194,9 +199,6 @@ jobs:
fi
BYPASS_ENV_FILE=true ${{ github.workspace }}/tests/run.sh $RUN_SET
- name: Time report
run: cat ${{ github.workspace }}/time.log
- name: Coverage report
run: |
go tool covdata percent -i=cover

.gitignore

@@ -62,8 +62,4 @@ tests/!s3cfg.local.default
*.patch
# grafana's local database (kept on filesystem for survival between instantiations)
metrics-exploration/grafana_data/**
# bats tools
/tests/bats-assert
/tests/bats-support
metrics-exploration/grafana_data/**


@@ -18,10 +18,6 @@ GOBUILD=$(GOCMD) build
GOCLEAN=$(GOCMD) clean
GOTEST=$(GOCMD) test
# docker-compose
DCCMD=docker-compose
DOCKERCOMPOSE=$(DCCMD) -f tests/docker-compose.yml --env-file .env.dev --project-directory .
BIN=versitygw
VERSION := $(shell if test -e VERSION; then cat VERSION; else git describe --abbrev=0 --tags HEAD; fi)
@@ -75,19 +71,19 @@ dist:
# Creates and runs S3 gateway instance in a docker container
.PHONY: up-posix
up-posix:
$(DOCKERCOMPOSE) up posix
docker compose --env-file .env.dev up posix
# Creates and runs S3 gateway proxy instance in a docker container
.PHONY: up-proxy
up-proxy:
$(DOCKERCOMPOSE) up proxy
docker compose --env-file .env.dev up proxy
# Creates and runs S3 gateway to azurite instance in a docker container
.PHONY: up-azurite
up-azurite:
$(DOCKERCOMPOSE) up azurite azuritegw
docker compose --env-file .env.dev up azurite azuritegw
# Creates and runs both S3 gateway and proxy server instances in docker containers
.PHONY: up-app
up-app:
$(DOCKERCOMPOSE) up
docker compose --env-file .env.dev up


@@ -40,7 +40,7 @@ Versity Gateway, a simple to use tool for seamless inline translation between AW
The server translates incoming S3 API requests and transforms them into equivalent operations to the backend service. By leveraging this gateway server, applications can interact with the S3-compatible API on top of already existing storage systems. This project enables leveraging existing infrastructure investments while seamlessly integrating with S3-compatible systems, offering increased flexibility and compatibility in managing data storage.
The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage, Versity's open source ScoutFS filesystem, Azure Blob Storage, and other S3 servers.
The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage and Versity's open source ScoutFS filesystem.
The gateway is completely stateless. Multiple Versity Gateway instances may be deployed in a cluster to increase aggregate throughput. The Versity Gateway's stateless architecture allows any request to be serviced by any gateway, thereby distributing workloads and enhancing performance. Load balancers may be used to evenly distribute requests across the cluster of gateways for optimal performance.


@@ -17,7 +17,6 @@ package auth
import (
"context"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"strings"
@@ -34,13 +33,12 @@ type ACL struct {
}
type Grantee struct {
Permission Permission
Permission types.Permission
Access string
Type types.Type
}
type GetBucketAclOutput struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlPolicy"`
Owner *types.Owner
AccessControlList AccessControlList
}
@@ -61,124 +59,20 @@ type AccessControlPolicy struct {
Owner *types.Owner
}
func (acp *AccessControlPolicy) Validate() error {
if !acp.AccessControlList.isValid() {
return s3err.GetAPIError(s3err.ErrMalformedACL)
}
// The Owner can't be nil
if acp.Owner == nil {
return s3err.GetAPIError(s3err.ErrMalformedACL)
}
// The Owner ID can't be empty
if acp.Owner.ID == nil || *acp.Owner.ID == "" {
return s3err.GetAPIError(s3err.ErrMalformedACL)
}
return nil
}
type AccessControlList struct {
Grants []Grant `xml:"Grant"`
}
// Validates the AccessControlList
func (acl *AccessControlList) isValid() bool {
for _, el := range acl.Grants {
if !el.isValid() {
return false
}
}
return true
}
type Permission string
const (
PermissionFullControl Permission = "FULL_CONTROL"
PermissionWrite Permission = "WRITE"
PermissionWriteAcp Permission = "WRITE_ACP"
PermissionRead Permission = "READ"
PermissionReadAcp Permission = "READ_ACP"
)
// Check if the permission is valid
func (p Permission) isValid() bool {
return p == PermissionFullControl ||
p == PermissionRead ||
p == PermissionReadAcp ||
p == PermissionWrite ||
p == PermissionWriteAcp
}
type Grant struct {
Grantee *Grt `xml:"Grantee"`
Permission Permission `xml:"Permission"`
}
// Checks if Grant is valid
func (g *Grant) isValid() bool {
return g.Permission.isValid() && g.Grantee.isValid()
Grantee *Grt
Permission types.Permission
}
type Grt struct {
XMLNS string `xml:"xmlns:xsi,attr"`
Type types.Type `xml:"xsi:type,attr"`
ID string `xml:"ID"`
}
// Custom Unmarshalling for Grt to parse xsi:type properly
func (g *Grt) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// Iterate through the XML tokens to process the attributes
for _, attr := range start.Attr {
// Check if the attribute is xsi:type and belongs to the xsi namespace
if attr.Name.Space == "http://www.w3.org/2001/XMLSchema-instance" && attr.Name.Local == "type" {
g.Type = types.Type(attr.Value)
}
// Handle xmlns:xsi
if attr.Name.Local == "xmlns:xsi" {
g.XMLNS = attr.Value
}
}
// Decode the inner XML elements like ID
for {
t, err := d.Token()
if err != nil {
return err
}
switch se := t.(type) {
case xml.StartElement:
if se.Name.Local == "ID" {
if err := d.DecodeElement(&g.ID, &se); err != nil {
return err
}
}
case xml.EndElement:
if se.Name.Local == start.Name.Local {
return nil
}
}
}
}
// Validates Grt
func (g *Grt) isValid() bool {
// Validate the Type
// Only these 2 types are supported in the gateway
if g.Type != types.TypeCanonicalUser && g.Type != types.TypeGroup {
return false
}
// The ID prop shouldn't be empty
if g.ID == "" {
return false
}
return true
XMLNS string `xml:"xmlns:xsi,attr"`
XMLXSI types.Type `xml:"xsi:type,attr"`
Type types.Type `xml:"Type"`
ID string `xml:"ID"`
}
func ParseACL(data []byte) (ACL, error) {
@@ -193,32 +87,22 @@ func ParseACL(data []byte) (ACL, error) {
return acl, nil
}
func ParseACLOutput(data []byte, owner string) (GetBucketAclOutput, error) {
grants := []Grant{}
if len(data) == 0 {
return GetBucketAclOutput{
Owner: &types.Owner{
ID: &owner,
},
AccessControlList: AccessControlList{
Grants: grants,
},
}, nil
}
func ParseACLOutput(data []byte) (GetBucketAclOutput, error) {
var acl ACL
if err := json.Unmarshal(data, &acl); err != nil {
return GetBucketAclOutput{}, fmt.Errorf("parse acl: %w", err)
}
grants := []Grant{}
for _, elem := range acl.Grantees {
acs := elem.Access
grants = append(grants, Grant{
Grantee: &Grt{
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
ID: acs,
Type: elem.Type,
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
XMLXSI: elem.Type,
ID: acs,
Type: elem.Type,
},
Permission: elem.Permission,
})
@@ -241,7 +125,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
defaultGrantees := []Grantee{
{
Permission: PermissionFullControl,
Permission: types.PermissionFullControl,
Access: acl.Owner,
Type: types.TypeCanonicalUser,
},
@@ -252,19 +136,19 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
switch input.ACL {
case types.BucketCannedACLPublicRead:
defaultGrantees = append(defaultGrantees, Grantee{
Permission: PermissionRead,
Permission: types.PermissionRead,
Access: "all-users",
Type: types.TypeGroup,
})
case types.BucketCannedACLPublicReadWrite:
defaultGrantees = append(defaultGrantees, []Grantee{
{
Permission: PermissionRead,
Permission: types.PermissionRead,
Access: "all-users",
Type: types.TypeGroup,
},
{
Permission: PermissionWrite,
Permission: types.PermissionWrite,
Access: "all-users",
Type: types.TypeGroup,
},
@@ -281,7 +165,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range fullControlList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionFullControl,
Permission: types.PermissionFullControl,
Type: types.TypeCanonicalUser,
})
}
@@ -291,7 +175,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range readList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionRead,
Permission: types.PermissionRead,
Type: types.TypeCanonicalUser,
})
}
@@ -301,7 +185,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range readACPList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionReadAcp,
Permission: types.PermissionReadAcp,
Type: types.TypeCanonicalUser,
})
}
@@ -311,7 +195,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range writeList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionWrite,
Permission: types.PermissionWrite,
Type: types.TypeCanonicalUser,
})
}
@@ -321,7 +205,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range writeACPList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionWriteAcp,
Permission: types.PermissionWriteAcp,
Type: types.TypeCanonicalUser,
})
}
@@ -378,8 +262,8 @@ func CheckIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
result = append(result, acc)
continue
}
if errors.Is(err, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)) {
return nil, err
if err == ErrNotSupported {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
return nil, fmt.Errorf("check user account: %w", err)
}
@@ -402,7 +286,7 @@ func splitUnique(s, divider string) []string {
return result
}
func verifyACL(acl ACL, access string, permission Permission) error {
func verifyACL(acl ACL, access string, permission types.Permission) error {
grantee := Grantee{
Access: access,
Permission: permission,
@@ -410,7 +294,7 @@ func verifyACL(acl ACL, access string, permission Permission) error {
}
granteeFullCtrl := Grantee{
Access: access,
Permission: PermissionFullControl,
Permission: types.PermissionFullControl,
Type: types.TypeCanonicalUser,
}
granteeAllUsers := Grantee{
@@ -469,7 +353,7 @@ func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {
type AccessOptions struct {
Acl ACL
AclPermission Permission
AclPermission types.Permission
IsRoot bool
Acc Account
Bucket string
@@ -480,7 +364,7 @@ type AccessOptions struct {
func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
if opts.Readonly {
if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
if opts.AclPermission == types.PermissionWrite || opts.AclPermission == types.PermissionWriteAcp {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
}
@@ -538,7 +422,7 @@ func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource
if err := VerifyAccess(ctx, be, AccessOptions{
Acl: srcBucketAcl,
AclPermission: PermissionRead,
AclPermission: types.PermissionRead,
IsRoot: opts.IsRoot,
Acc: opts.Acc,
Bucket: srcBucket,


@@ -48,19 +48,18 @@ func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
}
func (bp *BucketPolicy) isAllowed(principal string, action Action, resource string) bool {
var isAllowed bool
for _, statement := range bp.Statement {
if statement.findMatch(principal, action, resource) {
switch statement.Effect {
case BucketPolicyAccessTypeAllow:
isAllowed = true
return true
case BucketPolicyAccessTypeDeny:
return false
}
}
}
return isAllowed
return false
}
type BucketPolicyItem struct {


@@ -102,45 +102,21 @@ func (r Resources) Validate(bucket string) error {
func (r Resources) FindMatch(resource string) bool {
for res := range r {
if r.Match(res, resource) {
return true
if strings.HasSuffix(res, "*") {
pattern := strings.TrimSuffix(res, "*")
if strings.HasPrefix(resource, pattern) {
return true
}
} else {
if res == resource {
return true
}
}
}
return false
}
// Match checks if the input string matches the given pattern with wildcards (`*`, `?`).
// - `?` matches exactly one occurrence of any character.
// - `*` matches arbitrary many (including zero) occurrences of any character.
func (r Resources) Match(pattern, input string) bool {
pIdx, sIdx := 0, 0
starIdx, matchIdx := -1, 0
for sIdx < len(input) {
if pIdx < len(pattern) && (pattern[pIdx] == '?' || pattern[pIdx] == input[sIdx]) {
sIdx++
pIdx++
} else if pIdx < len(pattern) && pattern[pIdx] == '*' {
starIdx = pIdx
matchIdx = sIdx
pIdx++
} else if starIdx != -1 {
pIdx = starIdx + 1
matchIdx++
sIdx = matchIdx
} else {
return false
}
}
for pIdx < len(pattern) && pattern[pIdx] == '*' {
pIdx++
}
return pIdx == len(pattern)
}
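// Illustrative behavior (mirroring the TestMatch cases further down in this diff):
//   Match("my-bucket/*", "my-bucket/subdir/object") == true
//   Match("foo/???/bar", "foo/qux/bar")             == true
//   Match("foo/???/bar", "foo/quxx/bar")            == false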
// Checks that the resource has the arn prefix and does not start with /
func isValidResource(rc string) (isValid bool, pattern string) {
if !strings.HasPrefix(rc, ResourceArnPrefix) {


@@ -1,182 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auth
import (
"encoding/json"
"testing"
)
func TestUnmarshalJSON(t *testing.T) {
var r Resources
cases := []struct {
input string
expected int
wantErr bool
}{
{`"arn:aws:s3:::my-bucket/*"`, 1, false},
{`["arn:aws:s3:::my-bucket/*", "arn:aws:s3:::other-bucket"]`, 2, false},
{`""`, 0, true},
{`[]`, 0, true},
{`["invalid-bucket"]`, 0, true},
}
for _, tc := range cases {
r = Resources{}
err := json.Unmarshal([]byte(tc.input), &r)
if (err != nil) != tc.wantErr {
t.Errorf("Unexpected error status for input %s: %v", tc.input, err)
}
if len(r) != tc.expected {
t.Errorf("Expected %d resources, got %d", tc.expected, len(r))
}
}
}
func TestAdd(t *testing.T) {
r := Resources{}
cases := []struct {
input string
wantErr bool
}{
{"arn:aws:s3:::valid-bucket/*", false},
{"arn:aws:s3:::valid-bucket/object", false},
{"invalid-bucket/*", true},
{"/invalid-start", true},
}
for _, tc := range cases {
err := r.Add(tc.input)
if (err != nil) != tc.wantErr {
t.Errorf("Unexpected error status for input %s: %v", tc.input, err)
}
}
}
func TestContainsObjectPattern(t *testing.T) {
cases := []struct {
resources []string
expected bool
}{
{[]string{"arn:aws:s3:::my-bucket/my-object"}, true},
{[]string{"arn:aws:s3:::my-bucket/*"}, true},
{[]string{"arn:aws:s3:::my-bucket"}, false},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if r.ContainsObjectPattern() != tc.expected {
t.Errorf("Expected object pattern to be %v for %v", tc.expected, tc.resources)
}
}
}
func TestContainsBucketPattern(t *testing.T) {
cases := []struct {
resources []string
expected bool
}{
{[]string{"arn:aws:s3:::my-bucket"}, true},
{[]string{"arn:aws:s3:::my-bucket/*"}, false},
{[]string{"arn:aws:s3:::my-bucket/object"}, false},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if r.ContainsBucketPattern() != tc.expected {
t.Errorf("Expected bucket pattern to be %v for %v", tc.expected, tc.resources)
}
}
}
func TestValidate(t *testing.T) {
cases := []struct {
resources []string
bucket string
expected bool
}{
{[]string{"arn:aws:s3:::valid-bucket/*"}, "valid-bucket", true},
{[]string{"arn:aws:s3:::wrong-bucket/*"}, "valid-bucket", false},
{[]string{"arn:aws:s3:::valid-bucket/*", "arn:aws:s3:::valid-bucket/object/*"}, "valid-bucket", true},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if (r.Validate(tc.bucket) == nil) != tc.expected {
t.Errorf("Expected validation to be %v for bucket %s", tc.expected, tc.bucket)
}
}
}
func TestFindMatch(t *testing.T) {
cases := []struct {
resources []string
input string
expected bool
}{
{[]string{"arn:aws:s3:::my-bucket/*"}, "my-bucket/my-object", true},
{[]string{"arn:aws:s3:::my-bucket/object"}, "other-bucket/my-object", false},
{[]string{"arn:aws:s3:::my-bucket/object"}, "my-bucket/object", true},
{[]string{"arn:aws:s3:::my-bucket/*", "arn:aws:s3:::other-bucket/*"}, "other-bucket/something", true},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if r.FindMatch(tc.input) != tc.expected {
t.Errorf("Expected FindMatch to be %v for input %s", tc.expected, tc.input)
}
}
}
func TestMatch(t *testing.T) {
r := Resources{}
cases := []struct {
pattern string
input string
expected bool
}{
{"my-bucket/*", "my-bucket/object", true},
{"my-bucket/?bject", "my-bucket/object", true},
{"my-bucket/*", "other-bucket/object", false},
{"*", "any-bucket/object", true},
{"my-bucket/*", "my-bucket/subdir/object", true},
{"my-bucket/*", "other-bucket", false},
{"my-bucket/*/*", "my-bucket/hello", false},
{"my-bucket/*/*", "my-bucket/hello/world", true},
{"foo/???/bar", "foo/qux/bar", true},
{"foo/???/bar", "foo/quxx/bar", false},
{"foo/???/bar/*/?", "foo/qux/bar/hello/g", true},
{"foo/???/bar/*/?", "foo/qux/bar/hello/smth", false},
}
for _, tc := range cases {
if r.Match(tc.pattern, tc.input) != tc.expected {
t.Errorf("Match(%s, %s) failed, expected %v", tc.pattern, tc.input, tc.expected)
}
}
}


@@ -28,19 +28,6 @@ const (
RoleUserPlus Role = "userplus"
)
func (r Role) IsValid() bool {
switch r {
case RoleAdmin:
return true
case RoleUser:
return true
case RoleUserPlus:
return true
default:
return false
}
}
// Account is a gateway IAM account
type Account struct {
Access string `json:"access"`
@@ -50,10 +37,6 @@ type Account struct {
GroupID int `json:"groupID"`
}
type ListUserAccountsResult struct {
Accounts []Account
}
// Mutable props, which could be changed when updating an IAM account
type MutableProps struct {
Secret *string `json:"secret"`
@@ -124,12 +107,6 @@ type Opts struct {
CacheDisable bool
CacheTTL int
CachePrune int
IpaHost string
IpaVaultName string
IpaUser string
IpaPassword string
IpaInsecure bool
IpaDebug bool
}
func New(o *Opts) (IAMService, error) {
@@ -155,9 +132,6 @@ func New(o *Opts) (IAMService, error) {
o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
case o.IpaHost != "":
svc, err = NewIpaIAMService(o.RootAccount, o.IpaHost, o.IpaVaultName, o.IpaUser, o.IpaPassword, o.IpaInsecure, o.IpaDebug)
fmt.Printf("initializing IPA IAM with %q\n", o.IpaHost)
default:
// if no iam options selected, default to the single user mode
fmt.Println("No IAM service configured, enabling single account mode")


@@ -1,446 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auth
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/http/cookiejar"
"net/url"
"strconv"
"strings"
)
const IpaVersion = "2.254"
type IpaIAMService struct {
client http.Client
id int
version string
host string
vaultName string
username string
password string
kraTransportKey *rsa.PublicKey
debug bool
rootAcc Account
}
var _ IAMService = &IpaIAMService{}
func NewIpaIAMService(rootAcc Account, host, vaultName, username, password string, isInsecure, debug bool) (*IpaIAMService, error) {
ipa := IpaIAMService{
id: 0,
version: IpaVersion,
host: host,
vaultName: vaultName,
username: username,
password: password,
debug: debug,
rootAcc: rootAcc,
}
jar, err := cookiejar.New(nil)
if err != nil {
// this should never happen
return nil, fmt.Errorf("cookie jar creation: %w", err)
}
mTLSConfig := &tls.Config{InsecureSkipVerify: isInsecure}
tr := &http.Transport{
TLSClientConfig: mTLSConfig,
}
ipa.client = http.Client{Jar: jar, Transport: tr}
err = ipa.login()
if err != nil {
return nil, fmt.Errorf("ipa login failed: %w", err)
}
req, err := ipa.newRequest("vaultconfig_show/1", []string{}, map[string]any{"all": true})
if err != nil {
return nil, fmt.Errorf("ipa vaultconfig_show: %w", err)
}
vaultConfig := struct {
Kra_Server_Server []string
Transport_Cert Base64EncodedWrapped
Wrapping_default_algorithm string
Wrapping_supported_algorithms []string
}{}
err = ipa.rpc(req, &vaultConfig)
if err != nil {
return nil, fmt.Errorf("ipa vault config: %w", err)
}
cert, err := x509.ParseCertificate(vaultConfig.Transport_Cert)
if err != nil {
return nil, fmt.Errorf("ipa cannot parse vault certificate: %w", err)
}
ipa.kraTransportKey = cert.PublicKey.(*rsa.PublicKey)
isSupported := false
for _, algo := range vaultConfig.Wrapping_supported_algorithms {
if algo == "aes-128-cbc" {
isSupported = true
break
}
}
if !isSupported {
return nil,
fmt.Errorf("IPA vault does not support aes-128-cbc. Only %v supported",
vaultConfig.Wrapping_supported_algorithms)
}
return &ipa, nil
}
func (ipa *IpaIAMService) CreateAccount(account Account) error {
return fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
if access == ipa.rootAcc.Access {
return ipa.rootAcc, nil
}
req, err := ipa.newRequest("user_show/1", []string{access}, map[string]any{})
if err != nil {
return Account{}, fmt.Errorf("ipa user_show: %w", err)
}
userResult := struct {
Gidnumber []string
Uidnumber []string
}{}
err = ipa.rpc(req, &userResult)
if err != nil {
return Account{}, err
}
uid, err := strconv.Atoi(userResult.Uidnumber[0])
if err != nil {
return Account{}, fmt.Errorf("ipa uid invalid: %w", err)
}
gid, err := strconv.Atoi(userResult.Gidnumber[0])
if err != nil {
return Account{}, fmt.Errorf("ipa gid invalid: %w", err)
}
account := Account{
Access: access,
Role: RoleUser,
UserID: uid,
GroupID: gid,
}
session_key := make([]byte, 16)
_, err = rand.Read(session_key)
if err != nil {
return account, fmt.Errorf("ipa cannot generate session key: %w", err)
}
encryptedKey, err := rsa.EncryptPKCS1v15(rand.Reader, ipa.kraTransportKey, session_key)
if err != nil {
return account, fmt.Errorf("ipa vault secret retrieval: %w", err)
}
req, err = ipa.newRequest("vault_retrieve_internal/1", []string{ipa.vaultName},
map[string]any{"username": access,
"session_key": Base64EncodedWrapped(encryptedKey),
"wrapping_algo": "aes-128-cbc"})
if err != nil {
return Account{}, fmt.Errorf("ipa vault_retrieve_internal: %w", err)
}
data := struct {
Vault_data Base64EncodedWrapped
Nonce Base64EncodedWrapped
}{}
err = ipa.rpc(req, &data)
if err != nil {
return account, err
}
aes, err := aes.NewCipher(session_key)
if err != nil {
return account, fmt.Errorf("ipa cannot create AES cipher: %w", err)
}
cbc := cipher.NewCBCDecrypter(aes, data.Nonce)
cbc.CryptBlocks(data.Vault_data, data.Vault_data)
secretUnpaddedJson, err := pkcs7Unpad(data.Vault_data, 16)
if err != nil {
return account, fmt.Errorf("ipa cannot unpad decrypted result: %w", err)
}
secret := struct {
Data Base64Encoded
}{}
json.Unmarshal(secretUnpaddedJson, &secret)
account.Secret = string(secret.Data)
return account, nil
}
func (ipa *IpaIAMService) UpdateUserAccount(access string, props MutableProps) error {
return fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) DeleteUserAccount(access string) error {
return fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) ListUserAccounts() ([]Account, error) {
return []Account{}, fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) Shutdown() error {
return nil
}
// Implementation
func (ipa *IpaIAMService) login() error {
form := url.Values{}
form.Set("user", ipa.username)
form.Set("password", ipa.password)
req, err := http.NewRequest(
"POST",
fmt.Sprintf("%s/ipa/session/login_password", ipa.host),
strings.NewReader(form.Encode()))
if err != nil {
return err
}
req.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := ipa.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode == 401 {
return errors.New("cannot login to FreeIPA: invalid credentials")
}
if resp.StatusCode != 200 {
return fmt.Errorf("cannot login to FreeIPA: status code %d", resp.StatusCode)
}
return nil
}
type rpcRequest = string
type rpcResponse struct {
Result json.RawMessage
Principal string
Id int
Version string
}
func (p rpcResponse) String() string {
return string(p.Result)
}
var errRpc = errors.New("IPA RPC error")
func (ipa *IpaIAMService) rpc(req rpcRequest, value any) error {
err := ipa.login()
if err != nil {
return err
}
res, err := ipa.rpcInternal(req)
if err != nil {
return err
}
return json.Unmarshal(res.Result, value)
}
func (ipa *IpaIAMService) rpcInternal(req rpcRequest) (rpcResponse, error) {
httpReq, err := http.NewRequest("POST",
fmt.Sprintf("%s/ipa/session/json", ipa.host),
strings.NewReader(req))
if err != nil {
return rpcResponse{}, err
}
ipa.log(fmt.Sprintf("%v", req))
httpReq.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
httpReq.Header.Set("Content-Type", "application/json")
httpResp, err := ipa.client.Do(httpReq)
if err != nil {
return rpcResponse{}, err
}
bytes, err := io.ReadAll(httpResp.Body)
ipa.log(string(bytes))
if err != nil {
return rpcResponse{}, err
}
result := struct {
Result struct {
Json json.RawMessage `json:"result"`
Value string `json:"value"`
Summary any `json:"summary"`
} `json:"result"`
Error json.RawMessage `json:"error"`
Id int `json:"id"`
Principal string `json:"principal"`
Version string `json:"version"`
}{}
err = json.Unmarshal(bytes, &result)
if err != nil {
return rpcResponse{}, err
}
if string(result.Error) != "null" {
return rpcResponse{}, fmt.Errorf("%s: %w", string(result.Error), errRpc)
}
return rpcResponse{
Result: result.Result.Json,
Principal: result.Principal,
Id: result.Id,
Version: result.Version,
}, nil
}
func (ipa *IpaIAMService) newRequest(method string, args []string, dict map[string]any) (rpcRequest, error) {
id := ipa.id
ipa.id++
dict["version"] = ipa.version
jmethod, errMethod := json.Marshal(method)
jargs, errArgs := json.Marshal(args)
jdict, errDict := json.Marshal(dict)
err := errors.Join(errMethod, errArgs, errDict)
if err != nil {
return "", fmt.Errorf("ipa request invalid: %w", err)
}
request := map[string]interface{}{
"id": id,
"method": json.RawMessage(jmethod),
"params": []json.RawMessage{json.RawMessage(jargs), json.RawMessage(jdict)},
}
requestJSON, err := json.Marshal(request)
if err != nil {
return "", fmt.Errorf("failed to marshal request: %w", err)
}
return string(requestJSON), nil
}
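// Illustrative example (not part of the original file): a call such as
//   ipa.newRequest("user_show/1", []string{"alice"}, map[string]any{})
// marshals to the JSON-RPC payload
//   {"id":0,"method":"user_show/1","params":[["alice"],{"version":"2.254"}]}
// where "alice" is a hypothetical username and the version value comes from
// the IpaVersion constant above.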
// pkcs7Unpad validates and unpads data from the given bytes slice.
// The returned value will be 1 to n bytes smaller depending on the
// amount of padding, where n is the block size.
func pkcs7Unpad(b []byte, blocksize int) ([]byte, error) {
if blocksize <= 0 {
return nil, errors.New("invalid blocksize")
}
if len(b) == 0 {
return nil, errors.New("invalid PKCS7 data (empty or not padded)")
}
if len(b)%blocksize != 0 {
return nil, errors.New("invalid padding on input")
}
c := b[len(b)-1]
n := int(c)
if n == 0 || n > len(b) {
return nil, errors.New("invalid padding on input")
}
for i := 0; i < n; i++ {
if b[len(b)-n+i] != c {
return nil, errors.New("invalid padding on input")
}
}
return b[:len(b)-n], nil
}
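// Worked example (illustrative): with blocksize 4, the input
// []byte{'a', 'b', 2, 2} ends in two padding bytes whose value equals the
// pad length, so pkcs7Unpad returns []byte{'a', 'b'}.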
/*
e.g.
"value" {
"__base64__": "aGVsbG93b3JsZAo="
}
*/
type Base64EncodedWrapped []byte
func (b *Base64EncodedWrapped) UnmarshalJSON(data []byte) error {
intermediate := struct {
Base64 string `json:"__base64__"`
}{}
err := json.Unmarshal(data, &intermediate)
if err != nil {
return err
}
*b, err = base64.StdEncoding.DecodeString(intermediate.Base64)
return err
}
func (b *Base64EncodedWrapped) MarshalJSON() ([]byte, error) {
intermediate := struct {
Base64 string `json:"__base64__"`
}{Base64: base64.StdEncoding.EncodeToString(*b)}
return json.Marshal(intermediate)
}
/*
e.g.
"value": "aGVsbG93b3JsZAo="
*/
type Base64Encoded []byte
func (b *Base64Encoded) UnmarshalJSON(data []byte) error {
var intermediate string
err := json.Unmarshal(data, &intermediate)
if err != nil {
return err
}
*b, err = base64.StdEncoding.DecodeString(intermediate)
return err
}
func (ipa *IpaIAMService) log(msg string) {
if ipa.debug {
log.Println(msg)
}
}


@@ -15,7 +15,7 @@
package auth
import (
"github.com/versity/versitygw/s3err"
"errors"
)
// IAMServiceSingle manages the single tenant (root-only) IAM service
@@ -23,29 +23,31 @@ type IAMServiceSingle struct{}
var _ IAMService = &IAMServiceSingle{}
var ErrNotSupported = errors.New("method is not supported")
// CreateAccount not valid in single tenant mode
func (IAMServiceSingle) CreateAccount(account Account) error {
return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return ErrNotSupported
}
// GetUserAccount no accounts in single tenant mode
func (IAMServiceSingle) GetUserAccount(access string) (Account, error) {
return Account{}, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return Account{}, ErrNoSuchUser
}
// UpdateUserAccount no accounts in single tenant mode
func (IAMServiceSingle) UpdateUserAccount(access string, props MutableProps) error {
return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return ErrNotSupported
}
// DeleteUserAccount no accounts in single tenant mode
func (IAMServiceSingle) DeleteUserAccount(access string) error {
return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return ErrNotSupported
}
// ListUserAccounts no accounts in single tenant mode
func (IAMServiceSingle) ListUserAccounts() ([]Account, error) {
return []Account{}, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return []Account{}, nil
}
// Shutdown graceful termination of service


@@ -47,7 +47,7 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath, mountPath,
tls.ServerCertificate.FromBytes = []byte(serverCert)
if clientCert != "" {
if clientCertKey == "" {
return nil, fmt.Errorf("client certificate and client certificate key should both be specified")
return nil, fmt.Errorf("client certificate and client certificate should both be specified")
}
tls.ClientCertificate.FromBytes = []byte(clientCert)


@@ -25,7 +25,6 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
type BucketLockConfig struct {
@@ -93,12 +92,12 @@ func ParseBucketLockConfigurationOutput(input []byte) (*types.ObjectLockConfigur
}
func ParseObjectLockRetentionInput(input []byte) ([]byte, error) {
var retention s3response.PutObjectRetentionInput
var retention types.ObjectLockRetention
if err := xml.Unmarshal(input, &retention); err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if retention.RetainUntilDate.Before(time.Now()) {
if retention.RetainUntilDate == nil || retention.RetainUntilDate.Before(time.Now()) {
return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
}
switch retention.Mode {
@@ -136,7 +135,7 @@ func ParseObjectLegalHoldOutput(status *bool) *types.ObjectLockLegalHold {
}
}
func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass bool, be backend.Backend) error {
func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []string, bypass bool, be backend.Backend) error {
data, err := be.GetObjectLockConfiguration(ctx, bucket)
if err != nil {
if errors.Is(err, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)) {
@@ -172,15 +171,8 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
}
for _, obj := range objects {
var key, versionId string
if obj.Key != nil {
key = *obj.Key
}
if obj.VersionId != nil {
versionId = *obj.VersionId
}
checkRetention := true
retentionData, err := be.GetObjectRetention(ctx, bucket, key, versionId)
retentionData, err := be.GetObjectRetention(ctx, bucket, obj, "")
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
continue
}
@@ -211,7 +203,7 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
if err != nil {
return err
}
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
err = VerifyBucketPolicy(policy, userAccess, bucket, obj, BypassGovernanceRetentionAction)
if err != nil {
return s3err.GetAPIError(s3err.ErrObjectLocked)
}
@@ -225,11 +217,8 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
checkLegalHold := true
status, err := be.GetObjectLegalHold(ctx, bucket, key, versionId)
status, err := be.GetObjectLegalHold(ctx, bucket, obj, "")
if err != nil {
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
continue
}
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)) {
checkLegalHold = false
} else {
@@ -254,7 +243,7 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
if err != nil {
return err
}
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
err = VerifyBucketPolicy(policy, userAccess, bucket, obj, BypassGovernanceRetentionAction)
if err != nil {
return s3err.GetAPIError(s3err.ErrObjectLocked)
}


@@ -27,6 +27,7 @@ import (
"math"
"os"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
@@ -148,8 +149,8 @@ func (az *Azure) String() string {
func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, acl []byte) error {
meta := map[string]*string{
string(keyAclCapital): backend.GetPtrFromString(encodeBytes(acl)),
string(keyOwnership): backend.GetPtrFromString(encodeBytes([]byte(input.ObjectOwnership))),
string(keyAclCapital): backend.GetStringPtr(encodeBytes(acl)),
string(keyOwnership): backend.GetStringPtr(encodeBytes([]byte(input.ObjectOwnership))),
}
acct, ok := ctx.Value("account").(auth.Account)
@@ -169,7 +170,7 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
return fmt.Errorf("parse default bucket lock state: %w", err)
}
meta[string(keyBucketLock)] = backend.GetPtrFromString(encodeBytes(defaultLockParsed))
meta[string(keyBucketLock)] = backend.GetStringPtr(encodeBytes(defaultLockParsed))
}
_, err := az.client.CreateContainer(ctx, *input.Bucket, &container.CreateOptions{Metadata: meta})
@@ -182,67 +183,58 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
var acl auth.ACL
if len(aclBytes) > 0 {
if err := json.Unmarshal(aclBytes, &acl); err != nil {
return fmt.Errorf("unmarshal acl: %w", err)
return fmt.Errorf("unmarshal bucket acl: %w", err)
}
}
if acl.Owner == acct.Access {
return s3err.GetAPIError(s3err.ErrBucketAlreadyOwnedByYou)
}
return s3err.GetAPIError(s3err.ErrBucketAlreadyExists)
}
return azureErrToS3Err(err)
}
func (az *Azure) ListBuckets(ctx context.Context, input s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
func (az *Azure) ListBuckets(ctx context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
pager := az.client.NewListContainersPager(
&service.ListContainersOptions{
Include: service.ListContainersInclude{
Metadata: true,
},
Marker: &input.ContinuationToken,
MaxResults: &input.MaxBuckets,
Prefix: &input.Prefix,
})
var buckets []s3response.ListAllMyBucketsEntry
result := s3response.ListAllMyBucketsResult{
Prefix: input.Prefix,
}
var result s3response.ListAllMyBucketsResult
resp, err := pager.NextPage(ctx)
if err != nil {
return result, azureErrToS3Err(err)
}
for _, v := range resp.ContainerItems {
if input.IsAdmin {
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
Name: *v.Name,
// TODO: using modification date here instead of creation, is that ok?
CreationDate: *v.Properties.LastModified,
})
} else {
acl, err := getAclFromMetadata(v.Metadata, keyAclLower)
if err != nil {
return result, err
}
if acl.Owner == input.Owner {
for pager.More() {
resp, err := pager.NextPage(ctx)
if err != nil {
return result, azureErrToS3Err(err)
}
for _, v := range resp.ContainerItems {
if isAdmin {
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
Name: *v.Name,
// TODO: using modification date here instead of creation, is that ok?
CreationDate: *v.Properties.LastModified,
})
} else {
acl, err := getAclFromMetadata(v.Metadata, keyAclLower)
if err != nil {
return result, err
}
if acl.Owner == owner {
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
Name: *v.Name,
// TODO: using modification date here instead of creation, is that ok?
CreationDate: *v.Properties.LastModified,
})
}
}
}
}
if resp.NextMarker != nil {
result.ContinuationToken = *resp.NextMarker
}
result.Buckets.Bucket = buckets
result.Owner.ID = input.Owner
result.Owner.ID = owner
return result, nil
}
@@ -256,8 +248,8 @@ func (az *Azure) HeadBucket(ctx context.Context, input *s3.HeadBucketInput) (*s3
return &s3.HeadBucketOutput{}, nil
}
func (az *Azure) DeleteBucket(ctx context.Context, bucket string) error {
pager := az.client.NewListBlobsFlatPager(bucket, nil)
func (az *Azure) DeleteBucket(ctx context.Context, input *s3.DeleteBucketInput) error {
pager := az.client.NewListBlobsFlatPager(*input.Bucket, nil)
pg, err := pager.NextPage(ctx)
if err != nil {
@@ -267,7 +259,7 @@ func (az *Azure) DeleteBucket(ctx context.Context, bucket string) error {
if len(pg.Segment.BlobItems) > 0 {
return s3err.GetAPIError(s3err.ErrBucketNotEmpty)
}
_, err = az.client.DeleteContainer(ctx, bucket, nil)
_, err = az.client.DeleteContainer(ctx, *input.Bucket, nil)
return azureErrToS3Err(err)
}
@@ -309,13 +301,13 @@ func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (s3respon
opts.HTTPHeaders.BlobContentDisposition = po.ContentDisposition
if strings.HasSuffix(*po.Key, "/") {
// Hardcode "application/x-directory" for directory objects
opts.HTTPHeaders.BlobContentType = backend.GetPtrFromString(backend.DirContentType)
opts.HTTPHeaders.BlobContentType = backend.GetStringPtr(backend.DirContentType)
} else {
opts.HTTPHeaders.BlobContentType = po.ContentType
}
if opts.HTTPHeaders.BlobContentType == nil {
opts.HTTPHeaders.BlobContentType = backend.GetPtrFromString(backend.DefaultContentType)
opts.HTTPHeaders.BlobContentType = backend.GetStringPtr(backend.DefaultContentType)
}
uploadResp, err := az.client.UploadStream(ctx, *po.Bucket, *po.Key, po.Body, opts)
@@ -389,29 +381,17 @@ func (az *Azure) DeleteBucketTagging(ctx context.Context, bucket string) error {
}
func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
client, err := az.getBlobClient(*input.Bucket, *input.Key)
if err != nil {
return nil, err
}
resp, err := client.GetProperties(ctx, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
var opts *azblob.DownloadStreamOptions
if *input.Range != "" {
offset, count, isValid, err := backend.ParseGetObjectRange(*resp.ContentLength, *input.Range)
offset, count, err := backend.ParseRange(0, *input.Range)
if err != nil {
return nil, err
}
if isValid {
opts = &azblob.DownloadStreamOptions{
Range: blob.HTTPRange{
Count: count,
Offset: offset,
},
}
opts = &azblob.DownloadStreamOptions{
Range: blob.HTTPRange{
Count: count,
Offset: offset,
},
}
}
blobDownloadResponse, err := az.client.DownloadStream(ctx, *input.Bucket, *input.Key, opts)
@@ -426,11 +406,11 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.G
contentType := blobDownloadResponse.ContentType
if contentType == nil {
contentType = backend.GetPtrFromString(backend.DefaultContentType)
contentType = backend.GetStringPtr(backend.DefaultContentType)
}
return &s3.GetObjectOutput{
AcceptRanges: backend.GetPtrFromString("bytes"),
AcceptRanges: input.Range,
ContentLength: blobDownloadResponse.ContentLength,
ContentEncoding: blobDownloadResponse.ContentEncoding,
ContentType: contentType,
@@ -522,22 +502,20 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
return result, nil
}
func (az *Azure) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
func (az *Azure) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
data, err := az.HeadObject(ctx, &s3.HeadObjectInput{
Bucket: input.Bucket,
Key: input.Key,
})
if err != nil {
return s3response.GetObjectAttributesResponse{}, err
return s3response.GetObjectAttributesResult{}, err
}
return s3response.GetObjectAttributesResponse{
ETag: backend.TrimEtag(data.ETag),
return s3response.GetObjectAttributesResult{
ETag: data.ETag,
LastModified: data.LastModified,
ObjectSize: data.ContentLength,
StorageClass: data.StorageClass,
LastModified: data.LastModified,
VersionId: data.VersionId,
DeleteMarker: data.DeleteMarker,
}, nil
}
@@ -575,7 +553,7 @@ Pager:
break Pager
}
objects = append(objects, s3response.Object{
ETag: backend.GetPtrFromString(fmt.Sprintf("%q", *v.Properties.ETag)),
ETag: (*string)(v.Properties.ETag),
Key: v.Name,
LastModified: v.Properties.LastModified,
Size: v.Properties.ContentLength,
@@ -591,28 +569,23 @@ Pager:
isTruncated = true
break Pager
}
marker := getString(input.Marker)
pfx := strings.TrimSuffix(*v.Name, getString(input.Delimiter))
if marker != "" && strings.HasPrefix(marker, pfx) {
continue
}
cPrefixes = append(cPrefixes, types.CommonPrefix{
Prefix: v.Name,
})
}
}
// TODO: generate common prefixes when appropriate
return s3response.ListObjectsResult{
Contents: objects,
Marker: backend.GetPtrFromString(*input.Marker),
Marker: input.Marker,
MaxKeys: input.MaxKeys,
Name: input.Bucket,
NextMarker: nextMarker,
Prefix: backend.GetPtrFromString(*input.Prefix),
Prefix: input.Prefix,
IsTruncated: &isTruncated,
Delimiter: backend.GetPtrFromString(*input.Delimiter),
Delimiter: input.Delimiter,
CommonPrefixes: cPrefixes,
}, nil
}
@@ -657,7 +630,7 @@ Pager:
break Pager
}
objects = append(objects, s3response.Object{
ETag: backend.GetPtrFromString(fmt.Sprintf("%q", *v.Properties.ETag)),
ETag: (*string)(v.Properties.ETag),
Key: v.Name,
LastModified: v.Properties.LastModified,
Size: v.Properties.ContentLength,
@@ -673,13 +646,6 @@ Pager:
isTruncated = true
break Pager
}
marker := getString(input.ContinuationToken)
pfx := strings.TrimSuffix(*v.Name, getString(input.Delimiter))
if marker != "" && strings.HasPrefix(marker, pfx) {
continue
}
cPrefixes = append(cPrefixes, types.CommonPrefix{
Prefix: v.Name,
})
@@ -688,15 +654,14 @@ Pager:
return s3response.ListObjectsV2Result{
Contents: objects,
ContinuationToken: backend.GetPtrFromString(*input.ContinuationToken),
ContinuationToken: input.ContinuationToken,
MaxKeys: input.MaxKeys,
Name: input.Bucket,
NextContinuationToken: nextMarker,
Prefix: backend.GetPtrFromString(*input.Prefix),
Prefix: input.Prefix,
IsTruncated: &isTruncated,
Delimiter: backend.GetPtrFromString(*input.Delimiter),
Delimiter: input.Delimiter,
CommonPrefixes: cPrefixes,
StartAfter: backend.GetPtrFromString(*input.StartAfter),
}, nil
}
@@ -732,8 +697,8 @@ func (az *Azure) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput
} else {
errs = append(errs, types.Error{
Key: obj.Key,
Code: backend.GetPtrFromString("InternalError"),
Message: backend.GetPtrFromString(err.Error()),
Code: backend.GetStringPtr("InternalError"),
Message: backend.GetStringPtr(err.Error()),
})
}
}
@@ -869,7 +834,7 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMult
// set blob legal hold status in metadata
if input.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn {
meta[string(keyObjLegalHold)] = backend.GetPtrFromString("1")
meta[string(keyObjLegalHold)] = backend.GetStringPtr("1")
}
// set blob retention date
@@ -882,7 +847,7 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMult
if err != nil {
return s3response.InitiateMultipartUploadResult{}, azureErrToS3Err(err)
}
meta[string(keyObjRetention)] = backend.GetPtrFromString(string(retParsed))
meta[string(keyObjRetention)] = backend.GetStringPtr(string(retParsed))
}
uploadId := uuid.New().String()
@@ -916,9 +881,9 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMult
}
// Each part is translated into an uncommitted block in a newly created blob in the staging area
func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (etag string, err error) {
if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil {
return nil, err
return "", err
}
// TODO: request streamable version of StageBlock()
@@ -927,34 +892,32 @@ func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3
// the body in memory to create an io.ReadSeekCloser
rdr, err := getReadSeekCloser(input.Body)
if err != nil {
return nil, err
return "", err
}
client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
if err != nil {
return nil, err
return "", err
}
// block id serves as etag here
etag := blockIDInt32ToBase64(*input.PartNumber)
etag = blockIDInt32ToBase64(*input.PartNumber)
_, err = client.StageBlock(ctx, etag, rdr, nil)
if err != nil {
return nil, parseMpError(err)
return "", parseMpError(err)
}
return &s3.UploadPartOutput{
ETag: &etag,
}, nil
return etag, nil
}
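
As an aside, a plausible sketch of blockIDInt32ToBase64 (an assumption; the actual implementation lives outside this diff). Azure requires all block IDs within a blob to be base64 strings of equal length, so the part number is passed through a fixed-width buffer first.

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// blockIDInt32ToBase64 encodes a part number as a fixed-width base64 block ID.
// (Sketch only; not necessarily the repository's exact encoding.)
func blockIDInt32ToBase64(blockID int32) string {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, uint32(blockID))
	return base64.StdEncoding.EncodeToString(buf)
}

func main() {
	fmt.Println(blockIDInt32ToBase64(1)) // AQAAAA==
}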
func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
if err != nil {
return s3response.CopyPartResult{}, nil
return s3response.CopyObjectResult{}, nil
}
if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil {
return s3response.CopyPartResult{}, err
return s3response.CopyObjectResult{}, err
}
eTag := blockIDInt32ToBase64(*input.PartNumber)
@@ -962,10 +925,10 @@ func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInp
// TODO: the action returns not implemented on Azurite; maybe this will work in production?
_, err = client.StageBlockFromURL(ctx, eTag, *input.CopySource, nil)
if err != nil {
return s3response.CopyPartResult{}, parseMpError(err)
return s3response.CopyObjectResult{}, parseMpError(err)
}
return s3response.CopyPartResult{}, nil
return s3response.CopyObjectResult{}, nil
}
// Lists all uncommitted parts from the blob
@@ -1058,8 +1021,7 @@ func (az *Azure) ListMultipartUploads(ctx context.Context, input *s3.ListMultipa
prefix := string(metaTmpMultipartPrefix)
pager := client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
Include: container.ListBlobsInclude{Metadata: true},
Prefix: &prefix,
Prefix: &prefix,
})
for pager.More() {
@@ -1208,53 +1170,27 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
uncommittedBlocks := map[int32]*blockblob.Block{}
for _, el := range blockList.UncommittedBlocks {
ptNumber, err := decodeBlockId(backend.GetStringFromPtr(el.Name))
slices.SortFunc(blockList.UncommittedBlocks, func(a *blockblob.Block, b *blockblob.Block) int {
ptNumber, _ := decodeBlockId(*a.Name)
nextPtNumber, _ := decodeBlockId(*b.Name)
return ptNumber - nextPtNumber
})
for i, block := range blockList.UncommittedBlocks {
ptNumber, err := decodeBlockId(*block.Name)
if err != nil {
return nil, fmt.Errorf("invalid block name: %w", err)
}
uncommittedBlocks[int32(ptNumber)] = el
}
// The initial value is the lower limit of partNumber: 0
var totalSize int64
var partNumber int32
last := len(blockList.UncommittedBlocks) - 1
for i, part := range input.MultipartUpload.Parts {
if part.PartNumber == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.PartNumber < 1 {
return nil, s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
}
if *part.PartNumber <= partNumber {
return nil, s3err.GetAPIError(s3err.ErrInvalidPartOrder)
}
partNumber = *part.PartNumber
block, ok := uncommittedBlocks[*part.PartNumber]
if !ok {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.ETag != *block.Name {
if *input.MultipartUpload.Parts[i].ETag != *block.Name {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
// all parts except the last need to be greater than
// the minimum allowed size (5 MiB)
if i < last && *block.Size < backend.MinPartSize {
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
if *input.MultipartUpload.Parts[i].PartNumber != int32(ptNumber) {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
totalSize += *block.Size
blockIds = append(blockIds, *block.Name)
}
if input.MpuObjectSize != nil && totalSize != *input.MpuObjectSize {
return nil, s3err.GetIncorrectMpObjectSizeErr(totalSize, *input.MpuObjectSize)
}
opts := &blockblob.CommitBlockListOptions{
Metadata: props.Metadata,
Tags: parseAzTags(tags.BlobTagSet),
@@ -1349,9 +1285,22 @@ func (az *Azure) GetObjectLockConfiguration(ctx context.Context, bucket string)
}
func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
err := az.isBucketObjectLockEnabled(ctx, bucket)
cfg, err := az.getContainerMetaData(ctx, bucket, string(keyBucketLock))
if err != nil {
return err
return azureErrToS3Err(err)
}
if len(cfg) == 0 {
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
}
var bucketLockConfig auth.BucketLockConfig
if err := json.Unmarshal(cfg, &bucketLockConfig); err != nil {
return fmt.Errorf("parse bucket lock config: %w", err)
}
if !bucketLockConfig.Enabled {
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
}
blobClient, err := az.getBlobClient(bucket, object)
@@ -1367,12 +1316,12 @@ func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, version
meta := blobProps.Metadata
if meta == nil {
meta = map[string]*string{
string(keyObjRetention): backend.GetPtrFromString(string(retention)),
string(keyObjRetention): backend.GetStringPtr(string(retention)),
}
} else {
objLockCfg, ok := meta[string(keyObjRetention)]
if !ok {
meta[string(keyObjRetention)] = backend.GetPtrFromString(string(retention))
meta[string(keyObjRetention)] = backend.GetStringPtr(string(retention))
} else {
var lockCfg types.ObjectLockRetention
if err := json.Unmarshal([]byte(*objLockCfg), &lockCfg); err != nil {
@@ -1390,7 +1339,7 @@ func (az *Azure) PutObjectRetention(ctx context.Context, bucket, object, version
}
}
meta[string(keyObjRetention)] = backend.GetPtrFromString(string(retention))
meta[string(keyObjRetention)] = backend.GetStringPtr(string(retention))
}
}
@@ -1412,11 +1361,6 @@ func (az *Azure) GetObjectRetention(ctx context.Context, bucket, object, version
return nil, azureErrToS3Err(err)
}
err = az.isBucketObjectLockEnabled(ctx, bucket)
if err != nil {
return nil, err
}
retentionPtr, ok := props.Metadata[string(keyObjRetention)]
if !ok {
return nil, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)
@@ -1426,9 +1370,22 @@ func (az *Azure) GetObjectRetention(ctx context.Context, bucket, object, version
}
func (az *Azure) PutObjectLegalHold(ctx context.Context, bucket, object, versionId string, status bool) error {
err := az.isBucketObjectLockEnabled(ctx, bucket)
cfg, err := az.getContainerMetaData(ctx, bucket, string(keyBucketLock))
if err != nil {
return err
return azureErrToS3Err(err)
}
if len(cfg) == 0 {
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
}
var bucketLockConfig auth.BucketLockConfig
if err := json.Unmarshal(cfg, &bucketLockConfig); err != nil {
return fmt.Errorf("parse bucket lock config: %w", err)
}
if !bucketLockConfig.Enabled {
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
}
blobClient, err := az.getBlobClient(bucket, object)
@@ -1475,11 +1432,6 @@ func (az *Azure) GetObjectLegalHold(ctx context.Context, bucket, object, version
return nil, azureErrToS3Err(err)
}
err = az.isBucketObjectLockEnabled(ctx, bucket)
if err != nil {
return nil, err
}
retentionPtr, ok := props.Metadata[string(keyObjLegalHold)]
if !ok {
return nil, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)
@@ -1497,10 +1449,7 @@ func (az *Azure) ChangeBucketOwner(ctx context.Context, bucket string, acl []byt
// The action actually returns the containers owned by the user who initialized the gateway
// TODO: Not sure if there's a way to list all the containers and owners?
func (az *Azure) ListBucketsAndOwners(ctx context.Context) (buckets []s3response.Bucket, err error) {
opts := &service.ListContainersOptions{
Include: service.ListContainersInclude{Metadata: true},
}
pager := az.client.NewListContainersPager(opts)
pager := az.client.NewListContainersPager(nil)
for pager.More() {
resp, err := pager.NextPage(ctx)
@@ -1522,28 +1471,6 @@ func (az *Azure) ListBucketsAndOwners(ctx context.Context) (buckets []s3response
return buckets, nil
}
func (az *Azure) isBucketObjectLockEnabled(ctx context.Context, bucket string) error {
cfg, err := az.getContainerMetaData(ctx, bucket, string(keyBucketLock))
if err != nil {
return azureErrToS3Err(err)
}
if len(cfg) == 0 {
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
}
var bucketLockConfig auth.BucketLockConfig
if err := json.Unmarshal(cfg, &bucketLockConfig); err != nil {
return fmt.Errorf("parse bucket lock config: %w", err)
}
if !bucketLockConfig.Enabled {
return s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration)
}
return nil
}
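
For reference, a minimal sketch of decoding the stored lock config, assuming auth.BucketLockConfig carries at least the Enabled field checked above (any other fields are elided).

package main

import (
	"encoding/json"
	"fmt"
)

// bucketLockConfig mirrors only the Enabled check above; the real
// auth.BucketLockConfig may define more fields.
type bucketLockConfig struct {
	Enabled bool
}

func main() {
	raw := []byte(`{"Enabled":true}`) // hypothetical container metadata payload
	var cfg bucketLockConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println("object lock enabled:", cfg.Enabled)
}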
func (az *Azure) getContainerURL(cntr string) string {
return fmt.Sprintf("%v/%v", strings.TrimRight(az.serviceURL, "/"), cntr)
}
@@ -1741,7 +1668,7 @@ func (az *Azure) setContainerMetaData(ctx context.Context, bucket, key string, v
}
str := encodeBytes(value)
mdmap[key] = backend.GetPtrFromString(str)
mdmap[key] = backend.GetStringPtr(str)
_, err = client.SetMetadata(ctx, &container.SetMetadataOptions{Metadata: mdmap})
if err != nil {
@@ -1776,11 +1703,9 @@ func (az *Azure) deleteContainerMetaData(ctx context.Context, bucket, key string
}
func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
var acl auth.ACL
data, ok := meta[string(key)]
if !ok {
return &acl, nil
return nil, s3err.GetAPIError(s3err.ErrInternalError)
}
value, err := decodeString(*data)
@@ -1788,6 +1713,7 @@ func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
return nil, err
}
var acl auth.ACL
if len(value) == 0 {
return &acl, nil
}

View File

@@ -32,14 +32,14 @@ type Backend interface {
Shutdown()
// bucket operations
ListBuckets(context.Context, s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error)
ListBuckets(_ context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error)
HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
GetBucketAcl(context.Context, *s3.GetBucketAclInput) ([]byte, error)
CreateBucket(_ context.Context, _ *s3.CreateBucketInput, defaultACL []byte) error
PutBucketAcl(_ context.Context, bucket string, data []byte) error
DeleteBucket(_ context.Context, bucket string) error
DeleteBucket(context.Context, *s3.DeleteBucketInput) error
PutBucketVersioning(_ context.Context, bucket string, status types.BucketVersioningStatus) error
GetBucketVersioning(_ context.Context, bucket string) (s3response.GetBucketVersioningOutput, error)
GetBucketVersioning(_ context.Context, bucket string) (*s3.GetBucketVersioningOutput, error)
PutBucketPolicy(_ context.Context, bucket string, policy []byte) error
GetBucketPolicy(_ context.Context, bucket string) ([]byte, error)
DeleteBucketPolicy(_ context.Context, bucket string) error
@@ -53,15 +53,15 @@ type Backend interface {
AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error
ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)
ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error)
UploadPart(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error)
UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error)
UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error)
UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)
// standard object operations
PutObject(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error)
HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error)
GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error)
CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error)
ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error)
@@ -108,7 +108,7 @@ func (BackendUnsupported) Shutdown() {}
func (BackendUnsupported) String() string {
return "Unsupported"
}
func (BackendUnsupported) ListBuckets(context.Context, s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
func (BackendUnsupported) ListBuckets(context.Context, string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
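
A minimal sketch of the embedding pattern this type enables (MyBackend is hypothetical): implement only the supported operations and let everything else fall through to BackendUnsupported.

package mybackend

import "github.com/versity/versitygw/backend"

// MyBackend overrides only what it supports; every other Backend method
// falls through to BackendUnsupported and returns ErrNotImplemented.
type MyBackend struct {
	backend.BackendUnsupported
}

func (MyBackend) String() string { return "MyBackend" }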
func (BackendUnsupported) HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
@@ -123,14 +123,14 @@ func (BackendUnsupported) CreateBucket(context.Context, *s3.CreateBucketInput, [
func (BackendUnsupported) PutBucketAcl(_ context.Context, bucket string, data []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucket(_ context.Context, bucket string) error {
func (BackendUnsupported) DeleteBucket(context.Context, *s3.DeleteBucketInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketVersioning(_ context.Context, bucket string, status types.BucketVersioningStatus) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketVersioning(_ context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) GetBucketVersioning(_ context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketPolicy(_ context.Context, bucket string, policy []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -166,11 +166,11 @@ func (BackendUnsupported) ListMultipartUploads(context.Context, *s3.ListMultipar
func (BackendUnsupported) ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error) {
return s3response.ListPartsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObject(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
@@ -185,8 +185,8 @@ func (BackendUnsupported) GetObject(context.Context, *s3.GetObjectInput) (*s3.Ge
func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
return s3response.GetObjectAttributesResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
return s3response.GetObjectAttributesResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)

View File

@@ -19,6 +19,7 @@ import (
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
@@ -33,9 +34,6 @@ const (
// this is the media type for directories in AWS and Nextcloud
DirContentType = "application/x-directory"
DefaultContentType = "binary/octet-stream"
// this is the minimum allowed size for mp parts
MinPartSize = 5 * 1024 * 1024
)
func IsValidBucketName(name string) bool { return true }
@@ -52,136 +50,53 @@ func (d ByObjectName) Len() int { return len(d) }
func (d ByObjectName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByObjectName) Less(i, j int) bool { return *d[i].Key < *d[j].Key }
func GetPtrFromString(str string) *string {
if str == "" {
return nil
}
return &str
}
func GetStringFromPtr(str *string) string {
if str == nil {
return ""
}
return *str
func GetStringPtr(s string) *string {
return &s
}
func GetTimePtr(t time.Time) *time.Time {
return &t
}
func TrimEtag(etag *string) *string {
if etag == nil {
return nil
}
return GetPtrFromString(strings.Trim(*etag, "\""))
}
var (
errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
errInvalidCopySourceRange = s3err.GetAPIError(s3err.ErrInvalidCopySourceRange)
errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
)
// ParseGetObjectRange parses the input range header and returns startOffset,
// length, isValid, and error. If no endOffset is specified, length is set to
// the remaining object size. For invalid inputs it returns no error, but
// isValid=false. An `InvalidRange` error is returned only if startOffset is
// at or past the object size.
func ParseGetObjectRange(size int64, acceptRange string) (int64, int64, bool, error) {
if acceptRange == "" {
return 0, size, false, nil
}
rangeKv := strings.Split(acceptRange, "=")
if len(rangeKv) != 2 {
return 0, size, false, nil
}
if rangeKv[0] != "bytes" {
return 0, size, false, nil
}
bRange := strings.Split(rangeKv[1], "-")
if len(bRange) != 2 {
return 0, size, false, nil
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil {
return 0, size, false, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
if bRange[1] == "" {
return startOffset, size - startOffset, true, nil
}
endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, size, false, nil
}
if endOffset < startOffset {
return 0, size, false, nil
}
if endOffset >= size {
return startOffset, size - startOffset, true, nil
}
return startOffset, endOffset - startOffset + 1, true, nil
}
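
A small sketch of how the GetObject handlers later in this diff consume these values (the helper below is illustrative, not part of the codebase).

package main

import "fmt"

// contentRangeHeader builds the Content-Range response value from the results
// of ParseGetObjectRange.
func contentRangeHeader(start, length, objSize int64, isValid bool) string {
	if !isValid {
		return "" // whole object returned; no Content-Range header
	}
	return fmt.Sprintf("bytes %v-%v/%v", start, start+length-1, objSize)
}

func main() {
	// "bytes=10-19" on a 100-byte object parses to start=10, length=10, isValid=true.
	fmt.Println(contentRangeHeader(10, 10, 100, true)) // bytes 10-19/100
}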
// ParseCopySourceRange parses the input range header and returns startOffset,
// length, and error. If no endOffset is specified, length extends to the end
// of the object.
func ParseCopySourceRange(size int64, acceptRange string) (int64, int64, error) {
// ParseRange parses the input range header and returns startOffset, length,
// and error. If no endOffset is specified, length is set to -1.
func ParseRange(size int64, acceptRange string) (int64, int64, error) {
if acceptRange == "" {
return 0, size, nil
}
rangeKv := strings.Split(acceptRange, "=")
if len(rangeKv) != 2 {
return 0, 0, errInvalidCopySourceRange
}
if rangeKv[0] != "bytes" {
return 0, 0, errInvalidCopySourceRange
if len(rangeKv) < 2 {
return 0, 0, errInvalidRange
}
bRange := strings.Split(rangeKv[1], "-")
if len(bRange) != 2 {
return 0, 0, errInvalidCopySourceRange
if len(bRange) < 1 || len(bRange) > 2 {
return 0, 0, errInvalidRange
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil {
return 0, 0, errInvalidCopySourceRange
return 0, 0, errInvalidRange
}
if startOffset >= size {
return 0, 0, s3err.CreateExceedingRangeErr(size)
endOffset := int64(-1)
if len(bRange) == 1 || bRange[1] == "" {
return startOffset, endOffset, nil
}
if bRange[1] == "" {
return startOffset, size - startOffset + 1, nil
}
endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
endOffset, err = strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, 0, errInvalidCopySourceRange
return 0, 0, errInvalidRange
}
if endOffset < startOffset {
return 0, 0, errInvalidCopySourceRange
}
if endOffset >= size {
return 0, 0, s3err.CreateExceedingRangeErr(size)
if endOffset <= startOffset {
return 0, 0, errInvalidRange
}
return startOffset, endOffset - startOffset + 1, nil
@@ -194,13 +109,15 @@ func ParseCopySource(copySourceHeader string) (string, string, string, error) {
copySourceHeader = copySourceHeader[1:]
}
var copySource, versionId string
i := strings.LastIndex(copySourceHeader, "?versionId=")
if i == -1 {
copySource = copySourceHeader
} else {
copySource = copySourceHeader[:i]
versionId = copySourceHeader[i+11:]
cSplitted := strings.Split(copySourceHeader, "?")
copySource := cSplitted[0]
var versionId string
if len(cSplitted) > 1 {
versionIdParts := strings.Split(cSplitted[1], "=")
if len(versionIdParts) != 2 || versionIdParts[0] != "versionId" {
return "", "", "", s3err.GetAPIError(s3err.ErrInvalidRequest)
}
versionId = versionIdParts[1]
}
srcBucket, srcObject, ok := strings.Cut(copySource, "/")
@@ -211,13 +128,21 @@ func ParseCopySource(copySourceHeader string) (string, string, string, error) {
return srcBucket, srcObject, versionId, nil
}
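
A quick, runnable illustration of the parse (header value hypothetical).

package main

import (
	"fmt"

	"github.com/versity/versitygw/backend"
)

func main() {
	// "bucket/object?versionId=..." splits into its three parts.
	b, o, v, err := backend.ParseCopySource("srcbucket/dir/key.txt?versionId=abc123")
	if err != nil {
		panic(err)
	}
	fmt.Println(b, o, v) // srcbucket dir/key.txt abc123
}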
func CreateExceedingRangeErr(objSize int64) s3err.APIError {
return s3err.APIError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Range specified is not valid for source object of size: %d", objSize),
HTTPStatusCode: http.StatusBadRequest,
}
}
func GetMultipartMD5(parts []types.CompletedPart) string {
var partsEtagBytes []byte
for _, part := range parts {
partsEtagBytes = append(partsEtagBytes, getEtagBytes(*part.ETag)...)
}
return fmt.Sprintf("\"%s-%d\"", md5String(partsEtagBytes), len(parts))
s3MD5 := fmt.Sprintf("%s-%d", md5String(partsEtagBytes), len(parts))
return s3MD5
}
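
A standalone illustration of the computation, assuming getEtagBytes hex-decodes each part ETag (the quoting of the final value differs between the two sides of this diff).

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	// Two hypothetical part ETags, each the hex MD5 of that part's data.
	partETags := []string{
		"5d41402abc4b2a76b9719d911017c592",
		"7d793037a0760186574b0282f2f435e7",
	}

	// Concatenate the raw MD5 bytes of every part, then MD5 the result.
	var all []byte
	for _, e := range partETags {
		b, _ := hex.DecodeString(e)
		all = append(all, b...)
	}
	sum := md5.Sum(all)

	// S3-style multipart ETag: "<md5-of-part-md5s>-<part count>".
	fmt.Printf("%s-%d\n", hex.EncodeToString(sum[:]), len(partETags))
}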
func getEtagBytes(etag string) []byte {

View File

@@ -14,19 +14,17 @@
package meta
import "os"
// MetadataStorer defines the interface for managing metadata.
// When object == "", the operation is on the bucket.
type MetadataStorer interface {
// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
// Returns the value of the attribute, or an error if the attribute does not exist.
RetrieveAttribute(f *os.File, bucket, object, attribute string) ([]byte, error)
RetrieveAttribute(bucket, object, attribute string) ([]byte, error)
// StoreAttribute stores the value of a specific attribute for an object or a bucket.
// If attribute already exists, new attribute should replace existing.
// Returns an error if the operation fails.
StoreAttribute(f *os.File, bucket, object, attribute string, value []byte) error
StoreAttribute(bucket, object, attribute string, value []byte) error
// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
// Returns an error if the operation fails.

View File

@@ -1,54 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package meta
import (
"os"
)
// NoMeta is a metadata storer that does not store metadata.
// This can be useful for read only mounts where attempting to store metadata
// would fail.
type NoMeta struct{}
// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
// always returns ErrNoSuchKey
func (NoMeta) RetrieveAttribute(_ *os.File, _, _, _ string) ([]byte, error) {
return nil, ErrNoSuchKey
}
// StoreAttribute stores the value of a specific attribute for an object or a bucket.
// always returns nil without storing the attribute
func (NoMeta) StoreAttribute(_ *os.File, _, _, _ string, _ []byte) error {
return nil
}
// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
// always returns nil without deleting the attribute
func (NoMeta) DeleteAttribute(_, _, _ string) error {
return nil
}
// ListAttributes lists all attributes for an object or a bucket.
// always returns an empty list of attributes
func (NoMeta) ListAttributes(_, _ string) ([]string, error) {
return []string{}, nil
}
// DeleteAttributes removes all attributes for an object or a bucket.
// always returns nil without deleting any attributes
func (NoMeta) DeleteAttributes(bucket, object string) error {
return nil
}

View File

@@ -1,139 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package meta
import (
"errors"
"fmt"
"os"
"path/filepath"
)
// SideCar is a metadata storer that uses sidecar files to store metadata.
type SideCar struct {
dir string
}
const (
sidecarmeta = "meta"
)
// NewSideCar creates a new SideCar metadata storer.
func NewSideCar(dir string) (SideCar, error) {
fi, err := os.Lstat(dir)
if err != nil {
return SideCar{}, fmt.Errorf("failed to stat directory: %v", err)
}
if !fi.IsDir() {
return SideCar{}, fmt.Errorf("not a directory")
}
return SideCar{dir: dir}, nil
}
// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
func (s SideCar) RetrieveAttribute(_ *os.File, bucket, object, attribute string) ([]byte, error) {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
attr := filepath.Join(metadir, attribute)
value, err := os.ReadFile(attr)
if errors.Is(err, os.ErrNotExist) {
return nil, ErrNoSuchKey
}
if err != nil {
return nil, fmt.Errorf("failed to read attribute: %v", err)
}
return value, nil
}
// StoreAttribute stores the value of a specific attribute for an object or a bucket.
func (s SideCar) StoreAttribute(_ *os.File, bucket, object, attribute string, value []byte) error {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
err := os.MkdirAll(metadir, 0777)
if err != nil {
return fmt.Errorf("failed to create metadata directory: %v", err)
}
attr := filepath.Join(metadir, attribute)
err = os.WriteFile(attr, value, 0666)
if err != nil {
return fmt.Errorf("failed to write attribute: %v", err)
}
return nil
}
// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
func (s SideCar) DeleteAttribute(bucket, object, attribute string) error {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
attr := filepath.Join(metadir, attribute)
err := os.Remove(attr)
if errors.Is(err, os.ErrNotExist) {
return ErrNoSuchKey
}
if err != nil {
return fmt.Errorf("failed to remove attribute: %v", err)
}
return nil
}
// ListAttributes lists all attributes for an object or a bucket.
func (s SideCar) ListAttributes(bucket, object string) ([]string, error) {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
ents, err := os.ReadDir(metadir)
if errors.Is(err, os.ErrNotExist) {
return []string{}, nil
}
if err != nil {
return nil, fmt.Errorf("failed to list attributes: %v", err)
}
var attrs []string
for _, ent := range ents {
attrs = append(attrs, ent.Name())
}
return attrs, nil
}
// DeleteAttributes removes all attributes for an object or a bucket.
func (s SideCar) DeleteAttributes(bucket, object string) error {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
err := os.RemoveAll(metadir)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("failed to remove attributes: %v", err)
}
return nil
}
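
An illustration of the sidecar layout implied above (all paths hypothetical).

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	dir, bucket, object, attribute := "/var/meta", "mybucket", "a/b.txt", "etag"

	// Mirrors RetrieveAttribute/StoreAttribute above.
	metadir := filepath.Join(dir, bucket, object, "meta")
	if object == "" {
		metadir = filepath.Join(dir, bucket, "meta")
	}
	fmt.Println(filepath.Join(metadir, attribute)) // /var/meta/mybucket/a/b.txt/meta/etag
}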

View File

@@ -17,13 +17,11 @@ package meta
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/pkg/xattr"
"github.com/versity/versitygw/s3err"
)
const (
@@ -38,15 +36,7 @@ var (
type XattrMeta struct{}
// RetrieveAttribute retrieves the value of a specific attribute for an object in a bucket.
func (x XattrMeta) RetrieveAttribute(f *os.File, bucket, object, attribute string) ([]byte, error) {
if f != nil {
b, err := xattr.FGet(f, xattrPrefix+attribute)
if errors.Is(err, xattr.ENOATTR) {
return nil, ErrNoSuchKey
}
return b, err
}
func (x XattrMeta) RetrieveAttribute(bucket, object, attribute string) ([]byte, error) {
b, err := xattr.Get(filepath.Join(bucket, object), xattrPrefix+attribute)
if errors.Is(err, xattr.ENOATTR) {
return nil, ErrNoSuchKey
@@ -55,20 +45,8 @@ func (x XattrMeta) RetrieveAttribute(f *os.File, bucket, object, attribute strin
}
// StoreAttribute stores the value of a specific attribute for an object in a bucket.
func (x XattrMeta) StoreAttribute(f *os.File, bucket, object, attribute string, value []byte) error {
if f != nil {
err := xattr.FSet(f, xattrPrefix+attribute, value)
if errors.Is(err, syscall.EROFS) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return err
}
err := xattr.Set(filepath.Join(bucket, object), xattrPrefix+attribute, value)
if errors.Is(err, syscall.EROFS) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return err
func (x XattrMeta) StoreAttribute(bucket, object, attribute string, value []byte) error {
return xattr.Set(filepath.Join(bucket, object), xattrPrefix+attribute, value)
}
// DeleteAttribute removes the value of a specific attribute for an object in a bucket.
@@ -77,9 +55,6 @@ func (x XattrMeta) DeleteAttribute(bucket, object, attribute string) error {
if errors.Is(err, xattr.ENOATTR) {
return ErrNoSuchKey
}
if errors.Is(err, syscall.EROFS) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return err
}
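
For context, the raw github.com/pkg/xattr calls wrapped above look roughly like this (the gateway prepends xattrPrefix, defined earlier in the file; the attribute name here is hypothetical).

package main

import (
	"fmt"
	"os"

	"github.com/pkg/xattr"
)

func main() {
	f, err := os.CreateTemp("", "xattr-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Set, get, and remove an extended attribute, as XattrMeta does above.
	if err := xattr.Set(f.Name(), "user.demo", []byte("value")); err != nil {
		panic(err) // the filesystem may not support xattrs
	}
	b, _ := xattr.Get(f.Name(), "user.demo")
	fmt.Println(string(b)) // value
	_ = xattr.Remove(f.Name(), "user.demo")
}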

View File

@@ -15,6 +15,11 @@ import (
"github.com/versity/versitygw/s3err"
)
var (
// TODO: make this configurable
defaultDirPerm fs.FileMode = 0755
)
// MkdirAll is similar to os.MkdirAll but it will return
// ErrObjectParentIsFile when appropriate
// MkdirAll creates a directory named path,
@@ -27,7 +32,7 @@ import (
// and returns nil.
// Any directory created will be set to provided uid/gid ownership
// if doChown is true.
func MkdirAll(path string, uid, gid int, doChown bool, dirPerm fs.FileMode) error {
func MkdirAll(path string, uid, gid int, doChown bool) error {
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
dir, err := os.Stat(path)
if err == nil {
@@ -50,14 +55,14 @@ func MkdirAll(path string, uid, gid int, doChown bool, dirPerm fs.FileMode) erro
if j > 1 {
// Create parent.
err = MkdirAll(path[:j-1], uid, gid, doChown, dirPerm)
err = MkdirAll(path[:j-1], uid, gid, doChown)
if err != nil {
return err
}
}
// Parent now exists; invoke Mkdir and use its result.
err = os.Mkdir(path, dirPerm)
err = os.Mkdir(path, defaultDirPerm)
if err != nil {
// Handle arguments like "foo/." by
// double-checking that directory doesn't exist.

File diff suppressed because it is too large

View File

@@ -29,7 +29,6 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"golang.org/x/sys/unix"
)
@@ -44,7 +43,6 @@ type tmpfile struct {
needsChown bool
uid int
gid int
newDirPerm fs.FileMode
}
var (
@@ -63,12 +61,8 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
// this is not supported.
fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
// O_TMPFILE not supported, try fallback
err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
err = backend.MkdirAll(dir, uid, gid, doChown)
if err != nil {
return nil, fmt.Errorf("make temp dir: %w", err)
}
@@ -114,7 +108,6 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
needsChown: doChown,
uid: uid,
gid: gid,
newDirPerm: p.newDirPerm,
}
// falloc is best effort, its fine if this fails
@@ -141,9 +134,6 @@ func (tmp *tmpfile) falloc() error {
}
func (tmp *tmpfile) link() error {
// make sure this is cleaned up in all error cases
defer tmp.f.Close()
// We use Linkat/Rename as the atomic operation for object puts. The
// upload is written to a temp (or unnamed/O_TMPFILE) file to not conflict
// with any other simultaneous uploads. The final operation is to move the
@@ -158,7 +148,7 @@ func (tmp *tmpfile) link() error {
dir := filepath.Dir(objPath)
err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown, tmp.newDirPerm)
err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown)
if err != nil {
return fmt.Errorf("make parent dir: %w", err)
}
@@ -180,21 +170,11 @@ func (tmp *tmpfile) link() error {
}
defer dirf.Close()
for {
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if errors.Is(err, syscall.EEXIST) {
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
continue
}
if err != nil {
return fmt.Errorf("link tmpfile (fd %q as %q): %w",
filepath.Base(tmp.f.Name()), objPath, err)
}
break
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if err != nil {
return fmt.Errorf("link tmpfile (%q in %q): %w",
filepath.Dir(objPath), filepath.Base(tmp.f.Name()), err)
}
err = tmp.f.Close()
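
The O_TMPFILE/Linkat pattern above in isolation (Linux-only sketch; paths hypothetical, error handling reduced to panics). The file stays invisible to other processes until the final Linkat gives it a name.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	dir, objPath := "/tmp", "/tmp/obj" // hypothetical

	// Unnamed file: no directory entry exists yet.
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0644)
	if err != nil {
		panic(err) // O_TMPFILE unsupported; the fallback path uses CreateTemp
	}
	f := os.NewFile(uintptr(fd), "tmpfile")
	defer f.Close()

	if _, err := f.WriteString("hello"); err != nil {
		panic(err)
	}

	// Atomically materialize the file under its final name.
	err = unix.Linkat(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd),
		unix.AT_FDCWD, objPath, unix.AT_SYMLINK_FOLLOW)
	if err != nil {
		panic(err) // EEXIST if objPath already exists (see the retry loop above)
	}
}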

View File

@@ -24,11 +24,9 @@ import (
"io/fs"
"os"
"path/filepath"
"syscall"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
)
type tmpfile struct {
@@ -43,19 +41,13 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
// Create a temp file for upload while in progress (see link comments below).
var err error
err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
err = backend.MkdirAll(dir, uid, gid, doChown)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, fmt.Errorf("create temp file: %w", err)
}

File diff suppressed because it is too large

View File

@@ -40,12 +40,10 @@ import (
)
type ScoutfsOpts struct {
ChownUID bool
ChownGID bool
GlacierMode bool
BucketLinks bool
NewDirPerm fs.FileMode
DisableNoArchive bool
ChownUID bool
ChownGID bool
GlacierMode bool
BucketLinks bool
}
type ScoutFS struct {
@@ -76,14 +74,6 @@ type ScoutFS struct {
// used to determine if chowning is needed
euid int
egid int
// newDirPerm is the permissions to use when creating new directories
newDirPerm fs.FileMode
// disableNoArchive is used to disable setting scoutam noarchive flag
// on multipart parts. This is enabled by default to prevent archive
// copies of temporary multipart parts.
disableNoArchive bool
}
var _ backend.Backend = &ScoutFS{}
@@ -162,31 +152,6 @@ func (s *ScoutFS) getChownIDs(acct auth.Account) (int, int, bool) {
return uid, gid, needsChown
}
func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
out, err := s.Posix.UploadPart(ctx, input)
if err != nil {
return nil, err
}
if !s.disableNoArchive {
sum := sha256.Sum256([]byte(*input.Key))
partPath := filepath.Join(
*input.Bucket, // bucket
metaTmpMultipartDir, // temp multipart dir
fmt.Sprintf("%x", sum), // hashed objname
*input.UploadId, // upload id
fmt.Sprintf("%v", *input.PartNumber), // part number
)
err = setNoArchive(partPath)
if err != nil {
return nil, fmt.Errorf("set noarchive: %w", err)
}
}
return out, err
}
// CompleteMultipartUpload in scoutfs uses the scoutfs move-blocks ioctl to
// avoid reading and copying the part data to the final object. This saves a
// read and write cycle for all multipart uploads.
@@ -261,7 +226,7 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
b, err := s.meta.RetrieveAttribute(nil, bucket, partObjPath, etagkey)
b, err := s.meta.RetrieveAttribute(bucket, partObjPath, etagkey)
etag := string(b)
if err != nil {
etag = ""
@@ -297,7 +262,7 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
// scoutfs move data is a metadata-only operation that moves the data
// extent references from the source, appending to the destination.
// This needs to be 4k aligned.
err = moveData(pf, f.File())
err = moveData(pf, f.f)
pf.Close()
if err != nil {
return nil, fmt.Errorf("move blocks part %v: %v", *part.PartNumber, err)
@@ -312,81 +277,88 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
dir := filepath.Dir(objname)
if dir != "" {
uid, gid, doChown := s.getChownIDs(acct)
err = backend.MkdirAll(dir, uid, gid, doChown, s.newDirPerm)
err = backend.MkdirAll(dir, uid, gid, doChown)
if err != nil {
return nil, err
}
}
for k, v := range userMetaData {
err = s.meta.StoreAttribute(f.File(), bucket, object, fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
if err != nil {
return nil, fmt.Errorf("set user attr %q: %w", k, err)
}
}
// load and set tagging
tagging, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, tagHdr)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return nil, fmt.Errorf("get object tagging: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, tagHdr, tagging)
if err != nil {
return nil, fmt.Errorf("set object tagging: %w", err)
}
}
// set content-type
if cType != "" {
err := s.meta.StoreAttribute(f.File(), bucket, object, contentTypeHdr, []byte(cType))
if err != nil {
return nil, fmt.Errorf("set object content type: %w", err)
}
}
// load and set legal hold
lHold, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, objectLegalHoldKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return nil, fmt.Errorf("get object legal hold: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, objectLegalHoldKey, lHold)
if err != nil {
return nil, fmt.Errorf("set object legal hold: %w", err)
}
}
// load and set retention
ret, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, objectRetentionKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return nil, fmt.Errorf("get object retention: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, objectRetentionKey, ret)
if err != nil {
return nil, fmt.Errorf("set object retention: %w", err)
}
}
// Calculate s3 compatible md5sum for complete multipart.
s3MD5 := backend.GetMultipartMD5(parts)
err = s.meta.StoreAttribute(f.File(), bucket, object, etagkey, []byte(s3MD5))
if err != nil {
return nil, fmt.Errorf("set etag attr: %w", err)
}
err = f.link()
if err != nil {
return nil, fmt.Errorf("link object in namespace: %w", err)
}
for k, v := range userMetaData {
err = s.meta.StoreAttribute(bucket, object, fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
if err != nil {
// cleanup object if returning error
os.Remove(objname)
return nil, fmt.Errorf("set user attr %q: %w", k, err)
}
}
// load and set tagging
tagging, err := s.meta.RetrieveAttribute(bucket, upiddir, tagHdr)
if err == nil {
if err := s.meta.StoreAttribute(bucket, object, tagHdr, tagging); err != nil {
// cleanup object
os.Remove(objname)
return nil, fmt.Errorf("set object tagging: %w", err)
}
}
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return nil, fmt.Errorf("get object tagging: %w", err)
}
// set content-type
if cType != "" {
if err := s.meta.StoreAttribute(bucket, object, contentTypeHdr, []byte(cType)); err != nil {
// cleanup object
os.Remove(objname)
return nil, fmt.Errorf("set object content type: %w", err)
}
}
// load and set legal hold
lHold, err := s.meta.RetrieveAttribute(bucket, upiddir, objectLegalHoldKey)
if err == nil {
if err := s.meta.StoreAttribute(bucket, object, objectLegalHoldKey, lHold); err != nil {
// cleanup object
os.Remove(objname)
return nil, fmt.Errorf("set object legal hold: %w", err)
}
}
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return nil, fmt.Errorf("get object legal hold: %w", err)
}
// load and set retention
ret, err := s.meta.RetrieveAttribute(bucket, upiddir, objectRetentionKey)
if err == nil {
if err := s.meta.StoreAttribute(bucket, object, objectRetentionKey, ret); err != nil {
// cleanup object
os.Remove(objname)
return nil, fmt.Errorf("set object retention: %w", err)
}
}
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return nil, fmt.Errorf("get object retention: %w", err)
}
// Calculate s3 compatible md5sum for complete multipart.
s3MD5 := backend.GetMultipartMD5(parts)
err = s.meta.StoreAttribute(bucket, object, etagkey, []byte(s3MD5))
if err != nil {
// cleanup object if returning error
os.Remove(objname)
return nil, fmt.Errorf("set etag attr: %w", err)
}
// cleanup tmp dirs
os.RemoveAll(filepath.Join(bucket, upiddir))
os.RemoveAll(upiddir)
// use Remove for objdir in case there are still other uploads
// for same object name outstanding
os.Remove(filepath.Join(bucket, objdir))
os.Remove(objdir)
return &s3.CompleteMultipartUploadOutput{
Bucket: &bucket,
@@ -420,7 +392,7 @@ func (s *ScoutFS) loadUserMetaData(bucket, object string, m map[string]string) (
if !isValidMeta(e) {
continue
}
b, err := s.meta.RetrieveAttribute(nil, bucket, object, e)
b, err := s.meta.RetrieveAttribute(bucket, object, e)
if err != nil {
continue
}
@@ -432,13 +404,13 @@ func (s *ScoutFS) loadUserMetaData(bucket, object string, m map[string]string) (
}
var contentType, contentEncoding string
b, _ := s.meta.RetrieveAttribute(nil, bucket, object, contentTypeHdr)
b, _ := s.meta.RetrieveAttribute(bucket, object, contentTypeHdr)
contentType = string(b)
if contentType != "" {
m[contentTypeHdr] = contentType
}
b, _ = s.meta.RetrieveAttribute(nil, bucket, object, contentEncHdr)
b, _ = s.meta.RetrieveAttribute(bucket, object, contentEncHdr)
contentEncoding = string(b)
if contentEncoding != "" {
m[contentEncHdr] = contentEncoding
@@ -494,7 +466,7 @@ func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
return nil, fmt.Errorf("stat part: %w", err)
}
b, err := s.meta.RetrieveAttribute(nil, bucket, partPath, etagkey)
b, err := s.meta.RetrieveAttribute(bucket, partPath, etagkey)
etag := string(b)
if err != nil {
etag = ""
@@ -521,7 +493,7 @@ func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
objPath := filepath.Join(bucket, object)
fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if errors.Is(err, syscall.ENAMETOOLONG) {
@@ -542,7 +514,7 @@ func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
contentType = "application/x-directory"
}
b, err := s.meta.RetrieveAttribute(nil, bucket, object, etagkey)
b, err := s.meta.RetrieveAttribute(bucket, object, etagkey)
etag := string(b)
if err != nil {
etag = ""
@@ -582,7 +554,7 @@ func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
contentLength := fi.Size()
var objectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
status, err := s.Posix.GetObjectLegalHold(ctx, bucket, object, *input.VersionId)
status, err := s.Posix.GetObjectLegalHold(ctx, bucket, object, "")
if err == nil {
if *status {
objectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOn
@@ -593,7 +565,7 @@ func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
var objectLockMode types.ObjectLockMode
var objectLockRetainUntilDate *time.Time
retention, err := s.Posix.GetObjectRetention(ctx, bucket, object, *input.VersionId)
retention, err := s.Posix.GetObjectRetention(ctx, bucket, object, "")
if err == nil {
var config types.ObjectLockRetention
if err := json.Unmarshal(retention, &config); err == nil {
@@ -645,7 +617,7 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.Ge
objPath := filepath.Join(bucket, object)
fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if errors.Is(err, syscall.ENAMETOOLONG) {
@@ -659,20 +631,28 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.Ge
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
objSize := fi.Size()
startOffset, length, isValid, err := backend.ParseGetObjectRange(objSize, acceptRange)
startOffset, length, err := backend.ParseRange(fi.Size(), acceptRange)
if err != nil {
return nil, err
}
objSize := fi.Size()
if fi.IsDir() {
// directory objects are always 0 len
objSize = 0
length = 0
}
if length == -1 {
length = fi.Size() - startOffset + 1
}
if startOffset+length > fi.Size() {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
var contentRange string
if isValid {
if acceptRange != "" {
contentRange = fmt.Sprintf("bytes %v-%v/%v", startOffset, startOffset+length-1, objSize)
}
@@ -705,7 +685,7 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.Ge
contentType, contentEncoding := s.loadUserMetaData(bucket, object, userMetaData)
b, err := s.meta.RetrieveAttribute(nil, bucket, object, etagkey)
b, err := s.meta.RetrieveAttribute(bucket, object, etagkey)
etag := string(b)
if err != nil {
etag = ""
@@ -719,7 +699,7 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.Ge
tagCount := int32(len(tags))
return &s3.GetObjectOutput{
AcceptRanges: backend.GetPtrFromString("bytes"),
AcceptRanges: &acceptRange,
ContentLength: &length,
ContentEncoding: &contentEncoding,
ContentType: &contentType,
@@ -860,7 +840,7 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
if d.IsDir() {
// directory object only happens if directory empty
// check to see if this is a directory object by checking etag
etagBytes, err := s.meta.RetrieveAttribute(nil, bucket, path, etagkey)
etagBytes, err := s.meta.RetrieveAttribute(bucket, path, etagkey)
if errors.Is(err, meta.ErrNoSuchKey) || errors.Is(err, fs.ErrNotExist) {
return s3response.Object{}, backend.ErrSkipObj
}
@@ -889,7 +869,7 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
}
// file object, get object info and fill out object data
b, err := s.meta.RetrieveAttribute(nil, bucket, path, etagkey)
b, err := s.meta.RetrieveAttribute(bucket, path, etagkey)
if errors.Is(err, fs.ErrNotExist) {
return s3response.Object{}, backend.ErrSkipObj
}
@@ -963,6 +943,30 @@ func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput)
return nil
}
func setStaging(objname string) error {
b, err := xattr.Get(objname, flagskey)
if err != nil && !isNoAttr(err) {
return err
}
var oldflags uint64
if !isNoAttr(err) {
err = json.Unmarshal(b, &oldflags)
if err != nil {
return err
}
}
newflags := oldflags | Staging
if newflags == oldflags {
// no flags change, just return
return nil
}
return fSetNewGlobalFlags(objname, newflags)
}
func isStaging(objname string) (bool, error) {
b, err := xattr.Get(objname, flagskey)
if err != nil && !isNoAttr(err) {
@@ -980,28 +984,8 @@ func isStaging(objname string) (bool, error) {
return flags&Staging == Staging, nil
}
func setFlag(objname string, flag uint64) error {
b, err := xattr.Get(objname, flagskey)
if err != nil && !isNoAttr(err) {
return err
}
var oldflags uint64
if !isNoAttr(err) {
err = json.Unmarshal(b, &oldflags)
if err != nil {
return err
}
}
newflags := oldflags | flag
if newflags == oldflags {
// no flags change, just return
return nil
}
b, err = json.Marshal(&newflags)
func fSetNewGlobalFlags(objname string, flags uint64) error {
b, err := json.Marshal(&flags)
if err != nil {
return err
}
@@ -1009,14 +993,6 @@ func setFlag(objname string, flag uint64) error {
return xattr.Set(objname, flagskey, b)
}
func setStaging(objname string) error {
return setFlag(objname, Staging)
}
func setNoArchive(objname string) error {
return setFlag(objname, NoArchive)
}
func isNoAttr(err error) bool {
xerr, ok := err.(*xattr.Error)
if ok && xerr.Err == xattr.ENOATTR {

View File

@@ -40,7 +40,6 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
ChownUID: opts.ChownUID,
ChownGID: opts.ChownGID,
BucketLinks: opts.BucketLinks,
NewDirPerm: opts.NewDirPerm,
})
if err != nil {
return nil, err
@@ -52,15 +51,13 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
}
return &ScoutFS{
Posix: p,
rootfd: f,
rootdir: rootdir,
meta: metastore,
chownuid: opts.ChownUID,
chowngid: opts.ChownGID,
glaciermode: opts.GlacierMode,
newDirPerm: opts.NewDirPerm,
disableNoArchive: opts.DisableNoArchive,
Posix: p,
rootfd: f,
rootdir: rootdir,
meta: metastore,
chownuid: opts.ChownUID,
chowngid: opts.ChownGID,
glaciermode: opts.GlacierMode,
}, nil
}
@@ -74,10 +71,10 @@ type tmpfile struct {
needsChown bool
uid int
gid int
newDirPerm fs.FileMode
}
var (
// TODO: make this configurable
defaultFilePerm uint32 = 0644
)
@@ -105,7 +102,6 @@ func (s *ScoutFS) openTmpFile(dir, bucket, obj string, size int64, acct auth.Acc
needsChown: doChown,
uid: uid,
gid: gid,
newDirPerm: s.newDirPerm,
}
if doChown {
@@ -133,7 +129,7 @@ func (tmp *tmpfile) link() error {
dir := filepath.Dir(objPath)
err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown, tmp.newDirPerm)
err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown)
if err != nil {
return fmt.Errorf("make parent dir: %w", err)
}
@@ -178,10 +174,6 @@ func (tmp *tmpfile) cleanup() {
tmp.f.Close()
}
func (tmp *tmpfile) File() *os.File {
return tmp.f
}
func moveData(from *os.File, to *os.File) error {
return scoutfs.MoveData(from, to)
}

View File

@@ -28,7 +28,9 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
return nil, fmt.Errorf("scoutfs only available on linux")
}
type tmpfile struct{}
type tmpfile struct {
f *os.File
}
var (
errNotSupported = errors.New("not supported")
@@ -54,10 +56,6 @@ func (tmp *tmpfile) Write(b []byte) (int, error) {
func (tmp *tmpfile) cleanup() {
}
func (tmp *tmpfile) File() *os.File {
return nil
}
func moveData(_, _ *os.File) error {
return errNotSupported
}

View File

@@ -19,9 +19,9 @@ import (
"errors"
"fmt"
"io/fs"
"os"
"sort"
"strings"
"syscall"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3response"
@@ -53,15 +53,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
var newMarker string
var truncated bool
root := "."
if strings.Contains(prefix, "/") {
idx := strings.LastIndex(prefix, "/")
if idx > 0 {
root = prefix[:idx]
}
}
err := fs.WalkDir(fileSystem, root, func(path string, d fs.DirEntry, err error) error {
err := fs.WalkDir(fileSystem, ".", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
@@ -84,11 +76,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return fs.SkipAll
}
// After this point, return skipflag instead of nil
// so we can skip a directory without an early return
var skipflag error
if d.IsDir() {
fmt.Println("path: ", path)
// If prefix is defined and the directory does not match prefix,
// do not descend into the directory because nothing will
// match this prefix. Make sure to append the / at the end of
@@ -97,58 +85,51 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
// building to match. So only skip if path isn't a prefix of prefix
// and prefix isn't a prefix of path.
if prefix != "" &&
!strings.HasPrefix(path+"/", prefix) &&
!strings.HasPrefix(prefix, path+"/") {
!strings.HasPrefix(path+string(os.PathSeparator), prefix) &&
!strings.HasPrefix(prefix, path+string(os.PathSeparator)) {
return fs.SkipDir
}
// Don't recurse into subdirectories which contain the delimiter
// after reaching the prefix
if delimiter != "" &&
strings.HasPrefix(path+"/", prefix) &&
strings.Contains(strings.TrimPrefix(path+"/", prefix), delimiter) {
skipflag = fs.SkipDir
} else {
if delimiter == "" {
dirobj, err := getObj(path+"/", d)
if err == ErrSkipObj {
return skipflag
}
if err != nil {
return fmt.Errorf("directory to object %q: %w", path, err)
}
objects = append(objects, dirobj)
return skipflag
}
// TODO: can we do better here rather than a second readdir
// per directory?
ents, err := fs.ReadDir(fileSystem, path)
if err != nil {
return fmt.Errorf("readdir %q: %w", path, err)
}
if len(ents) != 0 {
return skipflag
}
// TODO: can we do better here rather than a second readdir
// per directory?
ents, err := fs.ReadDir(fileSystem, path)
if err != nil {
return fmt.Errorf("readdir %q: %w", path, err)
}
path += string(os.PathSeparator)
if len(ents) == 0 && delimiter == "" {
dirobj, err := getObj(path, d)
if err == ErrSkipObj {
return nil
}
if err != nil {
return fmt.Errorf("directory to object %q: %w", path, err)
}
objects = append(objects, dirobj)
return nil
}
if len(ents) != 0 {
return nil
}
path += "/"
}
if !pastMarker {
if path == marker {
pastMarker = true
return skipflag
return nil
}
if path < marker {
return skipflag
return nil
}
}
// If object doesn't have prefix, don't include in results.
if prefix != "" && !strings.HasPrefix(path, prefix) {
return skipflag
return nil
}
if delimiter == "" {
@@ -156,7 +137,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
// prefix are included in results
obj, err := getObj(path, d)
if err == ErrSkipObj {
return skipflag
return nil
}
if err != nil {
return fmt.Errorf("file to object %q: %w", path, err)
@@ -167,7 +148,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
pastMax = true
}
return skipflag
return nil
}
// Since delimiter is specified, we only want results that
@@ -196,7 +177,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
if !found {
obj, err := getObj(path, d)
if err == ErrSkipObj {
return skipflag
return nil
}
if err != nil {
return fmt.Errorf("file to object %q: %w", path, err)
@@ -205,7 +186,7 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
if (len(objects) + len(cpmap)) == int(max) {
pastMax = true
}
return skipflag
return nil
}
// Common prefixes are a set, so should not have duplicates.
@@ -215,12 +196,12 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
cpref := prefix + before + delimiter
if cpref == marker {
pastMarker = true
return skipflag
return nil
}
if marker != "" && strings.HasPrefix(marker, cprefNoDelim) {
// skip common prefixes that are before the marker
return skipflag
return nil
}
cpmap[cpref] = struct{}{}
@@ -230,13 +211,9 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return fs.SkipAll
}
return skipflag
return nil
})
if err != nil {
// suppress file not found caused by user's prefix
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
return WalkResults{}, nil
}
return WalkResults{}, err
}
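
A runnable sketch of calling Walk with a delimiter, mirroring the "aws example" test further below; getObj here is a reduced stand-in for the tests' version.

package main

import (
	"context"
	"fmt"
	"io/fs"
	"testing/fstest"

	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3response"
)

func main() {
	fsys := fstest.MapFS{
		"sample.jpg":                     {},
		"photos/2006/January/sample.jpg": {},
	}

	getObj := func(path string, d fs.DirEntry) (s3response.Object, error) {
		if d.IsDir() {
			return s3response.Object{}, backend.ErrSkipObj
		}
		return s3response.Object{Key: &path}, nil
	}

	// Delimiter "/" collapses everything under photos/ into one common prefix.
	res, err := backend.Walk(context.Background(), fsys, "", "/", "", 1000, getObj, []string{})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(res.Objects), len(res.CommonPrefixes)) // 1 1
}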
@@ -338,16 +315,8 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
// building to match. So only skip if path isn't a prefix of prefix
// and prefix isn't a prefix of path.
if prefix != "" &&
!strings.HasPrefix(path+"/", prefix) &&
!strings.HasPrefix(prefix, path+"/") {
return fs.SkipDir
}
// Don't recurse into subdirectories when listing with delimiter.
if delimiter == "/" &&
prefix != path+"/" &&
strings.HasPrefix(path+"/", prefix) {
cpmap[path+"/"] = struct{}{}
!strings.HasPrefix(path+string(os.PathSeparator), prefix) &&
!strings.HasPrefix(prefix, path+string(os.PathSeparator)) {
return fs.SkipDir
}

View File

@@ -31,18 +31,9 @@ import (
)
type walkTest struct {
fsys fs.FS
getobj backend.GetObjFunc
cases []testcase
}
type testcase struct {
name string
prefix string
delimiter string
marker string
maxObjs int32
expected backend.WalkResults
fsys fs.FS
expected backend.WalkResults
getobj backend.GetObjFunc
}
func getObj(path string, d fs.DirEntry) (s3response.Object, error) {
@@ -97,154 +88,50 @@ func TestWalk(t *testing.T) {
"photos/2006/February/sample3.jpg": {},
"photos/2006/February/sample4.jpg": {},
},
getobj: getObj,
cases: []testcase{
{
name: "aws example",
delimiter: "/",
maxObjs: 1000,
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photos/"),
}},
Objects: []s3response.Object{{
Key: backend.GetPtrFromString("sample.jpg"),
}},
},
},
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetStringPtr("photos/"),
}},
Objects: []s3response.Object{{
Key: backend.GetStringPtr("sample.jpg"),
}},
},
getobj: getObj,
},
{
// test case single dir/single file
fsys: fstest.MapFS{
"test/file": {},
},
getobj: getObj,
cases: []testcase{
{
name: "single dir single file",
delimiter: "/",
maxObjs: 1000,
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("test/"),
}},
Objects: []s3response.Object{},
},
},
},
},
{
// non-standard delimiter
fsys: fstest.MapFS{
"photo|s/200|6/Januar|y/sampl|e1.jpg": {},
"photo|s/200|6/Januar|y/sampl|e2.jpg": {},
"photo|s/200|6/Januar|y/sampl|e3.jpg": {},
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetStringPtr("test/"),
}},
Objects: []s3response.Object{},
},
getobj: getObj,
cases: []testcase{
{
name: "different delimiter 1",
delimiter: "|",
maxObjs: 1000,
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photo|"),
}},
},
},
{
name: "different delimiter 2",
delimiter: "|",
maxObjs: 1000,
prefix: "photo|",
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photo|s/200|"),
}},
},
},
{
name: "different delimiter 3",
delimiter: "|",
maxObjs: 1000,
prefix: "photo|s/200|",
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photo|s/200|6/Januar|"),
}},
},
},
{
name: "different delimiter 4",
delimiter: "|",
maxObjs: 1000,
prefix: "photo|s/200|",
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photo|s/200|6/Januar|"),
}},
},
},
{
name: "different delimiter 5",
delimiter: "|",
maxObjs: 1000,
prefix: "photo|s/200|6/Januar|",
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photo|s/200|6/Januar|y/sampl|"),
}},
},
},
{
name: "different delimiter 6",
delimiter: "|",
maxObjs: 1000,
prefix: "photo|s/200|6/Januar|y/sampl|",
expected: backend.WalkResults{
Objects: []s3response.Object{
{
Key: backend.GetPtrFromString("photo|s/200|6/Januar|y/sampl|e1.jpg"),
},
{
Key: backend.GetPtrFromString("photo|s/200|6/Januar|y/sampl|e2.jpg"),
},
{
Key: backend.GetPtrFromString("photo|s/200|6/Januar|y/sampl|e3.jpg"),
},
},
},
},
},
},
}
for _, tt := range tests {
for _, tc := range tt.cases {
res, err := backend.Walk(context.Background(),
tt.fsys, tc.prefix, tc.delimiter, tc.marker, tc.maxObjs,
tt.getobj, []string{})
if err != nil {
t.Errorf("tc.name: walk: %v", err)
}
compareResults(tc.name, res, tc.expected, t)
res, err := backend.Walk(context.Background(), tt.fsys, "", "/", "", 1000, tt.getobj, []string{})
if err != nil {
t.Fatalf("walk: %v", err)
}
compareResults(res, tt.expected, t)
}
}
func compareResults(name string, got, wanted backend.WalkResults, t *testing.T) {
func compareResults(got, wanted backend.WalkResults, t *testing.T) {
if !compareCommonPrefix(got.CommonPrefixes, wanted.CommonPrefixes) {
t.Errorf("%v: unexpected common prefix, got %v wanted %v",
name,
t.Errorf("unexpected common prefix, got %v wanted %v",
printCommonPrefixes(got.CommonPrefixes),
printCommonPrefixes(wanted.CommonPrefixes))
}
if !compareObjects(got.Objects, wanted.Objects) {
t.Errorf("%v: unexpected object, got %v wanted %v",
name,
t.Errorf("unexpected object, got %v wanted %v",
printObjects(got.Objects),
printObjects(wanted.Objects))
}
@@ -315,16 +202,10 @@ func containsObject(c s3response.Object, list []s3response.Object) bool {
func printObjects(list []s3response.Object) string {
res := "["
for _, cp := range list {
var key string
if cp.Key == nil {
key = "<nil>"
} else {
key = *cp.Key
}
if res == "[" {
res = res + key
res = res + *cp.Key
} else {
res = res + ", " + key
res = res + ", " + *cp.Key
}
}
return res + "]"


@@ -19,7 +19,7 @@ import (
"crypto/sha256"
"crypto/tls"
"encoding/hex"
"encoding/xml"
"encoding/json"
"fmt"
"io"
"net/http"
@@ -29,7 +29,6 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/s3response"
@@ -38,7 +37,6 @@ import (
var (
adminAccess string
adminSecret string
adminRegion string
adminEndpoint string
allowInsecure bool
)
@@ -173,14 +171,6 @@ func adminCommand() *cli.Command {
Required: true,
Destination: &adminSecret,
},
&cli.StringFlag{
Name: "region",
Usage: "admin s3 region string",
EnvVars: []string{"ADMIN_REGION"},
Value: "us-east-1",
Destination: &adminRegion,
Aliases: []string{"r"},
},
&cli.StringFlag{
Name: "endpoint-url",
Usage: "admin apis endpoint url",
@@ -225,24 +215,24 @@ func createUser(ctx *cli.Context) error {
GroupID: groupID,
}
accxml, err := xml.Marshal(acc)
accJson, err := json.Marshal(acc)
if err != nil {
return fmt.Errorf("failed to parse user data: %w", err)
}
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/create-user", adminEndpoint), bytes.NewBuffer(accxml))
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/create-user", adminEndpoint), bytes.NewBuffer(accJson))
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
signer := v4.NewSigner()
hashedPayload := sha256.Sum256(accxml)
hashedPayload := sha256.Sum256(accJson)
hexPayload := hex.EncodeToString(hashedPayload[:])
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -261,9 +251,11 @@ func createUser(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
fmt.Printf("%s\n", body)
return nil
}
@@ -285,7 +277,7 @@ func deleteUser(ctx *cli.Context) error {
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -304,9 +296,11 @@ func deleteUser(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
fmt.Printf("%s\n", body)
return nil
}
@@ -323,24 +317,24 @@ func updateUser(ctx *cli.Context) error {
props.GroupID = &groupId
}
propsxml, err := xml.Marshal(props)
propsJSON, err := json.Marshal(props)
if err != nil {
return fmt.Errorf("failed to parse user attributes: %w", err)
}
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/update-user?access=%v", adminEndpoint, access), bytes.NewBuffer(propsxml))
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/update-user?access=%v", adminEndpoint, access), bytes.NewBuffer(propsJSON))
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
signer := v4.NewSigner()
hashedPayload := sha256.Sum256(propsxml)
hashedPayload := sha256.Sum256(propsJSON)
hexPayload := hex.EncodeToString(hashedPayload[:])
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -359,9 +353,11 @@ func updateUser(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
fmt.Printf("%s\n", body)
return nil
}
@@ -378,7 +374,7 @@ func listUsers(ctx *cli.Context) error {
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -397,15 +393,15 @@ func listUsers(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
var accs auth.ListUserAccountsResult
if err := xml.Unmarshal(body, &accs); err != nil {
var accs []auth.Account
if err := json.Unmarshal(body, &accs); err != nil {
return err
}
printAcctTable(accs.Accounts)
printAcctTable(accs)
return nil
}
@@ -445,7 +441,7 @@ func changeBucketOwner(ctx *cli.Context) error {
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -464,9 +460,11 @@ func changeBucketOwner(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
fmt.Println(string(body))
return nil
}
@@ -495,7 +493,7 @@ func listBuckets(ctx *cli.Context) error {
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -514,26 +512,15 @@ func listBuckets(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
var result s3response.ListBucketsResult
if err := xml.Unmarshal(body, &result); err != nil {
var buckets []s3response.Bucket
if err := json.Unmarshal(body, &buckets); err != nil {
return err
}
printBuckets(result.Buckets)
printBuckets(buckets)
return nil
}
func parseApiError(body []byte) error {
var apiErr smithy.GenericAPIError
err := xml.Unmarshal(body, &apiErr)
if err != nil {
apiErr.Code = "InternalServerError"
apiErr.Message = err.Error()
}
return &apiErr
}
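
Every admin helper in this file follows the same request flow: marshal the payload, hash it with SHA-256, set X-Amz-Content-Sha256, then SigV4-sign the request before sending it. Reduced to a standalone sketch, with placeholder endpoint, credentials, and payload:

package main

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	payload := []byte(`{"access":"user1"}`)
	req, err := http.NewRequest(http.MethodPatch,
		"http://127.0.0.1:7070/create-user", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}

	// SigV4 signs over the hex-encoded SHA-256 of the payload, which must
	// also travel in the X-Amz-Content-Sha256 header.
	sum := sha256.Sum256(payload)
	hexPayload := hex.EncodeToString(sum[:])
	req.Header.Set("X-Amz-Content-Sha256", hexPayload)

	signer := v4.NewSigner()
	creds := aws.Credentials{AccessKeyID: "admin", SecretAccessKey: "secret"}
	if err := signer.SignHTTP(context.Background(), creds, req, hexPayload,
		"s3", "us-east-1", time.Now()); err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Authorization"))
}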


@@ -7,7 +7,6 @@ import (
"path/filepath"
"sync"
"testing"
"time"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
@@ -58,9 +57,7 @@ func initPosix(ctx context.Context) {
log.Fatalf("make temp directory: %v", err)
}
be, err := posix.New(tempdir, meta.XattrMeta{}, posix.PosixOpts{
NewDirPerm: 0755,
})
be, err := posix.New(tempdir, meta.XattrMeta{}, posix.PosixOpts{})
if err != nil {
log.Fatalf("init posix: %v", err)
}
@@ -78,9 +75,6 @@ func initPosix(ctx context.Context) {
}
wg.Done()
}()
// wait for server to start
time.Sleep(1 * time.Second)
}
func TestIntegration(t *testing.T) {
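
The one-second sleep removed above is the usual crutch for "the test server may not be listening yet". A more robust alternative, shown purely for illustration (the repository does not do this), is to poll the port until a connect succeeds:

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForListen dials addr until a TCP connect succeeds or the deadline
// expires, avoiding a fixed sleep in test setup.
func waitForListen(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", addr, 100*time.Millisecond)
		if err == nil {
			conn.Close()
			return nil
		}
		time.Sleep(50 * time.Millisecond)
	}
	return fmt.Errorf("%s not listening after %s", addr, timeout)
}

func main() {
	fmt.Println(waitForListen("127.0.0.1:7070", 2*time.Second))
}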


@@ -74,9 +74,6 @@ var (
metricsService string
statsdServers string
dogstatsServers string
ipaHost, ipaVaultName string
ipaUser, ipaPassword string
ipaInsecure, ipaDebug bool
)
var (
@@ -209,7 +206,6 @@ func initFlags() []cli.Flag {
&cli.BoolFlag{
Name: "debug",
Usage: "enable debug output",
Value: false,
EnvVars: []string{"VGW_DEBUG"},
Destination: &debug,
},
@@ -510,42 +506,6 @@ func initFlags() []cli.Flag {
Aliases: []string{"mds"},
Destination: &dogstatsServers,
},
&cli.StringFlag{
Name: "ipa-host",
Usage: "FreeIPA server url e.g. https://ipa.example.test",
EnvVars: []string{"VGW_IPA_HOST"},
Destination: &ipaHost,
},
&cli.StringFlag{
Name: "ipa-vault-name",
Usage: "A name of the user vault containing their secret",
EnvVars: []string{"VGW_IPA_VAULT_NAME"},
Destination: &ipaVaultName,
},
&cli.StringFlag{
Name: "ipa-user",
Usage: "Username used to connect to FreeIPA. Needs permissions to read user vault contents",
EnvVars: []string{"VGW_IPA_USER"},
Destination: &ipaUser,
},
&cli.StringFlag{
Name: "ipa-password",
Usage: "Password of the user used to connect to FreeIPA.",
EnvVars: []string{"VGW_IPA_PASSWORD"},
Destination: &ipaPassword,
},
&cli.BoolFlag{
Name: "ipa-insecure",
Usage: "Verify TLS certificate of FreeIPA server. Default is 'true'.",
EnvVars: []string{"VGW_IPA_INSECURE"},
Destination: &ipaInsecure,
},
&cli.BoolFlag{
Name: "ipa-debug",
Usage: "FreeIPA IAM debug output",
EnvVars: []string{"VGW_IPA_DEBUG"},
Destination: &ipaDebug,
},
}
}
@@ -663,12 +623,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
CacheDisable: iamCacheDisable,
CacheTTL: iamCacheTTL,
CachePrune: iamCachePrune,
IpaHost: ipaHost,
IpaVaultName: ipaVaultName,
IpaUser: ipaUser,
IpaPassword: ipaPassword,
IpaInsecure: ipaInsecure,
IpaDebug: ipaDebug,
})
if err != nil {
return fmt.Errorf("setup iam: %w", err)


@@ -16,8 +16,6 @@ package main
import (
"fmt"
"io/fs"
"math"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/backend/meta"
@@ -28,9 +26,6 @@ var (
chownuid, chowngid bool
bucketlinks bool
versioningDir string
dirPerms uint
sidecar string
nometa bool
)
func posixCommand() *cli.Command {
@@ -73,26 +68,6 @@ will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
EnvVars: []string{"VGW_VERSIONING_DIR"},
Destination: &versioningDir,
},
&cli.UintFlag{
Name: "dir-perms",
Usage: "default directory permissions for new directories",
EnvVars: []string{"VGW_DIR_PERMS"},
Destination: &dirPerms,
DefaultText: "0755",
Value: 0755,
},
&cli.StringFlag{
Name: "sidecar",
Usage: "use provided sidecar directory to store metadata",
EnvVars: []string{"VGW_META_SIDECAR"},
Destination: &sidecar,
},
&cli.BoolFlag{
Name: "nometa",
Usage: "disable metadata storage",
EnvVars: []string{"VGW_META_NONE"},
Destination: &nometa,
},
},
}
}
@@ -103,45 +78,19 @@ func runPosix(ctx *cli.Context) error {
}
gwroot := (ctx.Args().Get(0))
if dirPerms > math.MaxUint32 {
return fmt.Errorf("invalid directory permissions: %d", dirPerms)
err := meta.XattrMeta{}.Test(gwroot)
if err != nil {
return fmt.Errorf("posix xattr check: %v", err)
}
if nometa && sidecar != "" {
return fmt.Errorf("cannot use both nometa and sidecar metadata")
}
opts := posix.PosixOpts{
be, err := posix.New(gwroot, meta.XattrMeta{}, posix.PosixOpts{
ChownUID: chownuid,
ChownGID: chowngid,
BucketLinks: bucketlinks,
VersioningDir: versioningDir,
NewDirPerm: fs.FileMode(dirPerms),
}
var ms meta.MetadataStorer
switch {
case sidecar != "":
sc, err := meta.NewSideCar(sidecar)
if err != nil {
return fmt.Errorf("failed to init sidecar metadata: %w", err)
}
ms = sc
opts.SideCarDir = sidecar
case nometa:
ms = meta.NoMeta{}
default:
ms = meta.XattrMeta{}
err := meta.XattrMeta{}.Test(gwroot)
if err != nil {
return fmt.Errorf("xattr check failed: %w", err)
}
}
be, err := posix.New(gwroot, ms, opts)
})
if err != nil {
return fmt.Errorf("failed to init posix backend: %w", err)
return fmt.Errorf("init posix: %v", err)
}
return runGateway(ctx.Context, be)
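
The --dir-perms handling above narrows a uint into an fs.FileMode, which is a uint32 underneath; the math.MaxUint32 guard makes an oversized value fail loudly instead of silently truncating. A compact illustration (the helper name is ours, not the gateway's):

package main

import (
	"fmt"
	"io/fs"
	"math"
)

// dirPermMode validates a user-supplied mode before narrowing it, since
// converting a uint wider than 32 bits to fs.FileMode would truncate.
func dirPermMode(dirPerms uint) (fs.FileMode, error) {
	if dirPerms > math.MaxUint32 {
		return 0, fmt.Errorf("invalid directory permissions: %d", dirPerms)
	}
	return fs.FileMode(dirPerms), nil
}

func main() {
	m, err := dirPermMode(0o755)
	fmt.Println(m, err) // -rwxr-xr-x <nil>
}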


@@ -16,16 +16,13 @@ package main
import (
"fmt"
"io/fs"
"math"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/backend/scoutfs"
)
var (
glacier bool
disableNoArchive bool
glacier bool
)
func scoutfsCommand() *cli.Command {
@@ -72,20 +69,6 @@ move interfaces as well as support for tiered filesystems.`,
EnvVars: []string{"VGW_BUCKET_LINKS"},
Destination: &bucketlinks,
},
&cli.UintFlag{
Name: "dir-perms",
Usage: "default directory permissions for new directories",
EnvVars: []string{"VGW_DIR_PERMS"},
Destination: &dirPerms,
DefaultText: "0755",
Value: 0755,
},
&cli.BoolFlag{
Name: "disable-noarchive",
Usage: "disable setting noarchive for multipart part uploads",
EnvVars: []string{"VGW_DISABLE_NOARCHIVE"},
Destination: &disableNoArchive,
},
},
}
}
@@ -95,17 +78,11 @@ func runScoutfs(ctx *cli.Context) error {
return fmt.Errorf("no directory provided for operation")
}
if dirPerms > math.MaxUint32 {
return fmt.Errorf("invalid directory permissions: %d", dirPerms)
}
var opts scoutfs.ScoutfsOpts
opts.GlacierMode = glacier
opts.ChownUID = chownuid
opts.ChownGID = chowngid
opts.BucketLinks = bucketlinks
opts.NewDirPerm = fs.FileMode(dirPerms)
opts.DisableNoArchive = disableNoArchive
be, err := scoutfs.New(ctx.Args().Get(0), opts)
if err != nil {


@@ -37,8 +37,6 @@ var (
pathStyle bool
checksumDisable bool
versioningEnabled bool
azureTests bool
tlsStatus bool
)
func testCommand() *cli.Command {
@@ -80,12 +78,6 @@ func initTestFlags() []cli.Flag {
Aliases: []string{"d"},
Destination: &debug,
},
&cli.BoolFlag{
Name: "allow-insecure",
Usage: "skip tls verification",
Aliases: []string{"ai"},
Destination: &tlsStatus,
},
}
}
@@ -103,26 +95,12 @@ func initTestCommands() []*cli.Command {
Destination: &versioningEnabled,
Aliases: []string{"vs"},
},
&cli.BoolFlag{
Name: "azure-test-mode",
Usage: "Skips tests that are not supported by Azure",
Destination: &azureTests,
Aliases: []string{"azure"},
},
},
},
{
Name: "posix",
Usage: "Tests posix specific features",
Action: getAction(integration.TestPosix),
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "versioning-enabled",
Usage: "Test posix when versioning is enabled",
Destination: &versioningEnabled,
Aliases: []string{"vs"},
},
},
},
{
Name: "iam",
@@ -218,7 +196,6 @@ func initTestCommands() []*cli.Command {
integration.WithEndpoint(endpoint),
integration.WithConcurrency(concurrency),
integration.WithPartSize(partSize),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
@@ -279,7 +256,6 @@ func initTestCommands() []*cli.Command {
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithConcurrency(concurrency),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
@@ -305,7 +281,6 @@ func getAction(tf testFunc) func(*cli.Context) error {
integration.WithSecret(awsSecret),
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
@@ -313,9 +288,6 @@ func getAction(tf testFunc) func(*cli.Context) error {
if versioningEnabled {
opts = append(opts, integration.WithVersioningEnabled())
}
if azureTests {
opts = append(opts, integration.WithAzureMode())
}
s := integration.NewS3Conf(opts...)
tf(s)
@@ -343,27 +315,15 @@ func extractIntTests() (commands []*cli.Command) {
integration.WithSecret(awsSecret),
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
}
if versioningEnabled {
opts = append(opts, integration.WithVersioningEnabled())
}
s := integration.NewS3Conf(opts...)
err := testFunc(s)
return err
},
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "versioning-enabled",
Usage: "Test the bucket object versioning, if the versioning is enabled",
Destination: &versioningEnabled,
Aliases: []string{"vs"},
},
},
})
}
return
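
The integration test configuration assembled here uses the functional-options pattern (WithEndpoint, WithDebug, and so on, folded into NewS3Conf). The general shape of that pattern, sketched independently of the real integration types:

package main

import "fmt"

type conf struct {
	endpoint string
	debug    bool
}

// Option mutates a conf; options compose by being applied in order.
type Option func(*conf)

func WithEndpoint(e string) Option { return func(c *conf) { c.endpoint = e } }
func WithDebug() Option            { return func(c *conf) { c.debug = true } }

// NewConf starts from defaults and applies whatever options the caller chose.
func NewConf(opts ...Option) *conf {
	c := &conf{endpoint: "http://127.0.0.1:7070"}
	for _, opt := range opts {
		opt(c)
	}
	return c
}

func main() {
	c := NewConf(WithDebug())
	fmt.Printf("%+v\n", *c)
}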


@@ -311,12 +311,6 @@ ROOT_SECRET_ACCESS_KEY=
# to directories at the top level gateway directory as buckets.
#VGW_BUCKET_LINKS=false
# The default permissions mode when creating new directories is 0755. Use
# VGW_DIR_PERMS option to set a different mode for any new directory that the
# gateway creates. This applies to buckets created through the gateway as well
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755
###########
# scoutfs #
###########
@@ -352,12 +346,6 @@ ROOT_SECRET_ACCESS_KEY=
# to directories at the top level gateway directory as buckets.
#VGW_BUCKET_LINKS=false
# The default permissions mode when creating new directories is 0755. Use
# VGW_DIR_PERMS option to set a different mode for any new directory that the
# gateway creates. This applies to buckets created through the gateway as well
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755
######
# s3 #
######

go.mod

@@ -1,44 +1,41 @@
module github.com/versity/versitygw
go 1.23.0
toolchain go1.23.6
go 1.21.0
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
github.com/DataDog/datadog-go/v5 v5.6.0
github.com/aws/aws-sdk-go-v2 v1.36.3
github.com/aws/aws-sdk-go-v2/service/s3 v1.78.1
github.com/aws/smithy-go v1.22.3
github.com/go-ldap/ldap/v3 v3.4.10
github.com/gofiber/fiber/v2 v2.52.6
github.com/google/go-cmp v0.7.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0
github.com/DataDog/datadog-go/v5 v5.5.0
github.com/aws/aws-sdk-go-v2 v1.30.5
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2
github.com/aws/smithy-go v1.20.4
github.com/go-ldap/ldap/v3 v3.4.8
github.com/gofiber/fiber/v2 v2.52.5
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/hashicorp/vault-client-go v0.4.3
github.com/nats-io/nats.go v1.39.1
github.com/nats-io/nats.go v1.37.0
github.com/oklog/ulid/v2 v2.1.0
github.com/pkg/xattr v0.4.10
github.com/segmentio/kafka-go v0.4.47
github.com/smira/go-statsd v1.3.4
github.com/urfave/cli/v2 v2.27.6
github.com/valyala/fasthttp v1.59.0
github.com/smira/go-statsd v1.3.3
github.com/urfave/cli/v2 v2.27.4
github.com/valyala/fasthttp v1.55.0
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
golang.org/x/sync v0.12.0
golang.org/x/sys v0.31.0
golang.org/x/sys v0.25.0
)
require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -47,37 +44,38 @@ require (
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/nats-io/nkeys v0.4.10 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/net v0.37.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/crypto v0.27.0 // indirect
golang.org/x/net v0.29.0 // indirect
golang.org/x/text v0.18.0 // indirect
golang.org/x/time v0.6.0 // indirect
)
require (
github.com/andybalholm/brotli v1.1.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.9
github.com/aws/aws-sdk-go-v2/credentials v1.17.62
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.65
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/andybalholm/brotli v1.1.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect
github.com/aws/aws-sdk-go-v2/config v1.27.33
github.com/aws/aws-sdk-go-v2/credentials v1.17.32
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
)

go.sum

@@ -1,91 +1,83 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 h1:Be6KInmFEKV81c0pOAEbRYehLMwmmGI1exuFj248AMk=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod h1:WCPBHsOXfBVnivScjs2ypRfimjEW0qPVLGgJkZlrIOA=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1 h1:8BKxhZZLX/WosEeoCvWysmKUscfa9v8LIPEEU0JjE2o=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw=
github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0=
github.com/aws/aws-sdk-go-v2/config v1.29.9/go.mod h1:oU3jj2O53kgOU4TXq/yipt6ryiooYjlkqqVaZk7gY/U=
github.com/aws/aws-sdk-go-v2/credentials v1.17.62 h1:fvtQY3zFzYJ9CfixuAQ96IxDrBajbBWGqjNTCa79ocU=
github.com/aws/aws-sdk-go-v2/credentials v1.17.62/go.mod h1:ElETBxIQqcxej++Cs8GyPBbgMys5DgQPTwo7cUPDKt8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.65 h1:03zF9oWZyXvw08Say761JGpE9PbeGPd4FAmdpgDAm/I=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.65/go.mod h1:hBobvLKm46Igpcw6tkq9hFUmU14iAOrC5KL6EyYYckA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.2 h1:t/gZFyrijKuSU0elA5kRngP/oU3mc0I+Dvp8HwRE4c0=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.2/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.78.1 h1:1M0gSbyP6q06gl3384wpoKPaH9G16NPqZFieEhLboSU=
github.com/aws/aws-sdk-go-v2/service/s3 v1.78.1/go.mod h1:4qzsZSzB/KiX2EzDjs9D7A8rI/WGJxZceVJIHqtJjIU=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 h1:8JdC7Gr9NROg1Rusk25IcZeTO59zLxsKgE0gkh5O6h0=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 h1:KwuLovgQPcdjNMfFt9OhUd9a2OwcOKhxfvF4glTzLuA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 h1:PZV5W8yk4OtH1JAuhV2PXwwO9v5G5Aoj+eMCn4T+1Kc=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g=
github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw=
github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU=
github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks=
github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I=
github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18 h1:9DIp7vhmOPmueCDwpXa45bEbLHHTt1kcxChdTJWWxvI=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18/go.mod h1:aJv/Fwz8r56ozwYFRC4bzoeL1L17GYQYemfblOBux1M=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 h1:Roo69qTpfu8OlJ2Tb7pAYVuF0CpuUMB0IYWwYP/4DZM=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17/go.mod h1:NcWPxQzGM1USQggaTVwz6VpqMZPX1CvDJLDh6jnOCa4=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 h1:FLMkfEiRjhgeDTCjjLoc3URo/TBkgeQbocA78lfkzSI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19/go.mod h1:Vx+GucNSsdhaxs3aZIKfSUjKVGsxN25nX2SRcdhuw08=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 h1:u+EfGmksnJc/x5tq3A+OD7LrMbSSR/5TrKLvkdy/fhY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17/go.mod h1:VaMx6302JHax2vHJWgRo+5n9zvbacs3bLU/23DNQrTY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2 h1:Kp6PWAlXwP1UvIflkIP6MFZYBNDCa4mFCGtxrpICVOg=
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2/go.mod h1:5FmD/Dqq57gP+XwaUnd5WFPipAuzrf0HmupX27Gvjvc=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o=
github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4=
github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU=
github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY=
github.com/gofiber/fiber/v2 v2.52.6 h1:Rfp+ILPiYSvvVuIPvxrBns+HJp8qGLDnLJawAu27XVI=
github.com/gofiber/fiber/v2 v2.52.6/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/go-ldap/ldap/v3 v3.4.8 h1:loKJyspcRezt2Q3ZRMq2p/0v8iOurlmeXDPw6fikSvQ=
github.com/go-ldap/ldap/v3 v3.4.8/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk=
github.com/gofiber/fiber/v2 v2.52.5 h1:tWoP1MJQjGEe4GB5TUGOi7P2E0ZMMRx5ZTG4rT+yGMo=
github.com/gofiber/fiber/v2 v2.52.5/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
@@ -117,33 +109,32 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.39.1 h1:oTkfKBmz7W047vRxV762M67ZdXeOtUgvbBaNoQ+3PPk=
github.com/nats-io/nats.go v1.39.1/go.mod h1:MgRb8oOdigA6cYpEPhXJuRVH6UE/V4jblJ2jQ27IXYM=
github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -151,8 +142,6 @@ github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -163,8 +152,8 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH
github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smira/go-statsd v1.3.4 h1:kBYWcLSGT+qC6JVbvfz48kX7mQys32fjDOPrfmsSx2c=
github.com/smira/go-statsd v1.3.4/go.mod h1:RjdsESPgDODtg1VpVVf9MJrEW2Hw0wtRNbmB1CAhu6A=
github.com/smira/go-statsd v1.3.3 h1:WnMlmGTyMpzto+HvOJWRPoLaLlk5EGfzsnlQBcvj4yI=
github.com/smira/go-statsd v1.3.3/go.mod h1:RjdsESPgDODtg1VpVVf9MJrEW2Hw0wtRNbmB1CAhu6A=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
@@ -174,14 +163,16 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g=
github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8=
github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.59.0 h1:Qu0qYHfXvPk1mSLNqcFtEk6DpxgA26hy6bmydotDpRI=
github.com/valyala/fasthttp v1.59.0/go.mod h1:GTxNb9Bc6r2a9D0TWNSPwDz78UxnTGBViY3xZNEqyYU=
github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8=
github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44 h1:Wx1o3pNrCzsHIIDyZ2MLRr6tF/1FhAr7HNDn80QqDWE=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
@@ -192,27 +183,20 @@ github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -222,23 +206,15 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -250,27 +226,23 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -279,19 +251,15 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -72,14 +72,6 @@ var (
ActionPutBucketOwnershipControls = "s3_PutBucketOwnershipControls"
ActionGetBucketOwnershipControls = "s3_GetBucketOwnershipControls"
ActionDeleteBucketOwnershipControls = "s3_DeleteBucketOwnershipControls"
// Admin actions
ActionAdminCreateUser = "admin_CreateUser"
ActionAdminUpdateUser = "admin_UpdateUser"
ActionAdminDeleteUser = "admin_DeleteUser"
ActionAdminChangeBucketOwner = "admin_ChangeBucketOwner"
ActionAdminListUsers = "admin_ListUsers"
ActionAdminListBuckets = "admin_ListBuckets"
)
func init() {

View File

@@ -5,27 +5,17 @@ rm -rf /tmp/gw
mkdir /tmp/gw
rm -rf /tmp/covdata
mkdir /tmp/covdata
rm -rf /tmp/versioning.covdata
mkdir /tmp/versioning.covdata
rm -rf /tmp/versioningdir
mkdir /tmp/versioningdir
# setup tls certificate and key
ECHO "Generating TLS certificate and key in the cert.pem and key.pem files"
openssl genpkey -algorithm RSA -out key.pem -pkeyopt rsa_keygen_bits:2048
openssl req -new -x509 -key key.pem -out cert.pem -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"
ECHO "Running the sdk test over http"
# run server in background not versioning-enabled
# port: 7070(default)
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass --iam-dir /tmp/gw posix /tmp/gw &
# run server in background
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_PID=$!
# wait a second for server to start up
sleep 1
# check if gateway process is still running
# check if server is still running
if ! kill -0 $GW_PID; then
echo "server no longer running"
exit 1
@@ -33,7 +23,7 @@ fi
# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow -vs; then
echo "full flow tests failed"
kill $GW_PID
exit 1
@@ -51,110 +41,8 @@ if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 iam; then
exit 1
fi
# kill off server
kill $GW_PID
ECHO "Running the sdk test over https"
# run server in background with TLS certificate
# port: 7071(default)
GOCOVERDIR=/tmp/https.covdata ./versitygw --cert "$PWD/cert.pem" --key "$PWD/key.pem" -p :7071 -a user -s pass --iam-dir /tmp/gw posix /tmp/gw &
GW_HTTPS_PID=$!
sleep 1
# check if https gateway process is still running
if ! kill -0 $GW_HTTPS_PID; then
echo "server no longer running"
exit 1
fi
# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 full-flow; then
echo "full flow tests failed"
kill $GW_HTTPS_PID
exit 1
fi
# posix tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 posix; then
echo "posix tests failed"
kill $GW_HTTPS_PID
exit 1
fi
# iam tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 iam; then
echo "iam tests failed"
kill $GW_HTTPS_PID
exit 1
fi
kill $GW_HTTPS_PID
ECHO "Running the sdk test over http against the versioning-enabled gateway"
# run server in background versioning-enabled
# port: 7072
GOCOVERDIR=/tmp/versioning.covdata ./versitygw -p :7072 -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_VS_PID=$!
# wait a second for server to start up
sleep 1
# check if versioning-enabled gateway process is still running
if ! kill -0 $GW_VS_PID; then
echo "versioning-enabled server no longer running"
exit 1
fi
# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 full-flow -vs; then
echo "versioning-enabled full-flow tests failed"
kill $GW_VS_PID
exit 1
fi
# posix tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 posix -vs; then
echo "versiongin-enabled posix tests failed"
kill $GW_VS_PID
exit 1
fi
# kill off server
kill $GW_VS_PID
ECHO "Running the sdk test over https against the versioning-enabled gateway"
# run server in background versioning-enabled
# port: 7073
GOCOVERDIR=/tmp/versioning.https.covdata ./versitygw --cert "$PWD/cert.pem" --key "$PWD/key.pem" -p :7073 -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_VS_HTTPS_PID=$!
# wait a second for server to start up
sleep 1
# check if versioning-enabled gateway process is still running
if ! kill -0 $GW_VS_HTTPS_PID; then
echo "versioning-enabled server no longer running"
exit 1
fi
# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 full-flow -vs; then
echo "versioning-enabled full-flow tests failed"
kill $GW_VS_HTTPS_PID
exit 1
fi
# posix tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 posix -vs; then
echo "versiongin-enabled posix tests failed"
kill $GW_VS_HTTPS_PID
exit 1
fi
# kill off server
kill $GW_VS_HTTPS_PID
exit 0
# if the above binary was built with -cover enabled (make testbin),

View File

@@ -53,9 +53,6 @@ func NewAdminServer(app *fiber.App, be backend.Backend, root middlewares.RootUse
app.Use(middlewares.VerifyV4Signature(root, iam, l, nil, region, false))
app.Use(middlewares.VerifyMD5Body(l))
// Admin role checker
app.Use(middlewares.IsAdmin(l))
server.router.Init(app, be, iam, l)
return server

View File

@@ -16,19 +16,15 @@ package controllers
import (
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"net/http"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
"github.com/versity/versitygw/s3response"
)
type AdminController struct {
@@ -42,124 +38,187 @@ func NewAdminController(iam auth.IAMService, be backend.Backend, l s3log.AuditLo
}
func (c AdminController) CreateUser(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:CreateUser",
})
}
var usr auth.Account
err := xml.Unmarshal(ctx.Body(), &usr)
err := json.Unmarshal(ctx.Body(), &usr)
if err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminCreateUser,
return sendResponse(ctx, fmt.Errorf("failed to parse request body: %w", err), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusBadRequest,
action: "admin:CreateUser",
})
}
if !usr.Role.IsValid() {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminInvalidUserRole),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminCreateUser,
if usr.Role != auth.RoleAdmin && usr.Role != auth.RoleUser && usr.Role != auth.RoleUserPlus {
return sendResponse(ctx, errors.New("invalid parameters: user role have to be one of the following: 'user', 'admin', 'userplus'"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusBadRequest,
action: "admin:CreateUser",
})
}
err = c.iam.CreateAccount(usr)
if err != nil {
status := fiber.StatusInternalServerError
err = fmt.Errorf("failed to create user: %w", err)
if strings.Contains(err.Error(), "user already exists") {
err = s3err.GetAPIError(s3err.ErrAdminUserExists)
status = fiber.StatusConflict
}
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminCreateUser,
return sendResponse(ctx, err, nil,
&metaOptions{
status: status,
logger: c.l,
action: "admin:CreateUser",
})
}
return SendResponse(ctx, nil,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminCreateUser,
Status: http.StatusCreated,
})
return sendResponse(ctx, nil, "The user has been created successfully", &metaOptions{
status: fiber.StatusCreated,
logger: c.l,
action: "admin:CreateUser",
})
}
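Since CreateUser now decodes the body with encoding/json instead of XML, clients build the request as a JSON document. A minimal sketch of such a request, assuming the JSON field names follow the auth.Account struct as marshaled in the tests below (the exact keys depend on that struct's tags, which this diff does not show):
// sketch: building a create-user request body as JSON (field names assumed)
package main
import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)
func main() {
	body, _ := json.Marshal(map[string]string{
		"Access": "newuser",   // assumed key casing
		"Secret": "newsecret",
		"Role":   "user",      // must be one of "user", "admin", "userplus"
	})
	req, _ := http.NewRequest(http.MethodPatch, "http://127.0.0.1:7070/create-user", bytes.NewReader(body))
	fmt.Println(req.Method, req.URL) // the real request would also need SigV4 signing
}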
func (c AdminController) UpdateUser(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:UpdateUser",
})
}
access := ctx.Query("access")
if access == "" {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminMissingUserAcess),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
return sendResponse(ctx, errors.New("missing user access parameter"), nil,
&metaOptions{
status: fiber.StatusBadRequest,
logger: c.l,
action: "admin:UpdateUser",
})
}
var props auth.MutableProps
if err := xml.Unmarshal(ctx.Body(), &props); err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
if err := json.Unmarshal(ctx.Body(), &props); err != nil {
return sendResponse(ctx, fmt.Errorf("invalid request body %w", err), nil,
&metaOptions{
status: fiber.StatusBadRequest,
logger: c.l,
action: "admin:UpdateUser",
})
}
err := c.iam.UpdateUserAccount(access, props)
if err != nil {
status := fiber.StatusInternalServerError
err = fmt.Errorf("failed to update user account: %w", err)
if strings.Contains(err.Error(), "user not found") {
err = s3err.GetAPIError(s3err.ErrAdminUserNotFound)
status = fiber.StatusNotFound
}
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
return sendResponse(ctx, err, nil,
&metaOptions{
status: status,
logger: c.l,
action: "admin:UpdateUser",
})
}
return SendResponse(ctx, nil,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
return sendResponse(ctx, nil, "the user has been updated successfully",
&metaOptions{
logger: c.l,
action: "admin:UpdateUser",
})
}
func (c AdminController) DeleteUser(ctx *fiber.Ctx) error {
access := ctx.Query("access")
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:DeleteUser",
})
}
err := c.iam.DeleteUserAccount(access)
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminDeleteUser,
if err != nil {
return sendResponse(ctx, err, nil,
&metaOptions{
logger: c.l,
action: "admin:DeleteUser",
})
}
return sendResponse(ctx, nil, "The user has been deleted successfully",
&metaOptions{
logger: c.l,
action: "admin:DeleteUser",
})
}
func (c AdminController) ListUsers(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:ListUsers",
})
}
accs, err := c.iam.ListUserAccounts()
return SendXMLResponse(ctx,
auth.ListUserAccountsResult{
Accounts: accs,
}, err,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminListUsers,
return sendResponse(ctx, err, accs,
&metaOptions{
logger: c.l,
action: "admin:ListUsers",
})
}
func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:ChangeBucketOwner",
})
}
owner := ctx.Query("owner")
bucket := ctx.Query("bucket")
accs, err := auth.CheckIfAccountsExist([]string{owner}, c.iam)
if err != nil {
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminChangeBucketOwner,
return sendResponse(ctx, err, nil,
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
})
}
if len(accs) > 0 {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminUserNotFound),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminChangeBucketOwner,
return sendResponse(ctx, errors.New("user specified as the new bucket owner does not exist"), nil,
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
status: fiber.StatusNotFound,
})
}
@@ -167,7 +226,7 @@ func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
Owner: owner,
Grantees: []auth.Grantee{
{
Permission: auth.PermissionFullControl,
Permission: types.PermissionFullControl,
Access: owner,
Type: types.TypeCanonicalUser,
},
@@ -176,28 +235,91 @@ func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
aclParsed, err := json.Marshal(acl)
if err != nil {
return SendResponse(ctx, fmt.Errorf("failed to marshal the bucket acl: %w", err),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminChangeBucketOwner,
return sendResponse(ctx, fmt.Errorf("failed to marshal the bucket acl: %w", err), nil,
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
})
}
err = c.be.ChangeBucketOwner(ctx.Context(), bucket, aclParsed)
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminChangeBucketOwner,
return sendResponse(ctx, err, "Bucket owner has been updated successfully",
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
})
}
func (c AdminController) ListBuckets(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:ListBuckets",
})
}
buckets, err := c.be.ListBucketsAndOwners(ctx.Context())
return SendXMLResponse(ctx,
s3response.ListBucketsResult{
Buckets: buckets,
}, err, &MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminListBuckets,
return sendResponse(ctx, err, buckets,
&metaOptions{
logger: c.l,
action: "admin:ListBuckets",
})
}
type metaOptions struct {
action string
status int
logger s3log.AuditLogger
}
func sendResponse(ctx *fiber.Ctx, err error, data any, m *metaOptions) error {
status := m.status
if err != nil {
if status == 0 {
status = fiber.StatusInternalServerError
}
if m.logger != nil {
m.logger.Log(ctx, err, []byte(err.Error()), s3log.LogMeta{
Action: m.action,
HttpStatus: status,
})
}
return ctx.Status(status).SendString(err.Error())
}
if status == 0 {
status = fiber.StatusOK
}
msg, ok := data.(string)
if ok {
if m.logger != nil {
m.logger.Log(ctx, nil, []byte(msg), s3log.LogMeta{
Action: m.action,
HttpStatus: status,
})
}
return ctx.Status(status).SendString(msg)
}
dataJSON, err := json.Marshal(data)
if err != nil {
return err
}
if m.logger != nil {
m.logger.Log(ctx, nil, dataJSON, s3log.LogMeta{
HttpStatus: status,
Action: m.action,
})
}
ctx.Set(fiber.HeaderContentType, fiber.MIMEApplicationJSON)
return ctx.Status(status).Send(dataJSON)
}
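The sendResponse helper centralizes status selection, audit logging, and serialization for the admin controllers. A usage sketch in the same controllers package, with a hypothetical Ping action (not part of this diff):
// sketch: how a controller method drives the helper; "admin:Ping" is illustrative
func (c AdminController) Ping(ctx *fiber.Ctx) error {
	// a nil error with a string payload is sent as plain text;
	// any other payload is marshaled to JSON with a JSON content type
	return sendResponse(ctx, nil, "pong", &metaOptions{
		status: fiber.StatusOK,
		logger: c.l,
		action: "admin:Ping",
	})
}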

View File

@@ -15,11 +15,12 @@
package controllers
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/gofiber/fiber/v2"
@@ -42,26 +43,33 @@ func TestAdminController_CreateUser(t *testing.T) {
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
app.Patch("/create-user", adminController.CreateUser)
succUser := `
<Account>
<Access>access</Access>
<Secret>secret</Secret>
<Role>admin</Role>
<UserID>0</UserID>
<GroupID>0</GroupID>
</Account>
`
invuser := `
<Account>
<Access>access</Access>
<Secret>secret</Secret>
<Role>invalid_role</Role>
<UserID>0</UserID>
<GroupID>0</GroupID>
</Account>
`
appErr := fiber.New()
appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
usr := auth.Account{
Access: "access",
Secret: "secret",
Role: "invalid role",
}
user, _ := json.Marshal(&usr)
usr.Role = "admin"
succUsr, _ := json.Marshal(&usr)
appErr.Patch("/create-user", adminController.CreateUser)
tests := []struct {
name string
@@ -71,31 +79,31 @@ func TestAdminController_CreateUser(t *testing.T) {
statusCode int
}{
{
name: "Admin-create-user-malformed-body",
name: "Admin-create-user-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user", nil),
req: httptest.NewRequest(http.MethodPatch, "/create-user", bytes.NewBuffer(succUsr)),
},
wantErr: false,
statusCode: 201,
},
{
name: "Admin-create-user-invalid-user-role",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user", bytes.NewBuffer(user)),
},
wantErr: false,
statusCode: 400,
},
{
name: "Admin-create-user-invalid-requester-role",
app: app,
app: appErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user", strings.NewReader(invuser)),
req: httptest.NewRequest(http.MethodPatch, "/create-user", nil),
},
wantErr: false,
statusCode: 400,
},
{
name: "Admin-create-user-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user", strings.NewReader(succUser)),
},
wantErr: false,
statusCode: 201,
statusCode: 403,
},
}
for _, tt := range tests {
@@ -126,8 +134,24 @@ func TestAdminController_UpdateUser(t *testing.T) {
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
app.Patch("/update-user", adminController.UpdateUser)
appErr := fiber.New()
appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
appErr.Patch("/update-user", adminController.UpdateUser)
successBody, _ := json.Marshal(auth.MutableProps{Secret: getPtr("hello")})
adminControllerErr := AdminController{
iam: &IAMServiceMock{
UpdateUserAccountFunc: func(access string, props auth.MutableProps) error {
@@ -138,15 +162,12 @@ func TestAdminController_UpdateUser(t *testing.T) {
appNotFound := fiber.New()
appNotFound.Patch("/update-user", adminControllerErr.UpdateUser)
appNotFound.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
succUser := `
<Account>
<Secret>secret</Secret>
<UserID>0</UserID>
<GroupID>0</GroupID>
</Account>
`
appNotFound.Patch("/update-user", adminControllerErr.UpdateUser)
tests := []struct {
name string
@@ -159,7 +180,7 @@ func TestAdminController_UpdateUser(t *testing.T) {
name: "Admin-update-user-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/update-user?access=access", strings.NewReader(succUser)),
req: httptest.NewRequest(http.MethodPatch, "/update-user?access=access", bytes.NewBuffer(successBody)),
},
wantErr: false,
statusCode: 200,
@@ -168,10 +189,10 @@ func TestAdminController_UpdateUser(t *testing.T) {
name: "Admin-update-user-missing-access",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/update-user", strings.NewReader(succUser)),
req: httptest.NewRequest(http.MethodPatch, "/update-user", bytes.NewBuffer(successBody)),
},
wantErr: false,
statusCode: 404,
statusCode: 400,
},
{
name: "Admin-update-user-invalid-request-body",
@@ -182,11 +203,20 @@ func TestAdminController_UpdateUser(t *testing.T) {
wantErr: false,
statusCode: 400,
},
{
name: "Admin-update-user-invalid-requester-role",
app: appErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/update-user?access=access", nil),
},
wantErr: false,
statusCode: 403,
},
{
name: "Admin-update-user-not-found",
app: appNotFound,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/update-user?access=access", strings.NewReader(succUser)),
req: httptest.NewRequest(http.MethodPatch, "/update-user?access=access", bytes.NewBuffer(successBody)),
},
wantErr: false,
statusCode: 404,
@@ -220,8 +250,22 @@ func TestAdminController_DeleteUser(t *testing.T) {
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
app.Patch("/delete-user", adminController.DeleteUser)
appErr := fiber.New()
appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
appErr.Patch("/delete-user", adminController.DeleteUser)
tests := []struct {
name string
app *fiber.App
@@ -238,6 +282,15 @@ func TestAdminController_DeleteUser(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Admin-delete-user-invalid-requester-role",
app: appErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/delete-user?access=test", nil),
},
wantErr: false,
statusCode: 403,
},
}
for _, tt := range tests {
resp, err := tt.app.Test(tt.args.req)
@@ -274,9 +327,30 @@ func TestAdminController_ListUsers(t *testing.T) {
}
appErr := fiber.New()
appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
appErr.Patch("/list-users", adminControllerErr.ListUsers)
appRoleErr := fiber.New()
appRoleErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
appRoleErr.Patch("/list-users", adminController.ListUsers)
appSucc := fiber.New()
appSucc.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
appSucc.Patch("/list-users", adminController.ListUsers)
tests := []struct {
@@ -286,6 +360,15 @@ func TestAdminController_ListUsers(t *testing.T) {
wantErr bool
statusCode int
}{
{
name: "Admin-list-users-access-denied",
app: appRoleErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/list-users", nil),
},
wantErr: false,
statusCode: 403,
},
{
name: "Admin-list-users-iam-error",
app: appErr,
@@ -352,12 +435,39 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
}
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
app.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)
appRoleErr := fiber.New()
appRoleErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
appRoleErr.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)
appIamErr := fiber.New()
appIamErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
appIamErr.Patch("/change-bucket-owner", adminControllerIamErr.ChangeBucketOwner)
appIamNoSuchUser := fiber.New()
appIamNoSuchUser.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
appIamNoSuchUser.Patch("/change-bucket-owner", adminControllerIamAccDoesNotExist.ChangeBucketOwner)
tests := []struct {
@@ -367,6 +477,15 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
wantErr bool
statusCode int
}{
{
name: "Change-bucket-owner-access-denied",
app: appRoleErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/change-bucket-owner", nil),
},
wantErr: false,
statusCode: 403,
},
{
name: "Change-bucket-owner-check-account-server-error",
app: appIamErr,
@@ -421,8 +540,23 @@ func TestAdminController_ListBuckets(t *testing.T) {
}
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
app.Patch("/list-buckets", adminController.ListBuckets)
appRoleErr := fiber.New()
appRoleErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
appRoleErr.Patch("/list-buckets", adminController.ListBuckets)
tests := []struct {
name string
app *fiber.App
@@ -430,6 +564,15 @@ func TestAdminController_ListBuckets(t *testing.T) {
wantErr bool
statusCode int
}{
{
name: "List-buckets-incorrect-role",
app: appRoleErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/list-buckets", nil),
},
wantErr: false,
statusCode: 403,
},
{
name: "List-buckets-success",
app: app,

View File

@@ -41,7 +41,7 @@ var _ backend.Backend = &BackendMock{}
// CreateMultipartUploadFunc: func(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
// panic("mock out the CreateMultipartUpload method")
// },
// DeleteBucketFunc: func(contextMoqParam context.Context, bucket string) error {
// DeleteBucketFunc: func(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error {
// panic("mock out the DeleteBucket method")
// },
// DeleteBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) error {
@@ -74,7 +74,7 @@ var _ backend.Backend = &BackendMock{}
// GetBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) (map[string]string, error) {
// panic("mock out the GetBucketTagging method")
// },
// GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
// GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
// panic("mock out the GetBucketVersioning method")
// },
// GetObjectFunc: func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
@@ -83,7 +83,7 @@ var _ backend.Backend = &BackendMock{}
// GetObjectAclFunc: func(contextMoqParam context.Context, getObjectAclInput *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
// panic("mock out the GetObjectAcl method")
// },
// GetObjectAttributesFunc: func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
// GetObjectAttributesFunc: func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
// panic("mock out the GetObjectAttributes method")
// },
// GetObjectLegalHoldFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) (*bool, error) {
@@ -104,7 +104,7 @@ var _ backend.Backend = &BackendMock{}
// HeadObjectFunc: func(contextMoqParam context.Context, headObjectInput *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
// panic("mock out the HeadObject method")
// },
// ListBucketsFunc: func(contextMoqParam context.Context, listBucketsInput s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
// ListBucketsFunc: func(contextMoqParam context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
// panic("mock out the ListBuckets method")
// },
// ListBucketsAndOwnersFunc: func(contextMoqParam context.Context) ([]s3response.Bucket, error) {
@@ -170,10 +170,10 @@ var _ backend.Backend = &BackendMock{}
// StringFunc: func() string {
// panic("mock out the String method")
// },
// UploadPartFunc: func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
// UploadPartFunc: func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error) {
// panic("mock out the UploadPart method")
// },
// UploadPartCopyFunc: func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
// UploadPartCopyFunc: func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
// panic("mock out the UploadPartCopy method")
// },
// }
@@ -202,7 +202,7 @@ type BackendMock struct {
CreateMultipartUploadFunc func(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
// DeleteBucketFunc mocks the DeleteBucket method.
DeleteBucketFunc func(contextMoqParam context.Context, bucket string) error
DeleteBucketFunc func(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error
// DeleteBucketOwnershipControlsFunc mocks the DeleteBucketOwnershipControls method.
DeleteBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string) error
@@ -235,7 +235,7 @@ type BackendMock struct {
GetBucketTaggingFunc func(contextMoqParam context.Context, bucket string) (map[string]string, error)
// GetBucketVersioningFunc mocks the GetBucketVersioning method.
GetBucketVersioningFunc func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error)
GetBucketVersioningFunc func(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error)
// GetObjectFunc mocks the GetObject method.
GetObjectFunc func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput) (*s3.GetObjectOutput, error)
@@ -244,7 +244,7 @@ type BackendMock struct {
GetObjectAclFunc func(contextMoqParam context.Context, getObjectAclInput *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
// GetObjectAttributesFunc mocks the GetObjectAttributes method.
GetObjectAttributesFunc func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error)
GetObjectAttributesFunc func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error)
// GetObjectLegalHoldFunc mocks the GetObjectLegalHold method.
GetObjectLegalHoldFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) (*bool, error)
@@ -265,7 +265,7 @@ type BackendMock struct {
HeadObjectFunc func(contextMoqParam context.Context, headObjectInput *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
// ListBucketsFunc mocks the ListBuckets method.
ListBucketsFunc func(contextMoqParam context.Context, listBucketsInput s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error)
ListBucketsFunc func(contextMoqParam context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error)
// ListBucketsAndOwnersFunc mocks the ListBucketsAndOwners method.
ListBucketsAndOwnersFunc func(contextMoqParam context.Context) ([]s3response.Bucket, error)
@@ -331,10 +331,10 @@ type BackendMock struct {
StringFunc func() string
// UploadPartFunc mocks the UploadPart method.
UploadPartFunc func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error)
UploadPartFunc func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error)
// UploadPartCopyFunc mocks the UploadPartCopy method.
UploadPartCopyFunc func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error)
UploadPartCopyFunc func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)
// calls tracks calls to the methods.
calls struct {
@@ -388,8 +388,8 @@ type BackendMock struct {
DeleteBucket []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
// DeleteBucketInput is the deleteBucketInput argument value.
DeleteBucketInput *s3.DeleteBucketInput
}
// DeleteBucketOwnershipControls holds details about calls to the DeleteBucketOwnershipControls method.
DeleteBucketOwnershipControls []struct {
@@ -547,8 +547,10 @@ type BackendMock struct {
ListBuckets []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// ListBucketsInput is the listBucketsInput argument value.
ListBucketsInput s3response.ListBucketsInput
// Owner is the owner argument value.
Owner string
// IsAdmin is the isAdmin argument value.
IsAdmin bool
}
// ListBucketsAndOwners holds details about calls to the ListBucketsAndOwners method.
ListBucketsAndOwners []struct {
@@ -1010,21 +1012,21 @@ func (mock *BackendMock) CreateMultipartUploadCalls() []struct {
}
// DeleteBucket calls DeleteBucketFunc.
func (mock *BackendMock) DeleteBucket(contextMoqParam context.Context, bucket string) error {
func (mock *BackendMock) DeleteBucket(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error {
if mock.DeleteBucketFunc == nil {
panic("BackendMock.DeleteBucketFunc: method is nil but Backend.DeleteBucket was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
ContextMoqParam context.Context
DeleteBucketInput *s3.DeleteBucketInput
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
ContextMoqParam: contextMoqParam,
DeleteBucketInput: deleteBucketInput,
}
mock.lockDeleteBucket.Lock()
mock.calls.DeleteBucket = append(mock.calls.DeleteBucket, callInfo)
mock.lockDeleteBucket.Unlock()
return mock.DeleteBucketFunc(contextMoqParam, bucket)
return mock.DeleteBucketFunc(contextMoqParam, deleteBucketInput)
}
// DeleteBucketCalls gets all the calls that were made to DeleteBucket.
@@ -1032,12 +1034,12 @@ func (mock *BackendMock) DeleteBucket(contextMoqParam context.Context, bucket st
//
// len(mockedBackend.DeleteBucketCalls())
func (mock *BackendMock) DeleteBucketCalls() []struct {
ContextMoqParam context.Context
Bucket string
ContextMoqParam context.Context
DeleteBucketInput *s3.DeleteBucketInput
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
ContextMoqParam context.Context
DeleteBucketInput *s3.DeleteBucketInput
}
mock.lockDeleteBucket.RLock()
calls = mock.calls.DeleteBucket
@@ -1410,7 +1412,7 @@ func (mock *BackendMock) GetBucketTaggingCalls() []struct {
}
// GetBucketVersioning calls GetBucketVersioningFunc.
func (mock *BackendMock) GetBucketVersioning(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
func (mock *BackendMock) GetBucketVersioning(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
if mock.GetBucketVersioningFunc == nil {
panic("BackendMock.GetBucketVersioningFunc: method is nil but Backend.GetBucketVersioning was just called")
}
@@ -1518,7 +1520,7 @@ func (mock *BackendMock) GetObjectAclCalls() []struct {
}
// GetObjectAttributes calls GetObjectAttributesFunc.
func (mock *BackendMock) GetObjectAttributes(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
func (mock *BackendMock) GetObjectAttributes(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
if mock.GetObjectAttributesFunc == nil {
panic("BackendMock.GetObjectAttributesFunc: method is nil but Backend.GetObjectAttributes was just called")
}
@@ -1790,21 +1792,23 @@ func (mock *BackendMock) HeadObjectCalls() []struct {
}
// ListBuckets calls ListBucketsFunc.
func (mock *BackendMock) ListBuckets(contextMoqParam context.Context, listBucketsInput s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
func (mock *BackendMock) ListBuckets(contextMoqParam context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
if mock.ListBucketsFunc == nil {
panic("BackendMock.ListBucketsFunc: method is nil but Backend.ListBuckets was just called")
}
callInfo := struct {
ContextMoqParam context.Context
ListBucketsInput s3response.ListBucketsInput
ContextMoqParam context.Context
Owner string
IsAdmin bool
}{
ContextMoqParam: contextMoqParam,
ListBucketsInput: listBucketsInput,
ContextMoqParam: contextMoqParam,
Owner: owner,
IsAdmin: isAdmin,
}
mock.lockListBuckets.Lock()
mock.calls.ListBuckets = append(mock.calls.ListBuckets, callInfo)
mock.lockListBuckets.Unlock()
return mock.ListBucketsFunc(contextMoqParam, listBucketsInput)
return mock.ListBucketsFunc(contextMoqParam, owner, isAdmin)
}
// ListBucketsCalls gets all the calls that were made to ListBuckets.
@@ -1812,12 +1816,14 @@ func (mock *BackendMock) ListBuckets(contextMoqParam context.Context, listBucket
//
// len(mockedBackend.ListBucketsCalls())
func (mock *BackendMock) ListBucketsCalls() []struct {
ContextMoqParam context.Context
ListBucketsInput s3response.ListBucketsInput
ContextMoqParam context.Context
Owner string
IsAdmin bool
} {
var calls []struct {
ContextMoqParam context.Context
ListBucketsInput s3response.ListBucketsInput
ContextMoqParam context.Context
Owner string
IsAdmin bool
}
mock.lockListBuckets.RLock()
calls = mock.calls.ListBuckets
@@ -2620,7 +2626,7 @@ func (mock *BackendMock) StringCalls() []struct {
}
// UploadPart calls UploadPartFunc.
func (mock *BackendMock) UploadPart(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
func (mock *BackendMock) UploadPart(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error) {
if mock.UploadPartFunc == nil {
panic("BackendMock.UploadPartFunc: method is nil but Backend.UploadPart was just called")
}
@@ -2656,7 +2662,7 @@ func (mock *BackendMock) UploadPartCalls() []struct {
}
// UploadPartCopy calls UploadPartCopyFunc.
func (mock *BackendMock) UploadPartCopy(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
func (mock *BackendMock) UploadPartCopy(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
if mock.UploadPartCopyFunc == nil {
panic("BackendMock.UploadPartCopyFunc: method is nil but Backend.UploadPartCopy was just called")
}
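Tests that exercise the regenerated mock now supply DeleteBucketFunc with the input-struct signature. A minimal sketch, assuming the aws-sdk-go-v2 s3 and aws packages already imported by the surrounding tests:
// sketch: wiring the new DeleteBucket signature in a test
mock := &BackendMock{
	DeleteBucketFunc: func(ctx context.Context, input *s3.DeleteBucketInput) error {
		return nil // pretend the bucket was removed
	},
}
err := mock.DeleteBucket(context.Background(),
	&s3.DeleteBucketInput{Bucket: aws.String("my-bucket")})
// err is nil and DeleteBucketCalls() now records one call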

File diff suppressed because it is too large

View File

@@ -92,7 +92,7 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
ListBucketsFunc: func(contextMoqParam context.Context, listBucketsInput s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
ListBucketsFunc: func(context.Context, string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, nil
},
},
@@ -109,7 +109,7 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
appErr := fiber.New()
s3ApiControllerErr := S3ApiController{
be: &BackendMock{
ListBucketsFunc: func(contextMoqParam context.Context, listBucketsInput s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
ListBucketsFunc: func(context.Context, string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
},
},
@@ -187,8 +187,8 @@ func TestS3ApiController_GetActions(t *testing.T) {
GetObjectAclFunc: func(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
return &s3.GetObjectAclOutput{}, nil
},
GetObjectAttributesFunc: func(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
return s3response.GetObjectAttributesResponse{}, nil
GetObjectAttributesFunc: func(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
return s3response.GetObjectAttributesResult{}, nil
},
GetObjectFunc: func(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
return &s3.GetObjectOutput{
@@ -232,9 +232,6 @@ func TestS3ApiController_GetActions(t *testing.T) {
getObjAttrs := httptest.NewRequest(http.MethodGet, "/my-bucket/key", nil)
getObjAttrs.Header.Set("X-Amz-Object-Attributes", "hello")
invalidChecksumMode := httptest.NewRequest(http.MethodGet, "/my-bucket/key", nil)
invalidChecksumMode.Header.Set("x-amz-checksum-mode", "invalid_checksum_mode")
tests := []struct {
name string
app *fiber.App
@@ -332,15 +329,6 @@ func TestS3ApiController_GetActions(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Get-actions-get-object-invalid-checksum-mode",
app: app,
args: args{
req: invalidChecksumMode,
},
wantErr: false,
statusCode: 400,
},
{
name: "Get-actions-get-object-success",
app: app,
@@ -394,8 +382,8 @@ func TestS3ApiController_ListActions(t *testing.T) {
GetBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) (map[string]string, error) {
return map[string]string{}, nil
},
GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
return s3response.GetBucketVersioningOutput{}, nil
GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
return &s3.GetBucketVersioningOutput{}, nil
},
ListObjectVersionsFunc: func(contextMoqParam context.Context, listObjectVersionsInput *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
return s3response.ListVersionsResult{}, nil
@@ -762,7 +750,7 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
req: httptest.NewRequest(http.MethodPut, "/my-bucket?tagging", strings.NewReader(tagBody)),
},
wantErr: false,
statusCode: 204,
statusCode: 200,
},
{
name: "Put-bucket-ownership-controls-invalid-ownership",
@@ -953,12 +941,12 @@ func TestS3ApiController_PutActions(t *testing.T) {
</Tagging>
`
//retentionBody := `
//<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
// <Mode>GOVERNANCE</Mode>
// <RetainUntilDate>2025-01-01T00:00:00Z</RetainUntilDate>
//</Retention>
//`
retentionBody := `
<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Mode>GOVERNANCE</Mode>
<RetainUntilDate>2025-01-01T00:00:00Z</RetainUntilDate>
</Retention>
`
legalHoldBody := `
<LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
@@ -983,14 +971,14 @@ func TestS3ApiController_PutActions(t *testing.T) {
PutObjectFunc: func(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
return s3response.PutObjectOutput{}, nil
},
UploadPartFunc: func(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
return &s3.UploadPartOutput{}, nil
UploadPartFunc: func(context.Context, *s3.UploadPartInput) (string, error) {
return "hello", nil
},
PutObjectTaggingFunc: func(_ context.Context, bucket, object string, tags map[string]string) error {
return nil
},
UploadPartCopyFunc: func(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
return s3response.CopyPartResult{}, nil
UploadPartCopyFunc: func(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
return s3response.CopyObjectResult{}, nil
},
PutObjectLegalHoldFunc: func(contextMoqParam context.Context, bucket, object, versionId string, status bool) error {
return nil
@@ -1024,11 +1012,6 @@ func TestS3ApiController_PutActions(t *testing.T) {
cpySrcReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
cpySrcReq.Header.Set("X-Amz-Copy-Source", "srcBucket/srcObject")
// CopyObject invalid checksum algorithm
cpyInvChecksumAlgo := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
cpyInvChecksumAlgo.Header.Set("X-Amz-Copy-Source", "srcBucket/srcObject")
cpyInvChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
// PutObjectAcl success
aclReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
aclReq.Header.Set("X-Amz-Acl", "private")
@@ -1050,40 +1033,6 @@ func TestS3ApiController_PutActions(t *testing.T) {
invAclBodyGrtReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?acl", strings.NewReader(body))
invAclBodyGrtReq.Header.Set("X-Amz-Grant-Read", "hello")
// PutObject invalid checksum algorithm
invChecksumAlgo := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
// PutObject invalid base64 checksum
invBase64Checksum := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invBase64Checksum.Header.Set("X-Amz-Checksum-Crc32", "invalid_base64")
// PutObject invalid crc32
invCrc32 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invCrc32.Header.Set("X-Amz-Checksum-Crc32", "YXNkZmFkc2Zhc2Rm")
// PutObject invalid crc32c
invCrc32c := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invCrc32c.Header.Set("X-Amz-Checksum-Crc32c", "YXNkZmFkc2Zhc2RmYXNkZg==")
// PutObject invalid sha1
invSha1 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invSha1.Header.Set("X-Amz-Checksum-Sha1", "YXNkZmFkc2Zhc2RmYXNkZnNkYWZkYXNmZGFzZg==")
// PutObject invalid sha256
invSha256 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invSha256.Header.Set("X-Amz-Checksum-Sha256", "YXNkZmFkc2Zhc2RmYXNkZnNkYWZkYXNmZGFzZmFkc2Zhc2Rm")
// PutObject multiple checksum headers
mulChecksumHdrs := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
mulChecksumHdrs.Header.Set("X-Amz-Checksum-Sha256", "d1SPCd/kZ2rAzbbLUC0n/bEaOSx70FNbXbIqoIxKuPY=")
mulChecksumHdrs.Header.Set("X-Amz-Checksum-Crc32c", "ww2FVQ==")
// PutObject checksum algorithm and header mismatch
checksumHdrMismatch := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
checksumHdrMismatch.Header.Set("X-Amz-Checksum-Algorithm", "SHA1")
checksumHdrMismatch.Header.Set("X-Amz-Checksum-Crc32c", "ww2FVQ==")
tests := []struct {
name string
app *fiber.App
@@ -1127,15 +1076,15 @@ func TestS3ApiController_PutActions(t *testing.T) {
wantErr: false,
statusCode: 400,
},
//{
// name: "put-object-retention-success",
// app: app,
// args: args{
// req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?retention", strings.NewReader(retentionBody)),
// },
// wantErr: false,
// statusCode: 200,
//},
{
name: "put-object-retention-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?retention", strings.NewReader(retentionBody)),
},
wantErr: false,
statusCode: 200,
},
{
name: "put-legal-hold-invalid-request",
app: app,
@@ -1226,15 +1175,6 @@ func TestS3ApiController_PutActions(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Copy-object-invalid-checksum-algorithm",
app: app,
args: args{
req: cpyInvChecksumAlgo,
},
wantErr: false,
statusCode: 400,
},
{
name: "Copy-object-success",
app: app,
@@ -1277,7 +1217,7 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
DeleteBucketFunc: func(_ context.Context, bucket string) error {
DeleteBucketFunc: func(context.Context, *s3.DeleteBucketInput) error {
return nil
},
DeleteBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) error {
@@ -1688,7 +1628,7 @@ func TestS3ApiController_HeadObject(t *testing.T) {
return acldata, nil
},
HeadObjectFunc: func(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
return nil, s3err.GetAPIError(42)
},
},
}
@@ -1702,9 +1642,6 @@ func TestS3ApiController_HeadObject(t *testing.T) {
})
appErr.Head("/:bucket/:key/*", s3ApiControllerErr.HeadObject)
invChecksumMode := httptest.NewRequest(http.MethodHead, "/my-bucket/my-key", nil)
invChecksumMode.Header.Set("X-Amz-Checksum-Mode", "invalid_checksum_mode")
tests := []struct {
name string
app *fiber.App
@@ -1721,15 +1658,6 @@ func TestS3ApiController_HeadObject(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Head-object-invalid-checksum-mode",
app: app,
args: args{
req: invChecksumMode,
},
wantErr: false,
statusCode: 400,
},
{
name: "Head-object-error",
app: appErr,
@@ -1785,19 +1713,6 @@ func TestS3ApiController_CreateActions(t *testing.T) {
</SelectObjectContentRequest>
`
completMpBody := `
<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Part>
<ETag>etag</ETag>
<PartNumber>1</PartNumber>
</Part>
</CompleteMultipartUpload>
`
completMpEmptyBody := `
<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/"></CompleteMultipartUpload>
`
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
@@ -1807,9 +1722,6 @@ func TestS3ApiController_CreateActions(t *testing.T) {
})
app.Post("/:bucket/:key/*", s3ApiController.CreateActions)
invChecksumAlgo := httptest.NewRequest(http.MethodPost, "/my-bucket/my-key", nil)
invChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
tests := []struct {
name string
app *fiber.App
@@ -1853,33 +1765,15 @@ func TestS3ApiController_CreateActions(t *testing.T) {
wantErr: false,
statusCode: 400,
},
{
name: "Complete-multipart-upload-empty-parts",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(completMpEmptyBody)),
},
wantErr: false,
statusCode: 400,
},
{
name: "Complete-multipart-upload-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(completMpBody)),
req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(`<root><key>body</key></root>`)),
},
wantErr: false,
statusCode: 200,
},
{
name: "Create-multipart-upload-invalid-checksum-algorithm",
app: app,
args: args{
req: invChecksumAlgo,
},
wantErr: false,
statusCode: 400,
},
{
name: "Create-multipart-upload-success",
app: app,

View File

@@ -74,11 +74,6 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fibe
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
}
// if owner is not set, set default owner to root account
if parsedAcl.Owner == "" {
parsedAcl.Owner = ctx.Locals("rootAccess").(string)
}
ctx.Locals("parsedAcl", parsedAcl)
return ctx.Next()
}

View File

@@ -1,59 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"strings"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
func IsAdmin(logger s3log.AuditLogger) fiber.Handler {
return func(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != auth.RoleAdmin {
path := ctx.Path()
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminAccessDenied),
&controllers.MetaOpts{
Logger: logger,
Action: detectAction(path),
})
}
return ctx.Next()
}
}
func detectAction(path string) (action string) {
if strings.Contains(path, "create-user") {
action = metrics.ActionAdminCreateUser
} else if strings.Contains(path, "update-user") {
action = metrics.ActionAdminUpdateUser
} else if strings.Contains(path, "delete-user") {
action = metrics.ActionAdminDeleteUser
} else if strings.Contains(path, "list-user") {
action = metrics.ActionAdminListUsers
} else if strings.Contains(path, "list-buckets") {
action = metrics.ActionAdminListBuckets
} else if strings.Contains(path, "change-bucket-owner") {
action = metrics.ActionAdminChangeBucketOwner
}
return action
}

View File

@@ -76,7 +76,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
}
ctx.Locals("isRoot", authData.Access == root.Access)
ctx.Locals("rootAccess", root.Access)
account, err := acct.getAccount(authData.Access)
if err == auth.ErrNoSuchUser {
@@ -109,7 +108,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
return sendResponse(ctx, err, logger, mm)
}
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if utils.IsBigDataAction(ctx) {
// for streaming PUT actions, authorization is deferred
// until end of stream due to need to get length and
@@ -117,24 +115,10 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
return utils.NewAuthReader(ctx, r, authData, account.Secret, debug)
})
// wrap the io.Reader with ChunkReader if x-amz-content-sha256
// provide chunk encoding value
if utils.IsStreamingPayload(hashPayload) {
var err error
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr io.Reader
cr, err = utils.NewChunkReader(ctx, r, authData, region, account.Secret, tdate)
return cr
})
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
}
return ctx.Next()
}
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if !utils.IsSpecialPayload(hashPayload) {
// Calculate the hash of the request payload
hashedPayload := sha256.Sum256(ctx.Body())

View File

@@ -0,0 +1,62 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"io"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3log"
)
// ProcessChunkedBody initializes the chunked upload stream if the
// request appears to be a chunked upload
func ProcessChunkedBody(root RootUserConfig, iam auth.IAMService, logger s3log.AuditLogger, mm *metrics.Manager, region string) fiber.Handler {
return func(ctx *fiber.Ctx) error {
decodedLength := ctx.Get("X-Amz-Decoded-Content-Length")
if decodedLength == "" {
return ctx.Next()
}
// TODO: validate content length
authData, err := utils.ParseAuthorization(ctx.Get("Authorization"))
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
acct := ctx.Locals("account").(auth.Account)
amzdate := ctx.Get("X-Amz-Date")
date, _ := time.Parse(iso8601Format, amzdate)
if utils.IsBigDataAction(ctx) {
var err error
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr *utils.ChunkReader
cr, err = utils.NewChunkReader(ctx, r, authData, region, acct.Secret, date)
return cr
})
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
return ctx.Next()
}
return ctx.Next()
}
}
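For reference, a sketch of the header set that marks a request as a chunked upload (values are illustrative, taken from the AWS streaming-signature example); X-Amz-Decoded-Content-Length carries the object size with the chunk framing excluded, which is what this middleware keys off:

package main

import (
	"bytes"
	"net/http"
)

// newChunkedPut builds an illustrative SigV4 streaming PUT request
func newChunkedPut(body []byte) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodPut, "http://gw.example/bucket/key", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Encoding", "aws-chunked")
	req.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
	// object bytes only; Content-Length would include the chunk framing
	req.Header.Set("X-Amz-Decoded-Content-Length", "66560")
	req.Header.Set("X-Amz-Date", "20130524T000000Z")
	return req, nil
}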

View File

@@ -45,7 +45,7 @@ func VerifyMD5Body(logger s3log.AuditLogger) fiber.Handler {
}
sum := md5.Sum(ctx.Body())
calculatedSum := utils.Base64SumString(sum[:])
calculatedSum := utils.Md5SumString(sum[:])
if incomingSum != calculatedSum {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest), &controllers.MetaOpts{Logger: logger})

View File

@@ -43,8 +43,6 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, logger
}
ctx.Locals("isRoot", authData.Access == root.Access)
ctx.Locals("rootAccess", root.Access)
account, err := acct.getAccount(authData.Access)
if err == auth.ErrNoSuchUser {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID), logger, mm)

View File

@@ -26,11 +26,12 @@ import (
func DecodeURL(logger s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
return func(ctx *fiber.Ctx) error {
unescp, err := url.QueryUnescape(string(ctx.Request().URI().PathOriginal()))
reqURL := ctx.Request().URI().String()
decoded, err := url.Parse(reqURL)
if err != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidURI), &controllers.MetaOpts{Logger: logger, MetricsMng: mm})
}
ctx.Path(unescp)
ctx.Path(decoded.Path)
return ctx.Next()
}
}
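Presumably the motivation for parsing the full URI rather than calling url.QueryUnescape on the raw path: QueryUnescape treats '+' as a space, corrupting object keys containing a literal plus, while url.Parse only decodes percent-escapes in the path. A quick illustration:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// QueryUnescape converts '+' to a space
	s, _ := url.QueryUnescape("/bucket/a+b%2Fc")
	fmt.Println(s) // /bucket/a b/c

	// Parse decodes percent-escapes but leaves '+' intact in the path
	u, _ := url.Parse("http://gw.example/bucket/a+b%2Fc")
	fmt.Println(u.Path) // /bucket/a+b/c
}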

View File

@@ -20,7 +20,6 @@ import (
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3api/middlewares"
"github.com/versity/versitygw/s3event"
"github.com/versity/versitygw/s3log"
)
@@ -36,22 +35,22 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
adminController := controllers.NewAdminController(iam, be, aLogger)
// CreateUser admin api
app.Patch("/create-user", middlewares.IsAdmin(logger), adminController.CreateUser)
app.Patch("/create-user", adminController.CreateUser)
// DeleteUsers admin api
app.Patch("/delete-user", middlewares.IsAdmin(logger), adminController.DeleteUser)
app.Patch("/delete-user", adminController.DeleteUser)
// UpdateUser admin api
app.Patch("update-user", middlewares.IsAdmin(logger), adminController.UpdateUser)
app.Patch("update-user", adminController.UpdateUser)
// ListUsers admin api
app.Patch("/list-users", middlewares.IsAdmin(logger), adminController.ListUsers)
app.Patch("/list-users", adminController.ListUsers)
// ChangeBucketOwner admin api
app.Patch("/change-bucket-owner", middlewares.IsAdmin(logger), adminController.ChangeBucketOwner)
app.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)
// ListBucketsAndOwners admin api
app.Patch("/list-buckets", middlewares.IsAdmin(logger), adminController.ListBuckets)
app.Patch("/list-buckets", adminController.ListBuckets)
}
// ListBuckets action

View File

@@ -81,6 +81,7 @@ func New(
// Authentication middlewares
app.Use(middlewares.VerifyPresignedV4Signature(root, iam, l, mm, region, server.debug))
app.Use(middlewares.VerifyV4Signature(root, iam, l, mm, region, server.debug))
app.Use(middlewares.ProcessChunkedBody(root, iam, l, mm, region))
app.Use(middlewares.VerifyMD5Body(l))
app.Use(middlewares.AclParser(be, l, server.readonly))

View File

@@ -56,7 +56,7 @@ func NewAuthReader(ctx *fiber.Ctx, r io.Reader, auth AuthData, secret string, de
var hr *HashReader
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if !IsSpecialPayload(hashPayload) {
hr, _ = NewHashReader(r, "", HashTypeSha256Hex)
hr, _ = NewHashReader(r, "", HashTypeSha256)
} else {
hr, _ = NewHashReader(r, "", HashTypeNone)
}
@@ -260,3 +260,19 @@ func removeSpace(str string) string {
}
return b.String()
}
var (
specialValues = map[string]bool{
"UNSIGNED-PAYLOAD": true,
"STREAMING-UNSIGNED-PAYLOAD-TRAILER": true,
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD": true,
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER": true,
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD": true,
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER": true,
}
)
// IsSpecialPayload checks for streaming/unsigned authorization types
func IsSpecialPayload(str string) bool {
return specialValues[str]
}
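A quick illustration of which X-Amz-Content-Sha256 values cause NewAuthReader to skip payload hashing; a literal hex digest is not special, so the body hash is verified:

package main

import (
	"fmt"

	"github.com/versity/versitygw/s3api/utils"
)

func main() {
	fmt.Println(utils.IsSpecialPayload("UNSIGNED-PAYLOAD"))                   // true
	fmt.Println(utils.IsSpecialPayload("STREAMING-AWS4-HMAC-SHA256-PAYLOAD")) // true
	// an actual SHA256 hex digest is hashed and compared as usual
	fmt.Println(utils.IsSpecialPayload("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")) // false
}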

View File

@@ -15,98 +15,260 @@
package utils
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"math"
"strconv"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3err"
)
type payloadType string
// chunked uploads described in:
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
const (
payloadTypeUnsigned payloadType = "UNSIGNED-PAYLOAD"
payloadTypeStreamingUnsignedTrailer payloadType = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
payloadTypeStreamingSigned payloadType = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
payloadTypeStreamingSignedTrailer payloadType = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
payloadTypeStreamingEcdsa payloadType = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
payloadTypeStreamingEcdsaTrailer payloadType = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
chunkHdrStr = ";chunk-signature="
chunkHdrDelim = "\r\n"
zeroLenSig = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
awsV4 = "AWS4"
awsS3Service = "s3"
awsV4Request = "aws4_request"
streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
)
// ChunkReader reads from a chunked upload request body and returns
// the object data stream
type ChunkReader struct {
r io.Reader
signingKey []byte
prevSig string
parsedSig string
currentChunkSize int64
chunkDataLeft int64
trailerExpected int
stash []byte
chunkHash hash.Hash
strToSignPrefix string
skipcheck bool
}
// NewChunkReader reads from request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (*ChunkReader, error) {
return &ChunkReader{
r: r,
signingKey: getSigningKey(secret, region, date),
// the authdata.Signature is validated in the auth-reader,
// so we can use that here without any other checks
prevSig: authdata.Signature,
chunkHash: sha256.New(),
strToSignPrefix: getStringToSignPrefix(date, region),
}, nil
}
// Read satisfies the io.Reader for this type
func (cr *ChunkReader) Read(p []byte) (int, error) {
n, err := cr.r.Read(p)
if err != nil && err != io.EOF {
return n, err
}
if cr.chunkDataLeft < int64(n) {
chunkSize := cr.chunkDataLeft
if chunkSize > 0 {
cr.chunkHash.Write(p[:chunkSize])
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
n += int(chunkSize)
return n, err
}
cr.chunkDataLeft -= int64(n)
cr.chunkHash.Write(p[:n])
return n, err
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// This part is the same for all chunks;
// only the previous signature and the hash of the current chunk change
func getStringToSignPrefix(date time.Time, region string) string {
credentialScope := fmt.Sprintf("%s/%s/%s/%s",
date.Format("20060102"),
region,
awsS3Service,
awsV4Request)
return fmt.Sprintf("%s\n%s\n%s",
streamPayloadAlgo,
date.Format("20060102T150405Z"),
credentialScope)
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// For each chunk, the signature is calculated using the following
// string to sign. For the first chunk, the seed signature is used as the
// previous signature.
func getChunkStringToSign(prefix, prevSig string, chunkHash []byte) string {
return fmt.Sprintf("%s\n%s\n%s\n%s",
prefix,
prevSig,
zeroLenSig,
hex.EncodeToString(chunkHash))
}
// The provided p should have all of the previous chunk data and trailer
// consumed already. Here p[0] is expected to be the first byte of the
// new chunk size, with ";chunk-signature=" following. The only exception
// is if we started consuming the trailer, but hit the end of the read buffer.
// In this case, parseAndRemoveChunkInfo is called with skipcheck=true to
// finish consuming the final trailer bytes.
// This parses the chunk metadata in situ without allocating an extra buffer.
// It will just read and validate the chunk metadata and then move the
// following chunk data to overwrite the metadata in the provided buffer.
func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
n := len(p)
if !cr.skipcheck && cr.parsedSig != "" {
chunkhash := cr.chunkHash.Sum(nil)
cr.chunkHash.Reset()
sigstr := getChunkStringToSign(cr.strToSignPrefix, cr.prevSig, chunkhash)
cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))
if cr.currentChunkSize != 0 && cr.prevSig != cr.parsedSig {
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
}
if cr.trailerExpected != 0 {
if len(p) < len(chunkHdrDelim) {
// This is the special case where we need to consume the
// trailer, but instead hit the end of the buffer. The
// subsequent call will finish consuming the trailer.
cr.chunkDataLeft = 0
cr.trailerExpected -= len(p)
cr.skipcheck = true
return 0, nil
}
// move data up to remove trailer
copy(p, p[cr.trailerExpected:])
n -= cr.trailerExpected
}
cr.skipcheck = false
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
cr.currentChunkSize = chunkSize
cr.parsedSig = sig
if err == errskipHeader {
cr.chunkDataLeft = 0
return 0, nil
}
if err != nil {
return 0, err
}
if chunkSize == 0 {
return 0, io.EOF
}
cr.trailerExpected = len(chunkHdrDelim)
// move data up to remove chunk header
copy(p, p[bufOffset:n])
n -= bufOffset
// if remaining buffer larger than chunk data,
// parse next header in buffer
if int64(n) > chunkSize {
cr.chunkDataLeft = 0
cr.chunkHash.Write(p[:chunkSize])
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
if (chunkSize + int64(n)) > math.MaxInt {
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
return n + int(chunkSize), err
}
cr.chunkDataLeft = chunkSize - int64(n)
cr.chunkHash.Write(p[:n])
return n, nil
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
// Task 3: Calculate Signature
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html#signing-request-intro
func getSigningKey(secret, region string, date time.Time) []byte {
dateKey := hmac256([]byte(awsV4+secret), []byte(date.Format(yyyymmdd)))
dateRegionKey := hmac256(dateKey, []byte(region))
dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
return signingKey
}
func hmac256(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
var (
specialValues = map[payloadType]bool{
payloadTypeUnsigned: true,
payloadTypeStreamingUnsignedTrailer: true,
payloadTypeStreamingSigned: true,
payloadTypeStreamingSignedTrailer: true,
payloadTypeStreamingEcdsa: true,
payloadTypeStreamingEcdsaTrailer: true,
}
errInvalidChunkFormat = errors.New("invalid chunk header format")
errskipHeader = errors.New("skip to next header")
)
func (pt payloadType) isValid() bool {
return pt == payloadTypeUnsigned ||
pt == payloadTypeStreamingUnsignedTrailer ||
pt == payloadTypeStreamingSigned ||
pt == payloadTypeStreamingSignedTrailer ||
pt == payloadTypeStreamingEcdsa ||
pt == payloadTypeStreamingEcdsaTrailer
}
type checksumType string
const (
checksumTypeCrc32 checksumType = "x-amz-checksum-crc32"
checksumTypeCrc32c checksumType = "x-amz-checksum-crc32c"
checksumTypeSha1 checksumType = "x-amz-checksum-sha1"
checksumTypeSha256 checksumType = "x-amz-checksum-sha256"
checksumTypeCrc64nvme checksumType = "x-amz-checksum-crc64nvme"
maxHeaderSize = 1024
)
func (c checksumType) isValid() bool {
return c == checksumTypeCrc32 ||
c == checksumTypeCrc32c ||
c == checksumTypeSha1 ||
c == checksumTypeSha256 ||
c == checksumTypeCrc64nvme
}
// IsSpecialPayload checks for special authorization types
func IsSpecialPayload(str string) bool {
return specialValues[payloadType(str)]
}
// IsStreamingPayload checks for streaming/unsigned authorization types
func IsStreamingPayload(str string) bool {
pt := payloadType(str)
return pt == payloadTypeStreamingUnsignedTrailer ||
pt == payloadTypeStreamingSigned ||
pt == payloadTypeStreamingSignedTrailer
}
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
decContLength := ctx.Get("X-Amz-Decoded-Content-Length")
if decContLength == "" {
return nil, s3err.GetAPIError(s3err.ErrMissingDecodedContentLength)
}
contentSha256 := payloadType(ctx.Get("X-Amz-Content-Sha256"))
if !contentSha256.isValid() {
//TODO: Add proper APIError
return nil, fmt.Errorf("invalid x-amz-content-sha256: %v", string(contentSha256))
// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
stashLen := len(cr.stash)
if cr.stash != nil {
tmp := make([]byte, maxHeaderSize)
copy(tmp, cr.stash)
copy(tmp[len(cr.stash):], header)
header = tmp
cr.stash = nil
}
checksumType := checksumType(ctx.Get("X-Amz-Trailer"))
if checksumType != "" && !checksumType.isValid() {
//TODO: Add proper APIError
return nil, fmt.Errorf("invalid X-Amz-Trailer: %v", checksumType)
semicolonIndex := bytes.Index(header, []byte(chunkHdrStr))
if semicolonIndex == -1 {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
cr.trailerExpected = 0
return 0, "", 0, errskipHeader
}
switch contentSha256 {
case payloadTypeStreamingUnsignedTrailer:
return NewUnsignedChunkReader(r, checksumType)
//TODO: Add other chunk readers
sigIndex := semicolonIndex + len(chunkHdrStr)
sigEndIndex := bytes.Index(header[sigIndex:], []byte(chunkHdrDelim))
if sigEndIndex == -1 {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
cr.trailerExpected = 0
return 0, "", 0, errskipHeader
}
return NewSignedChunkReader(r, authdata, region, secret, date)
chunkSizeBytes := header[:semicolonIndex]
chunkSize, err := strconv.ParseInt(string(chunkSizeBytes), 16, 64)
if err != nil {
return 0, "", 0, errInvalidChunkFormat
}
signature := string(header[sigIndex:(sigIndex + sigEndIndex)])
dataStartOffset := sigIndex + sigEndIndex + len(chunkHdrDelim)
return chunkSize, signature, dataStartOffset - stashLen, nil
}
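To make the framing concrete, a sketch of the wire format this reader consumes: each chunk is the hex payload size, ";chunk-signature=", a 64-hex-character HMAC-SHA256 signature, CRLF, the payload, CRLF; the stream ends with a zero-sized chunk. The signature below is a placeholder, not a valid chunk signature:

package main

import (
	"fmt"
	"strings"
)

// buildChunk frames one chunk the way parseChunkHeaderBytes expects it
func buildChunk(data, sig string) string {
	return fmt.Sprintf("%x;chunk-signature=%s\r\n%s\r\n", len(data), sig, data)
}

func main() {
	fakeSig := strings.Repeat("0", 64)
	body := buildChunk("hello world", fakeSig) +
		buildChunk("", fakeSig) // zero-length chunk terminates the stream
	fmt.Printf("%q\n", body)
}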

View File

@@ -16,19 +16,13 @@ package utils
import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"hash"
"hash/crc32"
"hash/crc64"
"io"
"math/bits"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
)
@@ -37,21 +31,11 @@ type HashType string
const (
// HashTypeMd5 generates MD5 checksum for the data stream
HashTypeMd5 HashType = "md5"
// HashTypeSha256 generates SHA256 Base64-Encoded checksum for the data stream
HashTypeSha256 HashType = "sha256"
// HashTypeSha256Hex generates SHA256 hex encoded checksum for the data stream
HashTypeSha256Hex HashType = "sha256-hex"
// HashTypeSha1 generates SHA1 Base64-Encoded checksum for the data stream
HashTypeSha1 HashType = "sha1"
// HashTypeCRC32 generates CRC32 Base64-Encoded checksum for the data stream
HashTypeCRC32 HashType = "crc32"
// HashTypeCRC32C generates CRC32C Base64-Encoded checksum for the data stream
HashTypeCRC32C HashType = "crc32c"
// HashTypeCRC64NVME generates CRC64NVME Base64-Encoded checksum for the data stream
HashTypeCRC64NVME HashType = "crc64nvme"
HashTypeMd5 = "md5"
// HashTypeSha256 generates SHA256 checksum for the data stream
HashTypeSha256 = "sha256"
// HashTypeNone is a no-op checksum for the data stream
HashTypeNone HashType = "none"
HashTypeNone = "none"
)
// HashReader is an io.Reader that calculates the checksum
@@ -78,18 +62,8 @@ func NewHashReader(r io.Reader, expectedSum string, ht HashType) (*HashReader, e
switch ht {
case HashTypeMd5:
hash = md5.New()
case HashTypeSha256Hex:
hash = sha256.New()
case HashTypeSha256:
hash = sha256.New()
case HashTypeSha1:
hash = sha1.New()
case HashTypeCRC32:
hash = crc32.NewIEEE()
case HashTypeCRC32C:
hash = crc32.New(crc32.MakeTable(crc32.Castagnoli))
case HashTypeCRC64NVME:
hash = crc64.New(crc64.MakeTable(bits.Reverse64(0xad93d23594c93659)))
case HashTypeNone:
hash = noop{}
default:
@@ -114,40 +88,15 @@ func (hr *HashReader) Read(p []byte) (int, error) {
if errors.Is(readerr, io.EOF) && hr.sum != "" {
switch hr.hashType {
case HashTypeMd5:
sum := hr.Sum()
sum := base64.StdEncoding.EncodeToString(hr.hash.Sum(nil))
if sum != hr.sum {
return n, s3err.GetAPIError(s3err.ErrInvalidDigest)
}
case HashTypeSha256Hex:
sum := hr.Sum()
case HashTypeSha256:
sum := hex.EncodeToString(hr.hash.Sum(nil))
if sum != hr.sum {
return n, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch)
}
case HashTypeCRC32:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc32)
}
case HashTypeCRC32C:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc32c)
}
case HashTypeSha1:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmSha1)
}
case HashTypeSha256:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmSha256)
}
case HashTypeCRC64NVME:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc64nvme)
}
default:
return n, errInvalidHashType
}
@@ -155,38 +104,20 @@ func (hr *HashReader) Read(p []byte) (int, error) {
return n, readerr
}
func (hr *HashReader) SetReader(r io.Reader) {
hr.r = r
}
// Sum returns the checksum hash of the data read so far
func (hr *HashReader) Sum() string {
switch hr.hashType {
case HashTypeMd5:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeSha256Hex:
return hex.EncodeToString(hr.hash.Sum(nil))
case HashTypeCRC32:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeCRC32C:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeSha1:
return Base64SumString(hr.hash.Sum(nil))
return Md5SumString(hr.hash.Sum(nil))
case HashTypeSha256:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeCRC64NVME:
return Base64SumString(hr.hash.Sum(nil))
return hex.EncodeToString(hr.hash.Sum(nil))
default:
return ""
}
}
func (hr *HashReader) Type() HashType {
return hr.hashType
}
// Md5SumString converts the hash bytes to the string checksum value
func Base64SumString(b []byte) string {
func Md5SumString(b []byte) string {
return base64.StdEncoding.EncodeToString(b)
}
@@ -197,59 +128,3 @@ func (n noop) Sum(b []byte) []byte { return []byte{} }
func (n noop) Reset() {}
func (n noop) Size() int { return 0 }
func (n noop) BlockSize() int { return 1 }
// NewCompositeChecksumReader initializes a composite checksum
// processor, which decodes and validates the provided
// checksums and returns the final checksum based on
// the previous processings.
//
// The supported checksum types are:
// - CRC32
// - CRC32C
// - SHA1
// - SHA256
func NewCompositeChecksumReader(ht HashType) (*CompositeChecksumReader, error) {
var hasher hash.Hash
switch ht {
case HashTypeSha256:
hasher = sha256.New()
case HashTypeSha1:
hasher = sha1.New()
case HashTypeCRC32:
hasher = crc32.NewIEEE()
case HashTypeCRC32C:
hasher = crc32.New(crc32.MakeTable(crc32.Castagnoli))
case HashTypeNone:
hasher = noop{}
default:
return nil, errInvalidHashType
}
return &CompositeChecksumReader{
hasher: hasher,
}, nil
}
type CompositeChecksumReader struct {
hasher hash.Hash
}
// Process decodes the checksum and writes it into the hasher
func (ccr *CompositeChecksumReader) Process(checksum string) error {
data, err := base64.StdEncoding.DecodeString(checksum)
if err != nil {
return fmt.Errorf("base64 decode: %w", err)
}
_, err = ccr.hasher.Write(data)
if err != nil {
return fmt.Errorf("hash write: %w", err)
}
return nil
}
// Sum returns the base64-encoded composite checksum
func (ccr *CompositeChecksumReader) Sum() string {
return Base64SumString(ccr.hasher.Sum(nil))
}
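A minimal usage sketch of the composite reader as defined in the listing above, assuming two per-part CRC32 checksums computed elsewhere; S3 reports composite multipart checksums in the form "<sum>-<partCount>":

package main

import (
	"fmt"

	"github.com/versity/versitygw/s3api/utils"
)

func main() {
	ccr, err := utils.NewCompositeChecksumReader(utils.HashTypeCRC32)
	if err != nil {
		panic(err)
	}
	// each entry is the base64 CRC32 checksum of one uploaded part
	for _, part := range []string{"ww2FVQ==", "DOsb4w=="} {
		if err := ccr.Process(part); err != nil {
			panic(err)
		}
	}
	fmt.Printf("%s-2\n", ccr.Sum())
}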

View File

@@ -1,276 +0,0 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"math"
"strconv"
"time"
"github.com/versity/versitygw/s3err"
)
// chunked uploads described in:
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
const (
chunkHdrStr = ";chunk-signature="
chunkHdrDelim = "\r\n"
zeroLenSig = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
awsV4 = "AWS4"
awsS3Service = "s3"
awsV4Request = "aws4_request"
streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
)
// ChunkReader reads from a chunked upload request body and returns
// the object data stream
type ChunkReader struct {
r io.Reader
signingKey []byte
prevSig string
parsedSig string
currentChunkSize int64
chunkDataLeft int64
trailerExpected int
stash []byte
chunkHash hash.Hash
strToSignPrefix string
skipcheck bool
}
// NewSignedChunkReader reads from the request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
return &ChunkReader{
r: r,
signingKey: getSigningKey(secret, region, date),
// the authdata.Signature is validated in the auth-reader,
// so we can use that here without any other checks
prevSig: authdata.Signature,
chunkHash: sha256.New(),
strToSignPrefix: getStringToSignPrefix(date, region),
}, nil
}
// Read satisfies the io.Reader for this type
func (cr *ChunkReader) Read(p []byte) (int, error) {
n, err := cr.r.Read(p)
if err != nil && err != io.EOF {
return 0, err
}
if cr.chunkDataLeft < int64(n) {
chunkSize := cr.chunkDataLeft
if chunkSize > 0 {
cr.chunkHash.Write(p[:chunkSize])
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
n += int(chunkSize)
return n, err
}
cr.chunkDataLeft -= int64(n)
cr.chunkHash.Write(p[:n])
return n, err
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// This part is the same for all chunks;
// only the previous signature and the hash of the current chunk change
func getStringToSignPrefix(date time.Time, region string) string {
credentialScope := fmt.Sprintf("%s/%s/%s/%s",
date.Format("20060102"),
region,
awsS3Service,
awsV4Request)
return fmt.Sprintf("%s\n%s\n%s",
streamPayloadAlgo,
date.Format("20060102T150405Z"),
credentialScope)
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// For each chunk, the signature is calculated using the following
// string to sign. For the first chunk, the seed signature is used as the
// previous signature.
func getChunkStringToSign(prefix, prevSig string, chunkHash []byte) string {
return fmt.Sprintf("%s\n%s\n%s\n%s",
prefix,
prevSig,
zeroLenSig,
hex.EncodeToString(chunkHash))
}
// The provided p should have all of the previous chunk data and trailer
// consumed already. Here p[0] is expected to be the first byte of the
// new chunk size, with ";chunk-signature=" following. The only exception
// is if we started consuming the trailer, but hit the end of the read buffer.
// In this case, parseAndRemoveChunkInfo is called with skipcheck=true to
// finish consuming the final trailer bytes.
// This parses the chunk metadata in situ without allocating an extra buffer.
// It will just read and validate the chunk metadata and then move the
// following chunk data to overwrite the metadata in the provided buffer.
func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
n := len(p)
if !cr.skipcheck && cr.parsedSig != "" {
chunkhash := cr.chunkHash.Sum(nil)
cr.chunkHash.Reset()
sigstr := getChunkStringToSign(cr.strToSignPrefix, cr.prevSig, chunkhash)
cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))
if cr.currentChunkSize != 0 && cr.prevSig != cr.parsedSig {
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
}
if cr.trailerExpected != 0 {
if len(p) < len(chunkHdrDelim) {
// This is the special case where we need to consume the
// trailer, but instead hit the end of the buffer. The
// subsequent call will finish consuming the trailer.
cr.chunkDataLeft = 0
cr.trailerExpected -= len(p)
cr.skipcheck = true
return 0, nil
}
// move data up to remove trailer
copy(p, p[cr.trailerExpected:])
n -= cr.trailerExpected
}
cr.skipcheck = false
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
cr.currentChunkSize = chunkSize
cr.parsedSig = sig
if err == errskipHeader {
cr.chunkDataLeft = 0
return 0, nil
}
if err != nil {
return 0, err
}
if chunkSize == 0 {
return 0, io.EOF
}
cr.trailerExpected = len(chunkHdrDelim)
// move data up to remove chunk header
copy(p, p[bufOffset:n])
n -= bufOffset
// if remaining buffer larger than chunk data,
// parse next header in buffer
if int64(n) > chunkSize {
cr.chunkDataLeft = 0
cr.chunkHash.Write(p[:chunkSize])
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
if (chunkSize + int64(n)) > math.MaxInt {
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
return n + int(chunkSize), err
}
cr.chunkDataLeft = chunkSize - int64(n)
cr.chunkHash.Write(p[:n])
return n, nil
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
// Task 3: Calculate Signature
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html#signing-request-intro
func getSigningKey(secret, region string, date time.Time) []byte {
dateKey := hmac256([]byte(awsV4+secret), []byte(date.Format(yyyymmdd)))
dateRegionKey := hmac256(dateKey, []byte(region))
dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
return signingKey
}
func hmac256(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
var (
errInvalidChunkFormat = errors.New("invalid chunk header format")
errskipHeader = errors.New("skip to next header")
)
const (
maxHeaderSize = 1024
)
// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
stashLen := len(cr.stash)
if stashLen > maxHeaderSize {
return 0, "", 0, errInvalidChunkFormat
}
if cr.stash != nil {
tmp := make([]byte, maxHeaderSize)
copy(tmp, cr.stash)
copy(tmp[len(cr.stash):], header)
header = tmp
cr.stash = nil
}
semicolonIndex := bytes.Index(header, []byte(chunkHdrStr))
if semicolonIndex == -1 {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
cr.trailerExpected = 0
return 0, "", 0, errskipHeader
}
sigIndex := semicolonIndex + len(chunkHdrStr)
sigEndIndex := bytes.Index(header[sigIndex:], []byte(chunkHdrDelim))
if sigEndIndex == -1 {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
cr.trailerExpected = 0
return 0, "", 0, errskipHeader
}
chunkSizeBytes := header[:semicolonIndex]
chunkSize, err := strconv.ParseInt(string(chunkSizeBytes), 16, 64)
if err != nil {
return 0, "", 0, errInvalidChunkFormat
}
signature := string(header[sigIndex:(sigIndex + sigEndIndex)])
dataStartOffset := sigIndex + sigEndIndex + len(chunkHdrDelim)
return chunkSize, signature, dataStartOffset - stashLen, nil
}
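As a worked example of the key-derivation chain in getSigningKey, using the well-known example secret from the AWS SigV4 documentation and an illustrative date and region:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors the hmac256 helper in the listing above
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
	dateKey := hmacSHA256([]byte("AWS4"+secret), []byte("20130524"))
	dateRegionKey := hmacSHA256(dateKey, []byte("us-east-1"))
	dateRegionServiceKey := hmacSHA256(dateRegionKey, []byte("s3"))
	signingKey := hmacSHA256(dateRegionServiceKey, []byte("aws4_request"))
	fmt.Println(hex.EncodeToString(signingKey))
}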

View File

@@ -1,235 +0,0 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"bufio"
"bytes"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"hash"
"hash/crc32"
"hash/crc64"
"io"
"math/bits"
"strconv"
"strings"
)
var (
trailerDelim = []byte{'\n', '\r', '\n'}
errMalformedEncoding = errors.New("malformed chunk encoding")
)
type UnsignedChunkReader struct {
reader *bufio.Reader
checksumType checksumType
expectedChecksum string
hasher hash.Hash
stash []byte
chunkCounter int
offset int
}
func NewUnsignedChunkReader(r io.Reader, ct checksumType) (*UnsignedChunkReader, error) {
hasher, err := getHasher(ct)
if err != nil {
return nil, err
}
return &UnsignedChunkReader{
reader: bufio.NewReader(r),
checksumType: ct,
stash: make([]byte, 0),
hasher: hasher,
chunkCounter: 1,
}, nil
}
func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
// First read any stashed data
if len(ucr.stash) != 0 {
n := copy(p, ucr.stash)
ucr.offset += n
if n < len(ucr.stash) {
ucr.stash = ucr.stash[n:]
ucr.offset = 0
return n, nil
}
}
for {
// Read the chunk size
chunkSize, err := ucr.extractChunkSize()
if err != nil {
return 0, err
}
if chunkSize == 0 {
// Stop parsing payloads once the zero-sized terminal chunk is reached
break
}
rdr := io.TeeReader(ucr.reader, ucr.hasher)
payload := make([]byte, chunkSize)
// Read the full chunk payload (hashed via the TeeReader)
_, err = io.ReadFull(rdr, payload)
if err != nil {
return 0, err
}
// Skip the trailing "\r\n"
if err := ucr.readAndSkip('\r', '\n'); err != nil {
return 0, err
}
// Copy the payload into the io.Reader buffer
n := copy(p[ucr.offset:], payload)
ucr.offset += n
ucr.chunkCounter++
if int64(n) < chunkSize {
// stash the remaining data
ucr.stash = payload[n:]
dataRead := ucr.offset
ucr.offset = 0
return dataRead, nil
}
}
// Read and validate trailers
if err := ucr.readTrailer(); err != nil {
return 0, err
}
return ucr.offset, io.EOF
}
// Reads and validates the bytes provided from the underlying io.Reader
func (ucr *UnsignedChunkReader) readAndSkip(data ...byte) error {
for _, d := range data {
b, err := ucr.reader.ReadByte()
if err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if b != d {
return errMalformedEncoding
}
}
return nil
}
// Extracts the chunk size from the payload
func (ucr *UnsignedChunkReader) extractChunkSize() (int64, error) {
line, err := ucr.reader.ReadString('\n')
if err != nil {
return 0, errMalformedEncoding
}
line = strings.TrimSpace(line)
chunkSize, err := strconv.ParseInt(line, 16, 64)
if err != nil {
return 0, errMalformedEncoding
}
return chunkSize, nil
}
// Reads and validates the trailer at the end
func (ucr *UnsignedChunkReader) readTrailer() error {
var trailerBuffer bytes.Buffer
for {
v, err := ucr.reader.ReadByte()
if err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if v != '\r' {
trailerBuffer.WriteByte(v)
continue
}
var tmp [3]byte
_, err = io.ReadFull(ucr.reader, tmp[:])
if err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if !bytes.Equal(tmp[:], trailerDelim) {
return errMalformedEncoding
}
break
}
// Parse the trailer
trailerHeader := trailerBuffer.String()
trailerHeader = strings.TrimSpace(trailerHeader)
trailerHeaderParts := strings.Split(trailerHeader, ":")
if len(trailerHeaderParts) != 2 {
return errMalformedEncoding
}
if trailerHeaderParts[0] != string(ucr.checksumType) {
//TODO: handle the error
return errMalformedEncoding
}
ucr.expectedChecksum = trailerHeaderParts[1]
// Validate checksum
return ucr.validateChecksum()
}
// Validates the trailing checksum sent at the end
func (ucr *UnsignedChunkReader) validateChecksum() error {
csum := ucr.hasher.Sum(nil)
checksum := base64.StdEncoding.EncodeToString(csum)
if checksum != ucr.expectedChecksum {
return fmt.Errorf("actual checksum: %v, expected checksum: %v", checksum, ucr.expectedChecksum)
}
return nil
}
// Returns the hash calculator based on the hash type provided
func getHasher(ct checksumType) (hash.Hash, error) {
switch ct {
case checksumTypeCrc32:
return crc32.NewIEEE(), nil
case checksumTypeCrc32c:
return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil
case checksumTypeCrc64nvme:
table := crc64.MakeTable(bits.Reverse64(0xad93d23594c93659))
return crc64.New(table), nil
case checksumTypeSha1:
return sha1.New(), nil
case checksumTypeSha256:
return sha256.New(), nil
default:
return nil, errors.New("unsupported checksum type")
}
}
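A sketch of the STREAMING-UNSIGNED-PAYLOAD-TRAILER body this reader parses: hex size, CRLF, payload, CRLF, repeated; then a zero-sized chunk followed by a trailing "x-amz-checksum-<algo>:<base64>" line. Values are illustrative:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	data := "hello world"
	sum := make([]byte, 4)
	binary.BigEndian.PutUint32(sum, crc32.ChecksumIEEE([]byte(data)))
	checksum := base64.StdEncoding.EncodeToString(sum)

	// zero-sized chunk ends the payload; the trailer carries the checksum
	body := fmt.Sprintf("%x\r\n%s\r\n0\r\nx-amz-checksum-crc32:%s\r\n\r\n",
		len(data), data, checksum)
	fmt.Printf("%q\n", body)
}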

View File

@@ -16,7 +16,6 @@ package utils
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
"io"
@@ -40,10 +39,6 @@ var (
bucketNameIpRegexp = regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`)
)
const (
upperhex = "0123456789ABCDEF"
)
func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
metadata = make(map[string]string)
headers.DisableNormalizing()
@@ -69,9 +64,7 @@ func createHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength
body = bytes.NewReader(req.Body())
}
escapedURI := escapeOriginalURI(ctx)
httpReq, err := http.NewRequest(string(req.Header.Method()), escapedURI, body)
httpReq, err := http.NewRequest(string(req.Header.Method()), string(ctx.Context().RequestURI()), body)
if err != nil {
return nil, errors.New("error in creating an http request")
}
@@ -179,15 +172,9 @@ func ParseUint(str string) (int32, error) {
if str == "" {
return 1000, nil
}
num, err := strconv.ParseInt(str, 10, 32)
num, err := strconv.ParseUint(str, 10, 16)
if err != nil {
return 1000, fmt.Errorf("invalid int: %w", err)
}
if num < 0 {
return 1000, fmt.Errorf("negative uint: %v", num)
}
if num > 1000 {
num = 1000
return 1000, s3err.GetAPIError(s3err.ErrInvalidMaxKeys)
}
return int32(num), nil
}
@@ -203,13 +190,6 @@ func SetResponseHeaders(ctx *fiber.Ctx, headers []CustomHeader) {
}
}
// Streams the response body in chunks
func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser, bodysize int) {
// SetBodyStream will call Close() on the reader when the stream is done
// since rdr is a ReadCloser
ctx.Context().SetBodyStream(rdr, bodysize)
}
func IsValidBucketName(bucket string) bool {
if len(bucket) < 3 || len(bucket) > 63 {
return false
@@ -268,53 +248,35 @@ func ParseDeleteObjects(objs []types.ObjectIdentifier) (result []string) {
return
}
func FilterObjectAttributes(attrs map[s3response.ObjectAttributes]struct{}, output s3response.GetObjectAttributesResponse) s3response.GetObjectAttributesResponse {
// These properties shouldn't appear in the final response body
output.LastModified = nil
output.VersionId = nil
output.DeleteMarker = nil
if _, ok := attrs[s3response.ObjectAttributesEtag]; !ok {
func FilterObjectAttributes(attrs map[types.ObjectAttributes]struct{}, output s3response.GetObjectAttributesResult) s3response.GetObjectAttributesResult {
if _, ok := attrs[types.ObjectAttributesEtag]; !ok {
output.ETag = nil
}
if _, ok := attrs[s3response.ObjectAttributesObjectParts]; !ok {
if _, ok := attrs[types.ObjectAttributesObjectParts]; !ok {
output.ObjectParts = nil
}
if _, ok := attrs[s3response.ObjectAttributesObjectSize]; !ok {
if _, ok := attrs[types.ObjectAttributesObjectSize]; !ok {
output.ObjectSize = nil
}
if _, ok := attrs[s3response.ObjectAttributesStorageClass]; !ok {
if _, ok := attrs[types.ObjectAttributesStorageClass]; !ok {
output.StorageClass = ""
}
if _, ok := attrs[s3response.ObjectAttributesChecksum]; !ok {
output.Checksum = nil
}
return output
}
func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]struct{}, error) {
attrs := map[s3response.ObjectAttributes]struct{}{}
var err error
func ParseObjectAttributes(ctx *fiber.Ctx) map[types.ObjectAttributes]struct{} {
attrs := map[types.ObjectAttributes]struct{}{}
ctx.Request().Header.VisitAll(func(key, value []byte) {
if string(key) == "X-Amz-Object-Attributes" {
oattrs := strings.Split(string(value), ",")
for _, a := range oattrs {
attr := s3response.ObjectAttributes(a)
if !attr.IsValid() {
err = s3err.GetAPIError(s3err.ErrInvalidObjectAttributes)
break
}
attrs[attr] = struct{}{}
attrs[types.ObjectAttributes(a)] = struct{}{}
}
}
})
if len(attrs) == 0 {
return nil, s3err.GetAPIError(s3err.ErrObjectAttributesInvalidHeader)
}
return attrs, err
return attrs
}
type objLockCfg struct {
@@ -377,257 +339,3 @@ func IsValidOwnership(val types.ObjectOwnership) bool {
return false
}
}
func escapeOriginalURI(ctx *fiber.Ctx) string {
path := ctx.Path()
// Escape the URI original path
escapedURI := escapePath(path)
// Add the URI query params
query := string(ctx.Request().URI().QueryArgs().QueryString())
if query != "" {
escapedURI = escapedURI + "?" + query
}
return escapedURI
}
// Escapes the path string
// Most of the parts copied from std url
func escapePath(s string) string {
hexCount := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
hexCount++
}
}
if hexCount == 0 {
return s
}
var buf [64]byte
var t []byte
required := len(s) + 2*hexCount
if required <= len(buf) {
t = buf[:required]
} else {
t = make([]byte, required)
}
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case shouldEscape(c):
t[j] = '%'
t[j+1] = upperhex[c>>4]
t[j+2] = upperhex[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
// Checks if the character needs to be escaped
func shouldEscape(c byte) bool {
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '_', '.', '~', '/':
return false
}
return true
}
func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, map[types.ChecksumAlgorithm]string, error) {
sdkAlgorithm := types.ChecksumAlgorithm(strings.ToUpper(ctx.Get("X-Amz-Sdk-Checksum-Algorithm")))
err := IsChecksumAlgorithmValid(sdkAlgorithm)
if err != nil {
return "", nil, err
}
checksums := map[types.ChecksumAlgorithm]string{}
var hdrErr error
// Parse and validate checksum headers
ctx.Request().Header.VisitAll(func(key, value []byte) {
// Skip `X-Amz-Checksum-Type` as it's a special header
if hdrErr != nil || !strings.HasPrefix(string(key), "X-Amz-Checksum-") || string(key) == "X-Amz-Checksum-Type" {
return
}
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(key), "X-Amz-Checksum-")))
err := IsChecksumAlgorithmValid(algo)
if err != nil {
hdrErr = s3err.GetAPIError(s3err.ErrInvalidChecksumHeader)
return
}
checksums[algo] = string(value)
})
if hdrErr != nil {
return sdkAlgorithm, nil, hdrErr
}
headerCtr := 0
for al, val := range checksums {
if val != "" && !IsValidChecksum(val, al) {
return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(al))))
}
// If any other checksum value is provided,
// rather than x-amz-sdk-checksum-algorithm
if sdkAlgorithm != "" && sdkAlgorithm != al && val != "" {
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
}
if val != "" {
sdkAlgorithm = al
headerCtr++
}
if headerCtr > 1 {
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
}
}
return sdkAlgorithm, checksums, nil
}
var checksumLengths = map[types.ChecksumAlgorithm]int{
types.ChecksumAlgorithmCrc32: 4,
types.ChecksumAlgorithmCrc32c: 4,
types.ChecksumAlgorithmCrc64nvme: 8,
types.ChecksumAlgorithmSha1: 20,
types.ChecksumAlgorithmSha256: 32,
}
func IsValidChecksum(checksum string, algorithm types.ChecksumAlgorithm) bool {
decoded, err := base64.StdEncoding.DecodeString(checksum)
if err != nil {
return false
}
expectedLength, exists := checksumLengths[algorithm]
if !exists {
return false
}
return len(decoded) == expectedLength
}
func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
alg = types.ChecksumAlgorithm(strings.ToUpper(string(alg)))
if alg != "" &&
alg != types.ChecksumAlgorithmCrc32 &&
alg != types.ChecksumAlgorithmCrc32c &&
alg != types.ChecksumAlgorithmSha1 &&
alg != types.ChecksumAlgorithmSha256 &&
alg != types.ChecksumAlgorithmCrc64nvme {
return s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm)
}
return nil
}
// Validates the provided checksum type
func IsChecksumTypeValid(t types.ChecksumType) error {
if t != "" &&
t != types.ChecksumTypeComposite &&
t != types.ChecksumTypeFullObject {
return s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type")
}
return nil
}
type checksumTypeSchema map[types.ChecksumType]struct{}
type checksumSchema map[types.ChecksumAlgorithm]checksumTypeSchema
// A table defining the checksum algorithm/type support
var checksumMap checksumSchema = checksumSchema{
types.ChecksumAlgorithmCrc32: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
types.ChecksumTypeFullObject: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmCrc32c: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
types.ChecksumTypeFullObject: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmSha1: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmSha256: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmCrc64nvme: checksumTypeSchema{
types.ChecksumTypeFullObject: struct{}{},
"": struct{}{},
},
// Both could be empty
"": checksumTypeSchema{
"": struct{}{},
},
}
// Checks if checksum type and algorithm are supported together
func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType) error {
typeSchema := checksumMap[algo]
_, ok := typeSchema[t]
if !ok {
return s3err.GetChecksumSchemaMismatchErr(algo, t)
}
return nil
}
// Parses and validates the x-amz-checksum-algorithm and x-amz-checksum-type headers
func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, types.ChecksumType, error) {
algo := types.ChecksumAlgorithm(ctx.Get("x-amz-checksum-algorithm"))
if err := IsChecksumAlgorithmValid(algo); err != nil {
return "", "", err
}
chType := types.ChecksumType(ctx.Get("x-amz-checksum-type"))
if err := IsChecksumTypeValid(chType); err != nil {
return "", "", err
}
// Verify that a checksum algorithm is provided when a
// checksum type is specified
if chType != "" && algo == "" {
return algo, chType, s3err.GetAPIError(s3err.ErrChecksumTypeWithAlgo)
}
// Verify that the checksum type is supported for
// the provided checksum algorithm
if err := checkChecksumTypeAndAlgo(algo, chType); err != nil {
return algo, chType, err
}
// x-amz-checksum-type defaults to COMPOSITE
// if x-amz-checksum-algorithm is set except
// for the CRC64NVME algorithm: it defaults to FULL_OBJECT
if algo != "" && chType == "" {
if algo == types.ChecksumAlgorithmCrc64nvme {
chType = types.ChecksumTypeFullObject
} else {
chType = types.ChecksumTypeComposite
}
}
return algo, chType, nil
}
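A quick check of the checksum-length validation defined in the listing above, computing an x-amz-checksum-crc32 value the way a client would (raw big-endian CRC32 bytes, base64-encoded):

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/s3api/utils"
)

func main() {
	raw := make([]byte, 4)
	binary.BigEndian.PutUint32(raw, crc32.ChecksumIEEE([]byte("hello world")))
	encoded := base64.StdEncoding.EncodeToString(raw)

	fmt.Println(utils.IsValidChecksum(encoded, types.ChecksumAlgorithmCrc32))      // true: decodes to 4 bytes
	fmt.Println(utils.IsValidChecksum("notbase64!", types.ChecksumAlgorithmCrc32)) // false: not valid base64
}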

View File

@@ -19,12 +19,10 @@ import (
"net/http"
"reflect"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3response"
)
@@ -268,14 +266,6 @@ func TestParseUint(t *testing.T) {
want: 23,
wantErr: false,
},
{
name: "Parse-uint-greater-than-1000",
args: args{
str: "25000000",
},
want: 1000,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -293,68 +283,47 @@ func TestParseUint(t *testing.T) {
func TestFilterObjectAttributes(t *testing.T) {
type args struct {
attrs map[s3response.ObjectAttributes]struct{}
output s3response.GetObjectAttributesResponse
attrs map[types.ObjectAttributes]struct{}
output s3response.GetObjectAttributesResult
}
etag, objSize := "etag", int64(3222)
delMarker := true
tests := []struct {
name string
args args
want s3response.GetObjectAttributesResponse
want s3response.GetObjectAttributesResult
}{
{
name: "keep only ETag",
args: args{
attrs: map[s3response.ObjectAttributes]struct{}{
s3response.ObjectAttributesEtag: {},
attrs: map[types.ObjectAttributes]struct{}{
types.ObjectAttributesEtag: {},
},
output: s3response.GetObjectAttributesResponse{
output: s3response.GetObjectAttributesResult{
ObjectSize: &objSize,
ETag: &etag,
},
},
want: s3response.GetObjectAttributesResponse{ETag: &etag},
want: s3response.GetObjectAttributesResult{ETag: &etag},
},
{
name: "keep multiple props",
args: args{
attrs: map[s3response.ObjectAttributes]struct{}{
s3response.ObjectAttributesEtag: {},
s3response.ObjectAttributesObjectSize: {},
s3response.ObjectAttributesStorageClass: {},
attrs: map[types.ObjectAttributes]struct{}{
types.ObjectAttributesEtag: {},
types.ObjectAttributesObjectSize: {},
types.ObjectAttributesStorageClass: {},
},
output: s3response.GetObjectAttributesResponse{
output: s3response.GetObjectAttributesResult{
ObjectSize: &objSize,
ETag: &etag,
ObjectParts: &s3response.ObjectParts{},
VersionId: &etag,
},
},
want: s3response.GetObjectAttributesResponse{
want: s3response.GetObjectAttributesResult{
ETag: &etag,
ObjectSize: &objSize,
},
},
{
name: "make sure LastModified, DeleteMarker and VersionId are removed",
args: args{
attrs: map[s3response.ObjectAttributes]struct{}{
s3response.ObjectAttributesEtag: {},
},
output: s3response.GetObjectAttributesResponse{
ObjectSize: &objSize,
ETag: &etag,
ObjectParts: &s3response.ObjectParts{},
VersionId: &etag,
LastModified: backend.GetTimePtr(time.Now()),
DeleteMarker: &delMarker,
},
},
want: s3response.GetObjectAttributesResponse{
ETag: &etag,
VersionId: &etag,
},
},
}
@@ -413,447 +382,3 @@ func TestIsValidOwnership(t *testing.T) {
})
}
}
func Test_shouldEscape(t *testing.T) {
type args struct {
c byte
}
tests := []struct {
name string
args args
want bool
}{
{
name: "shouldn't-escape-alphanum",
args: args{
c: 'h',
},
want: false,
},
{
name: "shouldn't-escape-unreserved-char",
args: args{
c: '_',
},
want: false,
},
{
name: "shouldn't-escape-unreserved-number",
args: args{
c: '0',
},
want: false,
},
{
name: "shouldn't-escape-path-separator",
args: args{
c: '/',
},
want: false,
},
{
name: "should-escape-special-char-1",
args: args{
c: '&',
},
want: true,
},
{
name: "should-escape-special-char-2",
args: args{
c: '*',
},
want: true,
},
{
name: "should-escape-special-char-3",
args: args{
c: '(',
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := shouldEscape(tt.args.c); got != tt.want {
t.Errorf("shouldEscape() = %v, want %v", got, tt.want)
}
})
}
}
func Test_escapePath(t *testing.T) {
type args struct {
s string
}
tests := []struct {
name string
args args
want string
}{
{
name: "empty-string",
args: args{
s: "",
},
want: "",
},
{
name: "alphanum-path",
args: args{
s: "/test-bucket/test-key",
},
want: "/test-bucket/test-key",
},
{
name: "path-with-unescapable-chars",
args: args{
s: "/test~bucket/test.key",
},
want: "/test~bucket/test.key",
},
{
name: "path-with-escapable-chars",
args: args{
s: "/bucket-*(/test=key&",
},
want: "/bucket-%2A%28/test%3Dkey%26",
},
{
name: "path-with-space",
args: args{
s: "/test-bucket/my key",
},
want: "/test-bucket/my%20key",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := escapePath(tt.args.s); got != tt.want {
t.Errorf("escapePath() = %v, want %v", got, tt.want)
}
})
}
}
func TestIsChecksumAlgorithmValid(t *testing.T) {
type args struct {
alg types.ChecksumAlgorithm
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "empty",
args: args{
alg: "",
},
wantErr: false,
},
{
name: "crc32",
args: args{
alg: types.ChecksumAlgorithmCrc32,
},
wantErr: false,
},
{
name: "crc32c",
args: args{
alg: types.ChecksumAlgorithmCrc32c,
},
wantErr: false,
},
{
name: "sha1",
args: args{
alg: types.ChecksumAlgorithmSha1,
},
wantErr: false,
},
{
name: "sha256",
args: args{
alg: types.ChecksumAlgorithmSha256,
},
wantErr: false,
},
{
name: "crc64nvme",
args: args{
alg: types.ChecksumAlgorithmCrc64nvme,
},
wantErr: false,
},
{
name: "invalid",
args: args{
alg: types.ChecksumAlgorithm("invalid_checksum_algorithm"),
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := IsChecksumAlgorithmValid(tt.args.alg); (err != nil) != tt.wantErr {
t.Errorf("IsChecksumAlgorithmValid() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestIsValidChecksum(t *testing.T) {
type args struct {
checksum string
algorithm types.ChecksumAlgorithm
}
tests := []struct {
name string
args args
want bool
}{
{
name: "invalid-base64",
args: args{
checksum: "invalid_base64_string",
algorithm: types.ChecksumAlgorithmCrc32,
},
want: false,
},
{
name: "invalid-crc32",
args: args{
checksum: "YXNkZmFzZGZhc2Rm",
algorithm: types.ChecksumAlgorithmCrc32,
},
want: false,
},
{
name: "valid-crc32",
args: args{
checksum: "ww2FVQ==",
algorithm: types.ChecksumAlgorithmCrc32,
},
want: true,
},
{
name: "invalid-crc32c",
args: args{
checksum: "Zmdoa2doZmtnZmhr",
algorithm: types.ChecksumAlgorithmCrc32c,
},
want: false,
},
{
name: "valid-crc32c",
args: args{
checksum: "DOsb4w==",
algorithm: types.ChecksumAlgorithmCrc32c,
},
want: true,
},
{
name: "invalid-sha1",
args: args{
checksum: "YXNkZmFzZGZhc2RmYXNkZnNhZGZzYWRm",
algorithm: types.ChecksumAlgorithmSha1,
},
want: false,
},
{
name: "valid-sha1",
args: args{
checksum: "L4q6V59Zcwn12wyLIytoE2c1ugk=",
algorithm: types.ChecksumAlgorithmSha1,
},
want: true,
},
{
name: "invalid-sha256",
args: args{
checksum: "Zmdoa2doZmtnZmhrYXNkZmFzZGZhc2RmZHNmYXNkZg==",
algorithm: types.ChecksumAlgorithmSha256,
},
want: false,
},
{
name: "valid-sha256",
args: args{
checksum: "d1SPCd/kZ2rAzbbLUC0n/bEaOSx70FNbXbIqoIxKuPY=",
algorithm: types.ChecksumAlgorithmSha256,
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := IsValidChecksum(tt.args.checksum, tt.args.algorithm); got != tt.want {
t.Errorf("IsValidChecksum() = %v, want %v", got, tt.want)
}
})
}
}
func TestIsChecksumTypeValid(t *testing.T) {
type args struct {
t types.ChecksumType
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "valid_FULL_OBJECT",
args: args{
t: types.ChecksumTypeFullObject,
},
wantErr: false,
},
{
name: "valid_COMPOSITE",
args: args{
t: types.ChecksumTypeComposite,
},
wantErr: false,
},
{
name: "invalid",
args: args{
t: types.ChecksumType("invalid_checksum_type"),
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := IsChecksumTypeValid(tt.args.t); (err != nil) != tt.wantErr {
t.Errorf("IsChecksumTypeValid() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_checkChecksumTypeAndAlgo(t *testing.T) {
type args struct {
algo types.ChecksumAlgorithm
t types.ChecksumType
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "full_object-crc32",
args: args{
algo: types.ChecksumAlgorithmCrc32,
t: types.ChecksumTypeFullObject,
},
wantErr: false,
},
{
name: "full_object-crc32c",
args: args{
algo: types.ChecksumAlgorithmCrc32c,
t: types.ChecksumTypeFullObject,
},
wantErr: false,
},
{
name: "full_object-sha1",
args: args{
algo: types.ChecksumAlgorithmSha1,
t: types.ChecksumTypeFullObject,
},
wantErr: true,
},
{
name: "full_object-sha256",
args: args{
algo: types.ChecksumAlgorithmSha1,
t: types.ChecksumTypeFullObject,
},
wantErr: true,
},
{
name: "full_object-crc64nvme",
args: args{
algo: types.ChecksumAlgorithmCrc64nvme,
t: types.ChecksumTypeFullObject,
},
wantErr: false,
},
{
name: "full_object-crc32",
args: args{
algo: types.ChecksumAlgorithmCrc32,
t: types.ChecksumTypeFullObject,
},
wantErr: false,
},
{
name: "composite-crc32",
args: args{
algo: types.ChecksumAlgorithmCrc32,
t: types.ChecksumTypeComposite,
},
wantErr: false,
},
{
name: "composite-crc32c",
args: args{
algo: types.ChecksumAlgorithmCrc32c,
t: types.ChecksumTypeComposite,
},
wantErr: false,
},
{
name: "composite-sha1",
args: args{
algo: types.ChecksumAlgorithmSha1,
t: types.ChecksumTypeComposite,
},
wantErr: false,
},
{
name: "composite-sha256",
args: args{
algo: types.ChecksumAlgorithmSha256,
t: types.ChecksumTypeComposite,
},
wantErr: false,
},
{
name: "composite-crc64nvme",
args: args{
algo: types.ChecksumAlgorithmCrc64nvme,
t: types.ChecksumTypeComposite,
},
wantErr: true,
},
{
name: "composite-empty",
args: args{
t: types.ChecksumTypeComposite,
},
wantErr: true,
},
{
name: "full_object-empty",
args: args{
t: types.ChecksumTypeFullObject,
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := checkChecksumTypeAndAlgo(tt.args.algo, tt.args.t); (err != nil) != tt.wantErr {
t.Errorf("checkChecksumTypeAndAlgo() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}

View File

@@ -17,11 +17,7 @@ package s3err
import (
"bytes"
"encoding/xml"
"fmt"
"net/http"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
// APIError structure
@@ -61,7 +57,6 @@ const (
ErrAccessDenied
ErrMethodNotAllowed
ErrBucketNotEmpty
ErrVersionedBucketNotEmpty
ErrBucketAlreadyExists
ErrBucketAlreadyOwnedByYou
ErrNoSuchBucket
@@ -70,20 +65,14 @@ const (
ErrInvalidBucketName
ErrInvalidDigest
ErrInvalidMaxKeys
ErrInvalidMaxBuckets
ErrInvalidMaxUploads
ErrInvalidMaxParts
ErrInvalidPartNumberMarker
ErrInvalidObjectAttributes
ErrInvalidPart
ErrEmptyParts
ErrInvalidPartNumber
ErrInvalidPartOrder
ErrInvalidCompleteMpPartNumber
ErrInternalError
ErrInvalidCopyDest
ErrInvalidCopySource
ErrInvalidCopySourceRange
ErrInvalidTag
ErrAuthHeaderEmpty
ErrSignatureVersionNotSupported
@@ -113,7 +102,6 @@ const (
ErrSignatureTerminationStr
ErrSignatureIncorrService
ErrContentSHA256Mismatch
ErrMissingDecodedContentLength
ErrInvalidAccessKeyID
ErrRequestNotReadyYet
ErrMissingDateHeader
@@ -134,7 +122,6 @@ const (
ErrNoSuchBucketPolicy
ErrBucketTaggingNotFound
ErrObjectLockInvalidHeaders
ErrObjectAttributesInvalidHeader
ErrRequestTimeTooSkewed
ErrInvalidBucketAclWithObjectOwnership
ErrBothCannedAndHeaderGrants
@@ -147,28 +134,12 @@ const (
ErrKeyTooLong
ErrInvalidVersionId
ErrNoSuchVersion
ErrSuspendedVersioningNotAllowed
ErrMultipleChecksumHeaders
ErrInvalidChecksumAlgorithm
ErrInvalidChecksumPart
ErrChecksumTypeWithAlgo
ErrInvalidChecksumHeader
// Non-AWS errors
ErrExistingObjectIsDirectory
ErrObjectParentIsFile
ErrDirectoryObjectContainsData
ErrDirectoryNotEmpty
ErrQuotaExceeded
ErrVersioningNotConfigured
// Admin api errors
ErrAdminAccessDenied
ErrAdminUserNotFound
ErrAdminUserExists
ErrAdminInvalidUserRole
ErrAdminMissingUserAcess
ErrAdminMethodNotSupported
)
var errorCodeResponse = map[ErrorCode]APIError{
@@ -187,11 +158,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The bucket you tried to delete is not empty.",
HTTPStatusCode: http.StatusConflict,
},
ErrVersionedBucketNotEmpty: {
Code: "BucketNotEmpty",
Description: "The bucket you tried to delete is not empty. You must delete all versions in the bucket.",
HTTPStatusCode: http.StatusConflict,
},
ErrBucketAlreadyExists: {
Code: "BucketAlreadyExists",
Description: "The requested bucket name is not available. The bucket name can not be an existing collection, and the bucket namespace is shared by all users of the system. Please select a different name and try again.",
@@ -212,11 +178,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The Content-Md5 you specified is not valid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidMaxBuckets: {
Code: "InvalidArgument",
Description: "Argument max-buckets must be an integer between 1 and 10000.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidMaxUploads: {
Code: "InvalidArgument",
Description: "Argument max-uploads must be an integer between 0 and 2147483647.",
@@ -237,11 +198,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Argument partNumberMarker must be an integer.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidObjectAttributes: {
Code: "InvalidArgument",
Description: "Invalid attribute name specified.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoSuchBucket: {
Code: "NoSuchBucket",
Description: "The specified bucket does not exist.",
@@ -267,26 +223,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEmptyParts: {
Code: "InvalidRequest",
Description: "You must specify at least one part",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidPartNumber: {
Code: "InvalidArgument",
Description: "Part number must be an integer between 1 and 10000, inclusive.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidPartOrder: {
Code: "InvalidPartOrder",
Description: "The list of parts was not in ascending order. Parts must be ordered by part number.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCompleteMpPartNumber: {
Code: "InvalidArgument",
Description: "PartNumber must be >= 1",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCopyDest: {
Code: "InvalidRequest",
Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.",
@@ -297,11 +238,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCopySourceRange: {
Code: "InvalidArgument",
Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTag: {
Code: "InvalidArgument",
Description: "The Tag value you have provided is invalid",
@@ -419,7 +355,7 @@ var errorCodeResponse = map[ErrorCode]APIError{
},
ErrInvalidAccessKeyID: {
Code: "InvalidAccessKeyId",
Description: "The AWS Access Key Id you provided does not exist in our records.",
Description: "The access key ID you provided does not exist in our records.",
HTTPStatusCode: http.StatusForbidden,
},
ErrRequestNotReadyYet: {
@@ -452,11 +388,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The provided 'x-amz-content-sha256' header does not match what was computed.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingDecodedContentLength: {
Code: "MissingContentLength",
Description: "You must provide the Content-Length HTTP header.",
HTTPStatusCode: http.StatusLengthRequired,
},
ErrMissingDateHeader: {
Code: "AccessDenied",
Description: "AWS authentication requires a valid Date or x-amz-date header.",
@@ -504,12 +435,12 @@ var errorCodeResponse = map[ErrorCode]APIError{
},
ErrNoSuchObjectLockConfiguration: {
Code: "NoSuchObjectLockConfiguration",
Description: "The specified object does not have a ObjectLock configuration.",
Description: "The specified object does not have an ObjectLock configuration.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidBucketObjectLockConfiguration: {
Code: "InvalidRequest",
Description: "Bucket is missing Object Lock Configuration.",
Description: "Bucket is missing ObjectLockConfiguration.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectLockConfigurationNotAllowed: {
@@ -547,11 +478,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectAttributesInvalidHeader: {
Code: "InvalidRequest",
Description: "The x-amz-object-attributes header specifying the attributes to be retrieved is either missing or empty",
HTTPStatusCode: http.StatusBadRequest,
},
ErrRequestTimeTooSkewed: {
Code: "RequestTimeTooSkewed",
Description: "The difference between the request time and the server's time is too large.",
@@ -593,9 +519,8 @@ var errorCodeResponse = map[ErrorCode]APIError{
HTTPStatusCode: http.StatusNotFound,
},
ErrInvalidMetadataDirective: {
Code: "InvalidArgument",
Description: "Unknown metadata directive.",
HTTPStatusCode: http.StatusBadRequest,
Code: "InvalidArgument",
Description: "Unknown metadata directive.",
},
ErrInvalidVersionId: {
Code: "InvalidArgument",
@@ -612,36 +537,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The specified version does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrSuspendedVersioningNotAllowed: {
Code: "InvalidBucketState",
Description: "An Object Lock configuration is present on this bucket, so the versioning state cannot be changed.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMultipleChecksumHeaders: {
Code: "InvalidRequest",
Description: "Expecting a single x-amz-checksum- header. Multiple checksum Types are not allowed.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidChecksumAlgorithm: {
Code: "InvalidRequest",
Description: "Checksum algorithm provided is unsupported. Please try again with any of the valid types: [CRC32, CRC32C, SHA1, SHA256]",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidChecksumPart: {
Code: "InvalidArgument",
Description: "Invalid Base64 or multiple checksums present in request",
HTTPStatusCode: http.StatusBadRequest,
},
ErrChecksumTypeWithAlgo: {
Code: "InvalidRequest",
Description: "The x-amz-checksum-type header can only be used with the x-amz-checksum-algorithm header.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidChecksumHeader: {
Code: "InvalidRequest",
Description: "The algorithm type you specified in x-amz-checksum- header is invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
// non aws errors
ErrExistingObjectIsDirectory: {
@@ -659,53 +554,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Directory object contains data payload.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrDirectoryNotEmpty: {
Code: "ErrDirectoryNotEmpty",
Description: "Directory object not empty.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrQuotaExceeded: {
Code: "QuotaExceeded",
Description: "Your request was denied due to quota exceeded.",
HTTPStatusCode: http.StatusForbidden,
},
ErrVersioningNotConfigured: {
Code: "VersioningNotConfigured",
Description: "Versioning has not been configured for the gateway.",
HTTPStatusCode: http.StatusNotImplemented,
},
// Admin api errors
ErrAdminAccessDenied: {
Code: "XAdminAccessDenied",
Description: "Only admin users have access to this resource.",
HTTPStatusCode: http.StatusForbidden,
},
ErrAdminUserNotFound: {
Code: "XAdminUserNotFound",
Description: "No user exists with the provided access key ID.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminUserExists: {
Code: "XAdminUserExists",
Description: "A user with the provided access key ID already exists.",
HTTPStatusCode: http.StatusConflict,
},
ErrAdminInvalidUserRole: {
Code: "XAdminInvalidArgument",
Description: "User role has to be one of the following: 'user', 'admin', 'userplus'.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminMissingUserAcess: {
Code: "XAdminInvalidArgument",
Description: "User access key ID is missing.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminMethodNotSupported: {
Code: "XAdminMethodNotSupported",
Description: "The method is not supported in single root user mode.",
HTTPStatusCode: http.StatusNotImplemented,
},
}
// GetAPIError provides API Error for input API error code.
@@ -736,72 +589,3 @@ func encodeResponse(response interface{}) []byte {
e.Encode(response)
return bytesBuffer.Bytes()
}
// Returns invalid checksum error with the provided header in the error description
func GetInvalidChecksumHeaderErr(header string) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("Value for %v header is invalid.", header),
HTTPStatusCode: http.StatusBadRequest,
}
}
// Returns checksum type mismatch APIError
func GetChecksumTypeMismatchErr(expected, actual types.ChecksumAlgorithm) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("Checksum Type mismatch occurred, expected checksum Type: %v, actual checksum Type: %v", expected, actual),
HTTPStatusCode: http.StatusBadRequest,
}
}
// Returns incorrect checksum APIError
func GetChecksumBadDigestErr(algo types.ChecksumAlgorithm) APIError {
return APIError{
Code: "BadDigest",
Description: fmt.Sprintf("The %v you specified did not match the calculated checksum.", strings.ToLower(string(algo))),
HTTPStatusCode: http.StatusBadRequest,
}
}
// Returns checksum type mismatch error with checksum algorithm
func GetChecksumSchemaMismatchErr(algo types.ChecksumAlgorithm, t types.ChecksumType) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("The %v checksum type cannot be used with the %v checksum algorithm.", algo, strings.ToLower(string(t))),
HTTPStatusCode: http.StatusBadRequest,
}
}
// Returns checksum type mismatch error for multipart uploads
func GetChecksumTypeMismatchOnMpErr(t types.ChecksumType) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("The upload was created using the %v checksum mode. The complete request must use the same checksum mode.", t),
HTTPStatusCode: http.StatusBadRequest,
}
}
func GetIncorrectMpObjectSizeErr(expected, actual int64) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("The provided 'x-amz-mp-object-size' header value %v does not match what was computed: %v", expected, actual),
HTTPStatusCode: http.StatusBadRequest,
}
}
func GetInvalidMpObjectSizeErr(val int64) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("Value for x-amz-mp-object-size header is less than zero: '%v'", val),
HTTPStatusCode: http.StatusBadRequest,
}
}
func CreateExceedingRangeErr(objSize int64) APIError {
return APIError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Range specified is not valid for source object of size: %d", objSize),
HTTPStatusCode: http.StatusBadRequest,
}
}
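For context, a minimal in-package sketch of how these values are typically surfaced, assuming GetAPIError is a plain lookup into errorCodeResponse; the handler function and the XML error body shape here are illustrative, not the gateway's actual wiring:

func writeAPIErrorResponse(w http.ResponseWriter, code ErrorCode) {
	apiErr := GetAPIError(code)
	// Illustrative error body; the real response shape may differ.
	payload := struct {
		XMLName xml.Name `xml:"Error"`
		Code    string
		Message string
	}{Code: apiErr.Code, Message: apiErr.Description}
	w.WriteHeader(apiErr.HTTPStatusCode)
	_, _ = w.Write(encodeResponse(payload))
}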

View File

@@ -19,37 +19,21 @@ import (
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
)
const (
iso8601TimeFormat = "2006-01-02T15:04:05.000Z"
iso8601TimeFormatExtended = "2006-01-02T15:04:05.000000Z"
iso8601TimeFormatWithTZ = "2006-01-02T15:04:05-0700"
)
const RFC3339TimeFormat = "2006-01-02T15:04:05.999Z"
type PutObjectOutput struct {
ETag string
VersionID string
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumCRC64NVME *string
ChecksumType types.ChecksumType
ETag string
VersionID string
}
// Part describes part metadata.
type Part struct {
PartNumber int
LastModified time.Time
ETag string
Size int64
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumCRC64NVME *string
PartNumber int
LastModified time.Time
ETag string
Size int64
}
func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
@@ -61,7 +45,7 @@ func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
Alias: (*Alias)(&p),
}
aux.LastModified = p.LastModified.UTC().Format(iso8601TimeFormat)
aux.LastModified = p.LastModified.UTC().Format(RFC3339TimeFormat)
return e.EncodeElement(aux, start)
}
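The Alias conversion in this method is the standard Go trick for overriding one field's XML encoding without infinite recursion: converting to a defined alias type keeps the fields but drops the methods, so EncodeElement does not re-enter MarshalXML. A self-contained sketch of the same pattern, with an illustrative type:

package main

import (
	"encoding/xml"
	"os"
	"time"
)

type Record struct {
	Name    string
	Created time.Time
}

func (r Record) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type Alias Record // same fields, no methods, so no recursion
	aux := struct {
		Created string // shadows the embedded time.Time field in the output
		*Alias
	}{Alias: (*Alias)(&r)}
	aux.Created = r.Created.UTC().Format("2006-01-02T15:04:05.999Z")
	return e.EncodeElement(aux, start)
}

func main() {
	_ = xml.NewEncoder(os.Stdout).Encode(Record{Name: "a", Created: time.Now()})
}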
@@ -70,11 +54,9 @@ func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type ListPartsResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`
Bucket string
Key string
UploadID string `xml:"UploadId"`
ChecksumAlgorithm types.ChecksumAlgorithm
ChecksumType types.ChecksumType
Bucket string
Key string
UploadID string `xml:"UploadId"`
Initiator Initiator
Owner Owner
@@ -91,35 +73,30 @@ type ListPartsResult struct {
Parts []Part `xml:"Part"`
}
type ObjectAttributes string
const (
ObjectAttributesEtag ObjectAttributes = "ETag"
ObjectAttributesChecksum ObjectAttributes = "Checksum"
ObjectAttributesObjectParts ObjectAttributes = "ObjectParts"
ObjectAttributesStorageClass ObjectAttributes = "StorageClass"
ObjectAttributesObjectSize ObjectAttributes = "ObjectSize"
)
func (o ObjectAttributes) IsValid() bool {
return o == ObjectAttributesChecksum ||
o == ObjectAttributesEtag ||
o == ObjectAttributesObjectParts ||
o == ObjectAttributesObjectSize ||
o == ObjectAttributesStorageClass
type GetObjectAttributesResult struct {
ETag *string
LastModified *time.Time
ObjectSize *int64
StorageClass types.StorageClass
VersionId *string
ObjectParts *ObjectParts
}
type GetObjectAttributesResponse struct {
ETag *string
ObjectSize *int64
StorageClass types.StorageClass `xml:",omitempty"`
ObjectParts *ObjectParts
Checksum *types.Checksum
func (r GetObjectAttributesResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias GetObjectAttributesResult
aux := &struct {
LastModified *string `xml:"LastModified"`
*Alias
}{
Alias: (*Alias)(&r),
}
// Not included in the response body
VersionId *string
LastModified *time.Time
DeleteMarker *bool
if r.LastModified != nil {
formattedTime := r.LastModified.UTC().Format(RFC3339TimeFormat)
aux.LastModified = &formattedTime
}
return e.EncodeElement(aux, start)
}
type ObjectParts struct {
@@ -183,15 +160,13 @@ type ListObjectsV2Result struct {
}
type Object struct {
ChecksumAlgorithm []types.ChecksumAlgorithm
ChecksumType types.ChecksumType
ETag *string
Key *string
LastModified *time.Time
Owner *types.Owner
RestoreStatus *types.RestoreStatus
Size *int64
StorageClass types.ObjectStorageClass
ETag *string
Key *string
LastModified *time.Time
Owner *types.Owner
RestoreStatus *types.RestoreStatus
Size *int64
StorageClass types.ObjectStorageClass
}
func (o Object) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
@@ -204,7 +179,7 @@ func (o Object) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
}
if o.LastModified != nil {
formattedTime := o.LastModified.UTC().Format(iso8601TimeFormat)
formattedTime := o.LastModified.UTC().Format(RFC3339TimeFormat)
aux.LastModified = &formattedTime
}
@@ -213,14 +188,12 @@ func (o Object) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// Upload describes in progress multipart upload
type Upload struct {
Key string
UploadID string `xml:"UploadId"`
Initiator Initiator
Owner Owner
StorageClass types.StorageClass
Initiated time.Time
ChecksumAlgorithm types.ChecksumAlgorithm
ChecksumType types.ChecksumType
Key string
UploadID string `xml:"UploadId"`
Initiator Initiator
Owner Owner
StorageClass types.StorageClass
Initiated time.Time
}
func (u Upload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
@@ -232,7 +205,7 @@ func (u Upload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
Alias: (*Alias)(&u),
}
aux.Initiated = u.Initiated.UTC().Format(iso8601TimeFormat)
aux.Initiated = u.Initiated.UTC().Format(RFC3339TimeFormat)
return e.EncodeElement(aux, start)
}
@@ -299,20 +272,10 @@ type Bucket struct {
Owner string `json:"owner"`
}
type ListBucketsInput struct {
Owner string
IsAdmin bool
ContinuationToken string
Prefix string
MaxBuckets int32
}
type ListAllMyBucketsResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult" json:"-"`
Owner CanonicalUser
Buckets ListAllMyBucketsList
ContinuationToken string `xml:"ContinuationToken,omitempty"`
Prefix string `xml:"Prefix,omitempty"`
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult" json:"-"`
Owner CanonicalUser
Buckets ListAllMyBucketsList
}
type ListAllMyBucketsEntry struct {
@@ -329,7 +292,7 @@ func (r ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement
Alias: (*Alias)(&r),
}
aux.CreationDate = r.CreationDate.UTC().Format(iso8601TimeFormat)
aux.CreationDate = r.CreationDate.UTC().Format(RFC3339TimeFormat)
return e.EncodeElement(aux, start)
}
@@ -350,20 +313,6 @@ type CopyObjectResult struct {
CopySourceVersionId string `xml:"-"`
}
type CopyPartResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyPartResult" json:"-"`
LastModified time.Time
ETag *string
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumCRC64NVME *string
// not included in the body
CopySourceVersionId string `xml:"-"`
}
func (r CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias CopyObjectResult
aux := &struct {
@@ -373,7 +322,7 @@ func (r CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) err
Alias: (*Alias)(&r),
}
aux.LastModified = r.LastModified.UTC().Format(iso8601TimeFormat)
aux.LastModified = r.LastModified.UTC().Format(RFC3339TimeFormat)
return e.EncodeElement(aux, start)
}
@@ -431,82 +380,7 @@ type ListVersionsResult struct {
NextKeyMarker *string
NextVersionIdMarker *string
Prefix *string
RequestCharged types.RequestCharged
VersionIdMarker *string
Versions []types.ObjectVersion `xml:"Version"`
}
type GetBucketVersioningOutput struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersioningConfiguration" json:"-"`
MFADelete *types.MFADeleteStatus
Status *types.BucketVersioningStatus
}
type PutObjectRetentionInput struct {
XMLName xml.Name `xml:"Retention"`
Mode types.ObjectLockRetentionMode
RetainUntilDate AmzDate
}
type AmzDate struct {
time.Time
}
// Parses the date from xml string and validates for predefined date formats
func (d *AmzDate) UnmarshalXML(e *xml.Decoder, startElement xml.StartElement) error {
var dateStr string
err := e.DecodeElement(&dateStr, &startElement)
if err != nil {
return err
}
retDate, err := d.ISO8601Parse(dateStr)
if err != nil {
return s3err.GetAPIError(s3err.ErrInvalidRequest)
}
*d = AmzDate{retDate}
return nil
}
// Encodes expiration date if it is non-zero
// Encodes empty string if it's zero
func (d AmzDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if d.IsZero() {
return nil
}
return e.EncodeElement(d.UTC().Format(iso8601TimeFormat), startElement)
}
// Parses ISO8601 date string to time.Time by
// validating different time layouts
func (AmzDate) ISO8601Parse(date string) (t time.Time, err error) {
for _, layout := range []string{
iso8601TimeFormat,
iso8601TimeFormatExtended,
iso8601TimeFormatWithTZ,
time.RFC3339,
} {
t, err = time.Parse(layout, date)
if err == nil {
return t, nil
}
}
return t, err
}
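ISO8601Parse returns on the first layout that parses, so callers get a single time.Time regardless of which of the four accepted shapes a client sent. A short in-package usage sketch (parseRetainDate is a hypothetical helper; the date string is illustrative):

func parseRetainDate(s string) (time.Time, error) {
	// the same call UnmarshalXML makes internally
	t, err := AmzDate{}.ISO8601Parse(s)
	if err != nil {
		return time.Time{}, s3err.GetAPIError(s3err.ErrInvalidRequest)
	}
	return t.UTC(), nil // e.g. "2024-09-16T14:55:55-0700" -> 2024-09-16 21:55:55 UTC
}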
// Admin api response types
type ListBucketsResult struct {
Buckets []Bucket
}
type Checksum struct {
Algorithm types.ChecksumAlgorithm
Type types.ChecksumType
CRC32 *string
CRC32C *string
SHA1 *string
SHA256 *string
CRC64NVME *string
}
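The string fields here (like the Checksum* fields in the structs above) carry base64-encoded raw digest bytes rather than hex, matching the x-amz-checksum-* header format. A small sketch of producing such a value for CRC32, assuming the big-endian digest byte order AWS uses for its CRC checksums:

import (
	"encoding/base64"
	"encoding/binary"
	"hash/crc32"
)

// crc32Checksum returns data's CRC32 (IEEE polynomial) in the
// base64-of-big-endian-bytes form used by x-amz-checksum-crc32.
func crc32Checksum(data []byte) string {
	sum := make([]byte, 4)
	binary.BigEndian.PutUint32(sum, crc32.ChecksumIEEE(data))
	return base64.StdEncoding.EncodeToString(sum)
}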

View File

@@ -27,6 +27,3 @@ USERNAME_TWO=HIJKLMN
PASSWORD_TWO=OPQRSTU
TEST_FILE_FOLDER=$PWD/versity-gwtest-files
REMOVE_TEST_FILE_FOLDER=true
VERSIONING_DIR=/tmp/versioning
COMMAND_LOG=command.log
TIME_LOG=time.log

View File

@@ -25,5 +25,4 @@ USERNAME_TWO=HIJKLMN
PASSWORD_TWO=OPQRSTU
TEST_FILE_FOLDER=$PWD/versity-gwtest-files
RECREATE_BUCKETS=true
REMOVE_TEST_FILE_FOLDER=true
VERSIONING_DIR=/tmp/versioning
REMOVE_TEST_FILE_FOLDER=true

View File

@@ -47,7 +47,7 @@ RUN git clone https://github.com/bats-core/bats-core.git && \
USER tester
RUN mkdir -p /home/tester/tests
COPY --chown=tester:tester . /home/tester
COPY --chown=tester:tester . /home/tester/tests
# add bats support libraries
RUN git clone https://github.com/bats-core/bats-support.git && rm -rf /home/tester/tests/bats-support && mv bats-support /home/tester/tests

View File

@@ -3,9 +3,8 @@ FROM ubuntu:latest
ARG DEBIAN_FRONTEND=noninteractive
ARG SECRETS_FILE=tests/.secrets
ARG CONFIG_FILE=tests/.env.docker
ARG GO_LIBRARY=go1.21.13.linux-arm64.tar.gz
# see https://github.com/versity/versitygw/issues/1034
ARG AWS_CLI=awscli-exe-linux-aarch64-2.22.35.zip
ARG GO_LIBRARY=go1.23.1.linux-arm64.tar.gz
ARG AWS_CLI=awscli-exe-linux-aarch64.zip
ARG MC_FOLDER=linux-arm64
ENV TZ=Etc/UTC
@@ -86,7 +85,5 @@ RUN openssl genpkey -algorithm RSA -out versitygw-docker.pem -pkeyopt rsa_keygen
ENV WORKSPACE=.
ENV VERSITYGW_TEST_ENV=$CONFIG_FILE
#ENV AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED
ENTRYPOINT ["tests/run.sh"]
CMD ["s3api,s3,s3cmd,mc,rest"]
CMD ["tests/run_all.sh"]

View File

@@ -1,19 +1,5 @@
# Command-Line Tests
## Table of Contents
[Instructions - Running Locally](#instructions---running-locally)<br>
[* Posix Backend](#posix-backend)<br>
[* Static Bucket Mode](#static-bucket-mode)<br>
[* S3 Backend](#s3-backend)<br>
[* Direct Mode](#direct-mode)<br>
[Instructions - Running With Docker](#instructions---running-with-docker)<br>
[Instructions - Running With Docker-Compose](#instructions---running-with-docker-compose)<br>
[Environment Parameters](#environment-parameters)<br>
[* Secret](#secret)<br>
[* Non-Secret](#non-secret)<br>
[REST Scripts](#rest-scripts)<br>
## Instructions - Running Locally
### Posix Backend
@@ -75,11 +61,10 @@ To communicate directly with s3, in order to compare the gateway results to dire
1. Copy `.secrets.default` to `.secrets` in the `tests` folder and change the parameters and add the additional s3 fields explained in the **S3 Backend** section above if running with the s3 backend.
2. By default, the dockerfile uses the **arm** architecture (usually modern Mac). If using **amd** (usually earlier Mac or Linux), you can either replace the corresponding `ARG` values directly, or override them with `--build-arg="<param>=<amd library or folder>"`. Also, you can determine which architecture your OS uses with `uname -a`.
3. Build and run the `Dockerfile_test_bats` file. Change the `SECRETS_FILE` and `CONFIG_FILE` parameters to point to your secrets and config file, respectively, if not using the defaults. Example: `docker build -t <tag> -f Dockerfile_test_bats --build-arg="SECRETS_FILE=<file>" --build-arg="CONFIG_FILE=<file>" .`.
4. To run the entire suite, run `docker run -it <image name>`. To run an individual suite, pass in the name of the suite as defined in `tests/run.sh` (e.g. REST tests -> `docker run -it <image name> rest`). Multiple suites can also be run at once if separated by commas.
## Instructions - Running with docker-compose
A file named `docker-compose-bats.yml` is provided in the root folder. A few configurations are provided, and you can also create your own provided you have a secrets and config file:
A file named `docker-compose-bats.yml` is provided in the root folder. Four configurations are provided:
* insecure (without certificates), with creation/removal of buckets
* secure, posix backend, with static buckets
* secure, posix backend, with creation/removal of buckets
@@ -96,84 +81,4 @@ For the s3 backend, see the **S3 Backend** instructions above.
If using AMD rather than ARM architecture, add the corresponding **args** values matching those in the Dockerfile for **amd** libraries.
A single instance can be run with `docker-compose -f docker-compose-bats.yml up <service name>`
## Environment Parameters
### Secret
**AWS_PROFILE**, **AWS_ENDPOINT_URL**, **AWS_REGION**, **AWS_ACCESS_KEY_ID**, **AWS_SECRET_ACCESS_KEY**: identical to the same parameters in **s3**.
**AWS_CANONICAL_ID**: for direct mode, the canonical ID for the main user (owner)
**ACL_AWS_CANONICAL_ID**: for direct mode, the canonical ID for the user to test ACL changes and access by non-owners
**ACL_AWS_ACCESS_KEY_ID**, **ACL_AWS_ACCESS_SECRET_KEY**: for direct mode, the ID and key for the S3 user in the **ACL_AWS_CANONICAL_ID** account.
### Non-Secret
**VERSITY_EXE**: location of the versity executable relative to test folder.
**RUN_VERSITYGW**: whether to run the versitygw executable; set this to **false** when running tests directly against **s3**.
**BACKEND**: the storage backend type for the gateway, e.g. **posix** or **s3**.
**LOCAL_FOLDER**: if running with a **posix** backend, the backend storage folder.
**BUCKET_ONE_NAME**, **BUCKET_TWO_NAME**: test bucket names.
**RECREATE_BUCKETS**: whether to delete buckets between tests. If set to **false**, the bucket will be restored to its original state to ensure consistent tests, but not deleted.
**CERT**, **KEY**: certificate and key locations if using SSL.
**S3CMD_CONFIG**: location of **s3cmd** config file if running **s3cmd** tests.
**SECRETS_FILE**: file where sensitive values, such as **AWS_SECRET_ACCESS_KEY**, should be stored.
**MC_ALIAS**: Minio MC alias if running MC tests.
**LOG_LEVEL**: level for test logger (1 - only critical, 2 - errors, 3 - warnings, 4 - info, 5 - debug info, 6 - tracing)
**GOCOVERDIR**: folder to put golang coverage info in, if checking coverage info.
**USERS_FOLDER**: folder to use if storing IAM data in a folder.
**IAM_TYPE**: how to store IAM data (**s3** or **folder**).
**TEST_LOG_FILE**: log file location for these bats tests.
**VERSITY_LOG_FILE**: log file for versity application as it is tested by bats tests.
**DIRECT**: if **true**, bypass versitygw and run directly against s3 (for comparison and validity-checking purposes).
**DIRECT_DISPLAY_NAME**: username if **DIRECT** is set to **true**.
**COVERAGE_DB**: database to store client command coverage info and usage counts, if using.
**USERNAME_ONE**, **PASSWORD_ONE**, **USERNAME_TWO**, **PASSWORD_TWO**: credentials for users created and tested for non-root user **versitygw** operations.
**TEST_FILE_FOLDER**: where to put temporary test files.
**REMOVE_TEST_FILE_FOLDER**: whether to delete the test file folder between tests. This should be set to **true** unless you are checking the files after a single test, or are not yet sure the test folder is in a safe location where other files won't be deleted.
**VERSIONING_DIR**: where to put gateway file versioning info.
**COMMAND_LOG**: where to store list of client commands, which if using will be reported during test failures.
**TIME_LOG**: optional log to show duration of individual tests.
**DIRECT_S3_ROOT_ACCOUNT_NAME**: for direct mode, S3 username.
**DELETE_BUCKETS_AFTER_TEST**: whether or not to delete buckets after individual tests; useful for debugging if the post-test bucket state needs to be checked.
## REST Scripts
REST scripts are included for calls to S3's REST API in the `./tests/rest_scripts/` folder. To call a script, the following parameters are needed:
* **AWS_ACCESS_KEY_ID**, **AWS_SECRET_ACCESS_KEY**, etc.
* **AWS_ENDPOINT_URL** (default: `https://localhost:7070`)
* **OUTPUT_FILE**: file where the command's response data is written
* Any other parameters specified at the top of the script file, such as payloads and variables; defaults are sometimes included.
Upon success, the script will return a response code and write the data to the **OUTPUT_FILE** location.
Example: `AWS_ACCESS_KEY_ID={id} AWS_SECRET_ACCESS_KEY={key} AWS_ENDPOINT_URL=https://s3.amazonaws.com OUTPUT_FILE=./output_file.xml ./tests/rest_scripts/list_buckets.sh`
A single instance can be run with `docker-compose -f docker-compose-bats.yml up <service name>`

View File

@@ -20,7 +20,7 @@ abort_multipart_upload() {
log 2 "'abort multipart upload' command requires bucket, key, upload ID"
return 1
fi
if ! error=$(send_command aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" 2>&1); then
if ! error=$(aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" 2>&1); then
log 2 "Error aborting upload: $error"
return 1
fi
@@ -33,10 +33,10 @@ abort_multipart_upload_with_user() {
return 1
fi
record_command "abort-multipart-upload" "client:s3api"
if ! abort_multipart_upload_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" send_command aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" 2>&1); then
if ! abort_multipart_upload_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" 2>&1); then
log 2 "Error aborting upload: $abort_multipart_upload_error"
export abort_multipart_upload_error
return 1
fi
return 0
}
}

View File

@@ -21,7 +21,7 @@ complete_multipart_upload() {
fi
log 5 "complete multipart upload id: $3, parts: $4"
record_command "complete-multipart-upload" "client:s3api"
error=$(send_command aws --no-verify-ssl s3api complete-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" --multipart-upload '{"Parts": '"$4"'}' 2>&1) || local completed=$?
error=$(aws --no-verify-ssl s3api complete-multipart-upload --bucket "$1" --key "$2" --upload-id "$3" --multipart-upload '{"Parts": '"$4"'}' 2>&1) || local completed=$?
if [[ $completed -ne 0 ]]; then
log 2 "error completing multipart upload: $error"
return 1

View File

@@ -16,28 +16,28 @@
copy_object() {
if [ $# -ne 4 ]; then
log 2 "copy object command requires command type, source, bucket, key"
echo "copy object command requires command type, source, bucket, key"
return 1
fi
local exit_code=0
local error
record_command "copy-object" "client:$1"
if [[ $1 == 's3' ]]; then
error=$(send_command aws --no-verify-ssl s3 cp "$2" s3://"$3/$4" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]]; then
error=$(send_command aws --no-verify-ssl s3api copy-object --copy-source "$2" --bucket "$3" --key "$4" 2>&1) || exit_code=$?
error=$(aws --no-verify-ssl s3 cp "$2" s3://"$3/$4" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
error=$(aws --no-verify-ssl s3api copy-object --copy-source "$2" --bucket "$3" --key "$4" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
log 5 "s3cmd ${S3CMD_OPTS[*]} --no-check-certificate cp s3://$2 s3://$3/$4"
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate cp "s3://$2" s3://"$3/$4" 2>&1) || exit_code=$?
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate cp "s3://$2" s3://"$3/$4" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
error=$(send_command mc --insecure cp "$MC_ALIAS/$2" "$MC_ALIAS/$3/$4" 2>&1) || exit_code=$?
error=$(mc --insecure cp "$MC_ALIAS/$2" "$MC_ALIAS/$3/$4" 2>&1) || exit_code=$?
else
log 2 "'copy-object' not implemented for '$1'"
echo "'copy-object' not implemented for '$1'"
return 1
fi
log 5 "copy object exit code: $exit_code"
if [ $exit_code -ne 0 ]; then
log 2 "error copying object to bucket: $error"
echo "error copying object to bucket: $error"
return 1
fi
return 0
@@ -45,7 +45,7 @@ copy_object() {
copy_object_empty() {
record_command "copy-object" "client:s3api"
error=$(send_command aws --no-verify-ssl s3api copy-object 2>&1) || local result=$?
error=$(aws --no-verify-ssl s3api copy-object 2>&1) || local result=$?
if [[ $result -eq 0 ]]; then
log 2 "copy object with empty parameters returned no error"
return 1

View File

@@ -30,14 +30,14 @@ create_bucket() {
local error
log 6 "create bucket"
if [[ $1 == 's3' ]]; then
error=$(send_command aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]]; then
error=$(send_command aws --no-verify-ssl s3api create-bucket --bucket "$2" 2>&1) || exit_code=$?
error=$(aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then
error=$(aws --no-verify-ssl s3api create-bucket --bucket "$2" 2>&1) || exit_code=$?
elif [[ $1 == "s3cmd" ]]; then
log 5 "s3cmd ${S3CMD_OPTS[*]} --no-check-certificate mb s3://$2"
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb s3://"$2" 2>&1) || exit_code=$?
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "mc" ]]; then
error=$(send_command mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
error=$(mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
else
log 2 "invalid command type $1"
return 1
@@ -56,11 +56,11 @@ create_bucket_with_user() {
fi
local exit_code=0
if [[ $1 == "aws" ]] || [[ $1 == "s3api" ]]; then
error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" send_command aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "s3cmd" ]]; then
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb --access_key="$3" --secret_key="$4" s3://"$2" 2>&1) || exit_code=$?
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb --access_key="$3" --secret_key="$4" s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "mc" ]]; then
error=$(send_command mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
error=$(mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
else
log 2 "invalid command type $1"
return 1
@@ -80,7 +80,7 @@ create_bucket_object_lock_enabled() {
fi
local exit_code=0
error=$(send_command aws --no-verify-ssl s3api create-bucket --bucket "$1" 2>&1 --object-lock-enabled-for-bucket) || local exit_code=$?
error=$(aws --no-verify-ssl s3api create-bucket --bucket "$1" 2>&1 --object-lock-enabled-for-bucket) || local exit_code=$?
if [ $exit_code -ne 0 ]; then
log 2 "error creating bucket: $error"
return 1

View File

@@ -24,7 +24,7 @@ create_multipart_upload() {
return 1
fi
if ! multipart_data=$(send_command aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1); then
if ! multipart_data=$(aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1); then
log 2 "Error creating multipart upload: $multipart_data"
return 1
fi
@@ -44,7 +44,7 @@ create_multipart_upload_with_user() {
return 1
fi
if ! multipart_data=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" send_command aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1); then
if ! multipart_data=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1); then
log 2 "Error creating multipart upload: $multipart_data"
return 1
fi
@@ -54,7 +54,6 @@ create_multipart_upload_with_user() {
return 1
fi
upload_id="${upload_id//\"/}"
echo "$upload_id"
return 0
}
@@ -66,7 +65,7 @@ create_multipart_upload_params() {
return 1
fi
local multipart_data
multipart_data=$(send_command aws --no-verify-ssl s3api create-multipart-upload \
multipart_data=$(aws --no-verify-ssl s3api create-multipart-upload \
--bucket "$1" \
--key "$2" \
--content-type "$3" \
@@ -97,7 +96,7 @@ create_multipart_upload_custom() {
done
log 5 "${*:3}"
log 5 "aws --no-verify-ssl s3api create-multipart-upload --bucket $1 --key $2 ${*:3}"
multipart_data=$(send_command aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1) || local result=$?
multipart_data=$(aws --no-verify-ssl s3api create-multipart-upload --bucket "$1" --key "$2" 2>&1) || local result=$?
if [[ $result -ne 0 ]]; then
log 2 "error creating custom multipart data command: $multipart_data"
return 1
@@ -108,19 +107,3 @@ create_multipart_upload_custom() {
log 5 "upload id: $upload_id"
return 0
}
create_multipart_upload_rest() {
if [ $# -ne 2 ]; then
log 2 "'create_multipart_upload_rest' requires bucket name, key"
return 1
fi
if ! result=$(BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/output.txt" COMMAND_LOG=$COMMAND_LOG ./tests/rest_scripts/create_multipart_upload.sh); then
log 2 "error creating multipart upload: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "put-object-retention returned code $result: $(cat "$TEST_FILE_FOLDER/output.txt")"
return 1
fi
return 0
}

View File

@@ -1,27 +0,0 @@
#!/usr/bin/env bash
create_presigned_url() {
if [[ $# -ne 3 ]]; then
log 2 "create presigned url function requires command type, bucket, and filename"
return 1
fi
local presign_result=0
if [[ $1 == 's3api' ]]; then
presigned_url=$(send_command aws s3 presign "s3://$2/$3" --expires-in 900) || presign_result=$?
elif [[ $1 == 's3cmd' ]]; then
presigned_url=$(send_command s3cmd --no-check-certificate "${S3CMD_OPTS[@]}" signurl "s3://$2/$3" "$(echo "$(date +%s)" + 900 | bc)") || presign_result=$?
elif [[ $1 == 'mc' ]]; then
presigned_url_data=$(send_command mc --insecure share download --recursive "$MC_ALIAS/$2/$3") || presign_result=$?
presigned_url="${presigned_url_data#*Share: }"
else
log 2 "unrecognized command type $1"
return 1
fi
if [[ $presign_result -ne 0 ]]; then
log 2 "error generating presigned url: $presigned_url"
return 1
fi
export presigned_url
return 0
}

View File

@@ -31,13 +31,13 @@ delete_bucket() {
exit_code=0
if [[ $1 == 's3' ]]; then
error=$(send_command aws --no-verify-ssl s3 rb s3://"$2") || exit_code=$?
elif [[ $1 == 's3api' ]]; then
error=$(send_command aws --no-verify-ssl s3api delete-bucket --bucket "$2" 2>&1) || exit_code=$?
error=$(aws --no-verify-ssl s3 rb s3://"$2") || exit_code=$?
elif [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
error=$(aws --no-verify-ssl s3api delete-bucket --bucket "$2" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rb s3://"$2" 2>&1) || exit_code=$?
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
error=$(send_command mc --insecure rb "$MC_ALIAS/$2" 2>&1) || exit_code=$?
error=$(mc --insecure rb "$MC_ALIAS/$2" 2>&1) || exit_code=$?
else
log 2 "Invalid command type $1"
return 1

View File

@@ -21,12 +21,12 @@ delete_bucket_policy() {
return 1
fi
local delete_result=0
if [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
error=$(send_command aws --no-verify-ssl s3api delete-bucket-policy --bucket "$2" 2>&1) || delete_result=$?
if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
error=$(aws --no-verify-ssl s3api delete-bucket-policy --bucket "$2" 2>&1) || delete_result=$?
elif [[ $1 == 's3cmd' ]]; then
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate delpolicy "s3://$2" 2>&1) || delete_result=$?
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate delpolicy "s3://$2" 2>&1) || delete_result=$?
elif [[ $1 == 'mc' ]]; then
error=$(send_command mc --insecure anonymous set none "$MC_ALIAS/$2" 2>&1) || delete_result=$?
error=$(mc --insecure anonymous set none "$MC_ALIAS/$2" 2>&1) || delete_result=$?
else
log 2 "command 'delete bucket policy' not implemented for '$1'"
return 1
@@ -44,7 +44,7 @@ delete_bucket_policy_with_user() {
log 2 "'delete bucket policy with user' command requires bucket, username, password"
return 1
fi
if ! delete_bucket_policy_error=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" send_command aws --no-verify-ssl s3api delete-bucket-policy --bucket "$1" 2>&1); then
if ! delete_bucket_policy_error=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" aws --no-verify-ssl s3api delete-bucket-policy --bucket "$1" 2>&1); then
log 2 "error deleting bucket policy: $delete_bucket_policy_error"
export delete_bucket_policy_error
return 1

View File

@@ -21,10 +21,10 @@ delete_bucket_tagging() {
return 1
fi
local result
if [[ $1 == 's3api' ]]; then
tags=$(send_command aws --no-verify-ssl s3api delete-bucket-tagging --bucket "$2" 2>&1) || result=$?
if [[ $1 == 'aws' ]]; then
tags=$(aws --no-verify-ssl s3api delete-bucket-tagging --bucket "$2" 2>&1) || result=$?
elif [[ $1 == 'mc' ]]; then
tags=$(send_command mc --insecure tag remove "$MC_ALIAS"/"$2" 2>&1) || result=$?
tags=$(mc --insecure tag remove "$MC_ALIAS"/"$2" 2>&1) || result=$?
else
log 2 "invalid command type $1"
return 1
@@ -43,7 +43,7 @@ delete_bucket_tagging_with_user() {
log 2 "delete bucket tagging command missing username, password, bucket name"
return 1
fi
if ! error=$(send_command AWS_ACCESS_KEY_ID="$1" AWS_SECRET_ACCESS_KEY="$2" aws --no-verify-ssl s3api delete-bucket-tagging --bucket "$3" 2>&1); then
if ! error=$(AWS_ACCESS_KEY_ID="$1" AWS_SECRET_ACCESS_KEY="$2" aws --no-verify-ssl s3api delete-bucket-tagging --bucket "$3" 2>&1); then
log 2 "error deleting bucket tagging with user: $error"
return 1
fi

View File

@@ -24,15 +24,13 @@ delete_object() {
fi
local exit_code=0
if [[ $1 == 's3' ]]; then
delete_object_error=$(send_command aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]]; then
delete_object_error=$(send_command aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
delete_object_error=$(aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
delete_object_error=$(aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
delete_object_error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm "s3://$2/$3" 2>&1) || exit_code=$?
delete_object_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm "s3://$2/$3" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
delete_object_error=$(send_command mc --insecure rm "$MC_ALIAS/$2/$3" 2>&1) || exit_code=$?
elif [[ $1 == 'rest' ]]; then
delete_object_rest "$2" "$3" || exit_code=$?
delete_object_error=$(mc --insecure rm "$MC_ALIAS/$2/$3" 2>&1) || exit_code=$?
else
log 2 "invalid command type $1"
return 1
@@ -51,37 +49,13 @@ delete_object_bypass_retention() {
log 2 "'delete-object with bypass retention' requires bucket, key, user, password"
return 1
fi
if ! delete_object_error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" send_command aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --bypass-governance-retention 2>&1); then
if ! delete_object_error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --bypass-governance-retention 2>&1); then
log 2 "error deleting object with bypass retention: $delete_object_error"
return 1
fi
return 0
}
delete_object_version() {
if [[ $# -ne 3 ]]; then
log 2 "'delete_object_version' requires bucket, key, version ID"
return 1
fi
if ! delete_object_error=$(send_command aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --version-id "$3" 2>&1); then
log 2 "error deleting object version: $delete_object_error"
return 1
fi
return 0
}
delete_object_version_bypass_retention() {
if [[ $# -ne 3 ]]; then
log 2 "'delete_object_version_bypass_retention' requires bucket, key, version ID"
return 1
fi
if ! delete_object_error=$(send_command aws --no-verify-ssl s3api delete-object --bucket "$1" --key "$2" --version-id "$3" --bypass-governance-retention 2>&1); then
log 2 "error deleting object version with bypass retention: $delete_object_error"
return 1
fi
return 0
}
delete_object_with_user() {
record_command "delete-object" "client:$1"
if [ $# -ne 5 ]; then
@@ -90,11 +64,11 @@ delete_object_with_user() {
fi
local exit_code=0
if [[ $1 == 's3' ]]; then
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" send_command aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" send_command aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" --bypass-governance-retention 2>&1) || exit_code=$?
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" --bypass-governance-retention 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
delete_object_error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm --access_key="$4" --secret_key="$5" "s3://$2/$3" 2>&1) || exit_code=$?
delete_object_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm --access_key="$4" --secret_key="$5" "s3://$2/$3" 2>&1) || exit_code=$?
else
log 2 "command 'delete object with user' not implemented for '$1'"
return 1
@@ -105,44 +79,4 @@ delete_object_with_user() {
return 1
fi
return 0
}
delete_object_rest() {
if [ $# -ne 2 ]; then
log 2 "'delete_object_rest' requires bucket name, object name"
return 1
fi
generate_hash_for_payload ""
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
# shellcheck disable=SC2154
canonical_request="DELETE
/$1/$2
host:$aws_endpoint_url_address
x-amz-content-sha256:UNSIGNED-PAYLOAD
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-date
UNSIGNED-PAYLOAD"
if ! generate_sts_string "$current_date_time" "$canonical_request"; then
log 2 "error generating sts string"
return 1
fi
get_signature
# shellcheck disable=SC2154
reply=$(send_command curl -ks -w "%{http_code}" -X DELETE "$header://$aws_endpoint_url_address/$1/$2" \
-H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-H "x-amz-content-sha256: UNSIGNED-PAYLOAD" \
-H "x-amz-date: $current_date_time" \
-o "$TEST_FILE_FOLDER"/delete_object_error.txt 2>&1)
if [[ "$reply" != "204" ]]; then
log 2 "delete object command returned error: $(cat "$TEST_FILE_FOLDER"/delete_object_error.txt)"
return 1
fi
return 0
}
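These REST helpers hand-roll AWS Signature Version 4: hash the canonical request, fold that hash into a string-to-sign, then derive the signing key with an HMAC-SHA256 chain over date, region, and service. A compact standalone Go sketch of the same derivation (endpoint, path, and credentials are placeholders):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
	"time"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret, region := "pass", "us-east-1" // placeholders
	now := time.Now().UTC()
	amzDate, ymd := now.Format("20060102T150405Z"), now.Format("20060102")

	// Canonical request: method, path, query, headers, signed-header list, payload hash.
	canonical := strings.Join([]string{
		"DELETE", "/bucket/key", "",
		"host:localhost:7070\nx-amz-content-sha256:UNSIGNED-PAYLOAD\nx-amz-date:" + amzDate + "\n",
		"host;x-amz-content-sha256;x-amz-date",
		"UNSIGNED-PAYLOAD",
	}, "\n")

	crHash := sha256.Sum256([]byte(canonical))
	stringToSign := strings.Join([]string{
		"AWS4-HMAC-SHA256", amzDate,
		ymd + "/" + region + "/s3/aws4_request",
		hex.EncodeToString(crHash[:]),
	}, "\n")

	// Signing key: chained HMACs over date, region, service, and a fixed suffix.
	k := []byte("AWS4" + secret)
	for _, s := range []string{ymd, region, "s3", "aws4_request"} {
		k = hmacSHA256(k, []byte(s))
	}
	fmt.Println(hex.EncodeToString(hmacSHA256(k, []byte(stringToSign))))
}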

View File

@@ -17,39 +17,20 @@
delete_object_tagging() {
record_command "delete-object-tagging" "client:$1"
if [[ $# -ne 3 ]]; then
log 2 "delete object tagging command missing command type, bucket, key"
echo "delete object tagging command missing command type, bucket, key"
return 1
fi
delete_result=0
if [[ $1 == 's3api' ]]; then
error=$(send_command aws --no-verify-ssl s3api delete-object-tagging --bucket "$2" --key "$3" 2>&1) || delete_result=$?
if [[ $1 == 'aws' ]]; then
error=$(aws --no-verify-ssl s3api delete-object-tagging --bucket "$2" --key "$3" 2>&1) || delete_result=$?
elif [[ $1 == 'mc' ]]; then
error=$(send_command mc --insecure tag remove "$MC_ALIAS/$2/$3") || delete_result=$?
elif [ "$1" == 'rest' ]; then
delete_object_tagging_rest "$2" "$3" || delete_result=$?
error=$(mc --insecure tag remove "$MC_ALIAS/$2/$3") || delete_result=$?
else
log 2 "delete-object-tagging command not implemented for '$1'"
echo "delete-object-tagging command not implemented for '$1'"
return 1
fi
if [[ $delete_result -ne 0 ]]; then
log 2 "error deleting object tagging: $error"
echo "error deleting object tagging: $error"
return 1
fi
return 0
}
delete_object_tagging_rest() {
if [ $# -ne 2 ]; then
log 2 "'delete_object_tagging' requires bucket, key"
return 1
fi
if ! result=$(BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/response.txt" ./tests/rest_scripts/delete_object_tagging.sh); then
log 2 "error sending delete object tagging REST command: $result"
return 1
fi
if [ "$result" != "204" ]; then
log 2 "delete-object-tagging returned code $result (response: $(cat "$TEST_FILE_FOLDER/response.txt"))"
return 1
fi
return 0
}
}

View File

@@ -20,7 +20,7 @@ delete_objects() {
log 2 "'delete-objects' command requires bucket name, two object keys"
return 1
fi
if ! error=$(send_command aws --no-verify-ssl s3api delete-objects --bucket "$1" --delete "{
if ! error=$(aws --no-verify-ssl s3api delete-objects --bucket "$1" --delete "{
\"Objects\": [
{\"Key\": \"$2\"},
{\"Key\": \"$3\"}

View File

@@ -21,10 +21,10 @@ get_bucket_acl() {
return 1
fi
local exit_code=0
if [[ $1 == 's3api' ]]; then
acl=$(send_command aws --no-verify-ssl s3api get-bucket-acl --bucket "$2" 2>&1) || exit_code="$?"
if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
acl=$(aws --no-verify-ssl s3api get-bucket-acl --bucket "$2" 2>&1) || exit_code="$?"
elif [[ $1 == 's3cmd' ]]; then
acl=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$2" 2>&1) || exit_code="$?"
acl=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$2" 2>&1) || exit_code="$?"
else
log 2 "command 'get bucket acl' not implemented for $1"
return 1
@@ -42,7 +42,7 @@ get_bucket_acl_with_user() {
log 2 "'get bucket ACL with user' command requires bucket name, username, password"
return 1
fi
if ! bucket_acl=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" send_command aws --no-verify-ssl s3api get-bucket-acl --bucket "$1" 2>&1); then
if ! bucket_acl=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" aws --no-verify-ssl s3api get-bucket-acl --bucket "$1" 2>&1); then
log 2 "error getting bucket ACLs: $bucket_acl"
return 1
fi

View File

@@ -17,18 +17,17 @@
get_bucket_location() {
record_command "get-bucket-location" "client:$1"
if [[ $# -ne 2 ]]; then
log 2 "get bucket location command requires command type, bucket name"
echo "get bucket location command requires command type, bucket name"
return 1
fi
get_result=0
if [[ $1 == 's3api' ]]; then
if [[ $1 == 'aws' ]]; then
get_bucket_location_aws "$2" || get_result=$?
elif [[ $1 == 's3cmd' ]]; then
get_bucket_location_s3cmd "$2" || get_result=$?
elif [[ $1 == 'mc' ]]; then
get_bucket_location_mc "$2" || get_result=$?
else
log 2 "command type '$1' not implemented for get_bucket_location"
echo "command type '$1' not implemented for get_bucket_location"
return 1
fi
if [[ $get_result -ne 0 ]]; then
@@ -40,10 +39,10 @@ get_bucket_location() {
get_bucket_location_aws() {
record_command "get-bucket-location" "client:s3api"
if [[ $# -ne 1 ]]; then
log 2 "get bucket location (aws) requires bucket name"
echo "get bucket location (aws) requires bucket name"
return 1
fi
location_json=$(send_command aws --no-verify-ssl s3api get-bucket-location --bucket "$1") || location_result=$?
location_json=$(aws --no-verify-ssl s3api get-bucket-location --bucket "$1") || location_result=$?
if [[ $location_result -ne 0 ]]; then
echo "error getting bucket location: $location"
return 1
@@ -58,9 +57,9 @@ get_bucket_location_s3cmd() {
echo "get bucket location (s3cmd) requires bucket name"
return 1
fi
info=$(send_command s3cmd --no-check-certificate info "s3://$1") || results=$?
info=$(s3cmd --no-check-certificate info "s3://$1") || results=$?
if [[ $results -ne 0 ]]; then
log 2 "error getting bucket location: $location"
echo "error getting s3cmd info: $info"
return 1
fi
bucket_location=$(echo "$info" | grep -o 'Location:.*' | awk '{print $2}')
@@ -70,12 +69,12 @@ get_bucket_location_s3cmd() {
get_bucket_location_mc() {
record_command "get-bucket-location" "client:mc"
if [[ $# -ne 1 ]]; then
log 2 "get bucket location (mc) requires bucket name"
echo "get bucket location (mc) requires bucket name"
return 1
fi
info=$(send_command mc --insecure stat "$MC_ALIAS/$1") || results=$?
info=$(mc --insecure stat "$MC_ALIAS/$1") || results=$?
if [[ $results -ne 0 ]]; then
log 2 "error getting s3cmd info: $info"
echo "error getting s3cmd info: $info"
return 1
fi
# shellcheck disable=SC2034

View File

@@ -26,8 +26,7 @@ get_bucket_ownership_controls() {
return 1
fi
raw_bucket_ownership_controls=""
if ! raw_bucket_ownership_controls=$(send_command aws --no-verify-ssl s3api get-bucket-ownership-controls --bucket "$1" 2>&1); then
if ! raw_bucket_ownership_controls=$(aws --no-verify-ssl s3api get-bucket-ownership-controls --bucket "$1" 2>&1); then
log 2 "error getting bucket ownership controls: $raw_bucket_ownership_controls"
return 1
fi

View File

@@ -21,7 +21,7 @@ get_bucket_policy() {
return 1
fi
local get_bucket_policy_result=0
if [[ $1 == 's3api' ]]; then
if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
get_bucket_policy_aws "$2" || get_bucket_policy_result=$?
elif [[ $1 == 's3cmd' ]]; then
get_bucket_policy_s3cmd "$2" || get_bucket_policy_result=$?
@@ -44,7 +44,7 @@ get_bucket_policy_aws() {
log 2 "aws 'get bucket policy' command requires bucket"
return 1
fi
policy_json=$(send_command aws --no-verify-ssl s3api get-bucket-policy --bucket "$1" 2>&1) || local get_result=$?
policy_json=$(aws --no-verify-ssl s3api get-bucket-policy --bucket "$1" 2>&1) || local get_result=$?
policy_json=$(echo "$policy_json" | grep -v "InsecureRequestWarning")
log 5 "$policy_json"
if [[ $get_result -ne 0 ]]; then
@@ -66,7 +66,7 @@ get_bucket_policy_with_user() {
log 2 "'get bucket policy with user' command requires bucket, username, password"
return 1
fi
if policy_json=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" send_command aws --no-verify-ssl s3api get-bucket-policy --bucket "$1" 2>&1); then
if policy_json=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" aws --no-verify-ssl s3api get-bucket-policy --bucket "$1" 2>&1); then
policy_json=$(echo "$policy_json" | grep -v "InsecureRequestWarning")
bucket_policy=$(echo "$policy_json" | jq -r '.Policy')
else
@@ -87,7 +87,7 @@ get_bucket_policy_s3cmd() {
return 1
fi
if ! info=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$1" 2>&1); then
if ! info=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$1" 2>&1); then
log 2 "error getting bucket policy: $info"
return 1
fi
@@ -97,57 +97,41 @@ get_bucket_policy_s3cmd() {
policy_brackets=false
# NOTE: versitygw sends policies back in multiple lines here, direct in single line
while IFS= read -r line; do
if check_and_load_policy_info; then
break
if [[ $policy_brackets == false ]]; then
policy_line=$(echo "$line" | grep 'Policy: ')
if [[ $policy_line != "" ]]; then
if [[ $policy_line != *'{'* ]]; then
break
fi
if [[ $policy_line == *'}'* ]]; then
log 5 "policy on single line"
bucket_policy=${policy_line//Policy:/}
break
else
policy_brackets=true
bucket_policy+="{"
fi
fi
else
bucket_policy+=$line
if [[ $line == "" ]]; then
break
fi
fi
done <<< "$info"
log 5 "bucket policy: $bucket_policy"
return 0
}
# return 0 for no policy, single-line policy, or loading complete, 1 for still searching or loading
check_and_load_policy_info() {
if [[ $policy_brackets == false ]]; then
if search_for_first_policy_line_or_full_policy; then
return 0
fi
else
bucket_policy+=$line
if [[ $line == "}" ]]; then
return 0
fi
fi
return 1
}
# return 0 for empty or single-line policy, 1 for other cases
search_for_first_policy_line_or_full_policy() {
policy_line=$(echo "$line" | grep 'Policy: ')
if [[ $policy_line != "" ]]; then
if [[ $policy_line != *'{'* ]]; then
return 0
fi
if [[ $policy_line == *'}'* ]]; then
log 5 "policy on single line"
bucket_policy=${policy_line//Policy:/}
return 0
else
policy_brackets=true
bucket_policy+="{"
fi
fi
return 1
}
get_bucket_policy_mc() {
record_command "get-bucket-policy" "client:mc"
if [[ $# -ne 1 ]]; then
log 2 "aws 'get bucket policy' command requires bucket"
echo "aws 'get bucket policy' command requires bucket"
return 1
fi
bucket_policy=$(send_command mc --insecure anonymous get-json "$MC_ALIAS/$1") || get_result=$?
bucket_policy=$(mc --insecure anonymous get-json "$MC_ALIAS/$1") || get_result=$?
if [[ $get_result -ne 0 ]]; then
log 2 "error getting policy: $bucket_policy"
echo "error getting policy: $bucket_policy"
return 1
fi
return 0

View File

@@ -21,10 +21,10 @@ get_bucket_tagging() {
assert [ $# -eq 2 ]
record_command "get-bucket-tagging" "client:$1"
local result
if [[ $1 == 's3api' ]]; then
tags=$(send_command aws --no-verify-ssl s3api get-bucket-tagging --bucket "$2" 2>&1) || result=$?
if [[ $1 == 'aws' ]]; then
tags=$(aws --no-verify-ssl s3api get-bucket-tagging --bucket "$2" 2>&1) || result=$?
elif [[ $1 == 'mc' ]]; then
tags=$(send_command mc --insecure tag list "$MC_ALIAS"/"$2" 2>&1) || result=$?
tags=$(mc --insecure tag list "$MC_ALIAS"/"$2" 2>&1) || result=$?
else
fail "invalid command type $1"
fi
@@ -35,7 +35,7 @@ get_bucket_tagging() {
export tags=
return 0
fi
log 2 "error getting bucket tags: $tags"
echo "error getting bucket tags: $tags"
return 1
fi
export tags
@@ -49,7 +49,7 @@ get_bucket_tagging_with_user() {
fi
record_command "get-bucket-tagging" "client:s3api"
local result
if ! tags=$(AWS_ACCESS_KEY_ID="$1" AWS_SECRET_ACCESS_KEY="$2" send_command aws --no-verify-ssl s3api get-bucket-tagging --bucket "$3" 2>&1); then
if ! tags=$(AWS_ACCESS_KEY_ID="$1" AWS_SECRET_ACCESS_KEY="$2" aws --no-verify-ssl s3api get-bucket-tagging --bucket "$3" 2>&1); then
log 5 "tags error: $tags"
if [[ $tags =~ "No tags found" ]] || [[ $tags =~ "The TagSet does not exist" ]]; then
export tags=

View File

@@ -17,32 +17,15 @@
get_bucket_versioning() {
record_command "get-bucket-versioning" "client:s3api"
if [[ $# -ne 2 ]]; then
log 2 "get bucket versioning command requires command type, bucket name"
log 2 "put bucket versioning command requires command type, bucket name"
return 1
fi
local get_result=0
if [[ $1 == 's3api' ]]; then
versioning=$(send_command aws --no-verify-ssl s3api get-bucket-versioning --bucket "$2" 2>&1) || get_result=$?
error=$(aws --no-verify-ssl s3api get-bucket-versioning --bucket "$2" 2>&1) || get_result=$?
fi
if [[ $get_result -ne 0 ]]; then
log 2 "error getting bucket versioning: $versioning"
return 1
fi
return 0
}
get_bucket_versioning_rest() {
log 6 "get_object_rest"
if [ $# -ne 1 ]; then
log 2 "'get_bucket_versioning_rest' requires bucket name"
return 1
fi
if ! result=$(COMMAND_LOG=$COMMAND_LOG BUCKET_NAME=$1 OUTPUT_FILE="$TEST_FILE_FOLDER/versioning.txt" ./tests/rest_scripts/get_bucket_versioning.sh); then
log 2 "error getting bucket versioning: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "get-bucket-versioning returned code $result: $(cat "$TEST_FILE_FOLDER/versioning.txt")"
log 2 "error getting bucket versioning: $error"
return 1
fi
return 0

View File

@@ -23,15 +23,13 @@ get_object() {
   fi
   local exit_code=0
   if [[ $1 == 's3' ]]; then
-    get_object_error=$(send_command aws --no-verify-ssl s3 mv "s3://$2/$3" "$4" 2>&1) || exit_code=$?
-  elif [[ $1 == 's3api' ]]; then
-    get_object_error=$(send_command aws --no-verify-ssl s3api get-object --bucket "$2" --key "$3" "$4" 2>&1) || exit_code=$?
+    get_object_error=$(aws --no-verify-ssl s3 mv "s3://$2/$3" "$4" 2>&1) || exit_code=$?
+  elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
+    get_object_error=$(aws --no-verify-ssl s3api get-object --bucket "$2" --key "$3" "$4" 2>&1) || exit_code=$?
   elif [[ $1 == 's3cmd' ]]; then
-    get_object_error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate get "s3://$2/$3" "$4" 2>&1) || exit_code=$?
+    get_object_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate get "s3://$2/$3" "$4" 2>&1) || exit_code=$?
   elif [[ $1 == 'mc' ]]; then
-    get_object_error=$(send_command mc --insecure get "$MC_ALIAS/$2/$3" "$4" 2>&1) || exit_code=$?
-  elif [[ $1 == 'rest' ]]; then
-    get_object_rest "$2" "$3" "$4" || exit_code=$?
+    get_object_error=$(mc --insecure get "$MC_ALIAS/$2/$3" "$4" 2>&1) || exit_code=$?
   else
     log 2 "'get object' command not implemented for '$1'"
     return 1
@@ -50,7 +48,8 @@ get_object_with_range() {
     log 2 "'get object with range' requires bucket, key, range, outfile"
     return 1
   fi
-  if ! get_object_error=$(send_command aws --no-verify-ssl s3api get-object --bucket "$1" --key "$2" --range "$3" "$4" 2>&1); then
+  get_object_error=$(aws --no-verify-ssl s3api get-object --bucket "$1" --key "$2" --range "$3" "$4" 2>&1) || local exit_code=$?
+  if [[ $exit_code -ne 0 ]]; then
     log 2 "error getting object with range: $get_object_error"
     return 1
   fi
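
The range argument is passed straight through to the CLI on both sides, so it takes standard HTTP byte-range syntax; a hypothetical call fetching the first 100 bytes of an object:

  get_object_with_range "my-test-bucket" "my-key" "bytes=0-99" "$TEST_FILE_FOLDER/partial.txt"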
@@ -66,13 +65,13 @@ get_object_with_user() {
   fi
   local exit_code=0
   if [[ $1 == 's3' ]] || [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
-    get_object_error=$(AWS_ACCESS_KEY_ID="$5" AWS_SECRET_ACCESS_KEY="$6" send_command aws --no-verify-ssl s3api get-object --bucket "$2" --key "$3" "$4" 2>&1) || exit_code=$?
+    get_object_error=$(AWS_ACCESS_KEY_ID="$5" AWS_SECRET_ACCESS_KEY="$6" aws --no-verify-ssl s3api get-object --bucket "$2" --key "$3" "$4" 2>&1) || exit_code=$?
   elif [[ $1 == "s3cmd" ]]; then
     log 5 "s3cmd filename: $3"
-    get_object_error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate --access_key="$5" --secret_key="$6" get "s3://$2/$3" "$4" 2>&1) || exit_code=$?
+    get_object_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate --access_key="$5" --secret_key="$6" get "s3://$2/$3" "$4" 2>&1) || exit_code=$?
   elif [[ $1 == "mc" ]]; then
     log 5 "save location: $4"
-    get_object_error=$(send_command mc --insecure get "$MC_ALIAS/$2/$3" "$4" 2>&1) || exit_code=$?
+    get_object_error=$(mc --insecure get "$MC_ALIAS/$2/$3" "$4" 2>&1) || exit_code=$?
   else
     log 2 "'get_object_with_user' not implemented for client '$1'"
     return 1
@@ -84,45 +83,3 @@ get_object_with_user() {
   fi
   return 0
 }
-
-get_object_rest() {
-  log 6 "get_object_rest"
-  if [ $# -ne 3 ]; then
-    log 2 "'get_object_rest' requires bucket name, object name, output file"
-    return 1
-  fi
-  generate_hash_for_payload ""
-  current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
-  aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
-  header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
-  # shellcheck disable=SC2154
-  canonical_request="GET
-/$1/$2
-
-host:$aws_endpoint_url_address
-x-amz-content-sha256:UNSIGNED-PAYLOAD
-x-amz-date:$current_date_time
-
-host;x-amz-content-sha256;x-amz-date
-UNSIGNED-PAYLOAD"
-  if ! generate_sts_string "$current_date_time" "$canonical_request"; then
-    log 2 "error generating sts string"
-    return 1
-  fi
-  get_signature
-  # shellcheck disable=SC2154
-  reply=$(send_command curl -w "%{http_code}" -ks "$header://$aws_endpoint_url_address/$1/$2" \
-    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-    -H "x-amz-content-sha256: UNSIGNED-PAYLOAD" \
-    -H "x-amz-date: $current_date_time" \
-    -o "$3" 2>&1)
-  log 5 "reply: $reply"
-  if [[ "$reply" != "200" ]]; then
-    log 2 "get object command returned error: $(cat "$3")"
-    return 1
-  fi
-  return 0
-}
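
generate_sts_string and get_signature, defined elsewhere in the suite, produce the $signature used above from the canonical request. As a rough sketch of the SigV4 derivation they implement, assuming $ymd holds the YYYYMMDD date stamp and $sts the string to sign (names and layout here are illustrative, not the suite's actual helpers):

  key=$(printf '%s' "$ymd" | openssl dgst -sha256 -hex -mac HMAC -macopt key:"AWS4${AWS_SECRET_ACCESS_KEY}" | awk '{print $NF}')
  for step in "$AWS_REGION" "s3" "aws4_request"; do
    key=$(printf '%s' "$step" | openssl dgst -sha256 -hex -mac HMAC -macopt hexkey:"$key" | awk '{print $NF}')
  done
  signature=$(printf '%s' "$sts" | openssl dgst -sha256 -hex -mac HMAC -macopt hexkey:"$key" | awk '{print $NF}')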


@@ -20,7 +20,7 @@ get_object_attributes() {
     log 2 "'get object attributes' command requires bucket, key"
     return 1
   fi
-  attributes=$(send_command aws --no-verify-ssl s3api get-object-attributes --bucket "$1" --key "$2" --object-attributes "ObjectSize" 2>&1) || local get_result=$?
+  attributes=$(aws --no-verify-ssl s3api get-object-attributes --bucket "$1" --key "$2" --object-attributes "ObjectSize" 2>&1) || local get_result=$?
   if [[ $get_result -ne 0 ]]; then
     log 2 "error getting object attributes: $attributes"
     return 1
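
The helper requests only the ObjectSize attribute; a caller would typically pull it from the JSON reply. A hypothetical follow-on using jq (not part of the helper itself):

  object_size=$(echo "$attributes" | grep -v "InsecureRequestWarning" | jq -r '.ObjectSize')
  echo "object is $object_size bytes"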


@@ -20,26 +20,10 @@ get_object_legal_hold() {
     return 1
   fi
   record_command "get-object-legal-hold" "client:s3api"
-  legal_hold=$(send_command aws --no-verify-ssl s3api get-object-legal-hold --bucket "$1" --key "$2" 2>&1) || local get_result=$?
+  legal_hold=$(aws --no-verify-ssl s3api get-object-legal-hold --bucket "$1" --key "$2" 2>&1) || local get_result=$?
   if [[ $get_result -ne 0 ]]; then
     log 2 "error getting object legal hold: $legal_hold"
     return 1
   fi
   return 0
 }
-
-get_object_legal_hold_rest() {
-  if [ $# -ne 2 ]; then
-    log 2 "'get_object_legal_hold_rest' requires bucket, key"
-    return 1
-  fi
-  if ! result=$(COMMAND_LOG=$COMMAND_LOG BUCKET_NAME=$1 OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/legal_hold.txt" ./tests/rest_scripts/get_object_legal_hold.sh); then
-    log 2 "error getting object legal hold: $result"
-    return 1
-  fi
-  if [ "$result" != "200" ]; then
-    log 2 "get-object-legal-hold returned code $result: $(cat "$TEST_FILE_FOLDER/legal_hold.txt")"
-    return 1
-  fi
-  return 0
-}
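
get-object-legal-hold presumes a bucket created with object lock enabled and, to return anything useful, an object with a hold applied; a hypothetical setup with the stock CLI (names illustrative):

  aws s3api put-object-legal-hold --bucket "my-lock-bucket" --key "my-key" --legal-hold Status=ON
  aws s3api get-object-legal-hold --bucket "my-lock-bucket" --key "my-key"
  # expected reply: { "LegalHold": { "Status": "ON" } }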


@@ -20,7 +20,7 @@ get_object_lock_configuration() {
     log 2 "'get object lock configuration' command missing bucket name"
     return 1
   fi
-  if ! lock_config=$(send_command aws --no-verify-ssl s3api get-object-lock-configuration --bucket "$1" 2>&1); then
+  if ! lock_config=$(aws --no-verify-ssl s3api get-object-lock-configuration --bucket "$1" 2>&1); then
     log 2 "error obtaining lock config: $lock_config"
     # shellcheck disable=SC2034
     get_object_lock_config_err=$lock_config
@@ -28,21 +28,4 @@ get_object_lock_configuration() {
   fi
   lock_config=$(echo "$lock_config" | grep -v "InsecureRequestWarning")
   return 0
 }
-
-get_object_lock_configuration_rest() {
-  log 6 "get_object_lock_configuration_rest"
-  if [ $# -ne 1 ]; then
-    log 2 "'get_object_lock_configuration_rest' requires bucket name"
-    return 1
-  fi
-  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/object-lock-config.txt" ./tests/rest_scripts/get_object_lock_config.sh); then
-    log 2 "error getting lock configuration: $result"
-    return 1
-  fi
-  if [[ "$result" != "200" ]]; then
-    log 2 "expected '200', returned '$result': $(cat "$TEST_FILE_FOLDER/object-lock-config.txt")"
-    return 1
-  fi
-  return 0
-}
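
A lock configuration can only be read back from a bucket that was created with object lock enabled; a round-trip with the stock CLI (bucket name illustrative):

  aws s3api create-bucket --bucket "my-lock-bucket" --object-lock-enabled-for-bucket
  aws s3api get-object-lock-configuration --bucket "my-lock-bucket"
  # expected reply: { "ObjectLockConfiguration": { "ObjectLockEnabled": "Enabled" } }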


@@ -20,57 +20,11 @@ get_object_retention() {
     log 2 "'get object retention' command requires bucket, key"
     return 1
   fi
-  if ! retention=$(send_command aws --no-verify-ssl s3api get-object-retention --bucket "$1" --key "$2" 2>&1); then
+  if ! retention=$(aws --no-verify-ssl s3api get-object-retention --bucket "$1" --key "$2" 2>&1); then
     log 2 "error getting object retention: $retention"
     get_object_retention_error=$retention
     export get_object_retention_error
     return 1
   fi
   return 0
 }
-
-get_object_retention_rest() {
-  if [ $# -ne 2 ]; then
-    log 2 "'get_object_tagging_rest' requires bucket, key"
-    return 1
-  fi
-  generate_hash_for_payload ""
-  current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
-  aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
-  header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
-  # shellcheck disable=SC2154
-  canonical_request="GET
-/$1/$2
-retention=
-host:$aws_endpoint_url_address
-x-amz-content-sha256:$payload_hash
-x-amz-date:$current_date_time
-
-host;x-amz-content-sha256;x-amz-date
-$payload_hash"
-  if ! generate_sts_string "$current_date_time" "$canonical_request"; then
-    log 2 "error generating sts string"
-    return 1
-  fi
-  get_signature
-  # shellcheck disable=SC2154
-  reply=$(send_command curl -ks -w "%{http_code}" "$header://$aws_endpoint_url_address/$1/$2?retention" \
-    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-    -H "x-amz-content-sha256: $payload_hash" \
-    -H "x-amz-date: $current_date_time" \
-    -o "$TEST_FILE_FOLDER"/object_retention.txt 2>&1)
-  log 5 "reply status code: $reply"
-  if [[ "$reply" != "200" ]]; then
-    if [ "$reply" == "404" ]; then
-      return 1
-    fi
-    log 2 "reply error: $reply"
-    log 2 "get object retention command returned error: $(cat "$TEST_FILE_FOLDER"/object_retention.txt)"
-    return 2
-  fi
-  log 5 "object tags: $(cat "$TEST_FILE_FOLDER"/object_retention.txt)"
-  return 0
-}
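
The removed REST variant distinguishes a 404 (no retention configured, return 1) from other failures (return 2). On the CLI side, retention must be set before it can be read; a hypothetical pair, with an illustrative retain-until date:

  aws s3api put-object-retention --bucket "my-lock-bucket" --key "my-key" \
    --retention '{"Mode":"GOVERNANCE","RetainUntilDate":"2030-01-01T00:00:00Z"}'
  aws s3api get-object-retention --bucket "my-lock-bucket" --key "my-key"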


@@ -21,12 +21,10 @@ get_object_tagging() {
     return 1
   fi
   local result
-  if [[ $1 == 's3api' ]]; then
-    tags=$(send_command aws --no-verify-ssl s3api get-object-tagging --bucket "$2" --key "$3" 2>&1) || result=$?
-  elif [[ "$1" == 'mc' ]]; then
-    tags=$(send_command mc --insecure tag list "$MC_ALIAS"/"$2"/"$3" 2>&1) || result=$?
-  elif [ "$1" == 'rest' ]; then
-    get_object_tagging_rest "$2" "$3" || result=$?
+  if [[ $1 == 'aws' ]]; then
+    tags=$(aws --no-verify-ssl s3api get-object-tagging --bucket "$2" --key "$3" 2>&1) || result=$?
+  elif [[ $1 == 'mc' ]]; then
+    tags=$(mc --insecure tag list "$MC_ALIAS"/"$2"/"$3" 2>&1) || result=$?
   else
     log 2 "invalid command type $1"
     return 1
@@ -43,50 +41,4 @@ get_object_tagging() {
     tags=$(echo "$tags" | grep -v "InsecureRequestWarning")
   fi
   export tags
 }
-
-get_object_tagging_rest() {
-  if [ $# -ne 2 ]; then
-    log 2 "'get_object_tagging' requires bucket, key"
-    return 1
-  fi
-  generate_hash_for_payload ""
-  current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
-  aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
-  header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
-  # shellcheck disable=SC2154
-  canonical_request="GET
-/$1/$2
-tagging=
-host:$aws_endpoint_url_address
-x-amz-content-sha256:$payload_hash
-x-amz-date:$current_date_time
-
-host;x-amz-content-sha256;x-amz-date
-$payload_hash"
-  if ! generate_sts_string "$current_date_time" "$canonical_request"; then
-    log 2 "error generating sts string"
-    return 1
-  fi
-  get_signature
-  # shellcheck disable=SC2154
-  reply=$(send_command curl -ks -w "%{http_code}" "$header://$aws_endpoint_url_address/$1/$2?tagging" \
-    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-    -H "x-amz-content-sha256: $payload_hash" \
-    -H "x-amz-date: $current_date_time" \
-    -o "$TEST_FILE_FOLDER"/object_tags.txt 2>&1)
-  log 5 "reply status code: $reply"
-  if [[ "$reply" != "200" ]]; then
-    if [ "$reply" == "404" ]; then
-      return 1
-    fi
-    log 2 "reply error: $reply"
-    log 2 "get object tagging command returned error: $(cat "$TEST_FILE_FOLDER"/object_tags.txt)"
-    return 2
-  fi
-  log 5 "object tags: $(cat "$TEST_FILE_FOLDER"/object_tags.txt)"
-  return 0
-}
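
Both client paths normalize into the exported $tags variable; a hypothetical seeding step so the listing above has a TagSet to return:

  aws s3api put-object-tagging --bucket "my-test-bucket" --key "my-key" \
    --tagging 'TagSet=[{Key=env,Value=test}]'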


@@ -29,14 +29,14 @@ head_bucket() {
     return 1
   fi
   local exit_code=0
-  if [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
-    bucket_info=$(send_command aws --no-verify-ssl s3api head-bucket --bucket "$2" 2>&1) || exit_code=$?
+  if [[ $1 == "aws" ]] || [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
+    bucket_info=$(aws --no-verify-ssl s3api head-bucket --bucket "$2" 2>&1) || exit_code=$?
   elif [[ $1 == "s3cmd" ]]; then
-    bucket_info=$(send_command s3cmd --no-check-certificate info "s3://$2" 2>&1) || exit_code=$?
+    bucket_info=$(s3cmd --no-check-certificate info "s3://$2" 2>&1) || exit_code=$?
   elif [[ $1 == 'mc' ]]; then
-    bucket_info=$(send_command mc --insecure stat "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
+    bucket_info=$(mc --insecure stat "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
   else
-    log 2 "invalid command type $1"
+    fail "invalid command type $1"
   fi
   if [ $exit_code -ne 0 ]; then
     if [[ "$bucket_info" == *"404"* ]] || [[ "$bucket_info" == *"does not exist"* ]]; then
@@ -45,7 +45,5 @@ head_bucket() {
     log 2 "error getting bucket info: $bucket_info"
     return 2
   fi
-  bucket_info="$(echo -n "$bucket_info" | grep -v "InsecureRequestWarning")"
-  echo "$bucket_info"
   return 0
 }
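
A hypothetical caller that leans on the two-tier return codes above, assuming the 404 branch elided between the hunks returns 1:

  rc=0
  head_bucket "aws" "my-test-bucket" || rc=$?
  case "$rc" in
    0) echo "bucket exists" ;;
    1) echo "bucket does not exist" ;;
    *) echo "unexpected head-bucket failure" ;;
  esac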

Some files were not shown because too many files have changed in this diff.