Compare commits


4 Commits

Author SHA1 Message Date
Ben McClelland
7086579590 fix: list objects trim common prefixes that match marker prefix
This checks whether the common prefix falls before the marker and
thus would have been returned by an earlier list objects request.

The error case was the aws cli listing multiple entries for the same
common prefix when the listing required multiple pagination
requests.

Fixes #778
2024-09-16 14:55:55 -07:00
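
A minimal sketch in Go of the check this commit describes (a hypothetical helper for illustration, not the gateway's actual code): a common prefix that the marker falls under must already have been emitted by the page that produced the marker, so it is skipped.

package main

import (
	"fmt"
	"strings"
)

// skipCommonPrefix reports whether a common prefix should be dropped from the
// current ListObjects page because an earlier paginated request already
// returned it: if the marker falls under the prefix, the page that produced
// the marker must have emitted the prefix.
func skipCommonPrefix(commonPrefix, marker string) bool {
	return marker != "" && strings.HasPrefix(marker, commonPrefix)
}

func main() {
	// Marker left inside "photos/" by the previous page: skip the prefix.
	fmt.Println(skipCommonPrefix("photos/", "photos/2024/a.jpg")) // true
	// Marker outside the prefix: it is new on this page, keep it.
	fmt.Println(skipCommonPrefix("videos/", "photos/2024/a.jpg")) // false
}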
jonaustin09
dea5e0c0b2 feat: Implemented object versioning for multipart uploads, along with integration tests for the multipart versioning implementation
2024-09-16 14:51:22 -07:00
jonaustin09
16995acc17 feat: Added integration tests for bucket object versioning and fixed a couple of bugs in the versioning implementation
2024-09-16 14:51:20 -07:00
jonaustin09
1f41f91f2d feat: basic logic implementation of bucket object versioning in posix backend
The new posix backend option --versioning-dir enables storing object versions
in the specified directory.
2024-09-16 14:42:24 -07:00
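
A hypothetical invocation enabling the feature (the --versioning-dir flag comes from this commit; the credential flags and argument order are assumed for illustration):

./versitygw -a user -s pass posix --versioning-dir /srv/gw-versions /srv/gw-data

Object versions would then be kept under /srv/gw-versions rather than alongside the live objects in /srv/gw-data.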
302 changed files with 10697 additions and 41649 deletions

.github/SECURITY.md vendored

@@ -1,25 +0,0 @@
# Security Policy
## Reporting a Vulnerability
If you discover a security vulnerability in `versitygw`, we strongly encourage you to report it privately and responsibly.
Please do **not** create public issues or pull requests that contain details about the vulnerability.
Instead, report the issue using GitHub's private **Security Advisories** feature:
- Go to [versitygw's Security Advisories page](https://github.com/versity/versitygw/security/advisories)
- Click on **"Report a vulnerability"**
We aim to respond within **2 business days** and work with you to quickly resolve the issue.
## Supported Versions
| Version | Supported |
| --------------- | --------- |
| Latest (v1.x.x) | ✅ |
| Older versions | ❌ |
## Responsible Disclosure
We appreciate responsible disclosures and are committed to fixing vulnerabilities in a timely manner. Thank you for helping keep `versitygw` secure.


@@ -1,37 +0,0 @@
name: azurite functional tests
permissions: {}
on: pull_request
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 'stable'
id: go
- name: Set up Docker Compose
run: |
docker compose -f tests/docker-compose.yml --env-file .env.dev --project-directory . up -d azurite azuritegw
- name: Wait for Azurite to be ready
run: sleep 40
- name: Get Dependencies
run: |
go mod download
- name: Build and Run
run: |
make
./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow --azure
- name: Shut down services
run: |
docker compose -f tests/docker-compose.yml --env-file .env.dev --project-directory . down azurite azuritegw

.github/workflows/docker-bats.yaml vendored Normal file

@@ -0,0 +1,28 @@
name: docker bats tests
on: pull_request
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker Image
run: |
mv tests/.env.docker.default tests/.env.docker
mv tests/.secrets.default tests/.secrets
docker build --build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
--build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" --build-arg="MC_FOLDER=linux-amd64" \
--progress=plain -f tests/Dockerfile_test_bats -t bats_test .
- name: Set up Docker Compose
run: sudo apt-get install -y docker-compose
- name: Run Docker Container
run: docker-compose -f tests/docker-compose-bats.yml up --exit-code-from posix_backend posix_backend


@@ -1,29 +0,0 @@
name: docker bats tests
permissions: {}
on: pull_request
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build Docker Image
run: |
cp tests/.env.docker.default tests/.env.docker
cp tests/.secrets.default tests/.secrets
# see https://github.com/versity/versitygw/issues/1034
docker build \
--build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
--build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" \
--build-arg="MC_FOLDER=linux-amd64" \
--progress=plain \
-f tests/Dockerfile_test_bats \
-t bats_test .
- name: Run Docker Container
run: |
docker compose -f tests/docker-compose-bats.yml --project-directory . \
up --exit-code-from s3api_np_only s3api_np_only


@@ -1,4 +1,5 @@
name: Publish Docker image
on:
release:
types: [published]


@@ -1,8 +1,7 @@
name: functional tests
permissions: {}
on: pull_request
jobs:
build:
name: RunTests
runs-on: ubuntu-latest
@@ -19,7 +18,7 @@ jobs:
- name: Get Dependencies
run: |
go mod download
go get -v -t -d ./...
- name: Build and Run
run: |


@@ -1,10 +1,9 @@
name: general
permissions: {}
on: pull_request
jobs:
build:
name: Go Basic Checks
name: Build
runs-on: ubuntu-latest
steps:
@@ -24,6 +23,9 @@ jobs:
run: |
go get -v -t -d ./...
- name: Build
run: make
- name: Test
run: go test -coverprofile profile.txt -race -v -timeout 30s -tags=github ./...
@@ -33,26 +35,4 @@ jobs:
- name: Run govulncheck
run: govulncheck ./...
shell: bash
verify-build:
name: Verify Build Targets
needs: build
runs-on: ubuntu-latest
strategy:
matrix:
os: [darwin, freebsd, linux]
arch: [amd64, arm64]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: Build for ${{ matrix.os }}/${{ matrix.arch }}
run: |
GOOS=${{ matrix.os }} GOARCH=${{ matrix.arch }} go build -o versitygw-${{ matrix.os }}-${{ matrix.arch }} cmd/versitygw/*.go
shell: bash


@@ -1,12 +1,16 @@
name: goreleaser
permissions:
contents: write
on:
push:
# run only against tags
tags:
- '*'
permissions:
contents: write
# packages: write
# issues: write
jobs:
goreleaser:
runs-on: ubuntu-latest
@@ -25,10 +29,10 @@ jobs:
go-version: stable
- name: Run Releaser
uses: goreleaser/goreleaser-action@v6
uses: goreleaser/goreleaser-action@v5
with:
distribution: goreleaser
version: '~> v2'
version: latest
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.TOKEN }}


@@ -1,13 +0,0 @@
name: host style tests
permissions: {}
on: pull_request
jobs:
build-and-run:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: run host-style tests
run: make test-host-style


@@ -1,5 +1,4 @@
name: shellcheck
permissions: {}
on: pull_request
jobs:


@@ -1,5 +1,4 @@
name: staticcheck
permissions: {}
on: pull_request
jobs:


@@ -1,5 +1,4 @@
name: system tests
permissions: {}
on: pull_request
jobs:
build:
@@ -9,118 +8,115 @@ jobs:
fail-fast: false
matrix:
include:
- set: "mc, posix, non-file count, non-static, folder IAM"
- set: "s3cmd, posix"
LOCAL_FOLDER: /tmp/gw1
BUCKET_ONE_NAME: versity-gwtest-bucket-one-1
BUCKET_TWO_NAME: versity-gwtest-bucket-two-1
IAM_TYPE: folder
RUN_SET: "mc-non-file-count"
USERS_FOLDER: /tmp/iam1
AWS_ENDPOINT_URL: https://127.0.0.1:7070
RUN_SET: "s3cmd"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
PORT: 7070
BACKEND: "posix"
- set: "mc, posix, file count, non-static, folder IAM"
- set: "s3, posix"
LOCAL_FOLDER: /tmp/gw2
BUCKET_ONE_NAME: versity-gwtest-bucket-one-2
BUCKET_TWO_NAME: versity-gwtest-bucket-two-2
IAM_TYPE: folder
RUN_SET: "mc-file-count"
USERS_FOLDER: /tmp/iam2
AWS_ENDPOINT_URL: https://127.0.0.1:7071
RUN_SET: "s3"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
PORT: 7071
BACKEND: "posix"
- set: "REST, posix, non-static, base|acl|multipart, folder IAM"
- set: "s3api, posix"
LOCAL_FOLDER: /tmp/gw3
BUCKET_ONE_NAME: versity-gwtest-bucket-one-3
BUCKET_TWO_NAME: versity-gwtest-bucket-two-3
IAM_TYPE: folder
RUN_SET: "rest-base,rest-acl,rest-multipart"
USERS_FOLDER: /tmp/iam3
AWS_ENDPOINT_URL: https://127.0.0.1:7072
RUN_SET: "s3api"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
PORT: 7072
BACKEND: "posix"
- set: "REST, posix, non-static, chunked|checksum|versioning|bucket, folder IAM"
- set: "mc, posix"
LOCAL_FOLDER: /tmp/gw4
BUCKET_ONE_NAME: versity-gwtest-bucket-one-4
BUCKET_TWO_NAME: versity-gwtest-bucket-two-4
IAM_TYPE: folder
RUN_SET: "rest-chunked,rest-checksum,rest-versioning,rest-bucket"
USERS_FOLDER: /tmp/iam4
AWS_ENDPOINT_URL: https://127.0.0.1:7073
RUN_SET: "mc"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
PORT: 7073
BACKEND: "posix"
- set: "s3, posix, non-file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3-non-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3, posix, file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, bucket|object|multipart, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, policy, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-policy"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3api, posix, user, non-static, s3 IAM"
- set: "s3api-user, posix, s3 IAM"
LOCAL_FOLDER: /tmp/gw5
BUCKET_ONE_NAME: versity-gwtest-bucket-one-5
BUCKET_TWO_NAME: versity-gwtest-bucket-two-5
IAM_TYPE: s3
USERS_BUCKET: versity-gwtest-iam
AWS_ENDPOINT_URL: https://127.0.0.1:7074
RUN_SET: "s3api-user"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
PORT: 7074
BACKEND: "posix"
- set: "s3api, posix, bucket, static, folder IAM"
- set: "s3api non-policy, static buckets"
LOCAL_FOLDER: /tmp/gw6
BUCKET_ONE_NAME: versity-gwtest-bucket-one-6
BUCKET_TWO_NAME: versity-gwtest-bucket-two-6
IAM_TYPE: folder
RUN_SET: "s3api-bucket"
USERS_FOLDER: /tmp/iam6
AWS_ENDPOINT_URL: https://127.0.0.1:7075
RUN_SET: "s3api-non-policy"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
PORT: 7075
BACKEND: "posix"
- set: "s3api, posix, multipart, static, folder IAM"
- set: "s3api, s3 backend"
LOCAL_FOLDER: /tmp/gw7
BUCKET_ONE_NAME: versity-gwtest-bucket-one-7
BUCKET_TWO_NAME: versity-gwtest-bucket-two-7
IAM_TYPE: folder
RUN_SET: "s3api-multipart"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
USERS_FOLDER: /tmp/iam7
AWS_ENDPOINT_URL: https://127.0.0.1:7076
RUN_SET: "s3api"
RECREATE_BUCKETS: "true"
PORT: 7076
BACKEND: "s3"
- set: "REST, posix"
LOCAL_FOLDER: /tmp/gw8
BUCKET_ONE_NAME: versity-gwtest-bucket-one-7
BUCKET_TWO_NAME: versity-gwtest-bucket-two-7
IAM_TYPE: folder
USERS_FOLDER: /tmp/iam8
AWS_ENDPOINT_URL: https://127.0.0.1:7077
RUN_SET: "rest"
RECREATE_BUCKETS: "true"
PORT: 7077
BACKEND: "posix"
- set: "s3api, posix, object, static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-object"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
- set: "s3api, posix, policy, static, folder IAM"
- set: "s3api policy, static buckets"
LOCAL_FOLDER: /tmp/gw9
BUCKET_ONE_NAME: versity-gwtest-bucket-one-8
BUCKET_TWO_NAME: versity-gwtest-bucket-two-8
IAM_TYPE: folder
USERS_FOLDER: /tmp/iam9
AWS_ENDPOINT_URL: https://127.0.0.1:7078
RUN_SET: "s3api-policy"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
PORT: 7078
BACKEND: "posix"
- set: "s3api, posix, user, static, folder IAM"
- set: "s3api user, static buckets"
LOCAL_FOLDER: /tmp/gw10
BUCKET_ONE_NAME: versity-gwtest-bucket-one-9
BUCKET_TWO_NAME: versity-gwtest-bucket-two-9
IAM_TYPE: folder
USERS_FOLDER: /tmp/iam10
AWS_ENDPOINT_URL: https://127.0.0.1:7079
RUN_SET: "s3api-user"
RECREATE_BUCKETS: "false"
DELETE_BUCKETS_AFTER_TEST: "false"
BACKEND: "posix"
# TODO fix/debug s3 gateway
#- set: "s3api, s3, multipart|object, non-static, folder IAM"
# IAM_TYPE: folder
# RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
# RECREATE_BUCKETS: "true"
# BACKEND: "s3"
#- set: "s3api, s3, policy|user, non-static, folder IAM"
# IAM_TYPE: folder
# RUN_SET: "s3api-policy,s3api-user"
# RECREATE_BUCKETS: "true"
# BACKEND: "s3"
- set: "s3cmd, posix, file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-file-count"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3cmd, posix, non-user, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-non-user"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
BACKEND: "posix"
- set: "s3cmd, posix, user, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-user"
RECREATE_BUCKETS: "true"
DELETE_BUCKETS_AFTER_TEST: "true"
PORT: 7079
BACKEND: "posix"
steps:
- name: Check out code into the Go module directory
@@ -129,7 +125,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "stable"
go-version: 'stable'
id: go
- name: Get Dependencies
@@ -145,7 +141,6 @@ jobs:
- name: Install s3cmd
run: |
sudo apt-get update
sudo apt-get install s3cmd
- name: Install mc
@@ -153,38 +148,28 @@ jobs:
curl https://dl.min.io/client/mc/release/linux-amd64/mc --create-dirs -o /usr/local/bin/mc
chmod 755 /usr/local/bin/mc
- name: Install xml libraries (for rest)
- name: Install xmllint (for rest)
run: |
sudo apt-get update
sudo apt-get install libxml2-utils xmlstarlet
sudo apt-get install libxml2-utils
# see https://github.com/versity/versitygw/issues/1034
- name: Install AWS cli
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.22.35.zip" -o "awscliv2.zip"
unzip -o awscliv2.zip
./aws/install -i ${{ github.workspace }}/aws-cli -b ${{ github.workspace }}/bin
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Build and run
- name: Build and run, posix backend
env:
LOCAL_FOLDER: ${{ matrix.LOCAL_FOLDER }}
BUCKET_ONE_NAME: ${{ matrix.BUCKET_ONE_NAME }}
BUCKET_TWO_NAME: ${{ matrix.BUCKET_TWO_NAME }}
USERS_FOLDER: ${{ matrix.USERS_FOLDER }}
USERS_BUCKET: ${{ matrix.USERS_BUCKET }}
IAM_TYPE: ${{ matrix.IAM_TYPE }}
AWS_ENDPOINT_URL: ${{ matrix.AWS_ENDPOINT_URL }}
RUN_SET: ${{ matrix.RUN_SET }}
PORT: ${{ matrix.PORT }}
AWS_PROFILE: versity
VERSITY_EXE: ${{ github.workspace }}/versitygw
RUN_VERSITYGW: true
BACKEND: ${{ matrix.BACKEND }}
RECREATE_BUCKETS: ${{ matrix.RECREATE_BUCKETS }}
DELETE_BUCKETS_AFTER_TEST: ${{ matrix.DELETE_BUCKETS_AFTER_TEST }}
CERT: ${{ github.workspace }}/cert.pem
KEY: ${{ github.workspace }}/versitygw.pem
LOCAL_FOLDER: /tmp/gw
BUCKET_ONE_NAME: versity-gwtest-bucket-one
BUCKET_TWO_NAME: versity-gwtest-bucket-two
USERS_FOLDER: /tmp/iam
USERS_BUCKET: versity-gwtest-iam
AWS_ENDPOINT_URL: https://127.0.0.1:7070
PORT: 7070
S3CMD_CONFIG: tests/s3cfg.local.default
MC_ALIAS: versity
LOG_LEVEL: 4
@@ -194,13 +179,6 @@ jobs:
USERNAME_TWO: HIJKLMN
PASSWORD_TWO: 8901234
TEST_FILE_FOLDER: ${{ github.workspace }}/versity-gwtest-files
REMOVE_TEST_FILE_FOLDER: true
VERSIONING_DIR: ${{ github.workspace }}/versioning
COMMAND_LOG: command.log
TIME_LOG: time.log
PYTHON_ENV_FOLDER: ${{ github.workspace }}/env
AUTOGENERATE_USERS: true
USER_AUTOGENERATION_PREFIX: github-actions-test-
run: |
make testbin
export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST
@@ -208,7 +186,6 @@ jobs:
export AWS_REGION=us-east-1
export AWS_ACCESS_KEY_ID_TWO=user
export AWS_SECRET_ACCESS_KEY_TWO=pass
export AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile versity
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile versity
aws configure set aws_region $AWS_REGION --profile versity
@@ -222,9 +199,6 @@ jobs:
fi
BYPASS_ENV_FILE=true ${{ github.workspace }}/tests/run.sh $RUN_SET
- name: Time report
run: cat ${{ github.workspace }}/time.log
- name: Coverage report
run: |
go tool covdata percent -i=cover

.gitignore vendored

@@ -62,8 +62,4 @@ tests/!s3cfg.local.default
*.patch
# grafana's local database (kept on filesystem for survival between instantiations)
metrics-exploration/grafana_data/**
# bats tools
/tests/bats-assert
/tests/bats-support
metrics-exploration/grafana_data/**


@@ -1,5 +1,3 @@
version: 2
before:
hooks:
- go mod tidy
@@ -25,7 +23,7 @@ builds:
- -X=main.Build={{.Commit}} -X=main.BuildTime={{.Date}} -X=main.Version={{.Version}}
archives:
- formats: [ 'tar.gz' ]
- format: tar.gz
# this name template makes the OS and Arch compatible with the results of uname.
name_template: >-
{{ .ProjectName }}_v{{ .Version }}_
@@ -45,7 +43,7 @@ archives:
# use zip for windows archives
format_overrides:
- goos: windows
formats: [ 'zip' ]
format: zip
# Additional files/globs you want to add to the archive.
#
@@ -60,7 +58,7 @@ checksum:
name_template: 'checksums.txt'
snapshot:
version_template: "{{ incpatch .Version }}-{{.ShortCommit}}"
name_template: "{{ incpatch .Version }}-next"
changelog:
sort: asc
@@ -88,7 +86,7 @@ nfpms:
license: Apache 2.0
ids:
builds:
- versitygw
formats:


@@ -18,10 +18,6 @@ GOBUILD=$(GOCMD) build
GOCLEAN=$(GOCMD) clean
GOTEST=$(GOCMD) test
# docker-compose
DCCMD=docker-compose
DOCKERCOMPOSE=$(DCCMD) -f tests/docker-compose.yml --env-file .env.dev --project-directory .
BIN=versitygw
VERSION := $(shell if test -e VERSION; then cat VERSION; else git describe --abbrev=0 --tags HEAD; fi)
@@ -72,33 +68,22 @@ dist:
rm -f VERSION
gzip -f $(TARFILE)
.PHONY: snapshot
snapshot:
# brew install goreleaser/tap/goreleaser
goreleaser release --snapshot --skip publish --clean
# Creates and runs S3 gateway instance in a docker container
.PHONY: up-posix
up-posix:
$(DOCKERCOMPOSE) up posix
docker compose --env-file .env.dev up posix
# Creates and runs S3 gateway proxy instance in a docker container
.PHONY: up-proxy
up-proxy:
$(DOCKERCOMPOSE) up proxy
docker compose --env-file .env.dev up proxy
# Creates and runs S3 gateway to azurite instance in a docker container
.PHONY: up-azurite
up-azurite:
$(DOCKERCOMPOSE) up azurite azuritegw
docker compose --env-file .env.dev up azurite azuritegw
# Creates and runs both S3 gateway and proxy server instances in docker containers
.PHONY: up-app
up-app:
$(DOCKERCOMPOSE) up
# Run the host-style tests in docker containers
.PHONY: test-host-style
test-host-style:
docker compose -f tests/host-style-tests/docker-compose.yml up --build --abort-on-container-exit --exit-code-from test
docker compose --env-file .env.dev up


@@ -40,7 +40,7 @@ Versity Gateway, a simple to use tool for seamless inline translation between AW
The server translates incoming S3 API requests and transforms them into equivalent operations to the backend service. By leveraging this gateway server, applications can interact with the S3-compatible API on top of already existing storage systems. This project enables leveraging existing infrastructure investments while seamlessly integrating with S3-compatible systems, offering increased flexibility and compatibility in managing data storage.
The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage, Versity's open source ScoutFS filesystem, Azure Blob Storage, and other S3 servers.
The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage and Versity's open source ScoutFS filesystem.
The gateway is completely stateless. Multiple Versity Gateway instances may be deployed in a cluster to increase aggregate throughput. The Versity Gateway's stateless architecture allows any request to be serviced by any gateway thereby distributing workloads and enhancing performance. Load balancers may be used to evenly distribute requests across the cluster of gateways for optimal performance.
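
Because the gateway exposes the S3 API, any stock S3 client can target it by overriding the endpoint, for example with the AWS CLI (the endpoint mirrors the test workflows above; credentials must be configured separately):

aws --endpoint-url https://127.0.0.1:7070 s3api list-buckets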


@@ -1,201 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auth
import (
"context"
"encoding/json"
"errors"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
)
func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource string, opts AccessOptions) error {
if opts.IsRoot {
return nil
}
if opts.Acc.Role == RoleAdmin {
return nil
}
// Verify destination bucket access
if err := VerifyAccess(ctx, be, opts); err != nil {
return err
}
// Verify source bucket access
srcBucket, srcObject, found := strings.Cut(copySource, "/")
if !found {
return s3err.GetAPIError(s3err.ErrInvalidCopySource)
}
// Get source bucket ACL
srcBucketACLBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: &srcBucket})
if err != nil {
return err
}
var srcBucketAcl ACL
if err := json.Unmarshal(srcBucketACLBytes, &srcBucketAcl); err != nil {
return err
}
if err := VerifyAccess(ctx, be, AccessOptions{
Acl: srcBucketAcl,
AclPermission: PermissionRead,
IsRoot: opts.IsRoot,
Acc: opts.Acc,
Bucket: srcBucket,
Object: srcObject,
Action: GetObjectAction,
}); err != nil {
return err
}
return nil
}
type AccessOptions struct {
Acl ACL
AclPermission Permission
IsRoot bool
Acc Account
Bucket string
Object string
Action Action
Readonly bool
IsBucketPublic bool
}
func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
// Skip the access check for public buckets
if opts.IsBucketPublic {
return nil
}
if opts.Readonly {
if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
}
if opts.IsRoot {
return nil
}
if opts.Acc.Role == RoleAdmin {
return nil
}
policy, policyErr := be.GetBucketPolicy(ctx, opts.Bucket)
if policyErr != nil {
if !errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
return policyErr
}
} else {
return VerifyBucketPolicy(policy, opts.Acc.Access, opts.Bucket, opts.Object, opts.Action)
}
if err := verifyACL(opts.Acl, opts.Acc.Access, opts.AclPermission); err != nil {
return err
}
return nil
}
// Detects if the action is policy related
// e.g.
// 'GetBucketPolicy', 'PutBucketPolicy'
func isPolicyAction(action Action) bool {
return action == GetBucketPolicyAction || action == PutBucketPolicyAction
}
// VerifyPublicAccess checks if the bucket is publicly accessible by ACL or Policy
func VerifyPublicAccess(ctx context.Context, be backend.Backend, action Action, permission Permission, bucket, object string) error {
// ACL disabled
policy, err := be.GetBucketPolicy(ctx, bucket)
if err != nil && !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
return err
}
if err == nil {
err = VerifyPublicBucketPolicy(policy, bucket, object, action)
if err == nil {
// if ACLs are disabled, and the bucket grants public access,
// policy actions should return 'MethodNotAllowed'
if isPolicyAction(action) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil
}
}
// if the action is not in the ACL whitelist the access is denied
_, ok := publicACLAllowedActions[action]
if !ok {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
err = VerifyPublicBucketACL(ctx, be, bucket, action, permission)
if err != nil {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
return nil
}
func MayCreateBucket(acct Account, isRoot bool) error {
if isRoot {
return nil
}
if acct.Role == RoleUser {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
return nil
}
func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {
// Owner check
if acct.Access == acl.Owner {
return nil
}
// Root user has access over almost everything
if isRoot {
return nil
}
// Admin user case
if acct.Role == RoleAdmin {
return nil
}
// Return access denied in all other cases
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
type PublicACLAllowedActions map[Action]struct{}
var publicACLAllowedActions PublicACLAllowedActions = PublicACLAllowedActions{
ListBucketAction: struct{}{},
PutObjectAction: struct{}{},
ListBucketMultipartUploadsAction: struct{}{},
DeleteObjectAction: struct{}{},
ListBucketVersionsAction: struct{}{},
GetObjectAction: struct{}{},
GetObjectAttributesAction: struct{}{},
GetObjectAclAction: struct{}{},
}


@@ -17,7 +17,6 @@ package auth
import (
"context"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"strings"
@@ -33,25 +32,13 @@ type ACL struct {
Grantees []Grantee
}
// IsPublic specifies if the acl grants public read access
func (acl *ACL) IsPublic(permission Permission) bool {
for _, grt := range acl.Grantees {
if grt.Permission == permission && grt.Type == types.TypeGroup && grt.Access == "all-users" {
return true
}
}
return false
}
type Grantee struct {
Permission Permission
Permission types.Permission
Access string
Type types.Type
}
type GetBucketAclOutput struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlPolicy"`
Owner *types.Owner
AccessControlList AccessControlList
}
@@ -72,124 +59,20 @@ type AccessControlPolicy struct {
Owner *types.Owner
}
func (acp *AccessControlPolicy) Validate() error {
if !acp.AccessControlList.isValid() {
return s3err.GetAPIError(s3err.ErrMalformedACL)
}
// The Owner can't be nil
if acp.Owner == nil {
return s3err.GetAPIError(s3err.ErrMalformedACL)
}
// The Owner ID can't be empty
if acp.Owner.ID == nil || *acp.Owner.ID == "" {
return s3err.GetAPIError(s3err.ErrMalformedACL)
}
return nil
}
type AccessControlList struct {
Grants []Grant `xml:"Grant"`
}
// Validates the AccessControlList
func (acl *AccessControlList) isValid() bool {
for _, el := range acl.Grants {
if !el.isValid() {
return false
}
}
return true
}
type Permission string
const (
PermissionFullControl Permission = "FULL_CONTROL"
PermissionWrite Permission = "WRITE"
PermissionWriteAcp Permission = "WRITE_ACP"
PermissionRead Permission = "READ"
PermissionReadAcp Permission = "READ_ACP"
)
// Check if the permission is valid
func (p Permission) isValid() bool {
return p == PermissionFullControl ||
p == PermissionRead ||
p == PermissionReadAcp ||
p == PermissionWrite ||
p == PermissionWriteAcp
}
type Grant struct {
Grantee *Grt `xml:"Grantee"`
Permission Permission `xml:"Permission"`
}
// Checks if Grant is valid
func (g *Grant) isValid() bool {
return g.Permission.isValid() && g.Grantee.isValid()
Grantee *Grt
Permission types.Permission
}
type Grt struct {
XMLNS string `xml:"xmlns:xsi,attr"`
Type types.Type `xml:"xsi:type,attr"`
ID string `xml:"ID"`
}
// Custom Unmarshalling for Grt to parse xsi:type properly
func (g *Grt) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// Iterate through the XML tokens to process the attributes
for _, attr := range start.Attr {
// Check if the attribute is xsi:type and belongs to the xsi namespace
if attr.Name.Space == "http://www.w3.org/2001/XMLSchema-instance" && attr.Name.Local == "type" {
g.Type = types.Type(attr.Value)
}
// Handle xmlns:xsi
if attr.Name.Local == "xmlns:xsi" {
g.XMLNS = attr.Value
}
}
// Decode the inner XML elements like ID
for {
t, err := d.Token()
if err != nil {
return err
}
switch se := t.(type) {
case xml.StartElement:
if se.Name.Local == "ID" {
if err := d.DecodeElement(&g.ID, &se); err != nil {
return err
}
}
case xml.EndElement:
if se.Name.Local == start.Name.Local {
return nil
}
}
}
}
// Validates Grt
func (g *Grt) isValid() bool {
// Validate the Type
// Only these 2 types are supported in the gateway
if g.Type != types.TypeCanonicalUser && g.Type != types.TypeGroup {
return false
}
// The ID prop shouldn't be empty
if g.ID == "" {
return false
}
return true
XMLNS string `xml:"xmlns:xsi,attr"`
XMLXSI types.Type `xml:"xsi:type,attr"`
Type types.Type `xml:"Type"`
ID string `xml:"ID"`
}
func ParseACL(data []byte) (ACL, error) {
@@ -204,32 +87,22 @@ func ParseACL(data []byte) (ACL, error) {
return acl, nil
}
func ParseACLOutput(data []byte, owner string) (GetBucketAclOutput, error) {
grants := []Grant{}
if len(data) == 0 {
return GetBucketAclOutput{
Owner: &types.Owner{
ID: &owner,
},
AccessControlList: AccessControlList{
Grants: grants,
},
}, nil
}
func ParseACLOutput(data []byte) (GetBucketAclOutput, error) {
var acl ACL
if err := json.Unmarshal(data, &acl); err != nil {
return GetBucketAclOutput{}, fmt.Errorf("parse acl: %w", err)
}
grants := []Grant{}
for _, elem := range acl.Grantees {
acs := elem.Access
grants = append(grants, Grant{
Grantee: &Grt{
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
ID: acs,
Type: elem.Type,
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
XMLXSI: elem.Type,
ID: acs,
Type: elem.Type,
},
Permission: elem.Permission,
})
@@ -252,7 +125,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
defaultGrantees := []Grantee{
{
Permission: PermissionFullControl,
Permission: types.PermissionFullControl,
Access: acl.Owner,
Type: types.TypeCanonicalUser,
},
@@ -263,19 +136,19 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
switch input.ACL {
case types.BucketCannedACLPublicRead:
defaultGrantees = append(defaultGrantees, Grantee{
Permission: PermissionRead,
Permission: types.PermissionRead,
Access: "all-users",
Type: types.TypeGroup,
})
case types.BucketCannedACLPublicReadWrite:
defaultGrantees = append(defaultGrantees, []Grantee{
{
Permission: PermissionRead,
Permission: types.PermissionRead,
Access: "all-users",
Type: types.TypeGroup,
},
{
Permission: PermissionWrite,
Permission: types.PermissionWrite,
Access: "all-users",
Type: types.TypeGroup,
},
@@ -292,7 +165,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range fullControlList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionFullControl,
Permission: types.PermissionFullControl,
Type: types.TypeCanonicalUser,
})
}
@@ -302,7 +175,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range readList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionRead,
Permission: types.PermissionRead,
Type: types.TypeCanonicalUser,
})
}
@@ -312,7 +185,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range readACPList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionReadAcp,
Permission: types.PermissionReadAcp,
Type: types.TypeCanonicalUser,
})
}
@@ -322,7 +195,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range writeList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionWrite,
Permission: types.PermissionWrite,
Type: types.TypeCanonicalUser,
})
}
@@ -332,7 +205,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range writeACPList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionWriteAcp,
Permission: types.PermissionWriteAcp,
Type: types.TypeCanonicalUser,
})
}
@@ -389,8 +262,8 @@ func CheckIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
result = append(result, acc)
continue
}
if errors.Is(err, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)) {
return nil, err
if err == ErrNotSupported {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
return nil, fmt.Errorf("check user account: %w", err)
}
@@ -413,7 +286,7 @@ func splitUnique(s, divider string) []string {
return result
}
func verifyACL(acl ACL, access string, permission Permission) error {
func verifyACL(acl ACL, access string, permission types.Permission) error {
grantee := Grantee{
Access: access,
Permission: permission,
@@ -421,7 +294,7 @@ func verifyACL(acl ACL, access string, permission Permission) error {
}
granteeFullCtrl := Grantee{
Access: access,
Permission: PermissionFullControl,
Permission: types.PermissionFullControl,
Type: types.TypeCanonicalUser,
}
granteeAllUsers := Grantee{
@@ -446,50 +319,118 @@ func verifyACL(acl ACL, access string, permission Permission) error {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
// Verifies if the bucket acl grants public access
func VerifyPublicBucketACL(ctx context.Context, be backend.Backend, bucket string, action Action, permission Permission) error {
aclBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{
Bucket: &bucket,
})
if err != nil {
return err
func MayCreateBucket(acct Account, isRoot bool) error {
if isRoot {
return nil
}
acl, err := ParseACL(aclBytes)
if err != nil {
return err
}
if !acl.IsPublic(permission) {
return ErrAccessDenied
if acct.Role == RoleUser {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
return nil
}
// UpdateBucketACLOwner sets default ACL with new owner and removes
// any previous bucket policy that was in place
func UpdateBucketACLOwner(ctx context.Context, be backend.Backend, bucket, newOwner string) error {
acl := ACL{
Owner: newOwner,
Grantees: []Grantee{
{
Permission: PermissionFullControl,
Access: newOwner,
Type: types.TypeCanonicalUser,
},
},
func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {
// Owner check
if acct.Access == acl.Owner {
return nil
}
result, err := json.Marshal(acl)
if err != nil {
return fmt.Errorf("marshal ACL: %w", err)
// Root user has access over almost everything
if isRoot {
return nil
}
err = be.PutBucketAcl(ctx, bucket, result)
// Admin user case
if acct.Role == RoleAdmin {
return nil
}
// Return access denied in all other cases
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
type AccessOptions struct {
Acl ACL
AclPermission types.Permission
IsRoot bool
Acc Account
Bucket string
Object string
Action Action
Readonly bool
}
func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
if opts.Readonly {
if opts.AclPermission == types.PermissionWrite || opts.AclPermission == types.PermissionWriteAcp {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
}
if opts.IsRoot {
return nil
}
if opts.Acc.Role == RoleAdmin {
return nil
}
policy, policyErr := be.GetBucketPolicy(ctx, opts.Bucket)
if policyErr != nil {
if !errors.Is(policyErr, s3err.GetAPIError(s3err.ErrNoSuchBucketPolicy)) {
return policyErr
}
} else {
return VerifyBucketPolicy(policy, opts.Acc.Access, opts.Bucket, opts.Object, opts.Action)
}
if err := verifyACL(opts.Acl, opts.Acc.Access, opts.AclPermission); err != nil {
return err
}
return nil
}
func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource string, opts AccessOptions) error {
if opts.IsRoot {
return nil
}
if opts.Acc.Role == RoleAdmin {
return nil
}
// Verify destination bucket access
if err := VerifyAccess(ctx, be, opts); err != nil {
return err
}
// Verify source bucket access
srcBucket, srcObject, found := strings.Cut(copySource, "/")
if !found {
return s3err.GetAPIError(s3err.ErrInvalidCopySource)
}
// Get source bucket ACL
srcBucketACLBytes, err := be.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: &srcBucket})
if err != nil {
return err
}
return be.DeleteBucketPolicy(ctx, bucket)
var srcBucketAcl ACL
if err := json.Unmarshal(srcBucketACLBytes, &srcBucketAcl); err != nil {
return err
}
if err := VerifyAccess(ctx, be, AccessOptions{
Acl: srcBucketAcl,
AclPermission: types.PermissionRead,
IsRoot: opts.IsRoot,
Acc: opts.Acc,
Bucket: srcBucket,
Object: srcObject,
Action: GetObjectAction,
}); err != nil {
return err
}
return nil
}


@@ -22,48 +22,20 @@ import (
"github.com/versity/versitygw/s3err"
)
var ErrAccessDenied = errors.New("access denied")
type policyErr string
func (p policyErr) Error() string {
return string(p)
}
const (
policyErrResourceMismatch = policyErr("Action does not apply to any resource(s) in statement")
policyErrInvalidResource = policyErr("Policy has invalid resource")
policyErrInvalidPrincipal = policyErr("Invalid principal in policy")
policyErrInvalidAction = policyErr("Policy has invalid action")
policyErrInvalidPolicy = policyErr("This policy contains invalid Json")
policyErrInvalidFirstChar = policyErr("Policies must be valid JSON and the first byte must be '{'")
policyErrEmptyStatement = policyErr("Could not parse the policy: Statement is empty!")
policyErrMissingStatmentField = policyErr("Missing required field Statement")
var (
errResourceMismatch = errors.New("Action does not apply to any resource(s) in statement")
//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
errInvalidResource = errors.New("Policy has invalid resource")
//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
errInvalidPrincipal = errors.New("Invalid principal in policy")
//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
errInvalidAction = errors.New("Policy has invalid action")
)
type BucketPolicy struct {
Statement []BucketPolicyItem `json:"Statement"`
}
func (bp *BucketPolicy) UnmarshalJSON(data []byte) error {
var tmp struct {
Statement *[]BucketPolicyItem `json:"Statement"`
}
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
// If Statement is nil (not present in JSON), return an error
if tmp.Statement == nil {
return policyErrMissingStatmentField
}
// Assign the parsed value to the actual struct
bp.Statement = *tmp.Statement
return nil
}
func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
for _, statement := range bp.Statement {
err := statement.Validate(bucket, iam)
@@ -76,37 +48,18 @@ func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
}
func (bp *BucketPolicy) isAllowed(principal string, action Action, resource string) bool {
var isAllowed bool
for _, statement := range bp.Statement {
if statement.findMatch(principal, action, resource) {
switch statement.Effect {
case BucketPolicyAccessTypeAllow:
isAllowed = true
return true
case BucketPolicyAccessTypeDeny:
return false
}
}
}
return isAllowed
}
// isPublic checks if the bucket policy statements contain
// an entity granting public access
func (bp *BucketPolicy) isPublic(resource string, action Action) bool {
var isAllowed bool
for _, statement := range bp.Statement {
if statement.isPublic(resource, action) {
switch statement.Effect {
case BucketPolicyAccessTypeAllow:
isAllowed = true
case BucketPolicyAccessTypeDeny:
return false
}
}
}
return isAllowed
return false
}
type BucketPolicyItem struct {
@@ -136,10 +89,10 @@ func (bpi *BucketPolicyItem) Validate(bucket string, iam IAMService) error {
break
}
if *isObjectAction && !containsObjectAction {
return policyErrResourceMismatch
return errResourceMismatch
}
if !*isObjectAction && !containsBucketAction {
return policyErrResourceMismatch
return errResourceMismatch
}
}
@@ -154,11 +107,6 @@ func (bpi *BucketPolicyItem) findMatch(principal string, action Action, resource
return false
}
// isPublic checks if the bucket policy statement grants public access
func (bpi *BucketPolicyItem) isPublic(resource string, action Action) bool {
return bpi.Principals.IsPublic() && bpi.Actions.FindMatch(action) && bpi.Resources.FindMatch(resource)
}
func getMalformedPolicyError(err error) error {
return s3err.APIError{
Code: "MalformedPolicy",
@@ -168,20 +116,14 @@ func getMalformedPolicyError(err error) error {
}
func ValidatePolicyDocument(policyBin []byte, bucket string, iam IAMService) error {
if len(policyBin) == 0 || policyBin[0] != '{' {
return getMalformedPolicyError(policyErrInvalidFirstChar)
}
var policy BucketPolicy
if err := json.Unmarshal(policyBin, &policy); err != nil {
var pe policyErr
if errors.As(err, &pe) {
return getMalformedPolicyError(err)
}
return getMalformedPolicyError(policyErrInvalidPolicy)
return getMalformedPolicyError(err)
}
if len(policy.Statement) == 0 {
return getMalformedPolicyError(policyErrEmptyStatement)
//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
return getMalformedPolicyError(errors.New("Could not parse the policy: Statement is empty!"))
}
if err := policy.Validate(bucket, iam); err != nil {
@@ -208,22 +150,3 @@ func VerifyBucketPolicy(policy []byte, access, bucket, object string, action Act
return nil
}
// Checks if the bucket policy grants public access
func VerifyPublicBucketPolicy(policy []byte, bucket, object string, action Action) error {
var bucketPolicy BucketPolicy
if err := json.Unmarshal(policy, &bucketPolicy); err != nil {
return err
}
resource := bucket
if object != "" {
resource += "/" + object
}
if !bucketPolicy.isPublic(resource, action) {
return ErrAccessDenied
}
return nil
}


@@ -58,8 +58,6 @@ const (
BypassGovernanceRetentionAction Action = "s3:BypassGovernanceRetention"
PutBucketOwnershipControlsAction Action = "s3:PutBucketOwnershipControls"
GetBucketOwnershipControlsAction Action = "s3:GetBucketOwnershipControls"
PutBucketCorsAction Action = "s3:PutBucketCORS"
GetBucketCorsAction Action = "s3:GetBucketCORS"
AllActions Action = "s3:*"
)
@@ -91,7 +89,6 @@ var supportedActionList = map[Action]struct{}{
DeleteObjectTaggingAction: {},
ListBucketVersionsAction: {},
ListBucketAction: {},
GetBucketObjectLockConfigurationAction: {},
PutBucketObjectLockConfigurationAction: {},
GetObjectLegalHoldAction: {},
PutObjectLegalHoldAction: {},
@@ -100,8 +97,6 @@ var supportedActionList = map[Action]struct{}{
BypassGovernanceRetentionAction: {},
PutBucketOwnershipControlsAction: {},
GetBucketOwnershipControlsAction: {},
PutBucketCorsAction: {},
GetBucketCorsAction: {},
AllActions: {},
}
@@ -130,7 +125,7 @@ var supportedObjectActionList = map[Action]struct{}{
// Validates Action: it should either wildcard match with supported actions list or be in it
func (a Action) IsValid() error {
if !strings.HasPrefix(string(a), "s3:") {
return policyErrInvalidAction
return errInvalidAction
}
if a == AllActions {
@@ -145,12 +140,12 @@ func (a Action) IsValid() error {
}
}
return policyErrInvalidAction
return errInvalidAction
}
_, found := supportedActionList[a]
if !found {
return policyErrInvalidAction
return errInvalidAction
}
return nil
}
@@ -196,7 +191,7 @@ func (a *Actions) UnmarshalJSON(data []byte) error {
var err error
if err = json.Unmarshal(data, &ss); err == nil {
if len(ss) == 0 {
return policyErrInvalidAction
return errInvalidAction
}
*a = make(Actions)
for _, s := range ss {
@@ -209,7 +204,7 @@ func (a *Actions) UnmarshalJSON(data []byte) error {
var s string
if err = json.Unmarshal(data, &s); err == nil {
if s == "" {
return policyErrInvalidAction
return errInvalidAction
}
*a = make(Actions)
err = a.Add(s)


@@ -36,7 +36,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
if err = json.Unmarshal(data, &ss); err == nil {
if len(ss) == 0 {
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
*p = make(Principals)
for _, s := range ss {
@@ -45,7 +45,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
return nil
} else if err = json.Unmarshal(data, &s); err == nil {
if s == "" {
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
*p = make(Principals)
p.Add(s)
@@ -53,7 +53,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
return nil
} else if err = json.Unmarshal(data, &k); err == nil {
if k.AWS == "" {
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
*p = make(Principals)
p.Add(k.AWS)
@@ -65,7 +65,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
}
if err = json.Unmarshal(data, &sk); err == nil {
if len(sk.AWS) == 0 {
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
*p = make(Principals)
for _, s := range sk.AWS {
@@ -97,7 +97,7 @@ func (p Principals) Validate(iam IAMService) error {
if len(p) == 1 {
return nil
}
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
accs, err := CheckIfAccountsExist(p.ToSlice(), iam)
@@ -105,7 +105,7 @@ func (p Principals) Validate(iam IAMService) error {
return err
}
if len(accs) > 0 {
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
return nil
@@ -121,10 +121,3 @@ func (p Principals) Contains(userAccess string) bool {
_, found := p[userAccess]
return found
}
// Bucket policy grants public access, if it contains
// a wildcard match to all the users
func (p Principals) IsPublic() bool {
_, ok := p["*"]
return ok
}


@@ -29,7 +29,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
var err error
if err = json.Unmarshal(data, &ss); err == nil {
if len(ss) == 0 {
return policyErrInvalidResource
return errInvalidResource
}
*r = make(Resources)
for _, s := range ss {
@@ -42,7 +42,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
var s string
if err = json.Unmarshal(data, &s); err == nil {
if s == "" {
return policyErrInvalidResource
return errInvalidResource
}
*r = make(Resources)
err = r.Add(s)
@@ -59,7 +59,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
func (r Resources) Add(rc string) error {
ok, pattern := isValidResource(rc)
if !ok {
return policyErrInvalidResource
return errInvalidResource
}
r[pattern] = struct{}{}
@@ -93,7 +93,7 @@ func (r Resources) ContainsBucketPattern() bool {
func (r Resources) Validate(bucket string) error {
for resource := range r {
if !strings.HasPrefix(resource, bucket) {
return policyErrInvalidResource
return errInvalidResource
}
}
@@ -102,45 +102,21 @@ func (r Resources) Validate(bucket string) error {
func (r Resources) FindMatch(resource string) bool {
for res := range r {
if r.Match(res, resource) {
return true
if strings.HasSuffix(res, "*") {
pattern := strings.TrimSuffix(res, "*")
if strings.HasPrefix(resource, pattern) {
return true
}
} else {
if res == resource {
return true
}
}
}
return false
}
// Match checks if the input string matches the given pattern with wildcards (`*`, `?`).
// - `?` matches exactly one occurrence of any character.
// - `*` matches arbitrary many (including zero) occurrences of any character.
func (r Resources) Match(pattern, input string) bool {
pIdx, sIdx := 0, 0
starIdx, matchIdx := -1, 0
for sIdx < len(input) {
if pIdx < len(pattern) && (pattern[pIdx] == '?' || pattern[pIdx] == input[sIdx]) {
sIdx++
pIdx++
} else if pIdx < len(pattern) && pattern[pIdx] == '*' {
starIdx = pIdx
matchIdx = sIdx
pIdx++
} else if starIdx != -1 {
pIdx = starIdx + 1
matchIdx++
sIdx = matchIdx
} else {
return false
}
}
for pIdx < len(pattern) && pattern[pIdx] == '*' {
pIdx++
}
return pIdx == len(pattern)
}
// Checks the resource to have arn prefix and not starting with /
func isValidResource(rc string) (isValid bool, pattern string) {
if !strings.HasPrefix(rc, ResourceArnPrefix) {


@@ -1,182 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auth
import (
"encoding/json"
"testing"
)
func TestUnmarshalJSON(t *testing.T) {
var r Resources
cases := []struct {
input string
expected int
wantErr bool
}{
{`"arn:aws:s3:::my-bucket/*"`, 1, false},
{`["arn:aws:s3:::my-bucket/*", "arn:aws:s3:::other-bucket"]`, 2, false},
{`""`, 0, true},
{`[]`, 0, true},
{`["invalid-bucket"]`, 0, true},
}
for _, tc := range cases {
r = Resources{}
err := json.Unmarshal([]byte(tc.input), &r)
if (err != nil) != tc.wantErr {
t.Errorf("Unexpected error status for input %s: %v", tc.input, err)
}
if len(r) != tc.expected {
t.Errorf("Expected %d resources, got %d", tc.expected, len(r))
}
}
}
func TestAdd(t *testing.T) {
r := Resources{}
cases := []struct {
input string
wantErr bool
}{
{"arn:aws:s3:::valid-bucket/*", false},
{"arn:aws:s3:::valid-bucket/object", false},
{"invalid-bucket/*", true},
{"/invalid-start", true},
}
for _, tc := range cases {
err := r.Add(tc.input)
if (err != nil) != tc.wantErr {
t.Errorf("Unexpected error status for input %s: %v", tc.input, err)
}
}
}
func TestContainsObjectPattern(t *testing.T) {
cases := []struct {
resources []string
expected bool
}{
{[]string{"arn:aws:s3:::my-bucket/my-object"}, true},
{[]string{"arn:aws:s3:::my-bucket/*"}, true},
{[]string{"arn:aws:s3:::my-bucket"}, false},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if r.ContainsObjectPattern() != tc.expected {
t.Errorf("Expected object pattern to be %v for %v", tc.expected, tc.resources)
}
}
}
func TestContainsBucketPattern(t *testing.T) {
cases := []struct {
resources []string
expected bool
}{
{[]string{"arn:aws:s3:::my-bucket"}, true},
{[]string{"arn:aws:s3:::my-bucket/*"}, false},
{[]string{"arn:aws:s3:::my-bucket/object"}, false},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if r.ContainsBucketPattern() != tc.expected {
t.Errorf("Expected bucket pattern to be %v for %v", tc.expected, tc.resources)
}
}
}
func TestValidate(t *testing.T) {
cases := []struct {
resources []string
bucket string
expected bool
}{
{[]string{"arn:aws:s3:::valid-bucket/*"}, "valid-bucket", true},
{[]string{"arn:aws:s3:::wrong-bucket/*"}, "valid-bucket", false},
{[]string{"arn:aws:s3:::valid-bucket/*", "arn:aws:s3:::valid-bucket/object/*"}, "valid-bucket", true},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if (r.Validate(tc.bucket) == nil) != tc.expected {
t.Errorf("Expected validation to be %v for bucket %s", tc.expected, tc.bucket)
}
}
}
func TestFindMatch(t *testing.T) {
cases := []struct {
resources []string
input string
expected bool
}{
{[]string{"arn:aws:s3:::my-bucket/*"}, "my-bucket/my-object", true},
{[]string{"arn:aws:s3:::my-bucket/object"}, "other-bucket/my-object", false},
{[]string{"arn:aws:s3:::my-bucket/object"}, "my-bucket/object", true},
{[]string{"arn:aws:s3:::my-bucket/*", "arn:aws:s3:::other-bucket/*"}, "other-bucket/something", true},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if r.FindMatch(tc.input) != tc.expected {
t.Errorf("Expected FindMatch to be %v for input %s", tc.expected, tc.input)
}
}
}
func TestMatch(t *testing.T) {
r := Resources{}
cases := []struct {
pattern string
input string
expected bool
}{
{"my-bucket/*", "my-bucket/object", true},
{"my-bucket/?bject", "my-bucket/object", true},
{"my-bucket/*", "other-bucket/object", false},
{"*", "any-bucket/object", true},
{"my-bucket/*", "my-bucket/subdir/object", true},
{"my-bucket/*", "other-bucket", false},
{"my-bucket/*/*", "my-bucket/hello", false},
{"my-bucket/*/*", "my-bucket/hello/world", true},
{"foo/???/bar", "foo/qux/bar", true},
{"foo/???/bar", "foo/quxx/bar", false},
{"foo/???/bar/*/?", "foo/qux/bar/hello/g", true},
{"foo/???/bar/*/?", "foo/qux/bar/hello/smth", false},
}
for _, tc := range cases {
if r.Match(tc.pattern, tc.input) != tc.expected {
t.Errorf("Match(%s, %s) failed, expected %v", tc.pattern, tc.input, tc.expected)
}
}
}


@@ -18,8 +18,6 @@ import (
"errors"
"fmt"
"time"
"github.com/versity/versitygw/s3err"
)
type Role string
@@ -30,19 +28,6 @@ const (
RoleUserPlus Role = "userplus"
)
func (r Role) IsValid() bool {
switch r {
case RoleAdmin:
return true
case RoleUser:
return true
case RoleUserPlus:
return true
default:
return false
}
}
// Account is a gateway IAM account
type Account struct {
Access string `json:"access"`
@@ -52,26 +37,13 @@ type Account struct {
GroupID int `json:"groupID"`
}
type ListUserAccountsResult struct {
Accounts []Account
}
// Mutable props, which could be changed when updating an IAM account
type MutableProps struct {
Secret *string `json:"secret"`
Role Role `json:"role"`
UserID *int `json:"userID"`
GroupID *int `json:"groupID"`
}
func (m MutableProps) Validate() error {
if m.Role != "" && !m.Role.IsValid() {
return s3err.GetAPIError(s3err.ErrAdminInvalidUserRole)
}
return nil
}
func updateAcc(acc *Account, props MutableProps) {
if props.Secret != nil {
acc.Secret = *props.Secret
@@ -82,9 +54,6 @@ func updateAcc(acc *Account, props MutableProps) {
if props.UserID != nil {
acc.UserID = *props.UserID
}
if props.Role != "" {
acc.Role = props.Role
}
}
// IAMService is the interface for all IAM service implementations
@@ -121,7 +90,6 @@ type Opts struct {
LDAPGroupIdAtr string
VaultEndpointURL string
VaultSecretStoragePath string
VaultAuthMethod string
VaultMountPath string
VaultRootToken string
VaultRoleId string
@@ -139,12 +107,6 @@ type Opts struct {
CacheDisable bool
CacheTTL int
CachePrune int
IpaHost string
IpaVaultName string
IpaUser string
IpaPassword string
IpaInsecure bool
IpaDebug bool
}
func New(o *Opts) (IAMService, error) {
@@ -167,16 +129,13 @@ func New(o *Opts) (IAMService, error) {
o.S3Endpoint, o.S3Bucket)
case o.VaultEndpointURL != "":
svc, err = NewVaultIAMService(o.RootAccount, o.VaultEndpointURL, o.VaultSecretStoragePath,
o.VaultAuthMethod, o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
case o.IpaHost != "":
svc, err = NewIpaIAMService(o.RootAccount, o.IpaHost, o.IpaVaultName, o.IpaUser, o.IpaPassword, o.IpaInsecure, o.IpaDebug)
fmt.Printf("initializing IPA IAM with %q\n", o.IpaHost)
default:
// if no iam options selected, default to the single user mode
fmt.Println("No IAM service configured, enabling single account mode")
return NewIAMServiceSingle(o.RootAccount), nil
return IAMServiceSingle{}, nil
}
if err != nil {


@@ -290,49 +290,93 @@ func (s *IAMServiceInternal) readIAMData() ([]byte, error) {
func (s *IAMServiceInternal) storeIAM(update UpdateAcctFunc) error {
// We are going to be racing with other running gateways without any
// coordination. So the strategy here is to read the current file data,
// update the data, write back out to a temp file, then rename the
// temp file to the original file. This rename will replace the
// original file with the new file. This is atomic and should always
// allow for a consistent view of the data. There is a small
// window where the file could be read and then updated by
// another process. In this case any updates the other process did
// will be lost. This is a limitation of the internal IAM service.
// This should be rare, and even when it does happen should result
// in a valid IAM file, just without the other process's updates.
// coordination. So the strategy here is to read the current file data.
// If the file doesn't exist, then we assume someone else is currently
// updating the file. So we just need to keep retrying. We also need
// to make sure the data is consistent within a single update. So racing
// writes to a file would possibly leave this in some invalid state.
// We can get atomic updates with rename. If we read the data, update
// the data, write to a temp file, then rename the tempfile back to the
// data file. This should always result in a complete data image.
iamFname := filepath.Join(s.dir, iamFile)
backupFname := filepath.Join(s.dir, iamBackupFile)
// There is at least one unsolved failure mode here.
// If a gateway removes the data file and then crashes, all other
// gateways will retry forever thinking that the original will eventually
// write the file.
b, err := os.ReadFile(iamFname)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("read iam file: %w", err)
}
retries := 0
fname := filepath.Join(s.dir, iamFile)
// save copy of data
datacopy := make([]byte, len(b))
copy(datacopy, b)
for {
b, err := os.ReadFile(fname)
if errors.Is(err, fs.ErrNotExist) {
// racing with someone else updating
// keep retrying after backoff
retries++
if retries < maxretry {
time.Sleep(backoff)
continue
}
// make a backup copy in case something happens
err = s.writeUsingTempFile(b, backupFname)
if err != nil {
return fmt.Errorf("write backup iam file: %w", err)
}
// we have been unsuccessful trying to read the iam file
// so this must be the case where something happened and
// the file did not get updated successfully, and probably
// isn't going to be. The recovery procedure would be to
// copy the backup file into place of the original.
return fmt.Errorf("no iam file, needs backup recovery")
}
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("read iam file: %w", err)
}
b, err = update(b)
if err != nil {
return fmt.Errorf("update iam data: %w", err)
}
// reset retries on successful read
retries = 0
err = s.writeUsingTempFile(b, iamFname)
if err != nil {
return fmt.Errorf("write iam file: %w", err)
err = os.Remove(fname)
if errors.Is(err, fs.ErrNotExist) {
// racing with someone else updating
// keep retrying after backoff
time.Sleep(backoff)
continue
}
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove old iam file: %w", err)
}
// save copy of data
datacopy := make([]byte, len(b))
copy(datacopy, b)
// make a backup copy in case we crash before update
// this is after remove, so there is a small window something
// can go wrong, but the remove should barrier other gateways
// from trying to write backup at the same time. Only one
// gateway will successfully remove the file.
os.WriteFile(filepath.Join(s.dir, iamBackupFile), b, iamMode)
b, err = update(b)
if err != nil {
// update failed, try to write old data back out
os.WriteFile(fname, datacopy, iamMode)
return fmt.Errorf("update iam data: %w", err)
}
err = s.writeTempFile(b)
if err != nil {
// update failed, try to write old data back out
os.WriteFile(fname, datacopy, iamMode)
return err
}
break
}
return nil
}
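The write-to-temp-then-rename strategy described in the comments above is a general pattern; a minimal self-contained sketch follows (names here are illustrative, not the gateway's API):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWrite writes data to a temp file in dir and renames it over name.
// rename(2) is atomic on POSIX filesystems, so concurrent readers always
// observe either the old file or the complete new file, never a partial one.
func atomicWrite(dir, name string, data []byte, mode os.FileMode) error {
	f, err := os.CreateTemp(dir, name+".tmp")
	if err != nil {
		return fmt.Errorf("create temp file: %w", err)
	}
	defer os.Remove(f.Name()) // harmless no-op once the rename succeeds
	if _, err := f.Write(data); err != nil {
		f.Close()
		return fmt.Errorf("write temp file: %w", err)
	}
	if err := f.Chmod(mode); err != nil {
		f.Close()
		return fmt.Errorf("chmod temp file: %w", err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("close temp file: %w", err)
	}
	return os.Rename(f.Name(), filepath.Join(dir, name))
}

func main() {
	if err := atomicWrite(os.TempDir(), "iam.json", []byte(`{}`), 0o600); err != nil {
		fmt.Println("atomic write failed:", err)
	}
}
```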
func (s *IAMServiceInternal) writeUsingTempFile(b []byte, fname string) error {
func (s *IAMServiceInternal) writeTempFile(b []byte) error {
fname := filepath.Join(s.dir, iamFile)
f, err := os.CreateTemp(s.dir, iamFile)
if err != nil {
return fmt.Errorf("create temp file: %w", err)
@@ -340,7 +384,6 @@ func (s *IAMServiceInternal) writeUsingTempFile(b []byte, fname string) error {
defer os.Remove(f.Name())
_, err = f.Write(b)
f.Close()
if err != nil {
return fmt.Errorf("write temp file: %w", err)
}


@@ -1,503 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auth
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"net/http/cookiejar"
"net/url"
"slices"
"strconv"
"strings"
"syscall"
"time"
)
const IpaVersion = "2.254"
type IpaIAMService struct {
client http.Client
id int
version string
host string
vaultName string
username string
password string
kraTransportKey *rsa.PublicKey
debug bool
rootAcc Account
}
var _ IAMService = &IpaIAMService{}
func NewIpaIAMService(rootAcc Account, host, vaultName, username, password string, isInsecure, debug bool) (*IpaIAMService, error) {
ipa := IpaIAMService{
id: 0,
version: IpaVersion,
host: host,
vaultName: vaultName,
username: username,
password: password,
debug: debug,
rootAcc: rootAcc,
}
jar, err := cookiejar.New(nil)
if err != nil {
// this should never happen
return nil, fmt.Errorf("cookie jar creation: %w", err)
}
mTLSConfig := &tls.Config{InsecureSkipVerify: isInsecure}
tr := &http.Transport{
TLSClientConfig: mTLSConfig,
Proxy: http.ProxyFromEnvironment,
}
ipa.client = http.Client{Jar: jar, Transport: tr}
err = ipa.login()
if err != nil {
return nil, fmt.Errorf("ipa login failed: %w", err)
}
req, err := ipa.newRequest("vaultconfig_show/1", []string{}, map[string]any{"all": true})
if err != nil {
return nil, fmt.Errorf("ipa vaultconfig_show: %w", err)
}
vaultConfig := struct {
Kra_Server_Server []string
Transport_Cert Base64EncodedWrapped
Wrapping_default_algorithm string
Wrapping_supported_algorithms []string
}{}
err = ipa.rpc(req, &vaultConfig)
if err != nil {
return nil, fmt.Errorf("ipa vault config: %w", err)
}
cert, err := x509.ParseCertificate(vaultConfig.Transport_Cert)
if err != nil {
return nil, fmt.Errorf("ipa cannot parse vault certificate: %w", err)
}
ipa.kraTransportKey = cert.PublicKey.(*rsa.PublicKey)
isSupported := slices.Contains(vaultConfig.Wrapping_supported_algorithms, "aes-128-cbc")
if !isSupported {
return nil,
fmt.Errorf("IPA vault does not support aes-128-cbc. Only %v supported",
vaultConfig.Wrapping_supported_algorithms)
}
return &ipa, nil
}
func (ipa *IpaIAMService) CreateAccount(account Account) error {
return fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
if access == ipa.rootAcc.Access {
return ipa.rootAcc, nil
}
req, err := ipa.newRequest("user_show/1", []string{access}, map[string]any{})
if err != nil {
return Account{}, fmt.Errorf("ipa user_show: %w", err)
}
userResult := struct {
Gidnumber []string
Uidnumber []string
}{}
err = ipa.rpc(req, &userResult)
if err != nil {
return Account{}, err
}
uid, err := strconv.Atoi(userResult.Uidnumber[0])
if err != nil {
return Account{}, fmt.Errorf("ipa uid invalid: %w", err)
}
gid, err := strconv.Atoi(userResult.Gidnumber[0])
if err != nil {
return Account{}, fmt.Errorf("ipa gid invalid: %w", err)
}
account := Account{
Access: access,
Role: RoleUser,
UserID: uid,
GroupID: gid,
}
session_key := make([]byte, 16)
_, err = rand.Read(session_key)
if err != nil {
return account, fmt.Errorf("ipa cannot generate session key: %w", err)
}
encryptedKey, err := rsa.EncryptPKCS1v15(rand.Reader, ipa.kraTransportKey, session_key)
if err != nil {
return account, fmt.Errorf("ipa vault secret retrieval: %w", err)
}
req, err = ipa.newRequest("vault_retrieve_internal/1", []string{ipa.vaultName},
map[string]any{"username": access,
"session_key": Base64EncodedWrapped(encryptedKey),
"wrapping_algo": "aes-128-cbc"})
if err != nil {
return Account{}, fmt.Errorf("ipa vault_retrieve_internal: %w", err)
}
data := struct {
Vault_data Base64EncodedWrapped
Nonce Base64EncodedWrapped
}{}
err = ipa.rpc(req, &data)
if err != nil {
return account, err
}
aes, err := aes.NewCipher(session_key)
if err != nil {
return account, fmt.Errorf("ipa cannot create AES cipher: %w", err)
}
cbc := cipher.NewCBCDecrypter(aes, data.Nonce)
cbc.CryptBlocks(data.Vault_data, data.Vault_data)
secretUnpaddedJson, err := pkcs7Unpad(data.Vault_data, 16)
if err != nil {
return account, fmt.Errorf("ipa cannot unpad decrypted result: %w", err)
}
secret := struct {
Data Base64Encoded
}{}
json.Unmarshal(secretUnpaddedJson, &secret)
account.Secret = string(secret.Data)
return account, nil
}
func (ipa *IpaIAMService) UpdateUserAccount(access string, props MutableProps) error {
return fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) DeleteUserAccount(access string) error {
return fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) ListUserAccounts() ([]Account, error) {
return []Account{}, fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) Shutdown() error {
return nil
}
// Implementation
const requestRetries = 3
func (ipa *IpaIAMService) login() error {
form := url.Values{}
form.Set("user", ipa.username)
form.Set("password", ipa.password)
req, err := http.NewRequest(
"POST",
fmt.Sprintf("%s/ipa/session/login_password", ipa.host),
strings.NewReader(form.Encode()))
if err != nil {
return err
}
req.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
var resp *http.Response
for i := range requestRetries {
resp, err = ipa.client.Do(req)
if err == nil {
break
}
// Check for transient network errors
if isRetryable(err) {
time.Sleep(time.Second * time.Duration(i+1))
continue
}
return fmt.Errorf("login POST to %s failed: %w", req.URL, err)
}
if err != nil {
return fmt.Errorf("login POST to %s failed after retries: %w",
req.URL, err)
}
defer resp.Body.Close()
if resp.StatusCode == 401 {
return errors.New("cannot login to FreeIPA: invalid credentials")
}
if resp.StatusCode != 200 {
return fmt.Errorf("cannot login to FreeIPA: status code %d",
resp.StatusCode)
}
return nil
}
type rpcRequest = string
type rpcResponse struct {
Result json.RawMessage
Principal string
Id int
Version string
}
func (p rpcResponse) String() string {
return string(p.Result)
}
var errRpc = errors.New("IPA RPC error")
func (ipa *IpaIAMService) rpc(req rpcRequest, value any) error {
err := ipa.login()
if err != nil {
return err
}
res, err := ipa.rpcInternal(req)
if err != nil {
return err
}
return json.Unmarshal(res.Result, value)
}
func (ipa *IpaIAMService) rpcInternal(req rpcRequest) (rpcResponse, error) {
httpReq, err := http.NewRequest("POST",
fmt.Sprintf("%s/ipa/session/json", ipa.host),
strings.NewReader(req))
if err != nil {
return rpcResponse{}, err
}
ipa.log(fmt.Sprintf("%v", req))
httpReq.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
httpReq.Header.Set("Content-Type", "application/json")
var httpResp *http.Response
for i := range requestRetries {
httpResp, err = ipa.client.Do(httpReq)
if err == nil {
break
}
// Check for transient network errors
if isRetryable(err) {
time.Sleep(time.Second * time.Duration(i+1))
continue
}
return rpcResponse{}, fmt.Errorf("ipa request to %s failed: %w",
httpReq.URL, err)
}
if err != nil {
return rpcResponse{},
fmt.Errorf("ipa request to %s failed after retries: %w",
httpReq.URL, err)
}
defer httpResp.Body.Close()
bytes, err := io.ReadAll(httpResp.Body)
ipa.log(string(bytes))
if err != nil {
return rpcResponse{}, err
}
result := struct {
Result struct {
Json json.RawMessage `json:"result"`
Value string `json:"value"`
Summary any `json:"summary"`
} `json:"result"`
Error json.RawMessage `json:"error"`
Id int `json:"id"`
Principal string `json:"principal"`
Version string `json:"version"`
}{}
err = json.Unmarshal(bytes, &result)
if err != nil {
return rpcResponse{}, err
}
if string(result.Error) != "null" {
return rpcResponse{}, fmt.Errorf("%s: %w", string(result.Error), errRpc)
}
return rpcResponse{
Result: result.Result.Json,
Principal: result.Principal,
Id: result.Id,
Version: result.Version,
}, nil
}
func isRetryable(err error) bool {
if err == nil {
return false
}
if errors.Is(err, io.EOF) {
return true
}
if err, ok := err.(net.Error); ok && err.Timeout() {
return true
}
if opErr, ok := err.(*net.OpError); ok {
if sysErr, ok := opErr.Err.(*syscall.Errno); ok {
if *sysErr == syscall.ECONNRESET {
return true
}
}
}
return false
}
func (ipa *IpaIAMService) newRequest(method string, args []string, dict map[string]any) (rpcRequest, error) {
id := ipa.id
ipa.id++
dict["version"] = ipa.version
jmethod, errMethod := json.Marshal(method)
jargs, errArgs := json.Marshal(args)
jdict, errDict := json.Marshal(dict)
err := errors.Join(errMethod, errArgs, errDict)
if err != nil {
return "", fmt.Errorf("ipa request invalid: %w", err)
}
request := map[string]interface{}{
"id": id,
"method": json.RawMessage(jmethod),
"params": []json.RawMessage{json.RawMessage(jargs), json.RawMessage(jdict)},
}
requestJSON, err := json.Marshal(request)
if err != nil {
return "", fmt.Errorf("failed to marshal request: %w", err)
}
return string(requestJSON), nil
}
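For orientation, the request body this builder produces for a `user_show/1` call looks roughly like the following (values illustrative):

```json
{"id": 0, "method": "user_show/1", "params": [["alice"], {"version": "2.254"}]}
```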
// pkcs7Unpad validates and unpads data from the given bytes slice.
// The returned value will be 1 to n bytes smaller depending on the
// amount of padding, where n is the block size.
func pkcs7Unpad(b []byte, blocksize int) ([]byte, error) {
if blocksize <= 0 {
return nil, errors.New("invalid blocksize")
}
if len(b) == 0 {
return nil, errors.New("invalid PKCS7 data (empty or not padded)")
}
if len(b)%blocksize != 0 {
return nil, errors.New("invalid padding on input")
}
c := b[len(b)-1]
n := int(c)
if n == 0 || n > len(b) {
return nil, errors.New("invalid padding on input")
}
for i := 0; i < n; i++ {
if b[len(b)-n+i] != c {
return nil, errors.New("invalid padding on input")
}
}
return b[:len(b)-n], nil
}
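A quick illustration of the unpadding rule, assuming pkcs7Unpad as defined above (input bytes are hypothetical):

```go
block := []byte("YELLOW SUBMARINE") // exactly one 16-byte block
pad := make([]byte, 16)
for i := range pad {
	pad[i] = 16 // PKCS#7: a full extra block of 0x10 bytes
}
plain, err := pkcs7Unpad(append(block, pad...), 16)
// plain == []byte("YELLOW SUBMARINE"), err == nil
```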
/*
e.g.
"value" {
"__base64__": "aGVsbG93b3JsZAo="
}
*/
type Base64EncodedWrapped []byte
func (b *Base64EncodedWrapped) UnmarshalJSON(data []byte) error {
intermediate := struct {
Base64 string `json:"__base64__"`
}{}
err := json.Unmarshal(data, &intermediate)
if err != nil {
return err
}
*b, err = base64.StdEncoding.DecodeString(intermediate.Base64)
return err
}
func (b *Base64EncodedWrapped) MarshalJSON() ([]byte, error) {
intermediate := struct {
Base64 string `json:"__base64__"`
}{Base64: base64.StdEncoding.EncodeToString(*b)}
return json.Marshal(intermediate)
}
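A round trip through this wrapper, assuming the type above is in scope (values illustrative):

```go
var v Base64EncodedWrapped
_ = json.Unmarshal([]byte(`{"__base64__":"aGVsbG93b3JsZAo="}`), &v)
// v == []byte("helloworld\n")
out, _ := json.Marshal(&v)
// string(out) == `{"__base64__":"aGVsbG93b3JsZAo="}`
```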
/*
e.g.
"value": "aGVsbG93b3JsZAo="
*/
type Base64Encoded []byte
func (b *Base64Encoded) UnmarshalJSON(data []byte) error {
var intermediate string
err := json.Unmarshal(data, &intermediate)
if err != nil {
return err
}
*b, err = base64.StdEncoding.DecodeString(intermediate)
return err
}
func (ipa *IpaIAMService) log(msg string) {
if ipa.debug {
log.Println(msg)
}
}


@@ -111,13 +111,11 @@ func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
entry := result.Entries[0]
groupId, err := strconv.Atoi(entry.GetAttributeValue(ld.groupIdAtr))
if err != nil {
return Account{}, fmt.Errorf("invalid entry value for group-id %q: %w",
entry.GetAttributeValue(ld.groupIdAtr), err)
return Account{}, fmt.Errorf("invalid entry value for group-id: %v", entry.GetAttributeValue(ld.groupIdAtr))
}
userId, err := strconv.Atoi(entry.GetAttributeValue(ld.userIdAtr))
if err != nil {
return Account{}, fmt.Errorf("invalid entry value for user-id %q: %w",
entry.GetAttributeValue(ld.userIdAtr), err)
return Account{}, fmt.Errorf("invalid entry value for group-id: %v", entry.GetAttributeValue(ld.userIdAtr))
}
return Account{
Access: entry.GetAttributeValue(ld.accessAtr),
@@ -139,9 +137,6 @@ func (ld *LdapIAMService) UpdateUserAccount(access string, props MutableProps) e
if props.UserID != nil {
req.Replace(ld.userIdAtr, []string{fmt.Sprint(*props.UserID)})
}
if props.Role != "" {
req.Replace(ld.roleAtr, []string{string(props.Role)})
}
err := ld.conn.Modify(req)
//TODO: Handle the non-existing user case
@@ -188,13 +183,11 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
for _, el := range resp.Entries {
groupId, err := strconv.Atoi(el.GetAttributeValue(ld.groupIdAtr))
if err != nil {
return nil, fmt.Errorf("invalid entry value for group-id %q: %w",
el.GetAttributeValue(ld.groupIdAtr), err)
return nil, fmt.Errorf("invalid entry value for group-id: %v", el.GetAttributeValue(ld.groupIdAtr))
}
userId, err := strconv.Atoi(el.GetAttributeValue(ld.userIdAtr))
if err != nil {
return nil, fmt.Errorf("invalid entry value for user-id %q: %w",
el.GetAttributeValue(ld.userIdAtr), err)
return nil, fmt.Errorf("invalid entry value for group-id: %v", el.GetAttributeValue(ld.userIdAtr))
}
result = append(result, Account{
Access: el.GetAttributeValue(ld.accessAtr),


@@ -15,49 +15,39 @@
package auth
import (
"github.com/versity/versitygw/s3err"
"errors"
)
// IAMServiceSingle manages the single tenant (root-only) IAM service
type IAMServiceSingle struct {
root Account
}
type IAMServiceSingle struct{}
var _ IAMService = &IAMServiceSingle{}
func NewIAMServiceSingle(r Account) IAMService {
return &IAMServiceSingle{
root: r,
}
}
var ErrNotSupported = errors.New("method is not supported")
// CreateAccount not valid in single tenant mode
func (IAMServiceSingle) CreateAccount(account Account) error {
return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return ErrNotSupported
}
// GetUserAccount returns root account, if the root access key
// is provided and "ErrAdminUserNotFound" otherwise
func (s IAMServiceSingle) GetUserAccount(access string) (Account, error) {
if access == s.root.Access {
return s.root, nil
}
return Account{}, s3err.GetAPIError(s3err.ErrAdminUserNotFound)
// GetUserAccount no accounts in single tenant mode
func (IAMServiceSingle) GetUserAccount(access string) (Account, error) {
return Account{}, ErrNoSuchUser
}
// UpdateUserAccount no accounts in single tenant mode
func (IAMServiceSingle) UpdateUserAccount(access string, props MutableProps) error {
return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return ErrNotSupported
}
// DeleteUserAccount no accounts in single tenant mode
func (IAMServiceSingle) DeleteUserAccount(access string) error {
return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return ErrNotSupported
}
// ListUserAccounts no accounts in single tenant mode
func (IAMServiceSingle) ListUserAccounts() ([]Account, error) {
return []Account{}, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return []Account{}, nil
}
// Shutdown graceful termination of service


@@ -19,7 +19,6 @@ import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
@@ -27,25 +26,20 @@ import (
"github.com/hashicorp/vault-client-go/schema"
)
const requestTimeout = 10 * time.Second
type VaultIAMService struct {
client *vault.Client
authReqOpts []vault.RequestOption
kvReqOpts []vault.RequestOption
reqOpts []vault.RequestOption
secretStoragePath string
rootAcc Account
creds schema.AppRoleLoginRequest
}
var _ IAMService = &VaultIAMService{}
func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
authMethod, mountPath, rootToken, roleID, roleSecret, serverCert,
clientCert, clientCertKey string) (IAMService, error) {
func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath, mountPath, rootToken, roleID, roleSecret, serverCert, clientCert, clientCertKey string) (IAMService, error) {
opts := []vault.ClientOption{
vault.WithAddress(endpoint),
vault.WithRequestTimeout(requestTimeout),
// set request timeout to 10 secs
vault.WithRequestTimeout(10 * time.Second),
}
if serverCert != "" {
tls := vault.TLSConfiguration{}
@@ -53,7 +47,7 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
tls.ServerCertificate.FromBytes = []byte(serverCert)
if clientCert != "" {
if clientCertKey == "" {
return nil, fmt.Errorf("client certificate and client certificate key should both be specified")
return nil, fmt.Errorf("client certificate and client certificate should both be specified")
}
tls.ClientCertificate.FromBytes = []byte(clientCert)
@@ -68,21 +62,10 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
return nil, fmt.Errorf("init vault client: %w", err)
}
authReqOpts := []vault.RequestOption{}
// if auth method path is not specified, it defaults to "approle"
if authMethod != "" {
authReqOpts = append(authReqOpts, vault.WithMountPath(authMethod))
}
kvReqOpts := []vault.RequestOption{}
// if mount path is not specified, it defaults to "kv-v2"
reqOpts := []vault.RequestOption{}
// if mount path is not specified, it defaults to "approle"
if mountPath != "" {
kvReqOpts = append(kvReqOpts, vault.WithMountPath(mountPath))
}
creds := schema.AppRoleLoginRequest{
RoleId: roleID,
SecretId: roleSecret,
reqOpts = append(reqOpts, vault.WithMountPath(mountPath))
}
// Authentication
@@ -97,8 +80,12 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
return nil, fmt.Errorf("role id and role secret must both be specified")
}
resp, err := client.Auth.AppRoleLogin(context.Background(),
creds, authReqOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := client.Auth.AppRoleLogin(ctx, schema.AppRoleLoginRequest{
RoleId: roleID,
SecretId: roleSecret,
}, reqOpts...)
cancel()
if err != nil {
return nil, fmt.Errorf("approle authentication failure: %w", err)
}
@@ -112,77 +99,33 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath,
return &VaultIAMService{
client: client,
authReqOpts: authReqOpts,
kvReqOpts: kvReqOpts,
reqOpts: reqOpts,
secretStoragePath: secretStoragePath,
rootAcc: rootAcc,
creds: creds,
}, nil
}
func (vt *VaultIAMService) reAuthIfNeeded(err error) error {
if err == nil {
return nil
}
// Vault returns 403 for expired/revoked tokens
// pass all other errors back unchanged
if !vault.IsErrorStatus(err, http.StatusForbidden) {
return err
}
resp, authErr := vt.client.Auth.AppRoleLogin(context.Background(),
vt.creds, vt.authReqOpts...)
if authErr != nil {
return fmt.Errorf("vault re-authentication failure: %w", authErr)
}
if err := vt.client.SetToken(resp.Auth.ClientToken); err != nil {
return fmt.Errorf("vault re-authentication set token failure: %w", err)
}
return nil
}
func (vt *VaultIAMService) CreateAccount(account Account) error {
if vt.rootAcc.Access == account.Access {
return ErrUserExists
}
_, err := vt.client.Secrets.KvV2Write(context.Background(),
vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
Data: map[string]any{
account.Access: account,
},
Options: map[string]any{
"cas": 0,
},
}, vt.kvReqOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
_, err := vt.client.Secrets.KvV2Write(ctx, vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
Data: map[string]any{
account.Access: account,
},
Options: map[string]interface{}{
"cas": 0,
},
}, vt.reqOpts...)
cancel()
if err != nil {
if strings.Contains(err.Error(), "check-and-set") {
return ErrUserExists
}
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
return reauthErr
}
// retry once after re-auth
_, err = vt.client.Secrets.KvV2Write(context.Background(),
vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
Data: map[string]any{
account.Access: account,
},
Options: map[string]any{
"cas": 0,
},
}, vt.kvReqOpts...)
if err != nil {
if strings.Contains(err.Error(), "check-and-set") {
return ErrUserExists
}
return err
}
return nil
return err
}
return nil
}
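The write path above inlines a retry-once-after-re-auth pattern; a hypothetical helper (withReauth is not part of the gateway's code) would capture the same flow:

```go
// withReauth runs op once, re-authenticates on a 403-style token error,
// and then retries op a single time with the refreshed token.
func (vt *VaultIAMService) withReauth(op func() error) error {
	err := op()
	if err == nil {
		return nil
	}
	if reauthErr := vt.reAuthIfNeeded(err); reauthErr != nil {
		return reauthErr
	}
	return op()
}
```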
@@ -190,84 +133,66 @@ func (vt *VaultIAMService) GetUserAccount(access string) (Account, error) {
if vt.rootAcc.Access == access {
return vt.rootAcc, nil
}
resp, err := vt.client.Secrets.KvV2Read(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := vt.client.Secrets.KvV2Read(ctx, vt.secretStoragePath+"/"+access, vt.reqOpts...)
cancel()
if err != nil {
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
return Account{}, reauthErr
}
// retry once after re-auth
resp, err = vt.client.Secrets.KvV2Read(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
if err != nil {
return Account{}, err
}
return Account{}, err
}
acc, err := parseVaultUserAccount(resp.Data.Data, access)
if err != nil {
return Account{}, err
}
return acc, nil
}
func (vt *VaultIAMService) UpdateUserAccount(access string, props MutableProps) error {
//TODO: We need something like a transaction here?
acc, err := vt.GetUserAccount(access)
if err != nil {
return err
}
updateAcc(&acc, props)
err = vt.DeleteUserAccount(access)
if err != nil {
return err
}
err = vt.CreateAccount(acc)
if err != nil {
return err
}
return nil
}
func (vt *VaultIAMService) DeleteUserAccount(access string) error {
_, err := vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
_, err := vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(ctx, vt.secretStoragePath+"/"+access, vt.reqOpts...)
cancel()
if err != nil {
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
return reauthErr
}
// retry once after re-auth
_, err = vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(context.Background(),
vt.secretStoragePath+"/"+access, vt.kvReqOpts...)
if err != nil {
return err
}
return err
}
return nil
}
func (vt *VaultIAMService) ListUserAccounts() ([]Account, error) {
resp, err := vt.client.Secrets.KvV2List(context.Background(),
vt.secretStoragePath, vt.kvReqOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := vt.client.Secrets.KvV2List(ctx, vt.secretStoragePath, vt.reqOpts...)
cancel()
if err != nil {
reauthErr := vt.reAuthIfNeeded(err)
if reauthErr != nil {
if vault.IsErrorStatus(err, http.StatusNotFound) {
return []Account{}, nil
}
return nil, reauthErr
}
// retry once after re-auth
resp, err = vt.client.Secrets.KvV2List(context.Background(),
vt.secretStoragePath, vt.kvReqOpts...)
if err != nil {
if vault.IsErrorStatus(err, http.StatusNotFound) {
return []Account{}, nil
}
return nil, err
if vault.IsErrorStatus(err, 404) {
return []Account{}, nil
}
return nil, err
}
accs := []Account{}
for _, acss := range resp.Data.Keys {
acc, err := vt.GetUserAccount(acss)
if err != nil {
@@ -275,6 +200,7 @@ func (vt *VaultIAMService) ListUserAccounts() ([]Account, error) {
}
accs = append(accs, acc)
}
return accs, nil
}
@@ -285,8 +211,8 @@ func (vt *VaultIAMService) Shutdown() error {
var errInvalidUser error = errors.New("invalid user account entry in secrets engine")
func parseVaultUserAccount(data map[string]any, access string) (acc Account, err error) {
usrAcc, ok := data[access].(map[string]any)
func parseVaultUserAccount(data map[string]interface{}, access string) (acc Account, err error) {
usrAcc, ok := data[access].(map[string]interface{})
if !ok {
return acc, errInvalidUser
}


@@ -25,7 +25,6 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
type BucketLockConfig struct {
@@ -93,12 +92,12 @@ func ParseBucketLockConfigurationOutput(input []byte) (*types.ObjectLockConfigur
}
func ParseObjectLockRetentionInput(input []byte) ([]byte, error) {
var retention s3response.PutObjectRetentionInput
var retention types.ObjectLockRetention
if err := xml.Unmarshal(input, &retention); err != nil {
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if retention.RetainUntilDate.Before(time.Now()) {
if retention.RetainUntilDate == nil || retention.RetainUntilDate.Before(time.Now()) {
return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
}
switch retention.Mode {
@@ -120,23 +119,23 @@ func ParseObjectLockRetentionOutput(input []byte) (*types.ObjectLockRetention, e
return &retention, nil
}
func ParseObjectLegalHoldOutput(status *bool) *s3response.GetObjectLegalHoldResult {
func ParseObjectLegalHoldOutput(status *bool) *types.ObjectLockLegalHold {
if status == nil {
return nil
}
if *status {
return &s3response.GetObjectLegalHoldResult{
return &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOn,
}
}
return &s3response.GetObjectLegalHoldResult{
return &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOff,
}
}
func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []types.ObjectIdentifier, bypass, isBucketPublic bool, be backend.Backend) error {
func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects []string, bypass bool, be backend.Backend) error {
data, err := be.GetObjectLockConfiguration(ctx, bucket)
if err != nil {
if errors.Is(err, s3err.GetAPIError(s3err.ErrObjectLockConfigurationNotFound)) {
@@ -172,15 +171,8 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
}
for _, obj := range objects {
var key, versionId string
if obj.Key != nil {
key = *obj.Key
}
if obj.VersionId != nil {
versionId = *obj.VersionId
}
checkRetention := true
retentionData, err := be.GetObjectRetention(ctx, bucket, key, versionId)
retentionData, err := be.GetObjectRetention(ctx, bucket, obj, "")
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
continue
}
@@ -211,11 +203,7 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
if err != nil {
return err
}
if isBucketPublic {
err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
} else {
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
}
err = VerifyBucketPolicy(policy, userAccess, bucket, obj, BypassGovernanceRetentionAction)
if err != nil {
return s3err.GetAPIError(s3err.ErrObjectLocked)
}
@@ -229,11 +217,8 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
checkLegalHold := true
status, err := be.GetObjectLegalHold(ctx, bucket, key, versionId)
status, err := be.GetObjectLegalHold(ctx, bucket, obj, "")
if err != nil {
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) {
continue
}
if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchObjectLockConfiguration)) {
checkLegalHold = false
} else {
@@ -258,11 +243,7 @@ func CheckObjectAccess(ctx context.Context, bucket, userAccess string, objects [
if err != nil {
return err
}
if isBucketPublic {
err = VerifyPublicBucketPolicy(policy, bucket, key, BypassGovernanceRetentionAction)
} else {
err = VerifyBucketPolicy(policy, userAccess, bucket, key, BypassGovernanceRetentionAction)
}
err = VerifyBucketPolicy(policy, userAccess, bucket, obj, BypassGovernanceRetentionAction)
if err != nil {
return s3err.GetAPIError(s3err.ErrObjectLocked)
}

File diff suppressed because it is too large


@@ -40,7 +40,7 @@ func azErrToS3err(azErr *azcore.ResponseError) s3err.APIError {
case "BlobNotFound":
return s3err.GetAPIError(s3err.ErrNoSuchKey)
case "TagsTooLarge":
return s3err.GetAPIError(s3err.ErrInvalidTagValue)
return s3err.GetAPIError(s3err.ErrInvalidTag)
case "Requested Range Not Satisfiable":
return s3err.GetAPIError(s3err.ErrInvalidRange)
}


@@ -32,40 +32,37 @@ type Backend interface {
Shutdown()
// bucket operations
ListBuckets(context.Context, s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error)
ListBuckets(_ context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error)
HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
GetBucketAcl(context.Context, *s3.GetBucketAclInput) ([]byte, error)
CreateBucket(_ context.Context, _ *s3.CreateBucketInput, defaultACL []byte) error
PutBucketAcl(_ context.Context, bucket string, data []byte) error
DeleteBucket(_ context.Context, bucket string) error
DeleteBucket(context.Context, *s3.DeleteBucketInput) error
PutBucketVersioning(_ context.Context, bucket string, status types.BucketVersioningStatus) error
GetBucketVersioning(_ context.Context, bucket string) (s3response.GetBucketVersioningOutput, error)
GetBucketVersioning(_ context.Context, bucket string) (*s3.GetBucketVersioningOutput, error)
PutBucketPolicy(_ context.Context, bucket string, policy []byte) error
GetBucketPolicy(_ context.Context, bucket string) ([]byte, error)
DeleteBucketPolicy(_ context.Context, bucket string) error
PutBucketOwnershipControls(_ context.Context, bucket string, ownership types.ObjectOwnership) error
GetBucketOwnershipControls(_ context.Context, bucket string) (types.ObjectOwnership, error)
DeleteBucketOwnershipControls(_ context.Context, bucket string) error
PutBucketCors(context.Context, []byte) error
GetBucketCors(_ context.Context, bucket string) ([]byte, error)
DeleteBucketCors(_ context.Context, bucket string) error
// multipart operations
CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (_ s3response.CompleteMultipartUploadResult, versionid string, _ error)
CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error
ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)
ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error)
UploadPart(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error)
UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error)
UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error)
UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)
// standard object operations
PutObject(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error)
PutObject(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error)
HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error)
GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error)
CopyObject(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error)
CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error)
ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error)
DeleteObject(context.Context, *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
@@ -96,7 +93,7 @@ type Backend interface {
GetObjectLegalHold(_ context.Context, bucket, object, versionId string) (*bool, error)
// non AWS actions
ChangeBucketOwner(_ context.Context, bucket, owner string) error
ChangeBucketOwner(_ context.Context, bucket string, acl []byte) error
ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error)
}
@@ -111,7 +108,7 @@ func (BackendUnsupported) Shutdown() {}
func (BackendUnsupported) String() string {
return "Unsupported"
}
func (BackendUnsupported) ListBuckets(context.Context, s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
func (BackendUnsupported) ListBuckets(context.Context, string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
@@ -126,14 +123,14 @@ func (BackendUnsupported) CreateBucket(context.Context, *s3.CreateBucketInput, [
func (BackendUnsupported) PutBucketAcl(_ context.Context, bucket string, data []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucket(_ context.Context, bucket string) error {
func (BackendUnsupported) DeleteBucket(context.Context, *s3.DeleteBucketInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketVersioning(_ context.Context, bucket string, status types.BucketVersioningStatus) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketVersioning(_ context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
return s3response.GetBucketVersioningOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) GetBucketVersioning(_ context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketPolicy(_ context.Context, bucket string, policy []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -153,21 +150,12 @@ func (BackendUnsupported) GetBucketOwnershipControls(_ context.Context, bucket s
func (BackendUnsupported) DeleteBucketOwnershipControls(_ context.Context, bucket string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketCors(context.Context, []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketCors(_ context.Context, bucket string) ([]byte, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucketCors(_ context.Context, bucket string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
func (BackendUnsupported) CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
return s3response.CompleteMultipartUploadResult{}, "", s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -178,14 +166,14 @@ func (BackendUnsupported) ListMultipartUploads(context.Context, *s3.ListMultipar
func (BackendUnsupported) ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error) {
return s3response.ListPartsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObject(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
func (BackendUnsupported) PutObject(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
return s3response.PutObjectOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
@@ -197,11 +185,11 @@ func (BackendUnsupported) GetObject(context.Context, *s3.GetObjectInput) (*s3.Ge
func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
return s3response.GetObjectAttributesResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
return s3response.GetObjectAttributesResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
return s3response.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
@@ -280,7 +268,7 @@ func (BackendUnsupported) GetObjectLegalHold(_ context.Context, bucket, object,
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket, owner string) error {
func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket string, acl []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error) {


@@ -17,17 +17,12 @@ package backend
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"io/fs"
"net/url"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
@@ -39,9 +34,6 @@ const (
// this is the media type for directories in AWS and Nextcloud
DirContentType = "application/x-directory"
DefaultContentType = "binary/octet-stream"
// this is the minimum allowed size for mp parts
MinPartSize = 5 * 1024 * 1024
)
func IsValidBucketName(name string) bool { return true }
@@ -58,148 +50,53 @@ func (d ByObjectName) Len() int { return len(d) }
func (d ByObjectName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByObjectName) Less(i, j int) bool { return *d[i].Key < *d[j].Key }
func GetPtrFromString(str string) *string {
if str == "" {
return nil
}
return &str
}
func GetStringFromPtr(str *string) string {
if str == nil {
return ""
}
return *str
func GetStringPtr(s string) *string {
return &s
}
func GetTimePtr(t time.Time) *time.Time {
return &t
}
func TrimEtag(etag *string) *string {
if etag == nil {
return nil
}
return GetPtrFromString(strings.Trim(*etag, "\""))
}
var (
errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
errInvalidCopySourceRange = s3err.GetAPIError(s3err.ErrInvalidCopySourceRange)
errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
)
// ParseObjectRange parses input range header and returns startoffset, length, isValid
// and error. If no endoffset specified, then length is set to the object size
// for invalid inputs, it returns no error, but isValid=false
// `InvalidRange` error is returned only if startoffset is greater than the object size
func ParseObjectRange(size int64, acceptRange string) (int64, int64, bool, error) {
if acceptRange == "" {
return 0, size, false, nil
}
rangeKv := strings.Split(acceptRange, "=")
if len(rangeKv) != 2 {
return 0, size, false, nil
}
if rangeKv[0] != "bytes" {
return 0, size, false, nil
}
bRange := strings.Split(rangeKv[1], "-")
if len(bRange) != 2 {
return 0, size, false, nil
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil && bRange[0] != "" {
return 0, size, false, nil
}
if bRange[1] == "" {
if bRange[0] == "" {
return 0, size, false, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
return startOffset, size - startOffset, true, nil
}
endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, size, false, nil
}
if startOffset > endOffset {
return 0, size, false, nil
}
// for ranges like 'bytes=-100' return the last bytes specified with 'endOffset'
if bRange[0] == "" {
endOffset = min(endOffset, size)
return size - endOffset, endOffset, true, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
if endOffset >= size {
endOffset = size - 1
}
return startOffset, endOffset - startOffset + 1, true, nil
}
// ParseCopySourceRange parses input range header and returns startoffset, length
// and error. If no endoffset specified, then length is set to the object size
func ParseCopySourceRange(size int64, acceptRange string) (int64, int64, error) {
// ParseRange parses input range header and returns startoffset, length, and
// error. If no endoffset specified, then length is set to -1.
func ParseRange(size int64, acceptRange string) (int64, int64, error) {
if acceptRange == "" {
return 0, size, nil
}
rangeKv := strings.Split(acceptRange, "=")
if len(rangeKv) != 2 {
return 0, 0, errInvalidCopySourceRange
}
if rangeKv[0] != "bytes" {
return 0, 0, errInvalidCopySourceRange
if len(rangeKv) < 2 {
return 0, 0, errInvalidRange
}
bRange := strings.Split(rangeKv[1], "-")
if len(bRange) != 2 {
return 0, 0, errInvalidCopySourceRange
if len(bRange) < 1 || len(bRange) > 2 {
return 0, 0, errInvalidRange
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil {
return 0, 0, errInvalidCopySourceRange
return 0, 0, errInvalidRange
}
if startOffset >= size {
return 0, 0, s3err.CreateExceedingRangeErr(size)
endOffset := int64(-1)
if len(bRange) == 1 || bRange[1] == "" {
return startOffset, endOffset, nil
}
if bRange[1] == "" {
return startOffset, size - startOffset + 1, nil
}
endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
endOffset, err = strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, 0, errInvalidCopySourceRange
return 0, 0, errInvalidRange
}
if endOffset < startOffset {
return 0, 0, errInvalidCopySourceRange
}
if endOffset >= size {
return 0, 0, s3err.CreateExceedingRangeErr(size)
if endOffset <= startOffset {
return 0, 0, errInvalidRange
}
return startOffset, endOffset - startOffset + 1, nil
@@ -212,13 +109,15 @@ func ParseCopySource(copySourceHeader string) (string, string, string, error) {
copySourceHeader = copySourceHeader[1:]
}
var copySource, versionId string
i := strings.LastIndex(copySourceHeader, "?versionId=")
if i == -1 {
copySource = copySourceHeader
} else {
copySource = copySourceHeader[:i]
versionId = copySourceHeader[i+11:]
cSplitted := strings.Split(copySourceHeader, "?")
copySource := cSplitted[0]
var versionId string
if len(cSplitted) > 1 {
versionIdParts := strings.Split(cSplitted[1], "=")
if len(versionIdParts) != 2 || versionIdParts[0] != "versionId" {
return "", "", "", s3err.GetAPIError(s3err.ErrInvalidRequest)
}
versionId = versionIdParts[1]
}
srcBucket, srcObject, ok := strings.Cut(copySource, "/")
@@ -229,82 +128,12 @@ func ParseCopySource(copySourceHeader string) (string, string, string, error) {
return srcBucket, srcObject, versionId, nil
}
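Illustrative usage, assuming ParseCopySource as defined above (values hypothetical):

```go
bucket, object, versionId, _ := ParseCopySource("srcbucket/path/to/key?versionId=abc123")
// bucket == "srcbucket", object == "path/to/key", versionId == "abc123"
```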
// ParseObjectTags parses the url encoded input string into
// map[string]string with unescaped key/value pair
func ParseObjectTags(tagging string) (map[string]string, error) {
if tagging == "" {
return nil, nil
func CreateExceedingRangeErr(objSize int64) s3err.APIError {
return s3err.APIError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Range specified is not valid for source object of size: %d", objSize),
HTTPStatusCode: http.StatusBadRequest,
}
tagSet := make(map[string]string)
for tagging != "" {
var tag string
tag, tagging, _ = strings.Cut(tagging, "&")
// if 'tag' before the first appearance of '&' is empty, continue
if tag == "" {
continue
}
key, value, found := strings.Cut(tag, "=")
// if key is empty but "=" is present, return invalid url encoding err
if found && key == "" {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
}
// return invalid tag key, if the key is longer than 128
if len(key) > 128 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}
// return invalid tag value, if tag value is longer than 256
if len(value) > 256 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}
// query unescape tag key
key, err := url.QueryUnescape(key)
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
}
// query unescape tag value
value, err = url.QueryUnescape(value)
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
}
// check tag key to be valid
if !isValidTagComponent(key) {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}
// check tag value to be valid
if !isValidTagComponent(value) {
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}
// duplicate keys are not allowed: return invalid url encoding err
_, ok := tagSet[key]
if ok {
return nil, s3err.GetAPIError(s3err.ErrInvalidURLEncodedTagging)
}
tagSet[key] = value
}
return tagSet, nil
}
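Illustrative usage, assuming ParseObjectTags as defined above (tag values hypothetical):

```go
tags, err := ParseObjectTags("env=prod&team=storage")
// tags == map[string]string{"env": "prod", "team": "storage"}, err == nil
_, err = ParseObjectTags("env=prod&env=dev")
// duplicate keys are rejected with the InvalidURLEncodedTagging error
```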
var validTagComponent = regexp.MustCompile(`^[a-zA-Z0-9:/_.\-+ ]+$`)
// isValidTagComponent matches strings which contain letters, decimal digits,
// and special chars: '/', '_', '-', '+', '.', ' ' (space)
func isValidTagComponent(str string) bool {
if str == "" {
return true
}
return validTagComponent.Match([]byte(str))
}
func GetMultipartMD5(parts []types.CompletedPart) string {
@@ -312,8 +141,8 @@ func GetMultipartMD5(parts []types.CompletedPart) string {
for _, part := range parts {
partsEtagBytes = append(partsEtagBytes, getEtagBytes(*part.ETag)...)
}
return fmt.Sprintf("\"%s-%d\"", md5String(partsEtagBytes), len(parts))
s3MD5 := fmt.Sprintf("%s-%d", md5String(partsEtagBytes), len(parts))
return s3MD5
}
func getEtagBytes(etag string) []byte {
@@ -341,65 +170,3 @@ func (f *FileSectionReadCloser) Read(p []byte) (int, error) {
func (f *FileSectionReadCloser) Close() error {
return f.F.Close()
}
// MoveFile moves a file from source to destination.
func MoveFile(source, destination string, perm os.FileMode) error {
// We use Rename as the atomic operation for object puts. The upload is
// written to a temp file to not conflict with any other simultaneous
// uploads. The final operation is to move the temp file into place for
// the object. This ensures the object semantics of last upload completed
// wins and is not some combination of writes from simultaneous uploads.
err := os.Rename(source, destination)
if err == nil || !errors.Is(err, syscall.EXDEV) {
return err
}
// Rename can fail if the source and destination are not on the same
// filesystem. The fallback is to copy the file and then remove the source.
// We need to be careful that the destination does not exist before copying
// to prevent any other simultaneous writes to the file.
sourceFile, err := os.Open(source)
if err != nil {
return fmt.Errorf("open source: %w", err)
}
defer sourceFile.Close()
var destFile *os.File
for {
destFile, err = os.OpenFile(destination, os.O_CREATE|os.O_EXCL|os.O_WRONLY, perm)
if err != nil {
if errors.Is(err, fs.ErrExist) {
if removeErr := os.Remove(destination); removeErr != nil {
return fmt.Errorf("remove existing destination: %w", removeErr)
}
continue
}
return fmt.Errorf("create destination: %w", err)
}
break
}
defer destFile.Close()
_, err = io.Copy(destFile, sourceFile)
if err != nil {
return fmt.Errorf("copy data: %w", err)
}
err = os.Remove(source)
if err != nil {
return fmt.Errorf("remove source: %w", err)
}
return nil
}
// GenerateEtag generates a new quoted etag from the provided hash.Hash
func GenerateEtag(h hash.Hash) string {
dataSum := h.Sum(nil)
return fmt.Sprintf("\"%s\"", hex.EncodeToString(dataSum[:]))
}
// AreEtagsSame compares 2 etags by ignoring quotes
func AreEtagsSame(e1, e2 string) bool {
return strings.Trim(e1, `"`) == strings.Trim(e2, `"`)
}
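Illustrative usage of the two etag helpers above, assuming crypto/md5 is imported:

```go
h := md5.New()
h.Write([]byte("hello"))
etag := GenerateEtag(h) // "\"5d41402abc4b2a76b9719d911017c592\""
_ = AreEtagsSame(etag, "5d41402abc4b2a76b9719d911017c592") // true
```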


@@ -14,19 +14,17 @@
package meta
import "os"
// MetadataStorer defines the interface for managing metadata.
// When object == "", the operation is on the bucket.
type MetadataStorer interface {
// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
// Returns the value of the attribute, or an error if the attribute does not exist.
RetrieveAttribute(f *os.File, bucket, object, attribute string) ([]byte, error)
RetrieveAttribute(bucket, object, attribute string) ([]byte, error)
// StoreAttribute stores the value of a specific attribute for an object or a bucket.
// If attribute already exists, new attribute should replace existing.
// Returns an error if the operation fails.
StoreAttribute(f *os.File, bucket, object, attribute string, value []byte) error
StoreAttribute(bucket, object, attribute string, value []byte) error
// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
// Returns an error if the operation fails.


@@ -1,54 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package meta
import (
"os"
)
// NoMeta is a metadata storer that does not store metadata.
// This can be useful for read only mounts where attempting to store metadata
// would fail.
type NoMeta struct{}
// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
// always returns ErrNoSuchKey
func (NoMeta) RetrieveAttribute(_ *os.File, _, _, _ string) ([]byte, error) {
return nil, ErrNoSuchKey
}
// StoreAttribute stores the value of a specific attribute for an object or a bucket.
// always returns nil without storing the attribute
func (NoMeta) StoreAttribute(_ *os.File, _, _, _ string, _ []byte) error {
return nil
}
// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
// always returns nil without deleting the attribute
func (NoMeta) DeleteAttribute(_, _, _ string) error {
return nil
}
// ListAttributes lists all attributes for an object or a bucket.
// always returns an empty list of attributes
func (NoMeta) ListAttributes(_, _ string) ([]string, error) {
return []string{}, nil
}
// DeleteAttributes removes all attributes for an object or a bucket.
// always returns nil without deleting any attributes
func (NoMeta) DeleteAttributes(bucket, object string) error {
return nil
}


@@ -1,139 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package meta
import (
"errors"
"fmt"
"os"
"path/filepath"
)
// SideCar is a metadata storer that uses sidecar files to store metadata.
type SideCar struct {
dir string
}
const (
sidecarmeta = "meta"
)
// NewSideCar creates a new SideCar metadata storer.
func NewSideCar(dir string) (SideCar, error) {
fi, err := os.Lstat(dir)
if err != nil {
return SideCar{}, fmt.Errorf("failed to stat directory: %v", err)
}
if !fi.IsDir() {
return SideCar{}, fmt.Errorf("not a directory")
}
return SideCar{dir: dir}, nil
}
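For example, with the sidecar root at /meta, a bucket attribute is stored at /meta/&lt;bucket&gt;/meta/&lt;attribute&gt; and an object attribute at /meta/&lt;bucket&gt;/&lt;object&gt;/meta/&lt;attribute&gt; (paths illustrative, derived from the joins below).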
// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
func (s SideCar) RetrieveAttribute(_ *os.File, bucket, object, attribute string) ([]byte, error) {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
attr := filepath.Join(metadir, attribute)
value, err := os.ReadFile(attr)
if errors.Is(err, os.ErrNotExist) {
return nil, ErrNoSuchKey
}
if err != nil {
return nil, fmt.Errorf("failed to read attribute: %v", err)
}
return value, nil
}
// StoreAttribute stores the value of a specific attribute for an object or a bucket.
func (s SideCar) StoreAttribute(_ *os.File, bucket, object, attribute string, value []byte) error {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
err := os.MkdirAll(metadir, 0777)
if err != nil {
return fmt.Errorf("failed to create metadata directory: %v", err)
}
attr := filepath.Join(metadir, attribute)
err = os.WriteFile(attr, value, 0666)
if err != nil {
return fmt.Errorf("failed to write attribute: %v", err)
}
return nil
}
// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
func (s SideCar) DeleteAttribute(bucket, object, attribute string) error {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
attr := filepath.Join(metadir, attribute)
err := os.Remove(attr)
if errors.Is(err, os.ErrNotExist) {
return ErrNoSuchKey
}
if err != nil {
return fmt.Errorf("failed to remove attribute: %v", err)
}
return nil
}
// ListAttributes lists all attributes for an object or a bucket.
func (s SideCar) ListAttributes(bucket, object string) ([]string, error) {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
ents, err := os.ReadDir(metadir)
if errors.Is(err, os.ErrNotExist) {
return []string{}, nil
}
if err != nil {
return nil, fmt.Errorf("failed to list attributes: %v", err)
}
var attrs []string
for _, ent := range ents {
attrs = append(attrs, ent.Name())
}
return attrs, nil
}
// DeleteAttributes removes all attributes for an object or a bucket.
func (s SideCar) DeleteAttributes(bucket, object string) error {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
err := os.RemoveAll(metadir)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("failed to remove attributes: %v", err)
}
return nil
}
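
The layout this produces is worth making concrete: each attribute is a plain file at <dir>/<bucket>/<object>/meta/<attribute>, with bucket-level attributes one level up at <dir>/<bucket>/meta/<attribute>. Below is a minimal usage sketch against the API shown above; the temp directory, bucket, object, and attribute names are illustrative only.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/versity/versitygw/backend/meta"
)

func main() {
	dir, err := os.MkdirTemp("", "sidecar")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	sc, err := meta.NewSideCar(dir)
	if err != nil {
		log.Fatal(err)
	}

	// Writes <dir>/mybucket/myobject/meta/etag
	err = sc.StoreAttribute(nil, "mybucket", "myobject", "etag", []byte("abc123"))
	if err != nil {
		log.Fatal(err)
	}

	v, err := sc.RetrieveAttribute(nil, "mybucket", "myobject", "etag")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("etag=%s\n", v) // etag=abc123
}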

View File

@@ -17,13 +17,11 @@ package meta
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/pkg/xattr"
"github.com/versity/versitygw/s3err"
)
const (
@@ -38,15 +36,7 @@ var (
type XattrMeta struct{}
// RetrieveAttribute retrieves the value of a specific attribute for an object in a bucket.
func (x XattrMeta) RetrieveAttribute(f *os.File, bucket, object, attribute string) ([]byte, error) {
if f != nil {
b, err := xattr.FGet(f, xattrPrefix+attribute)
if errors.Is(err, xattr.ENOATTR) {
return nil, ErrNoSuchKey
}
return b, err
}
func (x XattrMeta) RetrieveAttribute(bucket, object, attribute string) ([]byte, error) {
b, err := xattr.Get(filepath.Join(bucket, object), xattrPrefix+attribute)
if errors.Is(err, xattr.ENOATTR) {
return nil, ErrNoSuchKey
@@ -55,20 +45,8 @@ func (x XattrMeta) RetrieveAttribute(f *os.File, bucket, object, attribute strin
}
// StoreAttribute stores the value of a specific attribute for an object in a bucket.
func (x XattrMeta) StoreAttribute(f *os.File, bucket, object, attribute string, value []byte) error {
if f != nil {
err := xattr.FSet(f, xattrPrefix+attribute, value)
if errors.Is(err, syscall.EROFS) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return err
}
err := xattr.Set(filepath.Join(bucket, object), xattrPrefix+attribute, value)
if errors.Is(err, syscall.EROFS) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return err
func (x XattrMeta) StoreAttribute(bucket, object, attribute string, value []byte) error {
return xattr.Set(filepath.Join(bucket, object), xattrPrefix+attribute, value)
}
// DeleteAttribute removes the value of a specific attribute for an object in a bucket.
@@ -77,9 +55,6 @@ func (x XattrMeta) DeleteAttribute(bucket, object, attribute string) error {
if errors.Is(err, xattr.ENOATTR) {
return ErrNoSuchKey
}
if errors.Is(err, syscall.EROFS) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return err
}
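
For contrast with the sidecar storer above, the xattr storer keeps each attribute as an extended attribute on the file itself via github.com/pkg/xattr. A standalone sketch of those primitives follows; the file and attribute names are illustrative, and the gateway prepends its own xattrPrefix to attribute names.

package main

import (
	"errors"
	"fmt"
	"log"
	"os"

	"github.com/pkg/xattr"
)

func main() {
	f, err := os.CreateTemp("", "obj")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Store and retrieve an attribute directly on the file.
	if err := xattr.Set(f.Name(), "user.demo", []byte("value")); err != nil {
		log.Fatal(err) // fails on filesystems without xattr support
	}
	v, err := xattr.Get(f.Name(), "user.demo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("demo=%s\n", v)

	// A missing attribute surfaces as xattr.ENOATTR, which the code
	// above maps to ErrNoSuchKey.
	_, err = xattr.Get(f.Name(), "user.missing")
	fmt.Println(errors.Is(err, xattr.ENOATTR)) // true
}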

View File

@@ -15,6 +15,11 @@ import (
"github.com/versity/versitygw/s3err"
)
var (
// TODO: make this configurable
defaultDirPerm fs.FileMode = 0755
)
// MkdirAll is similar to os.MkdirAll but it will return
// ErrObjectParentIsFile when appropriate
// MkdirAll creates a directory named path,
@@ -27,7 +32,7 @@ import (
// and returns nil.
// Any directory created will be set to provided uid/gid ownership
// if doChown is true.
func MkdirAll(path string, uid, gid int, doChown bool, dirPerm fs.FileMode) error {
func MkdirAll(path string, uid, gid int, doChown bool) error {
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
dir, err := os.Stat(path)
if err == nil {
@@ -50,14 +55,14 @@ func MkdirAll(path string, uid, gid int, doChown bool, dirPerm fs.FileMode) erro
if j > 1 {
// Create parent.
err = MkdirAll(path[:j-1], uid, gid, doChown, dirPerm)
err = MkdirAll(path[:j-1], uid, gid, doChown)
if err != nil {
return err
}
}
// Parent now exists; invoke Mkdir and use its result.
err = os.Mkdir(path, dirPerm)
err = os.Mkdir(path, defaultDirPerm)
if err != nil {
// Handle arguments like "foo/." by
// double-checking that directory doesn't exist.

File diff suppressed because it is too large

View File

@@ -29,7 +29,6 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"golang.org/x/sys/unix"
)
@@ -44,7 +43,6 @@ type tmpfile struct {
needsChown bool
uid int
gid int
newDirPerm fs.FileMode
}
var (
@@ -52,13 +50,9 @@ var (
defaultFilePerm uint32 = 0644
)
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, dofalloc bool, forceNoTmpFile bool) (*tmpfile, error) {
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, dofalloc bool) (*tmpfile, error) {
uid, gid, doChown := p.getChownIDs(acct)
if forceNoTmpFile {
return p.openMkTemp(dir, bucket, obj, size, dofalloc, uid, gid, doChown)
}
// O_TMPFILE allows for a file handle to an unnamed file in the filesystem.
// This can help reduce contention within the namespace (parent directories),
// etc. And will auto cleanup the inode on close if we never link this
@@ -67,12 +61,38 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
// this is not supported.
fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
// O_TMPFILE not supported, try fallback
err = backend.MkdirAll(dir, uid, gid, doChown)
if err != nil {
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
return nil, err
}
tmp := &tmpfile{
f: f,
bucket: bucket,
objname: obj,
size: size,
needsChown: doChown,
uid: uid,
gid: gid,
}
// falloc is best effort, it's fine if this fails
if size > 0 && dofalloc {
tmp.falloc()
}
// O_TMPFILE not supported, try fallback
return p.openMkTemp(dir, bucket, obj, size, dofalloc, uid, gid, doChown)
if doChown {
err := f.Chown(uid, gid)
if err != nil {
return nil, fmt.Errorf("set temp file ownership: %w", err)
}
}
return tmp, nil
}
// for O_TMPFILE, filename is /proc/self/fd/<fd> to be used
@@ -88,7 +108,6 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
needsChown: doChown,
uid: uid,
gid: gid,
newDirPerm: p.newDirPerm,
}
// falloc is best effort, it's fine if this fails
@@ -106,46 +125,6 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
return tmp, nil
}
func (p *Posix) openMkTemp(dir, bucket, obj string, size int64, dofalloc bool, uid, gid int, doChown bool) (*tmpfile, error) {
err := backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, err
}
tmp := &tmpfile{
f: f,
bucket: bucket,
objname: obj,
size: size,
needsChown: doChown,
uid: uid,
gid: gid,
}
// falloc is best effort, it's fine if this fails
if size > 0 && dofalloc {
tmp.falloc()
}
if doChown {
err := f.Chown(uid, gid)
if err != nil {
return nil, fmt.Errorf("set temp file ownership: %w", err)
}
}
return tmp, nil
}
func (tmp *tmpfile) falloc() error {
err := syscall.Fallocate(int(tmp.f.Fd()), 0, 0, tmp.size)
if err != nil {
@@ -155,9 +134,6 @@ func (tmp *tmpfile) falloc() error {
}
func (tmp *tmpfile) link() error {
// make sure this is cleaned up in all error cases
defer tmp.f.Close()
// We use Linkat/Rename as the atomic operation for object puts. The
// upload is written to a temp (or unnamed/O_TMPFILE) file to not conflict
// with any other simultaneous uploads. The final operation is to move the
@@ -172,7 +148,7 @@ func (tmp *tmpfile) link() error {
dir := filepath.Dir(objPath)
err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown, tmp.newDirPerm)
err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown)
if err != nil {
return fmt.Errorf("make parent dir: %w", err)
}
@@ -194,21 +170,11 @@ func (tmp *tmpfile) link() error {
}
defer dirf.Close()
for {
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if errors.Is(err, syscall.EEXIST) {
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
continue
}
if err != nil {
return fmt.Errorf("link tmpfile (fd %q as %q): %w",
filepath.Base(tmp.f.Name()), objPath, err)
}
break
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if err != nil {
return fmt.Errorf("link tmpfile (%q in %q): %w",
filepath.Dir(objPath), filepath.Base(tmp.f.Name()), err)
}
err = tmp.f.Close()
@@ -236,9 +202,7 @@ func (tmp *tmpfile) fallbackLink() error {
objPath := filepath.Join(tmp.bucket, tmp.objname)
err = os.Rename(tempname, objPath)
if err != nil {
// rename only works for files within the same filesystem
// if this fails, fall back to copy
return backend.MoveFile(tempname, objPath, fs.FileMode(defaultFilePerm))
return fmt.Errorf("rename tmpfile: %w", err)
}
return nil
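
The Linkat-based publish above is the heart of the atomic put. Stripped of the gateway's chown and falloc bookkeeping, the O_TMPFILE pattern looks roughly like this Linux-only sketch; the directory and object name are illustrative.

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	dir := "/tmp/bucket" // illustrative bucket directory
	if err := os.MkdirAll(dir, 0o755); err != nil {
		log.Fatal(err)
	}

	// Open an unnamed file in dir: invisible to other clients until linked.
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0644)
	if err != nil {
		log.Fatal(err) // EOPNOTSUPP here means the filesystem lacks O_TMPFILE
	}
	f := os.NewFile(uintptr(fd), fmt.Sprintf("/proc/self/fd/%d", fd))
	defer f.Close()

	if _, err := f.WriteString("object data"); err != nil {
		log.Fatal(err)
	}

	// Publish atomically under the final object name via the proc path.
	err = unix.Linkat(unix.AT_FDCWD, f.Name(),
		unix.AT_FDCWD, dir+"/myobject", unix.AT_SYMLINK_FOLLOW)
	if err != nil {
		log.Fatal(err) // EEXIST if the name is already taken
	}
}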

View File

@@ -24,11 +24,9 @@ import (
"io/fs"
"os"
"path/filepath"
"syscall"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
)
type tmpfile struct {
@@ -38,24 +36,18 @@ type tmpfile struct {
size int64
}
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, _ bool, _ bool) (*tmpfile, error) {
func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account, _ bool) (*tmpfile, error) {
uid, gid, doChown := p.getChownIDs(acct)
// Create a temp file for upload while in progress (see link comments below).
var err error
err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
err = backend.MkdirAll(dir, uid, gid, doChown)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, fmt.Errorf("create temp file: %w", err)
}
@@ -80,17 +72,31 @@ func (tmp *tmpfile) link() error {
// this will no longer exist
defer os.Remove(tempname)
// We use Rename as the atomic operation for object puts. The upload is
// written to a temp file to not conflict with any other simultaneous
// uploads. The final operation is to move the temp file into place for
// the object. This ensures the object semantics of last upload completed
// wins and is not some combination of writes from simultaneous uploads.
objPath := filepath.Join(tmp.bucket, tmp.objname)
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
// reset default file mode because CreateTemp uses 0600
tmp.f.Chmod(defaultFilePerm)
err := tmp.f.Close()
err = tmp.f.Close()
if err != nil {
return fmt.Errorf("close tmpfile: %w", err)
}
return backend.MoveFile(tempname, objPath, defaultFilePerm)
err = os.Rename(tempname, objPath)
if err != nil {
return fmt.Errorf("rename tmpfile: %w", err)
}
return nil
}
func (tmp *tmpfile) Write(b []byte) (int, error) {

View File

@@ -36,11 +36,6 @@ func (s *S3Proxy) getClientWithCtx(ctx context.Context) (*s3.Client, error) {
if s.endpoint != "" {
return s3.NewFromConfig(cfg, func(o *s3.Options) {
o.BaseEndpoint = &s.endpoint
o.UsePathStyle = s.usePathStyle
// The http body stream is not seekable, so most operations cannot
// be retried. The error returned to the original client may be
// retried by the client.
o.Retryer = aws.NopRetryer{}
}), nil
}
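
The option function above is standard aws-sdk-go-v2 client construction. A self-contained sketch of the same pattern, with a placeholder endpoint and default credential loading standing in for the gateway's own plumbing:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func newProxyClient(ctx context.Context, endpoint string) (*s3.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = &endpoint
		o.UsePathStyle = true // path-style addressing, as the removed option enabled
		// The body stream is not seekable, so let the original client
		// retry the whole request instead of the SDK.
		o.Retryer = aws.NopRetryer{}
	}), nil
}

func main() {
	if _, err := newProxyClient(context.Background(), "http://127.0.0.1:7070"); err != nil {
		log.Fatal(err)
	}
}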

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -23,7 +23,6 @@ import (
"os"
"path/filepath"
"strconv"
"syscall"
"golang.org/x/sys/unix"
@@ -32,7 +31,6 @@ import (
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
"github.com/versity/versitygw/s3err"
)
func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
@@ -42,7 +40,6 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
ChownUID: opts.ChownUID,
ChownGID: opts.ChownGID,
BucketLinks: opts.BucketLinks,
NewDirPerm: opts.NewDirPerm,
})
if err != nil {
return nil, err
@@ -54,15 +51,13 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
}
return &ScoutFS{
Posix: p,
rootfd: f,
rootdir: rootdir,
meta: metastore,
chownuid: opts.ChownUID,
chowngid: opts.ChownGID,
glaciermode: opts.GlacierMode,
newDirPerm: opts.NewDirPerm,
disableNoArchive: opts.DisableNoArchive,
Posix: p,
rootfd: f,
rootdir: rootdir,
meta: metastore,
chownuid: opts.ChownUID,
chowngid: opts.ChownGID,
glaciermode: opts.GlacierMode,
}, nil
}
@@ -76,10 +71,10 @@ type tmpfile struct {
needsChown bool
uid int
gid int
newDirPerm fs.FileMode
}
var (
// TODO: make this configurable
defaultFilePerm uint32 = 0644
)
@@ -92,9 +87,6 @@ func (s *ScoutFS) openTmpFile(dir, bucket, obj string, size int64, acct auth.Acc
// file descriptor into the namespace.
fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, err
}
@@ -110,7 +102,6 @@ func (s *ScoutFS) openTmpFile(dir, bucket, obj string, size int64, acct auth.Acc
needsChown: doChown,
uid: uid,
gid: gid,
newDirPerm: s.newDirPerm,
}
if doChown {
@@ -138,7 +129,7 @@ func (tmp *tmpfile) link() error {
dir := filepath.Dir(objPath)
err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown, tmp.newDirPerm)
err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown)
if err != nil {
return fmt.Errorf("make parent dir: %w", err)
}
@@ -155,20 +146,10 @@ func (tmp *tmpfile) link() error {
}
defer dirf.Close()
for {
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if errors.Is(err, fs.ErrExist) {
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
continue
}
if err != nil {
return fmt.Errorf("link tmpfile: %w", err)
}
break
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if err != nil {
return fmt.Errorf("link tmpfile: %w", err)
}
err = tmp.f.Close()
@@ -193,10 +174,6 @@ func (tmp *tmpfile) cleanup() {
tmp.f.Close()
}
func (tmp *tmpfile) File() *os.File {
return tmp.f
}
func moveData(from *os.File, to *os.File) error {
return scoutfs.MoveData(from, to)
}

View File

@@ -28,7 +28,9 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
return nil, fmt.Errorf("scoutfs only available on linux")
}
type tmpfile struct{}
type tmpfile struct {
f *os.File
}
var (
errNotSupported = errors.New("not supported")
@@ -54,10 +56,6 @@ func (tmp *tmpfile) Write(b []byte) (int, error) {
func (tmp *tmpfile) cleanup() {
}
func (tmp *tmpfile) File() *os.File {
return nil
}
func moveData(_, _ *os.File) error {
return errNotSupported
}

View File

@@ -19,8 +19,9 @@ import (
"errors"
"fmt"
"io/fs"
"os"
"sort"
"strings"
"syscall"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3response"
@@ -37,65 +38,22 @@ type GetObjFunc func(path string, d fs.DirEntry) (s3response.Object, error)
var ErrSkipObj = errors.New("skip this object")
// map to store object common prefixes
type cpMap map[string]int
func (c cpMap) Add(key string) {
_, ok := c[key]
if !ok {
c[key] = len(c)
}
}
// Len returns the length of the map
func (c cpMap) Len() int {
return len(c)
}
// CpArray converts the map into a sorted []types.CommonPrefixes array
func (c cpMap) CpArray() []types.CommonPrefix {
commonPrefixes := make([]types.CommonPrefix, c.Len())
for cp, i := range c {
pfx := cp
commonPrefixes[i] = types.CommonPrefix{
Prefix: &pfx,
}
}
return commonPrefixes
}
// Walk walks the supplied fs.FS and returns results compatible with list
// objects responses
func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
cpmap := cpMap{}
cpmap := make(map[string]struct{})
var objects []s3response.Object
// if max is 0, it should return empty non-truncated result
if max == 0 {
return WalkResults{
Truncated: false,
}, nil
}
var pastMarker bool
if marker == "" {
pastMarker = true
}
var pastMax bool
pastMax := max == 0
var newMarker string
var truncated bool
root := "."
if strings.Contains(prefix, "/") {
idx := strings.LastIndex(prefix, "/")
if idx > 0 {
root = prefix[:idx]
}
}
err := fs.WalkDir(fileSystem, root, func(path string, d fs.DirEntry, err error) error {
err := fs.WalkDir(fileSystem, ".", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
@@ -110,9 +68,14 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return fs.SkipDir
}
// After this point, return skipflag instead of nil
// so we can skip a directory without an early return
var skipflag error
if pastMax {
if len(objects) != 0 {
newMarker = *objects[len(objects)-1].Key
truncated = true
}
return fs.SkipAll
}
if d.IsDir() {
// If prefix is defined and the directory does not match prefix,
// do not descend into the directory because nothing will
@@ -122,66 +85,51 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
// building to match. So only skip if path isn't a prefix of prefix
// and prefix isn't a prefix of path.
if prefix != "" &&
!strings.HasPrefix(path+"/", prefix) &&
!strings.HasPrefix(prefix, path+"/") {
!strings.HasPrefix(path+string(os.PathSeparator), prefix) &&
!strings.HasPrefix(prefix, path+string(os.PathSeparator)) {
return fs.SkipDir
}
// Don't recurse into subdirectories which contain the delimiter
// after reaching the prefix
if delimiter != "" &&
strings.HasPrefix(path+"/", prefix) &&
strings.Contains(strings.TrimPrefix(path+"/", prefix), delimiter) {
skipflag = fs.SkipDir
} else {
if delimiter == "" {
dirobj, err := getObj(path+"/", d)
if err == ErrSkipObj {
return skipflag
}
if err != nil {
return fmt.Errorf("directory to object %q: %w", path, err)
}
if pastMax {
truncated = true
return fs.SkipAll
}
objects = append(objects, dirobj)
if (len(objects) + cpmap.Len()) == int(max) {
newMarker = path
pastMax = true
}
return skipflag
}
// TODO: can we do better here rather than a second readdir
// per directory?
ents, err := fs.ReadDir(fileSystem, path)
if err != nil {
return fmt.Errorf("readdir %q: %w", path, err)
}
if len(ents) != 0 {
return skipflag
}
// TODO: can we do better here rather than a second readdir
// per directory?
ents, err := fs.ReadDir(fileSystem, path)
if err != nil {
return fmt.Errorf("readdir %q: %w", path, err)
}
path += string(os.PathSeparator)
if len(ents) == 0 && delimiter == "" {
dirobj, err := getObj(path, d)
if err == ErrSkipObj {
return nil
}
if err != nil {
return fmt.Errorf("directory to object %q: %w", path, err)
}
objects = append(objects, dirobj)
return nil
}
if len(ents) != 0 {
return nil
}
path += "/"
}
if !pastMarker {
if path == marker {
pastMarker = true
return skipflag
return nil
}
if path < marker {
return skipflag
return nil
}
}
// If object doesn't have prefix, don't include in results.
if prefix != "" && !strings.HasPrefix(path, prefix) {
return skipflag
return nil
}
if delimiter == "" {
@@ -189,24 +137,18 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
// prefix are included in results
obj, err := getObj(path, d)
if err == ErrSkipObj {
return skipflag
return nil
}
if err != nil {
return fmt.Errorf("file to object %q: %w", path, err)
}
if pastMax {
truncated = true
return fs.SkipAll
}
objects = append(objects, obj)
if (len(objects) + cpmap.Len()) == int(max) {
newMarker = path
if max > 0 && (len(objects)+len(cpmap)) == int(max) {
pastMax = true
}
return skipflag
return nil
}
// Since delimiter is specified, we only want results that
@@ -235,21 +177,16 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
if !found {
obj, err := getObj(path, d)
if err == ErrSkipObj {
return skipflag
return nil
}
if err != nil {
return fmt.Errorf("file to object %q: %w", path, err)
}
if pastMax {
truncated = true
return fs.SkipAll
}
objects = append(objects, obj)
if (len(objects) + cpmap.Len()) == int(max) {
newMarker = path
if (len(objects) + len(cpmap)) == int(max) {
pastMax = true
}
return skipflag
return nil
}
// Common prefixes are a set, so should not have duplicates.
@@ -259,40 +196,42 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
cpref := prefix + before + delimiter
if cpref == marker {
pastMarker = true
return skipflag
return nil
}
if marker != "" && strings.HasPrefix(marker, cprefNoDelim) {
// skip common prefixes that are before the marker
return skipflag
return nil
}
if pastMax {
cpmap[cpref] = struct{}{}
if (len(objects) + len(cpmap)) == int(max) {
newMarker = cpref
truncated = true
return fs.SkipAll
}
cpmap.Add(cpref)
if (len(objects) + cpmap.Len()) == int(max) {
newMarker = cpref
pastMax = true
}
return skipflag
return nil
})
if err != nil {
// suppress file not found caused by user's prefix
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
return WalkResults{}, nil
}
return WalkResults{}, err
}
if !truncated {
newMarker = ""
var commonPrefixStrings []string
for k := range cpmap {
commonPrefixStrings = append(commonPrefixStrings, k)
}
sort.Strings(commonPrefixStrings)
commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
for _, cp := range commonPrefixStrings {
pfx := cp
commonPrefixes = append(commonPrefixes, types.CommonPrefix{
Prefix: &pfx,
})
}
return WalkResults{
CommonPrefixes: cpmap.CpArray(),
CommonPrefixes: commonPrefixes,
Objects: objects,
Truncated: truncated,
NextMarker: newMarker,
@@ -310,7 +249,7 @@ func contains(a string, strs []string) bool {
type WalkVersioningResults struct {
CommonPrefixes []types.CommonPrefix
ObjectVersions []s3response.ObjectVersion
ObjectVersions []types.ObjectVersion
DelMarkers []types.DeleteMarkerEntry
Truncated bool
NextMarker string
@@ -318,7 +257,7 @@ type WalkVersioningResults struct {
}
type ObjVersionFuncResult struct {
ObjectVersions []s3response.ObjectVersion
ObjectVersions []types.ObjectVersion
DelMarkers []types.DeleteMarkerEntry
NextVersionIdMarker string
Truncated bool
@@ -329,8 +268,8 @@ type GetVersionsFunc func(path, versionIdMarker string, pastVersionIdMarker *boo
// WalkVersions walks the supplied fs.FS and returns results compatible with
// ListObjectVersions action response
func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyMarker, versionIdMarker string, max int, getObj GetVersionsFunc, skipdirs []string) (WalkVersioningResults, error) {
cpmap := cpMap{}
var objects []s3response.ObjectVersion
cpmap := make(map[string]struct{})
var objects []types.ObjectVersion
var delMarkers []types.DeleteMarkerEntry
var pastMarker bool
@@ -376,20 +315,12 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
// building to match. So only skip if path isn't a prefix of prefix
// and prefix isn't a prefix of path.
if prefix != "" &&
!strings.HasPrefix(path+"/", prefix) &&
!strings.HasPrefix(prefix, path+"/") {
!strings.HasPrefix(path+string(os.PathSeparator), prefix) &&
!strings.HasPrefix(prefix, path+string(os.PathSeparator)) {
return fs.SkipDir
}
// Don't recurse into subdirectories when listing with delimiter.
if delimiter == "/" &&
prefix != path+"/" &&
strings.HasPrefix(path+"/", prefix) {
cpmap.Add(path + "/")
return fs.SkipDir
}
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
if err == ErrSkipObj {
return nil
}
@@ -416,7 +347,7 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
if delimiter == "" {
// If no delimiter specified, then all files with matching
// prefix are included in results
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
if err == ErrSkipObj {
return nil
}
@@ -459,7 +390,7 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
suffix := strings.TrimPrefix(path, prefix)
before, _, found := strings.Cut(suffix, delimiter)
if !found {
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-cpmap.Len(), d)
res, err := getObj(path, versionIdMarker, &pastVersionIdMarker, max-len(objects)-len(delMarkers)-len(cpmap), d)
if err == ErrSkipObj {
return nil
}
@@ -481,8 +412,8 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
// Common prefixes are a set, so should not have duplicates.
// These are abstractly a "directory", so need to include the
// delimiter at the end.
cpmap.Add(prefix + before + delimiter)
if (len(objects) + cpmap.Len()) == int(max) {
cpmap[prefix+before+delimiter] = struct{}{}
if (len(objects) + len(cpmap)) == int(max) {
nextMarker = path
truncated = true
@@ -495,8 +426,21 @@ func WalkVersions(ctx context.Context, fileSystem fs.FS, prefix, delimiter, keyM
return WalkVersioningResults{}, err
}
var commonPrefixStrings []string
for k := range cpmap {
commonPrefixStrings = append(commonPrefixStrings, k)
}
sort.Strings(commonPrefixStrings)
commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
for _, cp := range commonPrefixStrings {
pfx := cp
commonPrefixes = append(commonPrefixes, types.CommonPrefix{
Prefix: &pfx,
})
}
return WalkVersioningResults{
CommonPrefixes: cpmap.CpArray(),
CommonPrefixes: commonPrefixes,
ObjectVersions: objects,
DelMarkers: delMarkers,
Truncated: truncated,
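
One behavioral detail buried in these walk changes: the cpMap variant emits common prefixes in insertion order (the map value records the arrival index), while the set variant sorts lexically after the walk. Because fs.WalkDir already visits entries in lexical order, the two normally agree. A small sketch of both collection strategies, with illustrative prefix values:

package main

import (
	"fmt"
	"sort"
)

// cpMap as defined above: the value remembers insertion order.
type cpMap map[string]int

func (c cpMap) Add(key string) {
	if _, ok := c[key]; !ok {
		c[key] = len(c)
	}
}

func main() {
	prefixes := []string{"archive/", "docs/", "photos/"}

	// Insertion-ordered variant.
	c := cpMap{}
	for _, p := range prefixes {
		c.Add(p)
	}
	ordered := make([]string, len(c))
	for k, i := range c {
		ordered[i] = k
	}
	fmt.Println(ordered) // [archive/ docs/ photos/]

	// Set-plus-sort variant.
	set := map[string]struct{}{}
	for _, p := range prefixes {
		set[p] = struct{}{}
	}
	var sorted []string
	for k := range set {
		sorted = append(sorted, k)
	}
	sort.Strings(sorted)
	fmt.Println(sorted) // [archive/ docs/ photos/]
}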

View File

@@ -31,18 +31,9 @@ import (
)
type walkTest struct {
fsys fs.FS
getobj backend.GetObjFunc
cases []testcase
}
type testcase struct {
name string
prefix string
delimiter string
marker string
maxObjs int32
expected backend.WalkResults
fsys fs.FS
expected backend.WalkResults
getobj backend.GetObjFunc
}
func getObj(path string, d fs.DirEntry) (s3response.Object, error) {
@@ -97,154 +88,50 @@ func TestWalk(t *testing.T) {
"photos/2006/February/sample3.jpg": {},
"photos/2006/February/sample4.jpg": {},
},
getobj: getObj,
cases: []testcase{
{
name: "aws example",
delimiter: "/",
maxObjs: 1000,
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photos/"),
}},
Objects: []s3response.Object{{
Key: backend.GetPtrFromString("sample.jpg"),
}},
},
},
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetStringPtr("photos/"),
}},
Objects: []s3response.Object{{
Key: backend.GetStringPtr("sample.jpg"),
}},
},
getobj: getObj,
},
{
// test case single dir/single file
fsys: fstest.MapFS{
"test/file": {},
},
getobj: getObj,
cases: []testcase{
{
name: "single dir single file",
delimiter: "/",
maxObjs: 1000,
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("test/"),
}},
Objects: []s3response.Object{},
},
},
},
},
{
// non-standard delimiter
fsys: fstest.MapFS{
"photo|s/200|6/Januar|y/sampl|e1.jpg": {},
"photo|s/200|6/Januar|y/sampl|e2.jpg": {},
"photo|s/200|6/Januar|y/sampl|e3.jpg": {},
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetStringPtr("test/"),
}},
Objects: []s3response.Object{},
},
getobj: getObj,
cases: []testcase{
{
name: "different delimiter 1",
delimiter: "|",
maxObjs: 1000,
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photo|"),
}},
},
},
{
name: "different delimiter 2",
delimiter: "|",
maxObjs: 1000,
prefix: "photo|",
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photo|s/200|"),
}},
},
},
{
name: "different delimiter 3",
delimiter: "|",
maxObjs: 1000,
prefix: "photo|s/200|",
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photo|s/200|6/Januar|"),
}},
},
},
{
name: "different delimiter 4",
delimiter: "|",
maxObjs: 1000,
prefix: "photo|s/200|",
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photo|s/200|6/Januar|"),
}},
},
},
{
name: "different delimiter 5",
delimiter: "|",
maxObjs: 1000,
prefix: "photo|s/200|6/Januar|",
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetPtrFromString("photo|s/200|6/Januar|y/sampl|"),
}},
},
},
{
name: "different delimiter 6",
delimiter: "|",
maxObjs: 1000,
prefix: "photo|s/200|6/Januar|y/sampl|",
expected: backend.WalkResults{
Objects: []s3response.Object{
{
Key: backend.GetPtrFromString("photo|s/200|6/Januar|y/sampl|e1.jpg"),
},
{
Key: backend.GetPtrFromString("photo|s/200|6/Januar|y/sampl|e2.jpg"),
},
{
Key: backend.GetPtrFromString("photo|s/200|6/Januar|y/sampl|e3.jpg"),
},
},
},
},
},
},
}
for _, tt := range tests {
for _, tc := range tt.cases {
res, err := backend.Walk(context.Background(),
tt.fsys, tc.prefix, tc.delimiter, tc.marker, tc.maxObjs,
tt.getobj, []string{})
if err != nil {
t.Errorf("tc.name: walk: %v", err)
}
compareResults(tc.name, res, tc.expected, t)
res, err := backend.Walk(context.Background(), tt.fsys, "", "/", "", 1000, tt.getobj, []string{})
if err != nil {
t.Fatalf("walk: %v", err)
}
compareResults(res, tt.expected, t)
}
}
func compareResults(name string, got, wanted backend.WalkResults, t *testing.T) {
func compareResults(got, wanted backend.WalkResults, t *testing.T) {
if !compareCommonPrefix(got.CommonPrefixes, wanted.CommonPrefixes) {
t.Errorf("%v: unexpected common prefix, got %v wanted %v",
name,
t.Errorf("unexpected common prefix, got %v wanted %v",
printCommonPrefixes(got.CommonPrefixes),
printCommonPrefixes(wanted.CommonPrefixes))
}
if !compareObjects(got.Objects, wanted.Objects) {
t.Errorf("%v: unexpected object, got %v wanted %v",
name,
t.Errorf("unexpected object, got %v wanted %v",
printObjects(got.Objects),
printObjects(wanted.Objects))
}
@@ -315,16 +202,10 @@ func containsObject(c s3response.Object, list []s3response.Object) bool {
func printObjects(list []s3response.Object) string {
res := "["
for _, cp := range list {
var key string
if cp.Key == nil {
key = "<nil>"
} else {
key = *cp.Key
}
if res == "[" {
res = res + key
res = res + *cp.Key
} else {
res = res + ", " + key
res = res + ", " + *cp.Key
}
}
return res + "]"
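
A side note on the test loop above: wrapping each case in t.Run would let go test -run filter individual cases and attach the case name to failures automatically. As a sketch, reusing only names from the test shown above, the inner loop of TestWalk would become:

for _, tt := range tests {
	for _, tc := range tt.cases {
		t.Run(tc.name, func(t *testing.T) {
			res, err := backend.Walk(context.Background(),
				tt.fsys, tc.prefix, tc.delimiter, tc.marker, tc.maxObjs,
				tt.getobj, []string{})
			if err != nil {
				t.Fatalf("walk: %v", err)
			}
			compareResults(tc.name, res, tc.expected, t)
		})
	}
}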

View File

@@ -19,7 +19,7 @@ import (
"crypto/sha256"
"crypto/tls"
"encoding/hex"
"encoding/xml"
"encoding/json"
"fmt"
"io"
"net/http"
@@ -29,7 +29,6 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/s3response"
@@ -38,7 +37,6 @@ import (
var (
adminAccess string
adminSecret string
adminRegion string
adminEndpoint string
allowInsecure bool
)
@@ -100,11 +98,6 @@ func adminCommand() *cli.Command {
Usage: "secret access key for the new user",
Aliases: []string{"s"},
},
&cli.StringFlag{
Name: "role",
Usage: "the new user role",
Aliases: []string{"r"},
},
&cli.IntFlag{
Name: "user-id",
Usage: "userID for the new user",
@@ -178,14 +171,6 @@ func adminCommand() *cli.Command {
Required: true,
Destination: &adminSecret,
},
&cli.StringFlag{
Name: "region",
Usage: "admin s3 region string",
EnvVars: []string{"ADMIN_REGION"},
Value: "us-east-1",
Destination: &adminRegion,
Aliases: []string{"r"},
},
&cli.StringFlag{
Name: "endpoint-url",
Usage: "admin apis endpoint url",
@@ -230,24 +215,24 @@ func createUser(ctx *cli.Context) error {
GroupID: groupID,
}
accxml, err := xml.Marshal(acc)
accJson, err := json.Marshal(acc)
if err != nil {
return fmt.Errorf("failed to parse user data: %w", err)
}
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/create-user", adminEndpoint), bytes.NewBuffer(accxml))
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/create-user", adminEndpoint), bytes.NewBuffer(accJson))
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
signer := v4.NewSigner()
hashedPayload := sha256.Sum256(accxml)
hashedPayload := sha256.Sum256(accJson)
hexPayload := hex.EncodeToString(hashedPayload[:])
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -266,9 +251,11 @@ func createUser(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
fmt.Printf("%s\n", body)
return nil
}
@@ -290,7 +277,7 @@ func deleteUser(ctx *cli.Context) error {
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -309,21 +296,17 @@ func deleteUser(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
fmt.Printf("%s\n", body)
return nil
}
func updateUser(ctx *cli.Context) error {
access, secret, userId, groupId, role := ctx.String("access"), ctx.String("secret"), ctx.Int("user-id"), ctx.Int("group-id"), auth.Role(ctx.String("role"))
access, secret, userId, groupId := ctx.String("access"), ctx.String("secret"), ctx.Int("user-id"), ctx.Int("group-id")
props := auth.MutableProps{}
if ctx.IsSet("role") {
if !role.IsValid() {
return fmt.Errorf("invalid user role: %v", role)
}
props.Role = role
}
if ctx.IsSet("secret") {
props.Secret = &secret
}
@@ -334,24 +317,24 @@ func updateUser(ctx *cli.Context) error {
props.GroupID = &groupId
}
propsxml, err := xml.Marshal(props)
propsJSON, err := json.Marshal(props)
if err != nil {
return fmt.Errorf("failed to parse user attributes: %w", err)
}
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/update-user?access=%v", adminEndpoint, access), bytes.NewBuffer(propsxml))
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/update-user?access=%v", adminEndpoint, access), bytes.NewBuffer(propsJSON))
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
signer := v4.NewSigner()
hashedPayload := sha256.Sum256(propsxml)
hashedPayload := sha256.Sum256(propsJSON)
hexPayload := hex.EncodeToString(hashedPayload[:])
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -370,9 +353,11 @@ func updateUser(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
fmt.Printf("%s\n", body)
return nil
}
@@ -389,7 +374,7 @@ func listUsers(ctx *cli.Context) error {
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -408,15 +393,15 @@ func listUsers(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
var accs auth.ListUserAccountsResult
if err := xml.Unmarshal(body, &accs); err != nil {
var accs []auth.Account
if err := json.Unmarshal(body, &accs); err != nil {
return err
}
printAcctTable(accs.Accounts)
printAcctTable(accs)
return nil
}
@@ -456,7 +441,7 @@ func changeBucketOwner(ctx *cli.Context) error {
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -475,9 +460,11 @@ func changeBucketOwner(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
fmt.Println(string(body))
return nil
}
@@ -506,7 +493,7 @@ func listBuckets(ctx *cli.Context) error {
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -525,26 +512,15 @@ func listBuckets(ctx *cli.Context) error {
}
if resp.StatusCode >= 400 {
return parseApiError(body)
return fmt.Errorf("%s", body)
}
var result s3response.ListBucketsResult
if err := xml.Unmarshal(body, &result); err != nil {
var buckets []s3response.Bucket
if err := json.Unmarshal(body, &buckets); err != nil {
return err
}
printBuckets(result.Buckets)
printBuckets(buckets)
return nil
}
func parseApiError(body []byte) error {
var apiErr smithy.GenericAPIError
err := xml.Unmarshal(body, &apiErr)
if err != nil {
apiErr.Code = "InternalServerError"
apiErr.Message = err.Error()
}
return &apiErr
}
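
Every admin command above follows the same sign-and-send shape. A self-contained sketch of that shape; the endpoint, credentials, region, and body are placeholders:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	payload := []byte(`<Account><Access>alice</Access></Account>`) // illustrative body
	req, err := http.NewRequest(http.MethodPatch,
		"http://127.0.0.1:7070/create-user", bytes.NewBuffer(payload))
	if err != nil {
		log.Fatal(err)
	}

	// Hash the payload and pin it in the header, exactly as above.
	sum := sha256.Sum256(payload)
	hexPayload := hex.EncodeToString(sum[:])
	req.Header.Set("X-Amz-Content-Sha256", hexPayload)

	// Credentials and region are placeholders.
	err = v4.NewSigner().SignHTTP(req.Context(),
		aws.Credentials{AccessKeyID: "admin", SecretAccessKey: "secret"},
		req, hexPayload, "s3", "us-east-1", time.Now())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Header.Get("Authorization"))
}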

View File

@@ -7,7 +7,6 @@ import (
"path/filepath"
"sync"
"testing"
"time"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
@@ -58,9 +57,7 @@ func initPosix(ctx context.Context) {
log.Fatalf("make temp directory: %v", err)
}
be, err := posix.New(tempdir, meta.XattrMeta{}, posix.PosixOpts{
NewDirPerm: 0755,
})
be, err := posix.New(tempdir, meta.XattrMeta{}, posix.PosixOpts{})
if err != nil {
log.Fatalf("init posix: %v", err)
}
@@ -78,9 +75,6 @@ func initPosix(ctx context.Context) {
}
wg.Done()
}()
// wait for server to start
time.Sleep(1 * time.Second)
}
func TestIntegration(t *testing.T) {

View File

@@ -50,7 +50,6 @@ var (
logWebhookURL, accessLog string
adminLogFile string
healthPath string
virtualDomain string
debug bool
pprof string
quiet bool
@@ -61,10 +60,10 @@ var (
ldapAccessAtr, ldapSecAtr, ldapRoleAtr string
ldapUserIdAtr, ldapGroupIdAtr string
vaultEndpointURL, vaultSecretStoragePath string
vaultAuthMethod, vaultMountPath string
vaultRootToken, vaultRoleId string
vaultRoleSecret, vaultServerCert string
vaultClientCert, vaultClientCertKey string
vaultMountPath, vaultRootToken string
vaultRoleId, vaultRoleSecret string
vaultServerCert, vaultClientCert string
vaultClientCertKey string
s3IamAccess, s3IamSecret string
s3IamRegion, s3IamBucket string
s3IamEndpoint string
@@ -75,9 +74,6 @@ var (
metricsService string
statsdServers string
dogstatsServers string
ipaHost, ipaVaultName string
ipaUser, ipaPassword string
ipaInsecure, ipaDebug bool
)
var (
@@ -99,7 +95,6 @@ func main() {
scoutfsCommand(),
s3Command(),
azureCommand(),
pluginCommand(),
adminCommand(),
testCommand(),
utilsCommand(),
@@ -211,7 +206,6 @@ func initFlags() []cli.Flag {
&cli.BoolFlag{
Name: "debug",
Usage: "enable debug output",
Value: false,
EnvVars: []string{"VGW_DEBUG"},
Destination: &debug,
},
@@ -228,13 +222,6 @@ func initFlags() []cli.Flag {
Destination: &quiet,
Aliases: []string{"q"},
},
&cli.StringFlag{
Name: "virtual-domain",
Usage: "enables the virtual host style bucket addressing with the specified arg as the base domain",
EnvVars: []string{"VGW_VIRTUAL_DOMAIN"},
Destination: &virtualDomain,
Aliases: []string{"vd"},
},
&cli.StringFlag{
Name: "access-log",
Usage: "enable server access logging to specified file",
@@ -380,12 +367,6 @@ func initFlags() []cli.Flag {
EnvVars: []string{"VGW_IAM_VAULT_SECRET_STORAGE_PATH"},
Destination: &vaultSecretStoragePath,
},
&cli.StringFlag{
Name: "iam-vault-auth-method",
Usage: "vault server auth method",
EnvVars: []string{"VGW_IAM_VAULT_AUTH_METHOD"},
Destination: &vaultAuthMethod,
},
&cli.StringFlag{
Name: "iam-vault-mount-path",
Usage: "vault server mount path",
@@ -525,42 +506,6 @@ func initFlags() []cli.Flag {
Aliases: []string{"mds"},
Destination: &dogstatsServers,
},
&cli.StringFlag{
Name: "ipa-host",
Usage: "FreeIPA server url e.g. https://ipa.example.test",
EnvVars: []string{"VGW_IPA_HOST"},
Destination: &ipaHost,
},
&cli.StringFlag{
Name: "ipa-vault-name",
Usage: "A name of the user vault containing their secret",
EnvVars: []string{"VGW_IPA_VAULT_NAME"},
Destination: &ipaVaultName,
},
&cli.StringFlag{
Name: "ipa-user",
Usage: "Username used to connect to FreeIPA (requires permissions to read user vault contents)",
EnvVars: []string{"VGW_IPA_USER"},
Destination: &ipaUser,
},
&cli.StringFlag{
Name: "ipa-password",
Usage: "Password of the user used to connect to FreeIPA",
EnvVars: []string{"VGW_IPA_PASSWORD"},
Destination: &ipaPassword,
},
&cli.BoolFlag{
Name: "ipa-insecure",
Usage: "Disable verify TLS certificate of FreeIPA server",
EnvVars: []string{"VGW_IPA_INSECURE"},
Destination: &ipaInsecure,
},
&cli.BoolFlag{
Name: "ipa-debug",
Usage: "FreeIPA IAM debug output",
EnvVars: []string{"VGW_IPA_DEBUG"},
Destination: &ipaDebug,
},
}
}
@@ -617,9 +562,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
if readonly {
opts = append(opts, s3api.WithReadOnly())
}
if virtualDomain != "" {
opts = append(opts, s3api.WithHostStyle(virtualDomain))
}
admApp := fiber.New(fiber.Config{
AppName: "versitygw",
@@ -664,7 +606,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
LDAPGroupIdAtr: ldapGroupIdAtr,
VaultEndpointURL: vaultEndpointURL,
VaultSecretStoragePath: vaultSecretStoragePath,
VaultAuthMethod: vaultAuthMethod,
VaultMountPath: vaultMountPath,
VaultRootToken: vaultRootToken,
VaultRoleId: vaultRoleId,
@@ -682,12 +623,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
CacheDisable: iamCacheDisable,
CacheTTL: iamCacheTTL,
CachePrune: iamCachePrune,
IpaHost: ipaHost,
IpaVaultName: ipaVaultName,
IpaUser: ipaUser,
IpaPassword: ipaPassword,
IpaInsecure: ipaInsecure,
IpaDebug: ipaDebug,
})
if err != nil {
return fmt.Errorf("setup iam: %w", err)

View File

@@ -1,74 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
"errors"
"fmt"
"plugin"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/plugins"
)
func pluginCommand() *cli.Command {
return &cli.Command{
Name: "plugin",
Usage: "load a backend from a plugin",
Description: "Runs a s3 gateway and redirects the requests to the backend defined in the plugin",
Action: runPluginBackend,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "config",
Usage: "location of the config file",
Aliases: []string{"c"},
},
},
}
}
func runPluginBackend(ctx *cli.Context) error {
if ctx.NArg() == 0 {
return fmt.Errorf("no plugin file provided to be loaded")
}
pluginPath := ctx.Args().Get(0)
config := ctx.String("config")
p, err := plugin.Open(pluginPath)
if err != nil {
return err
}
backendSymbol, err := p.Lookup("Backend")
if err != nil {
return err
}
backendPluginPtr, ok := backendSymbol.(*plugins.BackendPlugin)
if !ok {
return errors.New("plugin is not of type *plugins.BackendPlugin")
}
if backendPluginPtr == nil {
return errors.New("variable Backend is nil")
}
be, err := (*backendPluginPtr).New(config)
if err != nil {
return err
}
return runGateway(ctx.Context, be)
}
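
For context on the loader being deleted above: a loadable plugin would export a package-level Backend variable of type plugins.BackendPlugin and be built with go build -buildmode=plugin. The method set sketched here is inferred from the loader's calls, not confirmed against the plugins package, so treat the New signature as an assumption:

// mybackend.go: hypothetical plugin source.
package main

import (
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/plugins"
)

type myPlugin struct{}

// New is assumed to receive the --config value and return a ready backend.
func (myPlugin) New(config string) (backend.Backend, error) {
	// ... construct and return a backend.Backend implementation ...
	return nil, nil
}

// Backend is the symbol the loader looks up and asserts
// to *plugins.BackendPlugin.
var Backend plugins.BackendPlugin = myPlugin{}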

View File

@@ -16,8 +16,6 @@ package main
import (
"fmt"
"io/fs"
"math"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/backend/meta"
@@ -28,10 +26,6 @@ var (
chownuid, chowngid bool
bucketlinks bool
versioningDir string
dirPerms uint
sidecar string
nometa bool
forceNoTmpFile bool
)
func posixCommand() *cli.Command {
@@ -74,32 +68,6 @@ will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
EnvVars: []string{"VGW_VERSIONING_DIR"},
Destination: &versioningDir,
},
&cli.UintFlag{
Name: "dir-perms",
Usage: "default directory permissions for new directories",
EnvVars: []string{"VGW_DIR_PERMS"},
Destination: &dirPerms,
DefaultText: "0755",
Value: 0755,
},
&cli.StringFlag{
Name: "sidecar",
Usage: "use provided sidecar directory to store metadata",
EnvVars: []string{"VGW_META_SIDECAR"},
Destination: &sidecar,
},
&cli.BoolFlag{
Name: "nometa",
Usage: "disable metadata storage",
EnvVars: []string{"VGW_META_NONE"},
Destination: &nometa,
},
&cli.BoolFlag{
Name: "disableotmp",
Usage: "disable O_TMPFILE support for new objects",
EnvVars: []string{"VGW_DISABLE_OTMP"},
Destination: &forceNoTmpFile,
},
},
}
}
@@ -110,46 +78,19 @@ func runPosix(ctx *cli.Context) error {
}
gwroot := (ctx.Args().Get(0))
if dirPerms > math.MaxUint32 {
return fmt.Errorf("invalid directory permissions: %d", dirPerms)
}
if nometa && sidecar != "" {
return fmt.Errorf("cannot use both nometa and sidecar metadata")
}
opts := posix.PosixOpts{
ChownUID: chownuid,
ChownGID: chowngid,
BucketLinks: bucketlinks,
VersioningDir: versioningDir,
NewDirPerm: fs.FileMode(dirPerms),
ForceNoTmpFile: forceNoTmpFile,
}
var ms meta.MetadataStorer
switch {
case sidecar != "":
sc, err := meta.NewSideCar(sidecar)
if err != nil {
return fmt.Errorf("failed to init sidecar metadata: %w", err)
}
ms = sc
opts.SideCarDir = sidecar
case nometa:
ms = meta.NoMeta{}
default:
ms = meta.XattrMeta{}
err := meta.XattrMeta{}.Test(gwroot)
if err != nil {
return fmt.Errorf("xattr check failed: %w", err)
}
}
be, err := posix.New(gwroot, ms, opts)
err := meta.XattrMeta{}.Test(gwroot)
if err != nil {
return fmt.Errorf("failed to init posix backend: %w", err)
return fmt.Errorf("posix xattr check: %v", err)
}
be, err := posix.New(gwroot, meta.XattrMeta{}, posix.PosixOpts{
ChownUID: chownuid,
ChownGID: chowngid,
BucketLinks: bucketlinks,
VersioningDir: versioningDir,
})
if err != nil {
return fmt.Errorf("init posix: %v", err)
}
return runGateway(ctx.Context, be)

View File

@@ -26,10 +26,8 @@ var (
s3proxySecret string
s3proxyEndpoint string
s3proxyRegion string
s3proxyMetaBucket string
s3proxyDisableChecksum bool
s3proxySslSkipVerify bool
s3proxyUsePathStyle bool
s3proxyDebug bool
)
@@ -73,12 +71,6 @@ to an s3 storage backend service.`,
EnvVars: []string{"VGW_S3_REGION"},
Destination: &s3proxyRegion,
},
&cli.StringFlag{
Name: "meta-bucket",
Usage: "s3 service meta bucket to store buckets acl/policy",
EnvVars: []string{"VGW_S3_META_BUCKET"},
Destination: &s3proxyMetaBucket,
},
&cli.BoolFlag{
Name: "disable-checksum",
Usage: "disable gateway to server object checksums",
@@ -93,13 +85,6 @@ to an s3 storage backend service.`,
Value: false,
Destination: &s3proxySslSkipVerify,
},
&cli.BoolFlag{
Name: "use-path-style",
Usage: "use path style addressing for s3 proxy",
EnvVars: []string{"VGW_S3_USE_PATH_STYLE"},
Value: false,
Destination: &s3proxyUsePathStyle,
},
&cli.BoolFlag{
Name: "debug",
Usage: "output extra debug tracing",
@@ -112,8 +97,8 @@ to an s3 storage backend service.`,
}
func runS3(ctx *cli.Context) error {
be, err := s3proxy.New(ctx.Context, s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
s3proxyMetaBucket, s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyUsePathStyle, s3proxyDebug)
be, err := s3proxy.New(s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyDebug)
if err != nil {
return fmt.Errorf("init s3 backend: %w", err)
}

View File

@@ -16,16 +16,13 @@ package main
import (
"fmt"
"io/fs"
"math"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/backend/scoutfs"
)
var (
glacier bool
disableNoArchive bool
glacier bool
)
func scoutfsCommand() *cli.Command {
@@ -72,20 +69,6 @@ move interfaces as well as support for tiered filesystems.`,
EnvVars: []string{"VGW_BUCKET_LINKS"},
Destination: &bucketlinks,
},
&cli.UintFlag{
Name: "dir-perms",
Usage: "default directory permissions for new directories",
EnvVars: []string{"VGW_DIR_PERMS"},
Destination: &dirPerms,
DefaultText: "0755",
Value: 0755,
},
&cli.BoolFlag{
Name: "disable-noarchive",
Usage: "disable setting noarchive for multipart part uploads",
EnvVars: []string{"VGW_DISABLE_NOARCHIVE"},
Destination: &disableNoArchive,
},
},
}
}
@@ -95,17 +78,11 @@ func runScoutfs(ctx *cli.Context) error {
return fmt.Errorf("no directory provided for operation")
}
if dirPerms > math.MaxUint32 {
return fmt.Errorf("invalid directory permissions: %d", dirPerms)
}
var opts scoutfs.ScoutfsOpts
opts.GlacierMode = glacier
opts.ChownUID = chownuid
opts.ChownGID = chowngid
opts.BucketLinks = bucketlinks
opts.NewDirPerm = fs.FileMode(dirPerms)
opts.DisableNoArchive = disableNoArchive
be, err := scoutfs.New(ctx.Args().Get(0), opts)
if err != nil {

View File

@@ -34,11 +34,9 @@ var (
totalReqs int
upload bool
download bool
hostStyle bool
pathStyle bool
checksumDisable bool
versioningEnabled bool
azureTests bool
tlsStatus bool
)
func testCommand() *cli.Command {
@@ -74,24 +72,12 @@ func initTestFlags() []cli.Flag {
Destination: &endpoint,
Aliases: []string{"e"},
},
&cli.BoolFlag{
Name: "host-style",
Usage: "Use host-style bucket addressing",
Value: false,
Destination: &hostStyle,
},
&cli.BoolFlag{
Name: "debug",
Usage: "enable debug mode",
Aliases: []string{"d"},
Destination: &debug,
},
&cli.BoolFlag{
Name: "allow-insecure",
Usage: "skip tls verification",
Aliases: []string{"ai"},
Destination: &tlsStatus,
},
}
}
@@ -109,31 +95,12 @@ func initTestCommands() []*cli.Command {
Destination: &versioningEnabled,
Aliases: []string{"vs"},
},
&cli.BoolFlag{
Name: "azure-test-mode",
Usage: "Skips tests that are not supported by Azure",
Destination: &azureTests,
Aliases: []string{"azure"},
},
},
},
{
Name: "posix",
Usage: "Tests posix specific features",
Action: getAction(integration.TestPosix),
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "versioning-enabled",
Usage: "Test posix when versioning is enabled",
Destination: &versioningEnabled,
Aliases: []string{"vs"},
},
},
},
{
Name: "scoutfs",
Usage: "Tests scoutfs full flow",
Action: getAction(integration.TestScoutfs),
},
{
Name: "iam",
@@ -197,6 +164,12 @@ func initTestCommands() []*cli.Command {
Value: 1,
Destination: &concurrency,
},
&cli.BoolFlag{
Name: "pathStyle",
Usage: "Use Pathstyle bucket addressing",
Value: false,
Destination: &pathStyle,
},
&cli.BoolFlag{
Name: "checksumDis",
Usage: "Disable server checksum",
@@ -223,13 +196,12 @@ func initTestCommands() []*cli.Command {
integration.WithEndpoint(endpoint),
integration.WithConcurrency(concurrency),
integration.WithPartSize(partSize),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
if pathStyle {
opts = append(opts, integration.WithPathStyle())
}
if checksumDisable {
opts = append(opts, integration.WithDisableChecksum())
@@ -284,7 +256,6 @@ func initTestCommands() []*cli.Command {
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithConcurrency(concurrency),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
@@ -292,9 +263,6 @@ func initTestCommands() []*cli.Command {
if checksumDisable {
opts = append(opts, integration.WithDisableChecksum())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
}
s3conf := integration.NewS3Conf(opts...)
@@ -313,7 +281,6 @@ func getAction(tf testFunc) func(*cli.Context) error {
integration.WithSecret(awsSecret),
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
@@ -321,12 +288,6 @@ func getAction(tf testFunc) func(*cli.Context) error {
if versioningEnabled {
opts = append(opts, integration.WithVersioningEnabled())
}
if azureTests {
opts = append(opts, integration.WithAzureMode())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
}
s := integration.NewS3Conf(opts...)
tf(s)
@@ -354,30 +315,15 @@ func extractIntTests() (commands []*cli.Command) {
integration.WithSecret(awsSecret),
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
}
if versioningEnabled {
opts = append(opts, integration.WithVersioningEnabled())
}
if hostStyle {
opts = append(opts, integration.WithHostStyle())
}
s := integration.NewS3Conf(opts...)
err := testFunc(s)
return err
},
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "versioning-enabled",
Usage: "Test the bucket object versioning, if the versioning is enabled",
Destination: &versioningEnabled,
Aliases: []string{"vs"},
},
},
})
}
return

View File

@@ -99,26 +99,6 @@ ROOT_SECRET_ACCESS_KEY=
# endpoint is unauthenticated, and returns a 200 status for GET.
#VGW_HEALTH=
# Enable VGW_READ_ONLY to only allow read operations to the S3 server. No write
# operations will be allowed.
#VGW_READ_ONLY=false
# The VGW_VIRTUAL_DOMAIN option enables the virtual host style bucket
# addressing. The path style addressing is the default, and remains enabled
# even when virtual host style is enabled. The VGW_VIRTUAL_DOMAIN option
# specifies the domain name that will be used for the virtual host style
# addressing. For virtual addressing, access to a bucket is in the request
# form:
# https://<bucket>.<VGW_VIRTUAL_DOMAIN>/
# for example: https://mybucket.example.com/ where
# VGW_VIRTUAL_DOMAIN=example.com
# and all subdomains of VGW_VIRTUAL_DOMAIN should be reserved for buckets.
# This means that virtual host addressing will generally require a DNS
# entry for each bucket that needs to be accessed.
# The default path style request is of the form:
# https://<VGW_ENDPOINT>/<bucket>
#VGW_VIRTUAL_DOMAIN=
###############
# Access Logs #
###############
@@ -260,24 +240,6 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_IAM_LDAP_USER_ID_ATR=
#VGW_IAM_LDAP_GROUP_ID_ATR=
# The FreeIPA options will enable the FreeIPA IAM service with accounts stored
# in an external FreeIPA service. Currently the FreeIPA IAM service only
# supports account retrieval. Creating and modifying accounts must be done
# outside of the versitygw service.
# FreeIPA server url e.g. https://ipa.example.test
#VGW_IPA_HOST=
# A name of the user vault containing their secret
#VGW_IPA_VAULT_NAME=
# Username used to connect to FreeIPA (requires permissions to read user vault
# contents)
#VGW_IPA_USER=
# Password of the user used to connect to FreeIPA
#VGW_IPA_PASSWORD=
# Disable verify TLS certificate of FreeIPA server
#VGW_IPA_INSECURE=false
# FreeIPA IAM debug output
#VGW_IPA_DEBUG=false
###############
# IAM caching #
###############
@@ -349,46 +311,6 @@ ROOT_SECRET_ACCESS_KEY=
# to directories at the top level gateway directory as buckets.
#VGW_BUCKET_LINKS=false
# The default permissions mode when creating new directories is 0755. Use
# VGW_DIR_PERMS option to set a different mode for any new directory that the
# gateway creates. This applies to buckets created through the gateway as well
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755
# To enable object versions, the VGW_VERSIONING_DIR option must be set to the
# directory that will be used to store the object versions. The version
# directory must NOT be a subdirectory of the VGW_BACKEND_ARG directory.
#VGW_VERSIONING_DIR=
# The gateway uses xattrs to store metadata for objects by default. For systems
# that do not support xattrs, the VGW_META_SIDECAR option can be set to a
# directory that will be used to store the metadata for objects. This is
# currently experimental, and may have issues for some edge cases.
#VGW_META_SIDECAR=
# The VGW_META_NONE option will disable the metadata functionality for the
# gateway. This will cause the gateway to not store any metadata for objects
# or buckets. This includes bucket ACLs and policies. This may be useful for
# read-only access to pre-existing data where the gateway should not modify
# the data. It is recommended to enable VGW_READ_ONLY (Global Options) along
# with this.
#VGW_META_NONE=false
# The gateway will use O_TMPFILE for writing objects while uploading and
# link the file to the final object name when the upload is complete if the
# filesystem supports O_TMPFILE. This creates an atomic object creation
# that is not visible to other clients or racing uploads until the upload
# is complete. This will not work if there is a different filesystem mounted
# below the bucket level than where the bucket resides. The VGW_DISABLE_OTMP
# option can be set to true to disable this functionality and force the
# fallback mode that is otherwise used when O_TMPFILE is unavailable. This
# fallback will create a temporary
# file in the bucket directory and rename it to the final object name when
# the upload is complete if the final location is in the same filesystem, or
# copy the file to the final location if the final location is in a different
# filesystem. This fallback mode is still atomic, but may be less efficient
# than O_TMPFILE when the data needs to be copied into the final location.
#VGW_DISABLE_OTMP=false
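# To exercise the fallback path on a filesystem that does support O_TMPFILE,
# one illustrative approach is to force it at startup:
#   VGW_DISABLE_OTMP=true ./versitygw -a user -s pass posix /tmp/gw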
###########
# scoutfs #
###########
@@ -424,19 +346,6 @@ ROOT_SECRET_ACCESS_KEY=
# to directories at the top level gateway directory as buckets.
#VGW_BUCKET_LINKS=false
# The default permissions mode when creating new directories is 0755. Use
# VGW_DIR_PERMS option to set a different mode for any new directory that the
# gateway creates. This applies to buckets created through the gateway as well
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755
# The default behavior of the gateway is to automatically set the noarchive
# flag on the multipart upload parts while the multipart upload is in progress.
# This is to prevent the parts from being archived since they are temporary
# and will be deleted after the multipart upload is completed or aborted. The
# VGW_DISABLE_NOARCHIVE option can be set to true to disable this behavior.
#VGW_DISABLE_NOARCHIVE=false
######
# s3 #
######

104
go.mod
View File

@@ -1,83 +1,81 @@
module github.com/versity/versitygw
go 1.23.0
toolchain go1.24.1
go 1.21.0
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1
github.com/DataDog/datadog-go/v5 v5.6.0
github.com/aws/aws-sdk-go-v2 v1.36.5
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0
github.com/aws/smithy-go v1.22.4
github.com/go-ldap/ldap/v3 v3.4.11
github.com/gofiber/fiber/v2 v2.52.8
github.com/google/go-cmp v0.7.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0
github.com/DataDog/datadog-go/v5 v5.5.0
github.com/aws/aws-sdk-go-v2 v1.30.5
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2
github.com/aws/smithy-go v1.20.4
github.com/go-ldap/ldap/v3 v3.4.8
github.com/gofiber/fiber/v2 v2.52.5
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/hashicorp/vault-client-go v0.4.3
github.com/nats-io/nats.go v1.43.0
github.com/oklog/ulid/v2 v2.1.1
github.com/pkg/xattr v0.4.12
github.com/segmentio/kafka-go v0.4.48
github.com/smira/go-statsd v1.3.4
github.com/urfave/cli/v2 v2.27.7
github.com/valyala/fasthttp v1.63.0
github.com/nats-io/nats.go v1.37.0
github.com/oklog/ulid/v2 v2.1.0
github.com/pkg/xattr v0.4.10
github.com/segmentio/kafka-go v0.4.47
github.com/smira/go-statsd v1.3.3
github.com/urfave/cli/v2 v2.27.4
github.com/valyala/fasthttp v1.55.0
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
golang.org/x/sync v0.16.0
golang.org/x/sys v0.34.0
golang.org/x/sys v0.25.0
)
require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/nats-io/nkeys v0.4.11 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/text v0.27.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/crypto v0.27.0 // indirect
golang.org/x/net v0.29.0 // indirect
golang.org/x/text v0.18.0 // indirect
golang.org/x/time v0.6.0 // indirect
)
require (
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.17
github.com/aws/aws-sdk-go-v2/credentials v1.17.70
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/andybalholm/brotli v1.1.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect
github.com/aws/aws-sdk-go-v2/config v1.27.33
github.com/aws/aws-sdk-go-v2/credentials v1.17.32
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
)

242
go.sum
View File

@@ -1,102 +1,98 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 h1:Be6KInmFEKV81c0pOAEbRYehLMwmmGI1exuFj248AMk=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod h1:WCPBHsOXfBVnivScjs2ypRfimjEW0qPVLGgJkZlrIOA=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw=
github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/aws/aws-sdk-go-v2 v1.36.5 h1:0OF9RiEMEdDdZEMqF9MRjevyxAQcf6gY+E7vwBILFj0=
github.com/aws/aws-sdk-go-v2 v1.36.5/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY=
github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0=
github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8=
github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0=
github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83 h1:08otkOELsIi0toRRGMytlJhOctcN8xfKfKFR2NXz3kE=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83/go.mod h1:dGsGb2wI8JDWeMAhjVPP+z+dqvYjL6k6o+EujcRNk5c=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 h1:SsytQyTMHMDPspp+spo7XwXTP44aJZZAC7fBV2C5+5s=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36/go.mod h1:Q1lnJArKRXkenyog6+Y+zr7WDpk4e6XlR6gs20bbeNo=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 h1:i2vNHQiXUvKhs3quBR6aqlgJaiaexz/aNvdCktW/kAM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36/go.mod h1:UdyGa7Q91id/sdyHPwth+043HhmP6yP9MBHgbZM0xo8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36/go.mod h1:gDhdAV6wL3PmPqBhiPbnlS447GoWs8HTTOYef9/9Inw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 h1:nAP2GYbfh8dd2zGZqFRSMlq+/F6cMPBUuCsGAMkN074=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4/go.mod h1:LT10DsiGjLWh4GbjInf9LQejkYEhBgBCjLG5+lvk4EE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 h1:t0E6FzREdtCsiLIoLCWsYliNsRBgyGD/MCK571qk4MI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17/go.mod h1:ygpklyoaypuyDvOM5ujWGrYWpAK3h7ugnmKCU/76Ys4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 h1:qcLWgdhq45sDM9na4cvXax9dyLitn8EYBRl8Ak4XtG4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17/go.mod h1:M+jkjBFZ2J6DJrjMv2+vkBbuht6kxJYtJiwoVgX4p4U=
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0 h1:5Y75q0RPQoAbieyOuGLhjV9P3txvYgXv2lg0UwJOfmE=
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E=
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0=
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w=
github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw=
github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g=
github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw=
github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU=
github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks=
github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I=
github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18 h1:9DIp7vhmOPmueCDwpXa45bEbLHHTt1kcxChdTJWWxvI=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18/go.mod h1:aJv/Fwz8r56ozwYFRC4bzoeL1L17GYQYemfblOBux1M=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 h1:Roo69qTpfu8OlJ2Tb7pAYVuF0CpuUMB0IYWwYP/4DZM=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17/go.mod h1:NcWPxQzGM1USQggaTVwz6VpqMZPX1CvDJLDh6jnOCa4=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 h1:FLMkfEiRjhgeDTCjjLoc3URo/TBkgeQbocA78lfkzSI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19/go.mod h1:Vx+GucNSsdhaxs3aZIKfSUjKVGsxN25nX2SRcdhuw08=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 h1:u+EfGmksnJc/x5tq3A+OD7LrMbSSR/5TrKLvkdy/fhY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17/go.mod h1:VaMx6302JHax2vHJWgRo+5n9zvbacs3bLU/23DNQrTY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2 h1:Kp6PWAlXwP1UvIflkIP6MFZYBNDCa4mFCGtxrpICVOg=
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2/go.mod h1:5FmD/Dqq57gP+XwaUnd5WFPipAuzrf0HmupX27Gvjvc=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o=
github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4=
github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU=
github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM=
github.com/gofiber/fiber/v2 v2.52.8 h1:xl4jJQ0BV5EJTA2aWiKw/VddRpHrKeZLF0QPUxqn0x4=
github.com/gofiber/fiber/v2 v2.52.8/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.8 h1:loKJyspcRezt2Q3ZRMq2p/0v8iOurlmeXDPw6fikSvQ=
github.com/go-ldap/ldap/v3 v3.4.8/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk=
github.com/gofiber/fiber/v2 v2.52.5 h1:tWoP1MJQjGEe4GB5TUGOi7P2E0ZMMRx5ZTG4rT+yGMo=
github.com/gofiber/fiber/v2 v2.52.5/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/vault-client-go v0.4.3 h1:zG7STGVgn/VK6rnZc0k8PGbfv2x/sJExRKHSUg3ljWc=
@@ -113,42 +109,39 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug=
github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/xattr v0.4.12 h1:rRTkSyFNTRElv6pkA3zpjHpQ90p/OdHQC1GmGh1aTjM=
github.com/pkg/xattr v0.4.12/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -156,27 +149,30 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/segmentio/kafka-go v0.4.48 h1:9jyu9CWK4W5W+SroCe8EffbrRZVqAOkuaLd/ApID4Vs=
github.com/segmentio/kafka-go v0.4.48/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smira/go-statsd v1.3.4 h1:kBYWcLSGT+qC6JVbvfz48kX7mQys32fjDOPrfmsSx2c=
github.com/smira/go-statsd v1.3.4/go.mod h1:RjdsESPgDODtg1VpVVf9MJrEW2Hw0wtRNbmB1CAhu6A=
github.com/smira/go-statsd v1.3.3 h1:WnMlmGTyMpzto+HvOJWRPoLaLlk5EGfzsnlQBcvj4yI=
github.com/smira/go-statsd v1.3.3/go.mod h1:RjdsESPgDODtg1VpVVf9MJrEW2Hw0wtRNbmB1CAhu6A=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8=
github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.63.0 h1:DisIL8OjB7ul2d7cBaMRcKTQDYnrGy56R4FCiuDP0Ns=
github.com/valyala/fasthttp v1.63.0/go.mod h1:REc4IeW+cAEyLrRPa5A81MIjvz0QE1laoTX2EaPHKJM=
github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8=
github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44 h1:Wx1o3pNrCzsHIIDyZ2MLRr6tF/1FhAr7HNDn80QqDWE=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
@@ -187,35 +183,38 @@ github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -227,18 +226,23 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -246,10 +250,11 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
@@ -259,6 +264,7 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -72,17 +72,6 @@ var (
ActionPutBucketOwnershipControls = "s3_PutBucketOwnershipControls"
ActionGetBucketOwnershipControls = "s3_GetBucketOwnershipControls"
ActionDeleteBucketOwnershipControls = "s3_DeleteBucketOwnershipControls"
ActionPutBucketCors = "s3_PutBucketCors"
ActionGetBucketCors = "s3_GetBucketCors"
ActionDeleteBucketCors = "s3_DeleteBucketCors"
// Admin actions
ActionAdminCreateUser = "admin_CreateUser"
ActionAdminUpdateUser = "admin_UpdateUser"
ActionAdminDeleteUser = "admin_DeleteUser"
ActionAdminChangeBucketOwner = "admin_ChangeBucketOwner"
ActionAdminListUsers = "admin_ListUsers"
ActionAdminListBuckets = "admin_ListBuckets"
)
func init() {
@@ -269,16 +258,4 @@ func init() {
Name: "UploadPartCopy",
Service: "s3",
}
ActionMap[ActionPutBucketCors] = Action{
Name: "PutBucketCors",
Service: "s3",
}
ActionMap[ActionGetBucketCors] = Action{
Name: "GetBucketCors",
Service: "s3",
}
ActionMap[ActionDeleteBucketCors] = Action{
Name: "DeleteBucketCors",
Service: "s3",
}
}

View File

@@ -1,35 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package plugins
import "github.com/versity/versitygw/backend"
// BackendPlugin defines an interface for creating backend
// implementation instances.
// Plugins implementing this interface can be built as shared
// libraries using Go's plugin system (to build use `go build -buildmode=plugin`).
// The shared library should export an instance of
// this interface in a variable named `Backend`.
type BackendPlugin interface {
// New creates and initializes a new backend.Backend instance.
// The config parameter specifies the path of the file containing
// the configuration for the backend.
//
// Implementations of this method should perform the necessary steps to
// establish a connection to the underlying storage system or service
// (e.g., network storage system, distributed storage system, cloud storage)
// and configure it according to the provided configuration.
New(config string) (backend.Backend, error)
}
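A minimal sketch of producing such a shared library (the module path and output name are illustrative; the only hard requirements stated above are the plugin build mode and the exported variable named Backend):

go build -buildmode=plugin -o mybackend.so ./plugins/mybackend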

View File

@@ -5,27 +5,17 @@ rm -rf /tmp/gw
mkdir /tmp/gw
rm -rf /tmp/covdata
mkdir /tmp/covdata
rm -rf /tmp/versioning.covdata
mkdir /tmp/versioning.covdata
rm -rf /tmp/versioningdir
mkdir /tmp/versioningdir
# setup tls certificate and key
ECHO "Generating TLS certificate and key in the cert.pem and key.pem files"
openssl genpkey -algorithm RSA -out key.pem -pkeyopt rsa_keygen_bits:2048
openssl req -new -x509 -key key.pem -out cert.pem -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"
ECHO "Running the sdk test over http"
# run server in background (versioning not enabled)
# port: 7070 (default)
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass --iam-dir /tmp/gw posix /tmp/gw &
# run server in background
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_PID=$!
# wait a second for server to start up
sleep 1
# check if gateway process is still running
# check if server is still running
if ! kill -0 $GW_PID; then
echo "server no longer running"
exit 1
@@ -33,7 +23,7 @@ fi
# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow -vs; then
echo "full flow tests failed"
kill $GW_PID
exit 1
@@ -51,110 +41,8 @@ if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 iam; then
exit 1
fi
# kill off server
kill $GW_PID
ECHO "Running the sdk test over https"
# run server in background with TLS certificate
# port: 7071
GOCOVERDIR=/tmp/https.covdata ./versitygw --cert "$PWD/cert.pem" --key "$PWD/key.pem" -p :7071 -a user -s pass --iam-dir /tmp/gw posix /tmp/gw &
GW_HTTPS_PID=$!
sleep 1
# check if https gateway process is still running
if ! kill -0 $GW_HTTPS_PID; then
echo "server no longer running"
exit 1
fi
# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 full-flow; then
echo "full flow tests failed"
kill $GW_HTTPS_PID
exit 1
fi
# posix tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 posix; then
echo "posix tests failed"
kill $GW_HTTPS_PID
exit 1
fi
# iam tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 iam; then
echo "iam tests failed"
kill $GW_HTTPS_PID
exit 1
fi
kill $GW_HTTPS_PID
ECHO "Running the sdk test over http against the versioning-enabled gateway"
# run server in background versioning-enabled
# port: 7072
GOCOVERDIR=/tmp/versioning.covdata ./versitygw -p :7072 -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_VS_PID=$!
# wait a second for server to start up
sleep 1
# check if versioning-enabled gateway process is still running
if ! kill -0 $GW_VS_PID; then
echo "versioning-enabled server no longer running"
exit 1
fi
# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 full-flow -vs; then
echo "versioning-enabled full-flow tests failed"
kill $GW_VS_PID
exit 1
fi
# posix tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 posix -vs; then
echo "versiongin-enabled posix tests failed"
kill $GW_VS_PID
exit 1
fi
# kill off server
kill $GW_VS_PID
ECHO "Running the sdk test over https against the versioning-enabled gateway"
# run server in background versioning-enabled
# port: 7073
GOCOVERDIR=/tmp/versioning.https.covdata ./versitygw --cert "$PWD/cert.pem" --key "$PWD/key.pem" -p :7073 -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_VS_HTTPS_PID=$!
# wait a second for server to start up
sleep 1
# check if versioning-enabled gateway process is still running
if ! kill -0 $GW_VS_HTTPS_PID; then
echo "versioning-enabled server no longer running"
exit 1
fi
# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 full-flow -vs; then
echo "versioning-enabled full-flow tests failed"
kill $GW_VS_HTTPS_PID
exit 1
fi
# posix tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 posix -vs; then
echo "versiongin-enabled posix tests failed"
kill $GW_VS_HTTPS_PID
exit 1
fi
# kill off server
kill $GW_VS_HTTPS_PID
exit 0
# if the above binary was built with -cover enabled (make testbin),

View File

@@ -53,9 +53,6 @@ func NewAdminServer(app *fiber.App, be backend.Backend, root middlewares.RootUse
app.Use(middlewares.VerifyV4Signature(root, iam, l, nil, region, false))
app.Use(middlewares.VerifyMD5Body(l))
// Admin role checker
app.Use(middlewares.IsAdmin(l))
server.router.Init(app, be, iam, l)
return server

View File

@@ -15,17 +15,16 @@
package controllers
import (
"encoding/xml"
"net/http"
"encoding/json"
"errors"
"fmt"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
"github.com/versity/versitygw/s3response"
)
type AdminController struct {
@@ -39,151 +38,288 @@ func NewAdminController(iam auth.IAMService, be backend.Backend, l s3log.AuditLo
}
func (c AdminController) CreateUser(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:CreateUser",
})
}
var usr auth.Account
err := xml.Unmarshal(ctx.Body(), &usr)
err := json.Unmarshal(ctx.Body(), &usr)
if err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminCreateUser,
return sendResponse(ctx, fmt.Errorf("failed to parse request body: %w", err), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusBadRequest,
action: "admin:CreateUser",
})
}
if !usr.Role.IsValid() {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminInvalidUserRole),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminCreateUser,
if usr.Role != auth.RoleAdmin && usr.Role != auth.RoleUser && usr.Role != auth.RoleUserPlus {
return sendResponse(ctx, errors.New("invalid parameters: user role have to be one of the following: 'user', 'admin', 'userplus'"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusBadRequest,
action: "admin:CreateUser",
})
}
err = c.iam.CreateAccount(usr)
if err != nil {
status := fiber.StatusInternalServerError
err = fmt.Errorf("failed to create user: %w", err)
if strings.Contains(err.Error(), "user already exists") {
err = s3err.GetAPIError(s3err.ErrAdminUserExists)
status = fiber.StatusConflict
}
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminCreateUser,
return sendResponse(ctx, err, nil,
&metaOptions{
status: status,
logger: c.l,
action: "admin:CreateUser",
})
}
return SendResponse(ctx, nil,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminCreateUser,
Status: http.StatusCreated,
})
return sendResponse(ctx, nil, "The user has been created successfully", &metaOptions{
status: fiber.StatusCreated,
logger: c.l,
action: "admin:CreateUser",
})
}
func (c AdminController) UpdateUser(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:UpdateUser",
})
}
access := ctx.Query("access")
if access == "" {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminMissingUserAcess),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
return sendResponse(ctx, errors.New("missing user access parameter"), nil,
&metaOptions{
status: fiber.StatusBadRequest,
logger: c.l,
action: "admin:UpdateUser",
})
}
var props auth.MutableProps
if err := xml.Unmarshal(ctx.Body(), &props); err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedXML),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
if err := json.Unmarshal(ctx.Body(), &props); err != nil {
return sendResponse(ctx, fmt.Errorf("invalid request body %w", err), nil,
&metaOptions{
status: fiber.StatusBadRequest,
logger: c.l,
action: "admin:UpdateUser",
})
}
err := props.Validate()
err := c.iam.UpdateUserAccount(access, props)
if err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminInvalidUserRole),
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
})
}
status := fiber.StatusInternalServerError
err = fmt.Errorf("failed to update user account: %w", err)
err = c.iam.UpdateUserAccount(access, props)
if err != nil {
if strings.Contains(err.Error(), "user not found") {
err = s3err.GetAPIError(s3err.ErrAdminUserNotFound)
status = fiber.StatusNotFound
}
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
return sendResponse(ctx, err, nil,
&metaOptions{
status: status,
logger: c.l,
action: "admin:UpdateUser",
})
}
return SendResponse(ctx, nil,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminUpdateUser,
return sendResponse(ctx, nil, "the user has been updated successfully",
&metaOptions{
logger: c.l,
action: "admin:UpdateUser",
})
}
func (c AdminController) DeleteUser(ctx *fiber.Ctx) error {
access := ctx.Query("access")
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:DeleteUser",
})
}
err := c.iam.DeleteUserAccount(access)
return SendResponse(ctx, err,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminDeleteUser,
if err != nil {
return sendResponse(ctx, err, nil,
&metaOptions{
logger: c.l,
action: "admin:DeleteUser",
})
}
return sendResponse(ctx, nil, "The user has been deleted successfully",
&metaOptions{
logger: c.l,
action: "admin:DeleteUser",
})
}
func (c AdminController) ListUsers(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:ListUsers",
})
}
accs, err := c.iam.ListUserAccounts()
return SendXMLResponse(ctx,
auth.ListUserAccountsResult{
Accounts: accs,
}, err,
&MetaOpts{
Logger: c.l,
Action: metrics.ActionAdminListUsers,
return sendResponse(ctx, err, accs,
&metaOptions{
logger: c.l,
action: "admin:ListUsers",
})
}
func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:ChangeBucketOwner",
})
}
owner := ctx.Query("owner")
bucket := ctx.Query("bucket")
accs, err := auth.CheckIfAccountsExist([]string{owner}, c.iam)
if err != nil {
return sendResponse(ctx, err, nil,
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
})
}
if len(accs) > 0 {
return sendResponse(ctx, errors.New("user specified as the new bucket owner does not exist"), nil,
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
status: fiber.StatusNotFound,
})
}
acl := auth.ACL{
Owner: owner,
Grantees: []auth.Grantee{
{
Permission: types.PermissionFullControl,
Access: owner,
Type: types.TypeCanonicalUser,
},
},
}
aclParsed, err := json.Marshal(acl)
if err != nil {
return sendResponse(ctx, fmt.Errorf("failed to marshal the bucket acl: %w", err), nil,
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
})
}
err = c.be.ChangeBucketOwner(ctx.Context(), bucket, aclParsed)
return sendResponse(ctx, err, "Bucket owner has been updated successfully",
&metaOptions{
logger: c.l,
action: "admin:ChangeBucketOwner",
})
}
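ChangeBucketOwner no longer hands the backend a bare owner string; it builds a full ACL granting the new owner FULL_CONTROL and stores the marshaled JSON. A minimal sketch of the document that gets persisted, assuming auth.ACL relies on Go's default JSON field names (no custom tags) and using a hypothetical owner value:

// Sketch only; "user2" is illustrative.
acl := auth.ACL{
Owner: "user2",
Grantees: []auth.Grantee{{
Permission: types.PermissionFullControl, // "FULL_CONTROL"
Access: "user2",
Type: types.TypeCanonicalUser, // "CanonicalUser"
}},
}
b, _ := json.Marshal(acl)
fmt.Println(string(b))
// → {"Owner":"user2","Grantees":[{"Permission":"FULL_CONTROL","Access":"user2","Type":"CanonicalUser"}]}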
func (c AdminController) ListBuckets(ctx *fiber.Ctx) error {
acct := ctx.Locals("account").(auth.Account)
if acct.Role != "admin" {
return sendResponse(ctx, errors.New("access denied: only admin users have access to this resource"), nil,
&metaOptions{
logger: c.l,
status: fiber.StatusForbidden,
action: "admin:ListBuckets",
})
}
buckets, err := c.be.ListBucketsAndOwners(ctx.Context())
return sendResponse(ctx, err, buckets,
&metaOptions{
logger: c.l,
action: "admin:ListBuckets",
})
}
type metaOptions struct {
action string
status int
logger s3log.AuditLogger
}
func sendResponse(ctx *fiber.Ctx, err error, data any, m *metaOptions) error {
status := m.status
if err != nil {
if status == 0 {
status = fiber.StatusInternalServerError
}
if m.logger != nil {
m.logger.Log(ctx, err, []byte(err.Error()), s3log.LogMeta{
Action: m.action,
HttpStatus: status,
})
}
return ctx.Status(status).SendString(err.Error())
}
if status == 0 {
status = fiber.StatusOK
}
msg, ok := data.(string)
if ok {
if m.logger != nil {
m.logger.Log(ctx, nil, []byte(msg), s3log.LogMeta{
Action: m.action,
HttpStatus: status,
})
}
return ctx.Status(status).SendString(msg)
}
dataJSON, err := json.Marshal(data)
if err != nil {
return err
}
if m.logger != nil {
m.logger.Log(ctx, nil, dataJSON, s3log.LogMeta{
HttpStatus: status,
Action: m.action,
})
}
ctx.Set(fiber.HeaderContentType, fiber.MIMEApplicationJSON)
return ctx.Status(status).Send(dataJSON)
}
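sendResponse consolidates the controller's three response paths: an error is logged and written as plain text (defaulting to 500 when no status is set), a string payload is sent verbatim, and any other payload is JSON-encoded with Content-Type: application/json. A sketch of a hypothetical handler exercising each path; "admin:Example" is an illustrative action name, not one used by the controllers above:

func (c AdminController) example(ctx *fiber.Ctx) error {
accs, err := c.iam.ListUserAccounts()
if err != nil {
// error path: plain-text message, 500 since no status is given
return sendResponse(ctx, err, nil,
&metaOptions{logger: c.l, action: "admin:Example"})
}
if len(accs) == 0 {
// string path: the message is written verbatim with a 200
return sendResponse(ctx, nil, "no accounts configured",
&metaOptions{logger: c.l, action: "admin:Example"})
}
// data path: JSON body with Content-Type: application/json
return sendResponse(ctx, nil, accs,
&metaOptions{logger: c.l, action: "admin:Example"})
}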


@@ -15,11 +15,12 @@
package controllers
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/gofiber/fiber/v2"
@@ -42,26 +43,33 @@ func TestAdminController_CreateUser(t *testing.T) {
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
app.Patch("/create-user", adminController.CreateUser)
succUser := `
<Account>
<Access>access</Access>
<Secret>secret</Secret>
<Role>admin</Role>
<UserID>0</UserID>
<GroupID>0</GroupID>
</Account>
`
invuser := `
<Account>
<Access>access</Access>
<Secret>secret</Secret>
<Role>invalid_role</Role>
<UserID>0</UserID>
<GroupID>0</GroupID>
</Account>
`
appErr := fiber.New()
appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
usr := auth.Account{
Access: "access",
Secret: "secret",
Role: "invalid role",
}
user, _ := json.Marshal(&usr)
usr.Role = "admin"
succUsr, _ := json.Marshal(&usr)
appErr.Patch("/create-user", adminController.CreateUser)
tests := []struct {
name string
@@ -71,31 +79,31 @@ func TestAdminController_CreateUser(t *testing.T) {
statusCode int
}{
{
name: "Admin-create-user-malformed-body",
name: "Admin-create-user-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user", nil),
req: httptest.NewRequest(http.MethodPatch, "/create-user", bytes.NewBuffer(succUsr)),
},
wantErr: false,
statusCode: 201,
},
{
name: "Admin-create-user-invalid-user-role",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user", bytes.NewBuffer(user)),
},
wantErr: false,
statusCode: 400,
},
{
name: "Admin-create-user-invalid-requester-role",
app: app,
app: appErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user", strings.NewReader(invuser)),
req: httptest.NewRequest(http.MethodPatch, "/create-user", nil),
},
wantErr: false,
statusCode: 400,
},
{
name: "Admin-create-user-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user", strings.NewReader(succUser)),
},
wantErr: false,
statusCode: 201,
statusCode: 403,
},
}
for _, tt := range tests {
@@ -126,8 +134,24 @@ func TestAdminController_UpdateUser(t *testing.T) {
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
app.Patch("/update-user", adminController.UpdateUser)
appErr := fiber.New()
appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
appErr.Patch("/update-user", adminController.UpdateUser)
successBody, _ := json.Marshal(auth.MutableProps{Secret: getPtr("hello")})
adminControllerErr := AdminController{
iam: &IAMServiceMock{
UpdateUserAccountFunc: func(access string, props auth.MutableProps) error {
@@ -138,15 +162,12 @@ func TestAdminController_UpdateUser(t *testing.T) {
appNotFound := fiber.New()
appNotFound.Patch("/update-user", adminControllerErr.UpdateUser)
appNotFound.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
succUser := `
<Account>
<Secret>secret</Secret>
<UserID>0</UserID>
<GroupID>0</GroupID>
</Account>
`
appNotFound.Patch("/update-user", adminControllerErr.UpdateUser)
tests := []struct {
name string
@@ -159,7 +180,7 @@ func TestAdminController_UpdateUser(t *testing.T) {
name: "Admin-update-user-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/update-user?access=access", strings.NewReader(succUser)),
req: httptest.NewRequest(http.MethodPatch, "/update-user?access=access", bytes.NewBuffer(successBody)),
},
wantErr: false,
statusCode: 200,
@@ -168,10 +189,10 @@ func TestAdminController_UpdateUser(t *testing.T) {
name: "Admin-update-user-missing-access",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/update-user", strings.NewReader(succUser)),
req: httptest.NewRequest(http.MethodPatch, "/update-user", bytes.NewBuffer(successBody)),
},
wantErr: false,
statusCode: 404,
statusCode: 400,
},
{
name: "Admin-update-user-invalid-request-body",
@@ -182,11 +203,20 @@ func TestAdminController_UpdateUser(t *testing.T) {
wantErr: false,
statusCode: 400,
},
{
name: "Admin-update-user-invalid-requester-role",
app: appErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/update-user?access=access", nil),
},
wantErr: false,
statusCode: 403,
},
{
name: "Admin-update-user-not-found",
app: appNotFound,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/update-user?access=access", strings.NewReader(succUser)),
req: httptest.NewRequest(http.MethodPatch, "/update-user?access=access", bytes.NewBuffer(successBody)),
},
wantErr: false,
statusCode: 404,
@@ -220,8 +250,22 @@ func TestAdminController_DeleteUser(t *testing.T) {
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
app.Patch("/delete-user", adminController.DeleteUser)
appErr := fiber.New()
appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
appErr.Patch("/delete-user", adminController.DeleteUser)
tests := []struct {
name string
app *fiber.App
@@ -238,6 +282,15 @@ func TestAdminController_DeleteUser(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Admin-delete-user-invalid-requester-role",
app: appErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/delete-user?access=test", nil),
},
wantErr: false,
statusCode: 403,
},
}
for _, tt := range tests {
resp, err := tt.app.Test(tt.args.req)
@@ -274,9 +327,30 @@ func TestAdminController_ListUsers(t *testing.T) {
}
appErr := fiber.New()
appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
appErr.Patch("/list-users", adminControllerErr.ListUsers)
appRoleErr := fiber.New()
appRoleErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
appRoleErr.Patch("/list-users", adminController.ListUsers)
appSucc := fiber.New()
appSucc.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
appSucc.Patch("/list-users", adminController.ListUsers)
tests := []struct {
@@ -286,6 +360,15 @@ func TestAdminController_ListUsers(t *testing.T) {
wantErr bool
statusCode int
}{
{
name: "Admin-list-users-access-denied",
app: appRoleErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/list-users", nil),
},
wantErr: false,
statusCode: 403,
},
{
name: "Admin-list-users-iam-error",
app: appErr,
@@ -324,7 +407,7 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
}
adminController := AdminController{
be: &BackendMock{
ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket, owner string) error {
ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, acl []byte) error {
return nil
},
},
@@ -352,12 +435,39 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
}
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
app.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)
appRoleErr := fiber.New()
appRoleErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
appRoleErr.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)
appIamErr := fiber.New()
appIamErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
appIamErr.Patch("/change-bucket-owner", adminControllerIamErr.ChangeBucketOwner)
appIamNoSuchUser := fiber.New()
appIamNoSuchUser.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
appIamNoSuchUser.Patch("/change-bucket-owner", adminControllerIamAccDoesNotExist.ChangeBucketOwner)
tests := []struct {
@@ -367,6 +477,15 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
wantErr bool
statusCode int
}{
{
name: "Change-bucket-owner-access-denied",
app: appRoleErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/change-bucket-owner", nil),
},
wantErr: false,
statusCode: 403,
},
{
name: "Change-bucket-owner-check-account-server-error",
app: appIamErr,
@@ -421,8 +540,23 @@ func TestAdminController_ListBuckets(t *testing.T) {
}
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
return ctx.Next()
})
app.Patch("/list-buckets", adminController.ListBuckets)
appRoleErr := fiber.New()
appRoleErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
return ctx.Next()
})
appRoleErr.Patch("/list-buckets", adminController.ListBuckets)
tests := []struct {
name string
app *fiber.App
@@ -430,6 +564,15 @@ func TestAdminController_ListBuckets(t *testing.T) {
wantErr bool
statusCode int
}{
{
name: "List-buckets-incorrect-role",
app: appRoleErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/list-buckets", nil),
},
wantErr: false,
statusCode: 403,
},
{
name: "List-buckets-success",
app: app,


@@ -26,27 +26,24 @@ var _ backend.Backend = &BackendMock{}
// AbortMultipartUploadFunc: func(contextMoqParam context.Context, abortMultipartUploadInput *s3.AbortMultipartUploadInput) error {
// panic("mock out the AbortMultipartUpload method")
// },
// ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, owner string) error {
// ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket string, acl []byte) error {
// panic("mock out the ChangeBucketOwner method")
// },
// CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
// CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
// panic("mock out the CompleteMultipartUpload method")
// },
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
// panic("mock out the CopyObject method")
// },
// CreateBucketFunc: func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error {
// panic("mock out the CreateBucket method")
// },
// CreateMultipartUploadFunc: func(contextMoqParam context.Context, createMultipartUploadInput s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
// CreateMultipartUploadFunc: func(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
// panic("mock out the CreateMultipartUpload method")
// },
// DeleteBucketFunc: func(contextMoqParam context.Context, bucket string) error {
// DeleteBucketFunc: func(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error {
// panic("mock out the DeleteBucket method")
// },
// DeleteBucketCorsFunc: func(contextMoqParam context.Context, bucket string) error {
// panic("mock out the DeleteBucketCors method")
// },
// DeleteBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) error {
// panic("mock out the DeleteBucketOwnershipControls method")
// },
@@ -68,9 +65,6 @@ var _ backend.Backend = &BackendMock{}
// GetBucketAclFunc: func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error) {
// panic("mock out the GetBucketAcl method")
// },
// GetBucketCorsFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
// panic("mock out the GetBucketCors method")
// },
// GetBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error) {
// panic("mock out the GetBucketOwnershipControls method")
// },
@@ -80,7 +74,7 @@ var _ backend.Backend = &BackendMock{}
// GetBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) (map[string]string, error) {
// panic("mock out the GetBucketTagging method")
// },
// GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
// GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
// panic("mock out the GetBucketVersioning method")
// },
// GetObjectFunc: func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
@@ -89,7 +83,7 @@ var _ backend.Backend = &BackendMock{}
// GetObjectAclFunc: func(contextMoqParam context.Context, getObjectAclInput *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
// panic("mock out the GetObjectAcl method")
// },
// GetObjectAttributesFunc: func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
// GetObjectAttributesFunc: func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
// panic("mock out the GetObjectAttributes method")
// },
// GetObjectLegalHoldFunc: func(contextMoqParam context.Context, bucket string, object string, versionId string) (*bool, error) {
@@ -110,7 +104,7 @@ var _ backend.Backend = &BackendMock{}
// HeadObjectFunc: func(contextMoqParam context.Context, headObjectInput *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
// panic("mock out the HeadObject method")
// },
// ListBucketsFunc: func(contextMoqParam context.Context, listBucketsInput s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
// ListBucketsFunc: func(contextMoqParam context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
// panic("mock out the ListBuckets method")
// },
// ListBucketsAndOwnersFunc: func(contextMoqParam context.Context) ([]s3response.Bucket, error) {
@@ -134,9 +128,6 @@ var _ backend.Backend = &BackendMock{}
// PutBucketAclFunc: func(contextMoqParam context.Context, bucket string, data []byte) error {
// panic("mock out the PutBucketAcl method")
// },
// PutBucketCorsFunc: func(contextMoqParam context.Context, bytes []byte) error {
// panic("mock out the PutBucketCors method")
// },
// PutBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error {
// panic("mock out the PutBucketOwnershipControls method")
// },
@@ -149,7 +140,7 @@ var _ backend.Backend = &BackendMock{}
// PutBucketVersioningFunc: func(contextMoqParam context.Context, bucket string, status types.BucketVersioningStatus) error {
// panic("mock out the PutBucketVersioning method")
// },
// PutObjectFunc: func(contextMoqParam context.Context, putObjectInput s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
// PutObjectFunc: func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
// panic("mock out the PutObject method")
// },
// PutObjectAclFunc: func(contextMoqParam context.Context, putObjectAclInput *s3.PutObjectAclInput) error {
@@ -179,10 +170,10 @@ var _ backend.Backend = &BackendMock{}
// StringFunc: func() string {
// panic("mock out the String method")
// },
// UploadPartFunc: func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
// UploadPartFunc: func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error) {
// panic("mock out the UploadPart method")
// },
// UploadPartCopyFunc: func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
// UploadPartCopyFunc: func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
// panic("mock out the UploadPartCopy method")
// },
// }
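As the generated doc comment above shows, a moq mock is configured by assigning only the behavior functions a test needs; calling an unstubbed method panics. A minimal sketch under the updated ChangeBucketOwner signature (the bucket name and ACL payload are illustrative):

// Sketch: stub one method, call it, then assert on the recorded calls.
mock := &BackendMock{
ChangeBucketOwnerFunc: func(_ context.Context, bucket string, acl []byte) error {
return nil // accept any ACL; a real test might unmarshal and assert on it
},
}
_ = mock.ChangeBucketOwner(context.Background(), "my-bucket", []byte(`{"Owner":"user2"}`))
// Every invocation is recorded for later assertions:
calls := mock.ChangeBucketOwnerCalls()
fmt.Println(len(calls), calls[0].Bucket) // 1 my-bucket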
@@ -196,25 +187,22 @@ type BackendMock struct {
AbortMultipartUploadFunc func(contextMoqParam context.Context, abortMultipartUploadInput *s3.AbortMultipartUploadInput) error
// ChangeBucketOwnerFunc mocks the ChangeBucketOwner method.
ChangeBucketOwnerFunc func(contextMoqParam context.Context, bucket string, owner string) error
ChangeBucketOwnerFunc func(contextMoqParam context.Context, bucket string, acl []byte) error
// CompleteMultipartUploadFunc mocks the CompleteMultipartUpload method.
CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error)
CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
// CopyObjectFunc mocks the CopyObject method.
CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error)
CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
// CreateBucketFunc mocks the CreateBucket method.
CreateBucketFunc func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error
// CreateMultipartUploadFunc mocks the CreateMultipartUpload method.
CreateMultipartUploadFunc func(contextMoqParam context.Context, createMultipartUploadInput s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
CreateMultipartUploadFunc func(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
// DeleteBucketFunc mocks the DeleteBucket method.
DeleteBucketFunc func(contextMoqParam context.Context, bucket string) error
// DeleteBucketCorsFunc mocks the DeleteBucketCors method.
DeleteBucketCorsFunc func(contextMoqParam context.Context, bucket string) error
DeleteBucketFunc func(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error
// DeleteBucketOwnershipControlsFunc mocks the DeleteBucketOwnershipControls method.
DeleteBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string) error
@@ -237,9 +225,6 @@ type BackendMock struct {
// GetBucketAclFunc mocks the GetBucketAcl method.
GetBucketAclFunc func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error)
// GetBucketCorsFunc mocks the GetBucketCors method.
GetBucketCorsFunc func(contextMoqParam context.Context, bucket string) ([]byte, error)
// GetBucketOwnershipControlsFunc mocks the GetBucketOwnershipControls method.
GetBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error)
@@ -250,7 +235,7 @@ type BackendMock struct {
GetBucketTaggingFunc func(contextMoqParam context.Context, bucket string) (map[string]string, error)
// GetBucketVersioningFunc mocks the GetBucketVersioning method.
GetBucketVersioningFunc func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error)
GetBucketVersioningFunc func(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error)
// GetObjectFunc mocks the GetObject method.
GetObjectFunc func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput) (*s3.GetObjectOutput, error)
@@ -259,7 +244,7 @@ type BackendMock struct {
GetObjectAclFunc func(contextMoqParam context.Context, getObjectAclInput *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
// GetObjectAttributesFunc mocks the GetObjectAttributes method.
GetObjectAttributesFunc func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error)
GetObjectAttributesFunc func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error)
// GetObjectLegalHoldFunc mocks the GetObjectLegalHold method.
GetObjectLegalHoldFunc func(contextMoqParam context.Context, bucket string, object string, versionId string) (*bool, error)
@@ -280,7 +265,7 @@ type BackendMock struct {
HeadObjectFunc func(contextMoqParam context.Context, headObjectInput *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
// ListBucketsFunc mocks the ListBuckets method.
ListBucketsFunc func(contextMoqParam context.Context, listBucketsInput s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error)
ListBucketsFunc func(contextMoqParam context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error)
// ListBucketsAndOwnersFunc mocks the ListBucketsAndOwners method.
ListBucketsAndOwnersFunc func(contextMoqParam context.Context) ([]s3response.Bucket, error)
@@ -303,9 +288,6 @@ type BackendMock struct {
// PutBucketAclFunc mocks the PutBucketAcl method.
PutBucketAclFunc func(contextMoqParam context.Context, bucket string, data []byte) error
// PutBucketCorsFunc mocks the PutBucketCors method.
PutBucketCorsFunc func(contextMoqParam context.Context, bytes []byte) error
// PutBucketOwnershipControlsFunc mocks the PutBucketOwnershipControls method.
PutBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error
@@ -319,7 +301,7 @@ type BackendMock struct {
PutBucketVersioningFunc func(contextMoqParam context.Context, bucket string, status types.BucketVersioningStatus) error
// PutObjectFunc mocks the PutObject method.
PutObjectFunc func(contextMoqParam context.Context, putObjectInput s3response.PutObjectInput) (s3response.PutObjectOutput, error)
PutObjectFunc func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error)
// PutObjectAclFunc mocks the PutObjectAcl method.
PutObjectAclFunc func(contextMoqParam context.Context, putObjectAclInput *s3.PutObjectAclInput) error
@@ -349,10 +331,10 @@ type BackendMock struct {
StringFunc func() string
// UploadPartFunc mocks the UploadPart method.
UploadPartFunc func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error)
UploadPartFunc func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error)
// UploadPartCopyFunc mocks the UploadPartCopy method.
UploadPartCopyFunc func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error)
UploadPartCopyFunc func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)
// calls tracks calls to the methods.
calls struct {
@@ -369,8 +351,8 @@ type BackendMock struct {
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
// Owner is the owner argument value.
Owner string
// ACL is the acl argument value.
ACL []byte
}
// CompleteMultipartUpload holds details about calls to the CompleteMultipartUpload method.
CompleteMultipartUpload []struct {
@@ -384,7 +366,7 @@ type BackendMock struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// CopyObjectInput is the copyObjectInput argument value.
CopyObjectInput s3response.CopyObjectInput
CopyObjectInput *s3.CopyObjectInput
}
// CreateBucket holds details about calls to the CreateBucket method.
CreateBucket []struct {
@@ -400,21 +382,14 @@ type BackendMock struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// CreateMultipartUploadInput is the createMultipartUploadInput argument value.
CreateMultipartUploadInput s3response.CreateMultipartUploadInput
CreateMultipartUploadInput *s3.CreateMultipartUploadInput
}
// DeleteBucket holds details about calls to the DeleteBucket method.
DeleteBucket []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
}
// DeleteBucketCors holds details about calls to the DeleteBucketCors method.
DeleteBucketCors []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
// DeleteBucketInput is the deleteBucketInput argument value.
DeleteBucketInput *s3.DeleteBucketInput
}
// DeleteBucketOwnershipControls holds details about calls to the DeleteBucketOwnershipControls method.
DeleteBucketOwnershipControls []struct {
@@ -467,13 +442,6 @@ type BackendMock struct {
// GetBucketAclInput is the getBucketAclInput argument value.
GetBucketAclInput *s3.GetBucketAclInput
}
// GetBucketCors holds details about calls to the GetBucketCors method.
GetBucketCors []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
}
// GetBucketOwnershipControls holds details about calls to the GetBucketOwnershipControls method.
GetBucketOwnershipControls []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -579,8 +547,10 @@ type BackendMock struct {
ListBuckets []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// ListBucketsInput is the listBucketsInput argument value.
ListBucketsInput s3response.ListBucketsInput
// Owner is the owner argument value.
Owner string
// IsAdmin is the isAdmin argument value.
IsAdmin bool
}
// ListBucketsAndOwners holds details about calls to the ListBucketsAndOwners method.
ListBucketsAndOwners []struct {
@@ -631,13 +601,6 @@ type BackendMock struct {
// Data is the data argument value.
Data []byte
}
// PutBucketCors holds details about calls to the PutBucketCors method.
PutBucketCors []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bytes is the bytes argument value.
Bytes []byte
}
// PutBucketOwnershipControls holds details about calls to the PutBucketOwnershipControls method.
PutBucketOwnershipControls []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -679,7 +642,7 @@ type BackendMock struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// PutObjectInput is the putObjectInput argument value.
PutObjectInput s3response.PutObjectInput
PutObjectInput *s3.PutObjectInput
}
// PutObjectAcl holds details about calls to the PutObjectAcl method.
PutObjectAcl []struct {
@@ -778,7 +741,6 @@ type BackendMock struct {
lockCreateBucket sync.RWMutex
lockCreateMultipartUpload sync.RWMutex
lockDeleteBucket sync.RWMutex
lockDeleteBucketCors sync.RWMutex
lockDeleteBucketOwnershipControls sync.RWMutex
lockDeleteBucketPolicy sync.RWMutex
lockDeleteBucketTagging sync.RWMutex
@@ -786,7 +748,6 @@ type BackendMock struct {
lockDeleteObjectTagging sync.RWMutex
lockDeleteObjects sync.RWMutex
lockGetBucketAcl sync.RWMutex
lockGetBucketCors sync.RWMutex
lockGetBucketOwnershipControls sync.RWMutex
lockGetBucketPolicy sync.RWMutex
lockGetBucketTagging sync.RWMutex
@@ -808,7 +769,6 @@ type BackendMock struct {
lockListObjectsV2 sync.RWMutex
lockListParts sync.RWMutex
lockPutBucketAcl sync.RWMutex
lockPutBucketCors sync.RWMutex
lockPutBucketOwnershipControls sync.RWMutex
lockPutBucketPolicy sync.RWMutex
lockPutBucketTagging sync.RWMutex
@@ -864,23 +824,23 @@ func (mock *BackendMock) AbortMultipartUploadCalls() []struct {
}
// ChangeBucketOwner calls ChangeBucketOwnerFunc.
func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, bucket string, owner string) error {
func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, bucket string, acl []byte) error {
if mock.ChangeBucketOwnerFunc == nil {
panic("BackendMock.ChangeBucketOwnerFunc: method is nil but Backend.ChangeBucketOwner was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
Owner string
ACL []byte
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
Owner: owner,
ACL: acl,
}
mock.lockChangeBucketOwner.Lock()
mock.calls.ChangeBucketOwner = append(mock.calls.ChangeBucketOwner, callInfo)
mock.lockChangeBucketOwner.Unlock()
return mock.ChangeBucketOwnerFunc(contextMoqParam, bucket, owner)
return mock.ChangeBucketOwnerFunc(contextMoqParam, bucket, acl)
}
// ChangeBucketOwnerCalls gets all the calls that were made to ChangeBucketOwner.
@@ -890,12 +850,12 @@ func (mock *BackendMock) ChangeBucketOwner(contextMoqParam context.Context, buck
func (mock *BackendMock) ChangeBucketOwnerCalls() []struct {
ContextMoqParam context.Context
Bucket string
Owner string
ACL []byte
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
Owner string
ACL []byte
}
mock.lockChangeBucketOwner.RLock()
calls = mock.calls.ChangeBucketOwner
@@ -904,7 +864,7 @@ func (mock *BackendMock) ChangeBucketOwnerCalls() []struct {
}
// CompleteMultipartUpload calls CompleteMultipartUploadFunc.
func (mock *BackendMock) CompleteMultipartUpload(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
func (mock *BackendMock) CompleteMultipartUpload(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
if mock.CompleteMultipartUploadFunc == nil {
panic("BackendMock.CompleteMultipartUploadFunc: method is nil but Backend.CompleteMultipartUpload was just called")
}
@@ -940,13 +900,13 @@ func (mock *BackendMock) CompleteMultipartUploadCalls() []struct {
}
// CopyObject calls CopyObjectFunc.
func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
if mock.CopyObjectFunc == nil {
panic("BackendMock.CopyObjectFunc: method is nil but Backend.CopyObject was just called")
}
callInfo := struct {
ContextMoqParam context.Context
CopyObjectInput s3response.CopyObjectInput
CopyObjectInput *s3.CopyObjectInput
}{
ContextMoqParam: contextMoqParam,
CopyObjectInput: copyObjectInput,
@@ -963,11 +923,11 @@ func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectI
// len(mockedBackend.CopyObjectCalls())
func (mock *BackendMock) CopyObjectCalls() []struct {
ContextMoqParam context.Context
CopyObjectInput s3response.CopyObjectInput
CopyObjectInput *s3.CopyObjectInput
} {
var calls []struct {
ContextMoqParam context.Context
CopyObjectInput s3response.CopyObjectInput
CopyObjectInput *s3.CopyObjectInput
}
mock.lockCopyObject.RLock()
calls = mock.calls.CopyObject
@@ -1016,13 +976,13 @@ func (mock *BackendMock) CreateBucketCalls() []struct {
}
// CreateMultipartUpload calls CreateMultipartUploadFunc.
func (mock *BackendMock) CreateMultipartUpload(contextMoqParam context.Context, createMultipartUploadInput s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
func (mock *BackendMock) CreateMultipartUpload(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
if mock.CreateMultipartUploadFunc == nil {
panic("BackendMock.CreateMultipartUploadFunc: method is nil but Backend.CreateMultipartUpload was just called")
}
callInfo := struct {
ContextMoqParam context.Context
CreateMultipartUploadInput s3response.CreateMultipartUploadInput
CreateMultipartUploadInput *s3.CreateMultipartUploadInput
}{
ContextMoqParam: contextMoqParam,
CreateMultipartUploadInput: createMultipartUploadInput,
@@ -1039,11 +999,11 @@ func (mock *BackendMock) CreateMultipartUpload(contextMoqParam context.Context,
// len(mockedBackend.CreateMultipartUploadCalls())
func (mock *BackendMock) CreateMultipartUploadCalls() []struct {
ContextMoqParam context.Context
CreateMultipartUploadInput s3response.CreateMultipartUploadInput
CreateMultipartUploadInput *s3.CreateMultipartUploadInput
} {
var calls []struct {
ContextMoqParam context.Context
CreateMultipartUploadInput s3response.CreateMultipartUploadInput
CreateMultipartUploadInput *s3.CreateMultipartUploadInput
}
mock.lockCreateMultipartUpload.RLock()
calls = mock.calls.CreateMultipartUpload
@@ -1052,21 +1012,21 @@ func (mock *BackendMock) CreateMultipartUploadCalls() []struct {
}
// DeleteBucket calls DeleteBucketFunc.
func (mock *BackendMock) DeleteBucket(contextMoqParam context.Context, bucket string) error {
func (mock *BackendMock) DeleteBucket(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error {
if mock.DeleteBucketFunc == nil {
panic("BackendMock.DeleteBucketFunc: method is nil but Backend.DeleteBucket was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
ContextMoqParam context.Context
DeleteBucketInput *s3.DeleteBucketInput
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
ContextMoqParam: contextMoqParam,
DeleteBucketInput: deleteBucketInput,
}
mock.lockDeleteBucket.Lock()
mock.calls.DeleteBucket = append(mock.calls.DeleteBucket, callInfo)
mock.lockDeleteBucket.Unlock()
return mock.DeleteBucketFunc(contextMoqParam, bucket)
return mock.DeleteBucketFunc(contextMoqParam, deleteBucketInput)
}
// DeleteBucketCalls gets all the calls that were made to DeleteBucket.
@@ -1074,12 +1034,12 @@ func (mock *BackendMock) DeleteBucket(contextMoqParam context.Context, bucket st
//
// len(mockedBackend.DeleteBucketCalls())
func (mock *BackendMock) DeleteBucketCalls() []struct {
ContextMoqParam context.Context
Bucket string
ContextMoqParam context.Context
DeleteBucketInput *s3.DeleteBucketInput
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
ContextMoqParam context.Context
DeleteBucketInput *s3.DeleteBucketInput
}
mock.lockDeleteBucket.RLock()
calls = mock.calls.DeleteBucket
@@ -1087,42 +1047,6 @@ func (mock *BackendMock) DeleteBucketCalls() []struct {
return calls
}
// DeleteBucketCors calls DeleteBucketCorsFunc.
func (mock *BackendMock) DeleteBucketCors(contextMoqParam context.Context, bucket string) error {
if mock.DeleteBucketCorsFunc == nil {
panic("BackendMock.DeleteBucketCorsFunc: method is nil but Backend.DeleteBucketCors was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
}
mock.lockDeleteBucketCors.Lock()
mock.calls.DeleteBucketCors = append(mock.calls.DeleteBucketCors, callInfo)
mock.lockDeleteBucketCors.Unlock()
return mock.DeleteBucketCorsFunc(contextMoqParam, bucket)
}
// DeleteBucketCorsCalls gets all the calls that were made to DeleteBucketCors.
// Check the length with:
//
// len(mockedBackend.DeleteBucketCorsCalls())
func (mock *BackendMock) DeleteBucketCorsCalls() []struct {
ContextMoqParam context.Context
Bucket string
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
}
mock.lockDeleteBucketCors.RLock()
calls = mock.calls.DeleteBucketCors
mock.lockDeleteBucketCors.RUnlock()
return calls
}
// DeleteBucketOwnershipControls calls DeleteBucketOwnershipControlsFunc.
func (mock *BackendMock) DeleteBucketOwnershipControls(contextMoqParam context.Context, bucket string) error {
if mock.DeleteBucketOwnershipControlsFunc == nil {
@@ -1379,42 +1303,6 @@ func (mock *BackendMock) GetBucketAclCalls() []struct {
return calls
}
// GetBucketCors calls GetBucketCorsFunc.
func (mock *BackendMock) GetBucketCors(contextMoqParam context.Context, bucket string) ([]byte, error) {
if mock.GetBucketCorsFunc == nil {
panic("BackendMock.GetBucketCorsFunc: method is nil but Backend.GetBucketCors was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
}
mock.lockGetBucketCors.Lock()
mock.calls.GetBucketCors = append(mock.calls.GetBucketCors, callInfo)
mock.lockGetBucketCors.Unlock()
return mock.GetBucketCorsFunc(contextMoqParam, bucket)
}
// GetBucketCorsCalls gets all the calls that were made to GetBucketCors.
// Check the length with:
//
// len(mockedBackend.GetBucketCorsCalls())
func (mock *BackendMock) GetBucketCorsCalls() []struct {
ContextMoqParam context.Context
Bucket string
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
}
mock.lockGetBucketCors.RLock()
calls = mock.calls.GetBucketCors
mock.lockGetBucketCors.RUnlock()
return calls
}
// GetBucketOwnershipControls calls GetBucketOwnershipControlsFunc.
func (mock *BackendMock) GetBucketOwnershipControls(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error) {
if mock.GetBucketOwnershipControlsFunc == nil {
@@ -1524,7 +1412,7 @@ func (mock *BackendMock) GetBucketTaggingCalls() []struct {
}
// GetBucketVersioning calls GetBucketVersioningFunc.
func (mock *BackendMock) GetBucketVersioning(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
func (mock *BackendMock) GetBucketVersioning(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
if mock.GetBucketVersioningFunc == nil {
panic("BackendMock.GetBucketVersioningFunc: method is nil but Backend.GetBucketVersioning was just called")
}
@@ -1632,7 +1520,7 @@ func (mock *BackendMock) GetObjectAclCalls() []struct {
}
// GetObjectAttributes calls GetObjectAttributesFunc.
func (mock *BackendMock) GetObjectAttributes(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
func (mock *BackendMock) GetObjectAttributes(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
if mock.GetObjectAttributesFunc == nil {
panic("BackendMock.GetObjectAttributesFunc: method is nil but Backend.GetObjectAttributes was just called")
}
@@ -1904,21 +1792,23 @@ func (mock *BackendMock) HeadObjectCalls() []struct {
}
// ListBuckets calls ListBucketsFunc.
func (mock *BackendMock) ListBuckets(contextMoqParam context.Context, listBucketsInput s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
func (mock *BackendMock) ListBuckets(contextMoqParam context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
if mock.ListBucketsFunc == nil {
panic("BackendMock.ListBucketsFunc: method is nil but Backend.ListBuckets was just called")
}
callInfo := struct {
ContextMoqParam context.Context
ListBucketsInput s3response.ListBucketsInput
ContextMoqParam context.Context
Owner string
IsAdmin bool
}{
ContextMoqParam: contextMoqParam,
ListBucketsInput: listBucketsInput,
ContextMoqParam: contextMoqParam,
Owner: owner,
IsAdmin: isAdmin,
}
mock.lockListBuckets.Lock()
mock.calls.ListBuckets = append(mock.calls.ListBuckets, callInfo)
mock.lockListBuckets.Unlock()
return mock.ListBucketsFunc(contextMoqParam, listBucketsInput)
return mock.ListBucketsFunc(contextMoqParam, owner, isAdmin)
}
// ListBucketsCalls gets all the calls that were made to ListBuckets.
@@ -1926,12 +1816,14 @@ func (mock *BackendMock) ListBuckets(contextMoqParam context.Context, listBucket
//
// len(mockedBackend.ListBucketsCalls())
func (mock *BackendMock) ListBucketsCalls() []struct {
ContextMoqParam context.Context
ListBucketsInput s3response.ListBucketsInput
ContextMoqParam context.Context
Owner string
IsAdmin bool
} {
var calls []struct {
ContextMoqParam context.Context
ListBucketsInput s3response.ListBucketsInput
ContextMoqParam context.Context
Owner string
IsAdmin bool
}
mock.lockListBuckets.RLock()
calls = mock.calls.ListBuckets
@@ -2191,42 +2083,6 @@ func (mock *BackendMock) PutBucketAclCalls() []struct {
return calls
}
// PutBucketCors calls PutBucketCorsFunc.
func (mock *BackendMock) PutBucketCors(contextMoqParam context.Context, bytes []byte) error {
if mock.PutBucketCorsFunc == nil {
panic("BackendMock.PutBucketCorsFunc: method is nil but Backend.PutBucketCors was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bytes []byte
}{
ContextMoqParam: contextMoqParam,
Bytes: bytes,
}
mock.lockPutBucketCors.Lock()
mock.calls.PutBucketCors = append(mock.calls.PutBucketCors, callInfo)
mock.lockPutBucketCors.Unlock()
return mock.PutBucketCorsFunc(contextMoqParam, bytes)
}
// PutBucketCorsCalls gets all the calls that were made to PutBucketCors.
// Check the length with:
//
// len(mockedBackend.PutBucketCorsCalls())
func (mock *BackendMock) PutBucketCorsCalls() []struct {
ContextMoqParam context.Context
Bytes []byte
} {
var calls []struct {
ContextMoqParam context.Context
Bytes []byte
}
mock.lockPutBucketCors.RLock()
calls = mock.calls.PutBucketCors
mock.lockPutBucketCors.RUnlock()
return calls
}
// PutBucketOwnershipControls calls PutBucketOwnershipControlsFunc.
func (mock *BackendMock) PutBucketOwnershipControls(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error {
if mock.PutBucketOwnershipControlsFunc == nil {
@@ -2388,13 +2244,13 @@ func (mock *BackendMock) PutBucketVersioningCalls() []struct {
}
// PutObject calls PutObjectFunc.
func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInput s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
if mock.PutObjectFunc == nil {
panic("BackendMock.PutObjectFunc: method is nil but Backend.PutObject was just called")
}
callInfo := struct {
ContextMoqParam context.Context
PutObjectInput s3response.PutObjectInput
PutObjectInput *s3.PutObjectInput
}{
ContextMoqParam: contextMoqParam,
PutObjectInput: putObjectInput,
@@ -2411,11 +2267,11 @@ func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInp
// len(mockedBackend.PutObjectCalls())
func (mock *BackendMock) PutObjectCalls() []struct {
ContextMoqParam context.Context
PutObjectInput s3response.PutObjectInput
PutObjectInput *s3.PutObjectInput
} {
var calls []struct {
ContextMoqParam context.Context
PutObjectInput s3response.PutObjectInput
PutObjectInput *s3.PutObjectInput
}
mock.lockPutObject.RLock()
calls = mock.calls.PutObject
@@ -2770,7 +2626,7 @@ func (mock *BackendMock) StringCalls() []struct {
}
// UploadPart calls UploadPartFunc.
func (mock *BackendMock) UploadPart(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
func (mock *BackendMock) UploadPart(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error) {
if mock.UploadPartFunc == nil {
panic("BackendMock.UploadPartFunc: method is nil but Backend.UploadPart was just called")
}
@@ -2806,7 +2662,7 @@ func (mock *BackendMock) UploadPartCalls() []struct {
}
// UploadPartCopy calls UploadPartCopyFunc.
func (mock *BackendMock) UploadPartCopy(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
func (mock *BackendMock) UploadPartCopy(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
if mock.UploadPartCopyFunc == nil {
panic("BackendMock.UploadPartCopyFunc: method is nil but Backend.UploadPartCopy was just called")
}

File diff suppressed because it is too large


@@ -32,7 +32,6 @@ import (
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
@@ -93,14 +92,15 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
ListBucketsFunc: func(contextMoqParam context.Context, listBucketsInput s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
ListBucketsFunc: func(context.Context, string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, nil
},
},
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access", Role: "admin:"})
ctx.Locals("account", auth.Account{Access: "valid access", Role: "admin:"})
ctx.Locals("isDebug", false)
return ctx.Next()
})
app.Get("/", s3ApiController.ListBuckets)
@@ -109,14 +109,15 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
appErr := fiber.New()
s3ApiControllerErr := S3ApiController{
be: &BackendMock{
ListBucketsFunc: func(contextMoqParam context.Context, listBucketsInput s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
ListBucketsFunc: func(context.Context, string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
},
},
}
appErr.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access", Role: "admin:"})
ctx.Locals("account", auth.Account{Access: "valid access", Role: "admin:"})
ctx.Locals("isDebug", false)
return ctx.Next()
})
appErr.Get("/", s3ApiControllerErr.ListBuckets)
@@ -186,8 +187,8 @@ func TestS3ApiController_GetActions(t *testing.T) {
GetObjectAclFunc: func(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
return &s3.GetObjectAclOutput{}, nil
},
GetObjectAttributesFunc: func(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
return s3response.GetObjectAttributesResponse{}, nil
GetObjectAttributesFunc: func(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResult, error) {
return s3response.GetObjectAttributesResult{}, nil
},
GetObjectFunc: func(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
return &s3.GetObjectOutput{
@@ -219,9 +220,10 @@ func TestS3ApiController_GetActions(t *testing.T) {
},
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Get("/:bucket/:key/*", s3ApiController.GetActions)
@@ -230,9 +232,6 @@ func TestS3ApiController_GetActions(t *testing.T) {
getObjAttrs := httptest.NewRequest(http.MethodGet, "/my-bucket/key", nil)
getObjAttrs.Header.Set("X-Amz-Object-Attributes", "hello")
invalidChecksumMode := httptest.NewRequest(http.MethodGet, "/my-bucket/key", nil)
invalidChecksumMode.Header.Set("x-amz-checksum-mode", "invalid_checksum_mode")
tests := []struct {
name string
app *fiber.App
@@ -330,15 +329,6 @@ func TestS3ApiController_GetActions(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Get-actions-get-object-invalid-checksum-mode",
app: app,
args: args{
req: invalidChecksumMode,
},
wantErr: false,
statusCode: 400,
},
{
name: "Get-actions-get-object-success",
app: app,
@@ -392,8 +382,8 @@ func TestS3ApiController_ListActions(t *testing.T) {
GetBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) (map[string]string, error) {
return map[string]string{}, nil
},
GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
return s3response.GetBucketVersioningOutput{}, nil
GetBucketVersioningFunc: func(contextMoqParam context.Context, bucket string) (*s3.GetBucketVersioningOutput, error) {
return &s3.GetBucketVersioningOutput{}, nil
},
ListObjectVersionsFunc: func(contextMoqParam context.Context, listObjectVersionsInput *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
return s3response.ListVersionsResult{}, nil
@@ -411,9 +401,10 @@ func TestS3ApiController_ListActions(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
@@ -435,9 +426,10 @@ func TestS3ApiController_ListActions(t *testing.T) {
}
appError := fiber.New()
appError.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
appError.Get("/:bucket", s3ApiControllerError.ListActions)
@@ -630,7 +622,8 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
</VersioningConfiguration>
`
policyBody := `{
policyBody := `
{
"Statement": [
{
"Effect": "Allow",
@@ -703,9 +696,10 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
}
// Mock ctx.Locals
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{Owner: "valid access"})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{Owner: "valid access"})
return ctx.Next()
})
app.Put("/:bucket", s3ApiController.PutBucketActions)
@@ -756,7 +750,7 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
req: httptest.NewRequest(http.MethodPut, "/my-bucket?tagging", strings.NewReader(tagBody)),
},
wantErr: false,
statusCode: 204,
statusCode: 200,
},
{
name: "Put-bucket-ownership-controls-invalid-ownership",
@@ -884,6 +878,15 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
wantErr: false,
statusCode: 400,
},
{
name: "Create-bucket-invalid-bucket-name",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/aa", nil),
},
wantErr: false,
statusCode: 400,
},
{
name: "Create-bucket-success",
app: app,
@@ -938,12 +941,12 @@ func TestS3ApiController_PutActions(t *testing.T) {
</Tagging>
`
//retentionBody := `
//<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
// <Mode>GOVERNANCE</Mode>
// <RetainUntilDate>2025-01-01T00:00:00Z</RetainUntilDate>
//</Retention>
//`
retentionBody := `
<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Mode>GOVERNANCE</Mode>
<RetainUntilDate>2025-01-01T00:00:00Z</RetainUntilDate>
</Retention>
`
legalHoldBody := `
<LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
@@ -960,22 +963,22 @@ func TestS3ApiController_PutActions(t *testing.T) {
PutObjectAclFunc: func(context.Context, *s3.PutObjectAclInput) error {
return nil
},
CopyObjectFunc: func(context.Context, s3response.CopyObjectInput) (s3response.CopyObjectOutput, error) {
return s3response.CopyObjectOutput{
CopyObjectResult: &s3response.CopyObjectResult{},
CopyObjectFunc: func(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{},
}, nil
},
PutObjectFunc: func(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
PutObjectFunc: func(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
return s3response.PutObjectOutput{}, nil
},
UploadPartFunc: func(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
return &s3.UploadPartOutput{}, nil
UploadPartFunc: func(context.Context, *s3.UploadPartInput) (string, error) {
return "hello", nil
},
PutObjectTaggingFunc: func(_ context.Context, bucket, object string, tags map[string]string) error {
return nil
},
UploadPartCopyFunc: func(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
return s3response.CopyPartResult{}, nil
UploadPartCopyFunc: func(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
return s3response.CopyObjectResult{}, nil
},
PutObjectLegalHoldFunc: func(contextMoqParam context.Context, bucket, object, versionId string, status bool) error {
return nil
@@ -989,9 +992,10 @@ func TestS3ApiController_PutActions(t *testing.T) {
},
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Put("/:bucket/:key/*", s3ApiController.PutActions)
@@ -1008,11 +1012,6 @@ func TestS3ApiController_PutActions(t *testing.T) {
cpySrcReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
cpySrcReq.Header.Set("X-Amz-Copy-Source", "srcBucket/srcObject")
// CopyObject invalid checksum algorithm
cpyInvChecksumAlgo := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
cpyInvChecksumAlgo.Header.Set("X-Amz-Copy-Source", "srcBucket/srcObject")
cpyInvChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
// PutObjectAcl success
aclReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
aclReq.Header.Set("X-Amz-Acl", "private")
@@ -1034,40 +1033,6 @@ func TestS3ApiController_PutActions(t *testing.T) {
invAclBodyGrtReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?acl", strings.NewReader(body))
invAclBodyGrtReq.Header.Set("X-Amz-Grant-Read", "hello")
// PutObject invalid checksum algorithm
invChecksumAlgo := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
// PutObject invalid base64 checksum
invBase64Checksum := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invBase64Checksum.Header.Set("X-Amz-Checksum-Crc32", "invalid_base64")
// PutObject invalid crc32
invCrc32 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invCrc32.Header.Set("X-Amz-Checksum-Crc32", "YXNkZmFkc2Zhc2Rm")
// PutObject invalid crc32c
invCrc32c := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invCrc32c.Header.Set("X-Amz-Checksum-Crc32c", "YXNkZmFkc2Zhc2RmYXNkZg==")
// PutObject invalid sha1
invSha1 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invSha1.Header.Set("X-Amz-Checksum-Sha1", "YXNkZmFkc2Zhc2RmYXNkZnNkYWZkYXNmZGFzZg==")
// PutObject invalid sha256
invSha256 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invSha256.Header.Set("X-Amz-Checksum-Sha256", "YXNkZmFkc2Zhc2RmYXNkZnNkYWZkYXNmZGFzZmFkc2Zhc2Rm")
// PutObject multiple checksum headers
mulChecksumHdrs := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
mulChecksumHdrs.Header.Set("X-Amz-Checksum-Sha256", "d1SPCd/kZ2rAzbbLUC0n/bEaOSx70FNbXbIqoIxKuPY=")
mulChecksumHdrs.Header.Set("X-Amz-Checksum-Crc32c", "ww2FVQ==")
// PutObject checksum algorithm and header mismatch
checksumHdrMismatch := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
checksumHdrMismatch.Header.Set("X-Amz-Checksum-Algorithm", "SHA1")
checksumHdrMismatch.Header.Set("X-Amz-Checksum-Crc32c", "ww2FVQ==")
tests := []struct {
name string
app *fiber.App
@@ -1111,15 +1076,15 @@ func TestS3ApiController_PutActions(t *testing.T) {
wantErr: false,
statusCode: 400,
},
//{
// name: "put-object-retention-success",
// app: app,
// args: args{
// req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?retention", strings.NewReader(retentionBody)),
// },
// wantErr: false,
// statusCode: 200,
//},
{
name: "put-object-retention-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?retention", strings.NewReader(retentionBody)),
},
wantErr: false,
statusCode: 200,
},
{
name: "put-legal-hold-invalid-request",
app: app,
@@ -1210,15 +1175,6 @@ func TestS3ApiController_PutActions(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Copy-object-invalid-checksum-algorithm",
app: app,
args: args{
req: cpyInvChecksumAlgo,
},
wantErr: false,
statusCode: 400,
},
{
name: "Copy-object-success",
app: app,
@@ -1261,7 +1217,7 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
DeleteBucketFunc: func(_ context.Context, bucket string) error {
DeleteBucketFunc: func(context.Context, *s3.DeleteBucketInput) error {
return nil
},
DeleteBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) error {
@@ -1277,9 +1233,10 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
@@ -1362,9 +1319,10 @@ func TestS3ApiController_DeleteObjects(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Post("/:bucket", s3ApiController.DeleteObjects)
@@ -1441,9 +1399,10 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Delete("/:bucket/:key/*", s3ApiController.DeleteActions)
@@ -1464,9 +1423,10 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
}}
appErr.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
appErr.Delete("/:bucket/:key/*", s3ApiControllerErr.DeleteActions)
@@ -1546,10 +1506,11 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
utils.ContextKeyRegion.Set(ctx, "us-east-1")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
ctx.Locals("region", "us-east-1")
return ctx.Next()
})
@@ -1563,16 +1524,17 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
return acldata, nil
},
HeadBucketFunc: func(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrBucketNotEmpty)
return nil, s3err.GetAPIError(3)
},
},
}
appErr.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
utils.ContextKeyRegion.Set(ctx, "us-east-1")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
ctx.Locals("region", "us-east-1")
return ctx.Next()
})
@@ -1649,9 +1611,10 @@ func TestS3ApiController_HeadObject(t *testing.T) {
}
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Head("/:bucket/:key/*", s3ApiController.HeadObject)
@@ -1665,22 +1628,20 @@ func TestS3ApiController_HeadObject(t *testing.T) {
return acldata, nil
},
HeadObjectFunc: func(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
return nil, s3err.GetAPIError(42)
},
},
}
appErr.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
appErr.Head("/:bucket/:key/*", s3ApiControllerErr.HeadObject)
invChecksumMode := httptest.NewRequest(http.MethodHead, "/my-bucket/my-key", nil)
invChecksumMode.Header.Set("X-Amz-Checksum-Mode", "invalid_checksum_mode")
tests := []struct {
name string
app *fiber.App
@@ -1697,15 +1658,6 @@ func TestS3ApiController_HeadObject(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Head-object-invalid-checksum-mode",
app: app,
args: args{
req: invChecksumMode,
},
wantErr: false,
statusCode: 400,
},
{
name: "Head-object-error",
app: appErr,
@@ -1742,10 +1694,10 @@ func TestS3ApiController_CreateActions(t *testing.T) {
RestoreObjectFunc: func(context.Context, *s3.RestoreObjectInput) error {
return nil
},
CompleteMultipartUploadFunc: func(context.Context, *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
return s3response.CompleteMultipartUploadResult{}, "", nil
CompleteMultipartUploadFunc: func(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return &s3.CompleteMultipartUploadOutput{}, nil
},
CreateMultipartUploadFunc: func(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
CreateMultipartUploadFunc: func(context.Context, *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
return s3response.InitiateMultipartUploadResult{}, nil
},
SelectObjectContentFunc: func(context.Context, *s3.SelectObjectContentInput) func(w *bufio.Writer) {
@@ -1761,30 +1713,15 @@ func TestS3ApiController_CreateActions(t *testing.T) {
</SelectObjectContentRequest>
`
completMpBody := `
<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Part>
<ETag>etag</ETag>
<PartNumber>1</PartNumber>
</Part>
</CompleteMultipartUpload>
`
completMpEmptyBody := `
<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/"></CompleteMultipartUpload>
`
app.Use(func(ctx *fiber.Ctx) error {
utils.ContextKeyAccount.Set(ctx, auth.Account{Access: "valid access"})
utils.ContextKeyIsRoot.Set(ctx, true)
utils.ContextKeyParsedAcl.Set(ctx, auth.ACL{})
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
return ctx.Next()
})
app.Post("/:bucket/:key/*", s3ApiController.CreateActions)
invChecksumAlgo := httptest.NewRequest(http.MethodPost, "/my-bucket/my-key", nil)
invChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
tests := []struct {
name string
app *fiber.App
@@ -1828,33 +1765,15 @@ func TestS3ApiController_CreateActions(t *testing.T) {
wantErr: false,
statusCode: 400,
},
{
name: "Complete-multipart-upload-empty-parts",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(completMpEmptyBody)),
},
wantErr: false,
statusCode: 400,
},
{
name: "Complete-multipart-upload-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(completMpBody)),
req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(`<root><key>body</key></root>`)),
},
wantErr: false,
statusCode: 200,
},
{
name: "Create-multipart-upload-invalid-checksum-algorithm",
app: app,
args: args{
req: invChecksumAlgo,
},
wantErr: false,
statusCode: 400,
},
{
name: "Create-multipart-upload-success",
app: app,

View File

@@ -1,226 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package debuglogger
import (
"fmt"
"log"
"net/http"
"strings"
"sync/atomic"
"github.com/gofiber/fiber/v2"
)
type Color string
const (
green Color = "\033[32m"
yellow Color = "\033[33m"
blue Color = "\033[34m"
Purple Color = "\033[0;35m"
reset = "\033[0m"
borderChar = "─"
boxWidth = 120
)
// Logs http request details: headers, body, params, query args
func LogFiberRequestDetails(ctx *fiber.Ctx) {
// Log the full request url
fullURL := ctx.Protocol() + "://" + ctx.Hostname() + ctx.OriginalURL()
fmt.Printf("%s[URL]: %s%s\n", green, fullURL, reset)
// log request headers
wrapInBox(green, "REQUEST HEADERS", boxWidth, func() {
for key, value := range ctx.Request().Header.All() {
printWrappedLine(yellow, string(key), string(value))
}
})
// skip request body log for PutObject and UploadPart
skipBodyLog := isLargeDataAction(ctx)
if !skipBodyLog {
body := ctx.Request().Body()
if len(body) != 0 {
printBoxTitleLine(blue, "REQUEST BODY", boxWidth, false)
fmt.Printf("%s%s%s\n", blue, body, reset)
printHorizontalBorder(blue, boxWidth, false)
}
}
if ctx.Request().URI().QueryArgs().Len() != 0 {
for key, value := range ctx.Request().URI().QueryArgs().All() {
log.Printf("%s: %s", key, value)
}
}
}
// Logs http response details: body, headers
func LogFiberResponseDetails(ctx *fiber.Ctx) {
wrapInBox(green, "RESPONSE HEADERS", boxWidth, func() {
for key, value := range ctx.Response().Header.All() {
printWrappedLine(yellow, string(key), string(value))
}
})
_, ok := ctx.Locals("skip-res-body-log").(bool)
if !ok {
body := ctx.Response().Body()
if len(body) != 0 {
PrintInsideHorizontalBorders(blue, "RESPONSE BODY", string(body), boxWidth)
}
}
}
var debugEnabled atomic.Bool
// SetDebugEnabled sets the debug mode
func SetDebugEnabled() {
debugEnabled.Store(true)
}
// Logf is the same as 'fmt.Printf' with debug prefix,
// a color added and '\n' at the end
func Logf(format string, v ...any) {
if !debugEnabled.Load() {
return
}
debugPrefix := "[DEBUG]: "
fmt.Printf(string(yellow)+debugPrefix+format+reset+"\n", v...)
}
// Infof prints out green info block with [INFO]: prefix
func Infof(format string, v ...any) {
if !debugEnabled.Load() {
return
}
debugPrefix := "[INFO]: "
fmt.Printf(string(green)+debugPrefix+format+reset+"\n", v...)
}
// PrintInsideHorizontalBorders prints the text inside horizontal
// border and title in the center of upper border
func PrintInsideHorizontalBorders(color Color, title, text string, width int) {
if !debugEnabled.Load() {
return
}
printBoxTitleLine(color, title, width, false)
fmt.Printf("%s%s%s\n", color, text, reset)
printHorizontalBorder(color, width, false)
}
// Prints out box title either with closing characters or not: "┌", "┐"
// e.g ┌────────────────[ RESPONSE HEADERS ]────────────────┐
func printBoxTitleLine(color Color, title string, length int, closing bool) {
leftCorner, rightCorner := "┌", "┐"
if !closing {
leftCorner, rightCorner = borderChar, borderChar
}
// Calculate how many border characters are needed
titleFormatted := fmt.Sprintf("[ %s ]", title)
borderSpace := length - len(titleFormatted) - 2 // 2 for corners
leftLen := borderSpace / 2
rightLen := borderSpace - leftLen
// Build the line
line := leftCorner +
strings.Repeat(borderChar, leftLen) +
titleFormatted +
strings.Repeat(borderChar, rightLen) +
rightCorner
fmt.Println(string(color) + line + reset)
}
// Prints out a horizontal line either with closing characters or not: "└", "┘"
func printHorizontalBorder(color Color, length int, closing bool) {
leftCorner, rightCorner := "└", "┘"
if !closing {
leftCorner, rightCorner = borderChar, borderChar
}
line := leftCorner + strings.Repeat(borderChar, length-2) + rightCorner + reset
fmt.Println(string(color) + line)
}
// wrapInBox wraps the output of a function call (fn) inside a styled box with a title.
func wrapInBox(color Color, title string, length int, fn func()) {
printBoxTitleLine(color, title, length, true)
fn()
printHorizontalBorder(color, length, true)
}
// returns the provided string length,
// defaulting to a minimum of 13 for shorter strings
func getLen(str string) int {
if len(str) < 13 {
return 13
}
return len(str)
}
// prints a formatted key-value pair within a box layout,
// wrapping the value text if it exceeds the allowed width.
func printWrappedLine(keyColor Color, key, value string) {
prefix := fmt.Sprintf("%s│%s %s%-13s%s : ", green, reset, keyColor, key, reset)
prefixLen := len(prefix) - len(green) - len(reset) - len(keyColor) - len(reset)
// the actual prefix size without colors
actualPrefixLen := getLen(key) + 5
lineWidth := boxWidth - prefixLen
valueLines := wrapText(value, lineWidth)
for i, line := range valueLines {
if i == 0 {
if len(line) < lineWidth {
line += strings.Repeat(" ", lineWidth-len(line))
}
fmt.Printf("%s%s%s %s│%s\n", prefix, reset, line, green, reset)
} else {
line = strings.Repeat(" ", actualPrefixLen-2) + line
if len(line) < boxWidth-4 {
line += strings.Repeat(" ", boxWidth-len(line)-4)
}
fmt.Printf("%s│ %s%s %s│%s\n", green, reset, line, green, reset)
}
}
}
// wrapText splits the input text into lines of at most `width` characters each.
func wrapText(text string, width int) []string {
var lines []string
for len(text) > width {
lines = append(lines, text[:width])
text = text[width:]
}
if text != "" {
lines = append(lines, text)
}
return lines
}
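// Illustrative sketch of wrapText's behavior (values are hypothetical):
// the input is sliced into fixed-width runs, so a 10-character value at
// width 4 becomes three lines:
//
//	wrapText("abcdefghij", 4) // []string{"abcd", "efgh", "ij"}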
// TODO: remove this and use utils.IsBigDataAction after refactoring
// and creating 'internal' package
func isLargeDataAction(ctx *fiber.Ctx) bool {
if ctx.Method() == http.MethodPut && len(strings.Split(ctx.Path(), "/")) >= 3 {
if !ctx.Request().URI().QueryArgs().Has("tagging") && ctx.Get("X-Amz-Copy-Source") == "" && !ctx.Request().URI().QueryArgs().Has("acl") {
return true
}
}
return false
}
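// Illustrative: "PUT /bucket/key" with no ?tagging, no ?acl, and no
// X-Amz-Copy-Source header counts as a large-data action (body log skipped),
// while "PUT /bucket/key?tagging" does not:
//
//	isLargeDataAction(ctx) // true for PUT /bucket/key, false for PUT /bucket/key?tagging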

View File

@@ -24,7 +24,6 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
@@ -35,6 +34,7 @@ var (
func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fiber.Handler {
return func(ctx *fiber.Ctx) error {
isRoot, acct := ctx.Locals("isRoot").(bool), ctx.Locals("account").(auth.Account)
path := ctx.Path()
pathParts := strings.Split(path, "/")
bucket := pathParts[1]
@@ -51,9 +51,7 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fibe
!ctx.Request().URI().QueryArgs().Has("versioning") &&
!ctx.Request().URI().QueryArgs().Has("policy") &&
!ctx.Request().URI().QueryArgs().Has("object-lock") &&
!ctx.Request().URI().QueryArgs().Has("ownershipControls") &&
!ctx.Request().URI().QueryArgs().Has("cors") {
isRoot, acct := utils.ContextKeyIsRoot.Get(ctx).(bool), utils.ContextKeyAccount.Get(ctx).(auth.Account)
!ctx.Request().URI().QueryArgs().Has("ownershipControls") {
if err := auth.MayCreateBucket(acct, isRoot); err != nil {
return controllers.SendXMLResponse(ctx, nil, err, &controllers.MetaOpts{Logger: logger, Action: "CreateBucket"})
}
@@ -76,12 +74,7 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fibe
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
}
// if owner is not set, set default owner to root account
if parsedAcl.Owner == "" {
parsedAcl.Owner = utils.ContextKeyRootAccessKey.Get(ctx).(string)
}
utils.ContextKeyParsedAcl.Set(ctx, parsedAcl)
ctx.Locals("parsedAcl", parsedAcl)
return ctx.Next()
}
}

View File

@@ -1,60 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"strings"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
func IsAdmin(logger s3log.AuditLogger) fiber.Handler {
return func(ctx *fiber.Ctx) error {
acct := utils.ContextKeyAccount.Get(ctx).(auth.Account)
if acct.Role != auth.RoleAdmin {
path := ctx.Path()
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAdminAccessDenied),
&controllers.MetaOpts{
Logger: logger,
Action: detectAction(path),
})
}
return ctx.Next()
}
}
func detectAction(path string) (action string) {
if strings.Contains(path, "create-user") {
action = metrics.ActionAdminCreateUser
} else if strings.Contains(path, "update-user") {
action = metrics.ActionAdminUpdateUser
} else if strings.Contains(path, "delete-user") {
action = metrics.ActionAdminDeleteUser
} else if strings.Contains(path, "list-user") {
action = metrics.ActionAdminListUsers
} else if strings.Contains(path, "list-buckets") {
action = metrics.ActionAdminListBuckets
} else if strings.Contains(path, "change-bucket-owner") {
action = metrics.ActionAdminChangeBucketOwner
}
return action
}
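// For example (paths are illustrative):
//
//	detectAction("/create-user")      // metrics.ActionAdminCreateUser
//	detectAction("/unknown-endpoint") // "" (no admin action matched)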

View File

@@ -33,8 +33,7 @@ import (
)
const (
iso8601Format = "20060102T150405Z"
maxObjSizeLimit = 5 * 1024 * 1024 * 1024 // 5gb
iso8601Format = "20060102T150405Z"
)
type RootUserConfig struct {
@@ -46,15 +45,14 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
acct := accounts{root: root, iam: iam}
return func(ctx *fiber.Ctx) error {
// The bucket is public, no need to check this signature
if utils.ContextKeyPublicBucket.IsSet(ctx) {
return ctx.Next()
}
// If ContextKeyAuthenticated is set in context locals, it means this was the presigned URL case
if utils.ContextKeyAuthenticated.IsSet(ctx) {
// If account is set in context locals, it means this was the presigned URL case
_, ok := ctx.Locals("account").(auth.Account)
if ok {
return ctx.Next()
}
ctx.Locals("region", region)
ctx.Locals("startTime", time.Now())
authorization := ctx.Get("Authorization")
if authorization == "" {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty), logger, mm)
@@ -65,6 +63,10 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
return sendResponse(ctx, err, logger, mm)
}
if authData.Algorithm != "AWS4-HMAC-SHA256" {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported), logger, mm)
}
if authData.Region != region {
return sendResponse(ctx, s3err.APIError{
Code: "SignatureDoesNotMatch",
@@ -73,7 +75,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
}, logger, mm)
}
utils.ContextKeyIsRoot.Set(ctx, authData.Access == root.Access)
ctx.Locals("isRoot", authData.Access == root.Access)
account, err := acct.getAccount(authData.Access)
if err == auth.ErrNoSuchUser {
@@ -82,8 +84,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
utils.ContextKeyAccount.Set(ctx, account)
ctx.Locals("account", account)
// Check X-Amz-Date header
date := ctx.Get("X-Amz-Date")
@@ -107,17 +108,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
return sendResponse(ctx, err, logger, mm)
}
var contentLength int64
contentLengthStr := ctx.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
//TODO: not sure if InvalidRequest should be returned in this case
if err != nil {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
}
}
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if utils.IsBigDataAction(ctx) {
// for streaming PUT actions, authorization is deferred
// until end of stream due to need to get length and
@@ -125,36 +115,10 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
return utils.NewAuthReader(ctx, r, authData, account.Secret, debug)
})
// wrap the io.Reader with ChunkReader if x-amz-content-sha256
// provide chunk encoding value
if utils.IsStreamingPayload(hashPayload) {
var err error
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr io.Reader
cr, err = utils.NewChunkReader(ctx, r, authData, region, account.Secret, tdate)
return cr
})
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
return ctx.Next()
}
// Content-Length has to be set for data uploads: PutObject, UploadPart
if contentLengthStr == "" {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingContentLength), logger, mm)
}
// the upload limit for big data actions: PutObject, UploadPart
// is 5gb. If the size exceeds the limit, return 'EntityTooLarge' err
if contentLength > maxObjSizeLimit {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrEntityTooLarge), logger, mm)
}
return ctx.Next()
}
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if !utils.IsSpecialPayload(hashPayload) {
// Calculate the hash of the request payload
hashedPayload := sha256.Sum256(ctx.Body())
@@ -166,6 +130,15 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
}
}
var contentLength int64
contentLengthStr := ctx.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
if err != nil {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
}
}
err = utils.CheckValidSignature(ctx, authData, account.Secret, hashPayload, tdate, contentLength, debug)
if err != nil {
return sendResponse(ctx, err, logger, mm)

View File

@@ -18,15 +18,14 @@ import (
"io"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/utils"
)
func wrapBodyReader(ctx *fiber.Ctx, wr func(io.Reader) io.Reader) {
r, ok := utils.ContextKeyBodyReader.Get(ctx).(io.Reader)
r, ok := ctx.Locals("body-reader").(io.Reader)
if !ok {
r = ctx.Request().BodyStream()
}
r = wr(r)
utils.ContextKeyBodyReader.Set(ctx, r)
ctx.Locals("body-reader", r)
}

View File

@@ -1,58 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"net/http"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
// BucketObjectNameValidator extracts and validates
// the bucket and object names from the request URI.
func BucketObjectNameValidator(l s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
return func(ctx *fiber.Ctx) error {
// skip the check for admin apis
if ctx.Method() == http.MethodPatch {
return ctx.Next()
}
path := ctx.Path()
// skip the check if the operation isn't bucket/object scoped
// e.g. ListBuckets
if path == "/" {
return ctx.Next()
}
bucket, object := parsePath(path)
// check if the provided bucket name is valid
if !utils.IsValidBucketName(bucket) {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidBucketName), l, mm)
}
// check if the provided object name is valid
// skip for empty objects, e.g. bucket operations: HeadBucket...
if object != "" && !utils.IsObjectNameValid(object) {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrBadRequest), l, mm)
}
return ctx.Next()
}
}
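// Rough sketch of the expected outcomes, assuming utils.IsValidBucketName
// enforces S3's DNS-compatible naming rules (3-63 characters, lowercase
// letters, digits, hyphens, dots):
//
//	HEAD /My_Bucket        // 400 InvalidBucketName (uppercase/underscore)
//	GET  /my-bucket/ok.txt // passes through to the next handler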

View File

@@ -0,0 +1,62 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"io"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3log"
)
// ProcessChunkedBody initializes the chunked upload stream if the
// request appears to be a chunked upload
func ProcessChunkedBody(root RootUserConfig, iam auth.IAMService, logger s3log.AuditLogger, mm *metrics.Manager, region string) fiber.Handler {
return func(ctx *fiber.Ctx) error {
decodedLength := ctx.Get("X-Amz-Decoded-Content-Length")
if decodedLength == "" {
return ctx.Next()
}
// TODO: validate content length
authData, err := utils.ParseAuthorization(ctx.Get("Authorization"))
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
acct := ctx.Locals("account").(auth.Account)
amzdate := ctx.Get("X-Amz-Date")
date, _ := time.Parse(iso8601Format, amzdate)
if utils.IsBigDataAction(ctx) {
var err error
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr *utils.ChunkReader
cr, err = utils.NewChunkReader(ctx, r, authData, region, acct.Secret, date)
return cr
})
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
return ctx.Next()
}
return ctx.Next()
}
}
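// For reference, the aws-chunked body consumed here frames every chunk as
// "<hex-size>;chunk-signature=<sig>\r\n<data>\r\n" and ends with a
// zero-length chunk. A two-chunk upload looks like this (signatures are
// illustrative placeholders):
//
//	10000;chunk-signature=ad80c7...\r\n
//	<65536 bytes of object data>\r\n
//	0;chunk-signature=b6c6ea...\r\n
//	\r\n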

View File

@@ -1,40 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"fmt"
"strings"
"github.com/gofiber/fiber/v2"
)
// HostStyleParser is a middleware which parses the bucket name
// from the 'Host' header and appends in the request URL path
func HostStyleParser(virtualDomain string) fiber.Handler {
return func(ctx *fiber.Ctx) error {
host := string(ctx.Request().Host())
// the host should match this pattern: '<bucket_name>.<virtual_domain>'
bucket, _, found := strings.Cut(host, "."+virtualDomain)
if !found || bucket == "" {
return ctx.Next()
}
path := ctx.Path()
pathStyleUrl := fmt.Sprintf("/%v%v", bucket, path)
ctx.Path(pathStyleUrl)
return ctx.Next()
}
}
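// Sketch of the rewrite, assuming virtualDomain is "s3.example.com": a
// request with Host "photos.s3.example.com" and path "/2024/cat.jpg" is
// rewritten to the path-style "/photos/2024/cat.jpg", while a request
// whose Host is exactly "s3.example.com" passes through unchanged.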

View File

@@ -15,15 +15,30 @@
package middlewares
import (
"fmt"
"log"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/debuglogger"
)
func DebugLogger() fiber.Handler {
func RequestLogger(isDebug bool) fiber.Handler {
return func(ctx *fiber.Ctx) error {
debuglogger.LogFiberRequestDetails(ctx)
err := ctx.Next()
debuglogger.LogFiberResponseDetails(ctx)
return err
ctx.Locals("isDebug", isDebug)
if isDebug {
log.Println("Request headers: ")
ctx.Request().Header.VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
if ctx.Request().URI().QueryArgs().Len() != 0 {
fmt.Println()
log.Println("Request query arguments: ")
ctx.Request().URI().QueryArgs().VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
}
}
return ctx.Next()
}
}

View File

@@ -45,7 +45,7 @@ func VerifyMD5Body(logger s3log.AuditLogger) fiber.Handler {
}
sum := md5.Sum(ctx.Body())
calculatedSum := utils.Base64SumString(sum[:])
calculatedSum := utils.Md5SumString(sum[:])
if incomingSum != calculatedSum {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest), &controllers.MetaOpts{Logger: logger})

View File

@@ -16,7 +16,7 @@ package middlewares
import (
"io"
"strconv"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
@@ -30,25 +30,19 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, logger
acct := accounts{root: root, iam: iam}
return func(ctx *fiber.Ctx) error {
// The bucket is public, no need to check this signature
if utils.ContextKeyPublicBucket.IsSet(ctx) {
return ctx.Next()
}
if ctx.Query("X-Amz-Signature") == "" {
return ctx.Next()
}
// Set the "authenticated" key in the context in case the authentication succeeds;
// otherwise the middleware will return the caught error
utils.ContextKeyAuthenticated.Set(ctx, true)
ctx.Locals("region", region)
ctx.Locals("startTime", time.Now())
authData, err := utils.ParsePresignedURIParts(ctx)
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
utils.ContextKeyIsRoot.Set(ctx, authData.Access == root.Access)
ctx.Locals("isRoot", authData.Access == root.Access)
account, err := acct.getAccount(authData.Access)
if err == auth.ErrNoSuchUser {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID), logger, mm)
@@ -56,28 +50,9 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, logger
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
utils.ContextKeyAccount.Set(ctx, account)
var contentLength int64
contentLengthStr := ctx.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
//TODO: not sure if InvalidRequest should be returned in this case
if err != nil {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger, mm)
}
}
ctx.Locals("account", account)
if utils.IsBigDataAction(ctx) {
// Content-Length has to be set for data uploads: PutObject, UploadPart
if contentLengthStr == "" {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingContentLength), logger, mm)
}
// the upload limit for big data actions: PutObject, UploadPart
// is 5gb. If the size exceeds the limit, return 'EntityTooLarge' err
if contentLength > maxObjSizeLimit {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrEntityTooLarge), logger, mm)
}
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
return utils.NewPresignedAuthReader(ctx, r, authData, account.Secret, debug)
})

View File

@@ -1,298 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"io"
"strings"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
func AuthorizePublicBucketAccess(be backend.Backend, l s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
return func(ctx *fiber.Ctx) error {
// skip for authenticated requests
if ctx.Query("X-Amz-Algorithm") != "" || ctx.Get("Authorization") != "" {
return ctx.Next()
}
bucket, object := parsePath(ctx.Path())
action, permission, err := detectS3Action(ctx, object == "")
if err != nil {
return sendResponse(ctx, err, l, mm)
}
err = auth.VerifyPublicAccess(ctx.Context(), be, action, permission, bucket, object)
if err != nil {
return sendResponse(ctx, err, l, mm)
}
if utils.IsBigDataAction(ctx) {
payloadType := ctx.Get("X-Amz-Content-Sha256")
if utils.IsUnsignedStreamingPayload(payloadType) {
checksumType, err := utils.ExtractChecksumType(ctx)
if err != nil {
return sendResponse(ctx, err, l, mm)
}
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr io.Reader
cr, err = utils.NewUnsignedChunkReader(r, checksumType)
return cr
})
if err != nil {
return sendResponse(ctx, err, l, mm)
}
}
}
utils.ContextKeyPublicBucket.Set(ctx, true)
return ctx.Next()
}
}
func detectS3Action(ctx *fiber.Ctx, isBucketAction bool) (auth.Action, auth.Permission, error) {
path := ctx.Path()
// ListBuckets is not publicly available
if path == "/" {
//TODO: Still not clear what kind of error should be returned in this case(ListBuckets)
return "", auth.PermissionRead, s3err.GetAPIError(s3err.ErrAccessDenied)
}
queryArgs := ctx.Context().QueryArgs()
switch ctx.Method() {
case fiber.MethodPatch:
// Admin apis should always be protected
return "", "", s3err.GetAPIError(s3err.ErrAccessDenied)
case fiber.MethodHead:
// HeadBucket
if isBucketAction {
return auth.ListBucketAction, auth.PermissionRead, nil
}
// HeadObject
return auth.GetObjectAction, auth.PermissionRead, nil
case fiber.MethodGet:
if isBucketAction {
if queryArgs.Has("tagging") {
// GetBucketTagging
return auth.GetBucketTaggingAction, auth.PermissionRead, nil
} else if queryArgs.Has("ownershipControls") {
// GetBucketOwnershipControls
return auth.GetBucketOwnershipControlsAction, auth.PermissionRead, s3err.GetAPIError(s3err.ErrAnonymousGetBucketOwnership)
} else if queryArgs.Has("versioning") {
// GetBucketVersioning
return auth.GetBucketVersioningAction, auth.PermissionRead, nil
} else if queryArgs.Has("policy") {
// GetBucketPolicy
return auth.GetBucketPolicyAction, auth.PermissionRead, nil
} else if queryArgs.Has("cors") {
// GetBucketCors
return auth.GetBucketCorsAction, auth.PermissionRead, nil
} else if queryArgs.Has("versions") {
// ListObjectVersions
return auth.ListBucketVersionsAction, auth.PermissionRead, nil
} else if queryArgs.Has("object-lock") {
// GetObjectLockConfiguration
return auth.GetBucketObjectLockConfigurationAction, auth.PermissionReadAcp, nil
} else if queryArgs.Has("acl") {
// GetBucketAcl
return auth.GetBucketAclAction, auth.PermissionRead, nil
} else if queryArgs.Has("uploads") {
// ListMultipartUploads
return auth.ListBucketMultipartUploadsAction, auth.PermissionRead, nil
} else if queryArgs.GetUintOrZero("list-type") == 2 {
// ListObjectsV2
return auth.ListBucketAction, auth.PermissionRead, nil
}
// All the other requests are considered as ListObjects in the router
// no matter what kind of query arguments are provided apart from the ones above
return auth.ListBucketAction, auth.PermissionRead, nil
}
if queryArgs.Has("tagging") {
// GetObjectTagging
return auth.GetObjectTaggingAction, auth.PermissionRead, nil
} else if queryArgs.Has("retention") {
// GetObjectRetention
return auth.GetObjectRetentionAction, auth.PermissionRead, nil
} else if queryArgs.Has("legal-hold") {
// GetObjectLegalHold
return auth.GetObjectLegalHoldAction, auth.PermissionReadAcp, nil
} else if queryArgs.Has("acl") {
// GetObjectAcl
return auth.GetObjectAclAction, auth.PermissionRead, nil
} else if queryArgs.Has("attributes") {
// GetObjectAttributes
return auth.GetObjectAttributesAction, auth.PermissionRead, nil
} else if queryArgs.Has("uploadId") {
// ListParts
return auth.ListMultipartUploadPartsAction, auth.PermissionRead, nil
}
// All the other requests are considered as GetObject in the router
// no matter what kind of query arguments are provided apart from the ones above
if queryArgs.Has("versionId") {
return auth.GetObjectVersionAction, auth.PermissionRead, nil
}
return auth.GetObjectAction, auth.PermissionRead, nil
case fiber.MethodPut:
if isBucketAction {
if queryArgs.Has("tagging") {
// PutBucketTagging
return auth.PutBucketTaggingAction, auth.PermissionWrite, nil
}
if queryArgs.Has("ownershipControls") {
// PutBucketOwnershipControls
return auth.PutBucketOwnershipControlsAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousPutBucketOwnership)
}
if queryArgs.Has("versioning") {
// PutBucketVersioning
return auth.PutBucketVersioningAction, auth.PermissionWrite, nil
}
if queryArgs.Has("object-lock") {
// PutObjectLockConfiguration
return auth.PutBucketObjectLockConfigurationAction, auth.PermissionWrite, nil
}
if queryArgs.Has("cors") {
// PutBucketCors
return auth.PutBucketCorsAction, auth.PermissionWrite, nil
}
if queryArgs.Has("policy") {
// PutBucketPolicy
return auth.PutBucketPolicyAction, auth.PermissionWrite, nil
}
if queryArgs.Has("acl") {
// PutBucketAcl
return auth.PutBucketAclAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousRequest)
}
// All the other requests are considered as 'CreateBucket' in the router
return "", "", s3err.GetAPIError(s3err.ErrAnonymousRequest)
}
if queryArgs.Has("tagging") {
// PutObjectTagging
return auth.PutObjectTaggingAction, auth.PermissionWrite, nil
}
if queryArgs.Has("retention") {
// PutObjectRetention
return auth.PutObjectRetentionAction, auth.PermissionWrite, nil
}
if queryArgs.Has("legal-hold") {
// PutObjectLegalHold
return auth.PutObjectLegalHoldAction, auth.PermissionWrite, nil
}
if queryArgs.Has("acl") {
// PutObjectAcl
return auth.PutObjectAclAction, auth.PermissionWriteAcp, s3err.GetAPIError(s3err.ErrAnonymousRequest)
}
if queryArgs.Has("uploadId") && queryArgs.Has("partNumber") {
if ctx.Get("X-Amz-Copy-Source") != "" {
// UploadPartCopy
//TODO: Add public access check for copy-source
// Return AccessDenied for now
return auth.PutObjectAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAccessDenied)
}
utils.ContextKeyBodyReader.Set(ctx, ctx.Request().BodyStream())
// UploadPart
return auth.PutObjectAction, auth.PermissionWrite, nil
}
if ctx.Get("X-Amz-Copy-Source") != "" {
return auth.PutObjectAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousCopyObject)
}
utils.ContextKeyBodyReader.Set(ctx, ctx.Request().BodyStream())
// All the other requests are considered as 'PutObject' in the router
return auth.PutObjectAction, auth.PermissionWrite, nil
case fiber.MethodPost:
if isBucketAction {
// DeleteObjects
// FIXME: should be fixed with https://github.com/versity/versitygw/issues/1327
// Return AccessDenied for now
return auth.DeleteObjectAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAccessDenied)
}
if queryArgs.Has("restore") {
return auth.RestoreObjectAction, auth.PermissionWrite, nil
}
if queryArgs.Has("select") && ctx.Query("select-type") == "2" {
// SelectObjectContent
return auth.GetObjectAction, auth.PermissionRead, s3err.GetAPIError(s3err.ErrAnonymousRequest)
}
if queryArgs.Has("uploadId") {
// CompleteMultipartUpload
return auth.PutObjectAction, auth.PermissionWrite, nil
}
// All the other requests are considered as 'CreateMultipartUpload' in the router
return "", "", s3err.GetAPIError(s3err.ErrAnonymousCreateMp)
case fiber.MethodDelete:
if isBucketAction {
if queryArgs.Has("tagging") {
// DeleteBucketTagging
return auth.PutBucketTaggingAction, auth.PermissionWrite, nil
}
if queryArgs.Has("ownershipControls") {
// DeleteBucketOwnershipControls
return auth.PutBucketOwnershipControlsAction, auth.PermissionWrite, s3err.GetAPIError(s3err.ErrAnonymousPutBucketOwnership)
}
if queryArgs.Has("policy") {
// DeleteBucketPolicy
return auth.PutBucketPolicyAction, auth.PermissionWrite, nil
}
if queryArgs.Has("cors") {
// DeleteBucketCors
return auth.PutBucketCorsAction, auth.PermissionWrite, nil
}
// All the other requests are considered as 'DeleteBucket' in the router
return auth.DeleteBucketAction, auth.PermissionWrite, nil
}
if queryArgs.Has("tagging") {
// DeleteObjectTagging
return auth.PutObjectTaggingAction, auth.PermissionWrite, nil
}
if queryArgs.Has("uploadId") {
// AbortMultipartUpload
return auth.AbortMultipartUploadAction, auth.PermissionWrite, nil
}
// All the other requests are considered as 'DeleteObject' in the router
return auth.DeleteObjectAction, auth.PermissionWrite, nil
default:
// If no action is detected, return AccessDenied?
return "", "", s3err.GetAPIError(s3err.ErrAccessDenied)
}
}
// parsePath extracts the bucket and object names from the path
func parsePath(path string) (string, string) {
p := strings.TrimPrefix(path, "/")
bucket, object, _ := strings.Cut(p, "/")
return bucket, object
}
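// For example:
//
//	parsePath("/my-bucket/dir/obj.txt") // ("my-bucket", "dir/obj.txt")
//	parsePath("/my-bucket")             // ("my-bucket", "")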

View File

@@ -1,37 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/s3api/utils"
)
func SetDefaultValues(root RootUserConfig, region string) fiber.Handler {
return func(ctx *fiber.Ctx) error {
// These are necessary for the server access logs
utils.ContextKeyRegion.Set(ctx, region)
utils.ContextKeyStartTime.Set(ctx, time.Now())
utils.ContextKeyRootAccessKey.Set(ctx, root.Access)
// Set the account and isRoot to some default values, to avoid panics
// in case of public buckets
utils.ContextKeyAccount.Set(ctx, auth.Account{})
utils.ContextKeyIsRoot.Set(ctx, false)
return ctx.Next()
}
}

View File

@@ -26,11 +26,12 @@ import (
func DecodeURL(logger s3log.AuditLogger, mm *metrics.Manager) fiber.Handler {
return func(ctx *fiber.Ctx) error {
unescp, err := url.PathUnescape(string(ctx.Request().URI().PathOriginal()))
reqURL := ctx.Request().URI().String()
decoded, err := url.Parse(reqURL)
if err != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidURI), &controllers.MetaOpts{Logger: logger, MetricsMng: mm})
}
ctx.Path(unescp)
ctx.Path(decoded.Path)
return ctx.Next()
}
}
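// Illustrative effect in either version: percent-encoded paths are
// normalized before routing, e.g. a request for "/my-bucket/my%20key"
// is routed with the path "/my-bucket/my key".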

View File

@@ -20,7 +20,6 @@ import (
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3api/middlewares"
"github.com/versity/versitygw/s3event"
"github.com/versity/versitygw/s3log"
)
@@ -36,22 +35,22 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ
adminController := controllers.NewAdminController(iam, be, aLogger)
// CreateUser admin api
app.Patch("/create-user", middlewares.IsAdmin(logger), adminController.CreateUser)
app.Patch("/create-user", adminController.CreateUser)
// DeleteUsers admin api
app.Patch("/delete-user", middlewares.IsAdmin(logger), adminController.DeleteUser)
app.Patch("/delete-user", adminController.DeleteUser)
// UpdateUser admin api
app.Patch("/update-user", middlewares.IsAdmin(logger), adminController.UpdateUser)
app.Patch("update-user", adminController.UpdateUser)
// ListUsers admin api
app.Patch("/list-users", middlewares.IsAdmin(logger), adminController.ListUsers)
app.Patch("/list-users", adminController.ListUsers)
// ChangeBucketOwner admin api
app.Patch("/change-bucket-owner", middlewares.IsAdmin(logger), adminController.ChangeBucketOwner)
app.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)
// ListBucketsAndOwners admin api
app.Patch("/list-buckets", middlewares.IsAdmin(logger), adminController.ListBuckets)
app.Patch("/list-buckets", adminController.ListBuckets)
}
// ListBuckets action

View File

@@ -29,16 +29,15 @@ import (
)
type S3ApiServer struct {
app *fiber.App
backend backend.Backend
router *S3ApiRouter
port string
cert *tls.Certificate
quiet bool
debug bool
readonly bool
health string
virtualDomain string
app *fiber.App
backend backend.Backend
router *S3ApiRouter
port string
cert *tls.Certificate
quiet bool
debug bool
readonly bool
health string
}
func New(
@@ -77,29 +76,12 @@ func New(
})
}
app.Use(middlewares.DecodeURL(l, mm))
// initialize the host-style parser if a virtual domain is specified
if server.virtualDomain != "" {
app.Use(middlewares.HostStyleParser(server.virtualDomain))
}
// initialize the default value setter middleware
app.Use(middlewares.SetDefaultValues(root, region))
// initialize the debug logger in debug mode
if server.debug {
app.Use(middlewares.DebugLogger())
}
// initialize the bucket/object name validator
app.Use(middlewares.BucketObjectNameValidator(l, mm))
// Public buckets access checker
app.Use(middlewares.AuthorizePublicBucketAccess(be, l, mm))
app.Use(middlewares.RequestLogger(server.debug))
// Authentication middlewares
app.Use(middlewares.VerifyPresignedV4Signature(root, iam, l, mm, region, server.debug))
app.Use(middlewares.VerifyV4Signature(root, iam, l, mm, region, server.debug))
app.Use(middlewares.ProcessChunkedBody(root, iam, l, mm, region))
app.Use(middlewares.VerifyMD5Body(l))
app.Use(middlewares.AclParser(be, l, server.readonly))
@@ -140,11 +122,6 @@ func WithReadOnly() Option {
return func(s *S3ApiServer) { s.readonly = true }
}
// WithHostStyle enabled host-style bucket addressing on the server
func WithHostStyle(virtualDomain string) Option {
return func(s *S3ApiServer) { s.virtualDomain = virtualDomain }
}
func (sa *S3ApiServer) Serve() (err error) {
if sa.cert != nil {
return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)

View File

@@ -56,7 +56,7 @@ func NewAuthReader(ctx *fiber.Ctx, r io.Reader, auth AuthData, secret string, de
var hr *HashReader
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if !IsSpecialPayload(hashPayload) {
hr, _ = NewHashReader(r, "", HashTypeSha256Hex)
hr, _ = NewHashReader(r, "", HashTypeSha256)
} else {
hr, _ = NewHashReader(r, "", HashTypeNone)
}
@@ -190,10 +190,6 @@ func ParseAuthorization(authorization string) (AuthData, error) {
algo := authParts[0]
if algo != "AWS4-HMAC-SHA256" {
return a, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported)
}
kvData := authParts[1]
kvPairs := strings.Split(kvData, ",")
// we are expecting at least Credential, SignedHeaders, and Signature
@@ -264,3 +260,19 @@ func removeSpace(str string) string {
}
return b.String()
}
var (
specialValues = map[string]bool{
"UNSIGNED-PAYLOAD": true,
"STREAMING-UNSIGNED-PAYLOAD-TRAILER": true,
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD": true,
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER": true,
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD": true,
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER": true,
}
)
// IsSpecialPayload checks for streaming/unsigned authorization types
func IsSpecialPayload(str string) bool {
return specialValues[str]
}
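// For example:
//
//	IsSpecialPayload("UNSIGNED-PAYLOAD")                   // true
//	IsSpecialPayload("STREAMING-AWS4-HMAC-SHA256-PAYLOAD") // true
//	IsSpecialPayload("deadbeef")                           // false (regular hex digest)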

View File

@@ -15,155 +15,260 @@
package utils
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"net/http"
"math"
"strconv"
"strings"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3err"
)
const (
maxObjSizeLimit = 5 * 1024 * 1024 * 1024 // 5gb
)
type payloadType string
// chunked uploads described in:
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
const (
payloadTypeUnsigned payloadType = "UNSIGNED-PAYLOAD"
payloadTypeStreamingUnsignedTrailer payloadType = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
payloadTypeStreamingSigned payloadType = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
payloadTypeStreamingSignedTrailer payloadType = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
payloadTypeStreamingEcdsa payloadType = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
payloadTypeStreamingEcdsaTrailer payloadType = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
chunkHdrStr = ";chunk-signature="
chunkHdrDelim = "\r\n"
zeroLenSig = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
awsV4 = "AWS4"
awsS3Service = "s3"
awsV4Request = "aws4_request"
streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
)
func getPayloadTypeNotSupportedErr(p payloadType) error {
return s3err.APIError{
HTTPStatusCode: http.StatusNotImplemented,
Code: "NotImplemented",
Description: fmt.Sprintf("The chunk encoding algorithm %v is not supported.", p),
// ChunkReader reads from a chunked upload request body and returns
// the object data stream
type ChunkReader struct {
r io.Reader
signingKey []byte
prevSig string
parsedSig string
currentChunkSize int64
chunkDataLeft int64
trailerExpected int
stash []byte
chunkHash hash.Hash
strToSignPrefix string
skipcheck bool
}
// NewChunkReader reads from request body io.Reader and parses out the
// chunk metadata in the stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (*ChunkReader, error) {
return &ChunkReader{
r: r,
signingKey: getSigningKey(secret, region, date),
// the authdata.Signature is validated in the auth-reader,
// so we can use that here without any other checks
prevSig: authdata.Signature,
chunkHash: sha256.New(),
strToSignPrefix: getStringToSignPrefix(date, region),
}, nil
}
// Read satisfies the io.Reader for this type
func (cr *ChunkReader) Read(p []byte) (int, error) {
n, err := cr.r.Read(p)
if err != nil && err != io.EOF {
return n, err
}
if cr.chunkDataLeft < int64(n) {
chunkSize := cr.chunkDataLeft
if chunkSize > 0 {
cr.chunkHash.Write(p[:chunkSize])
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
n += int(chunkSize)
return n, err
}
cr.chunkDataLeft -= int64(n)
cr.chunkHash.Write(p[:n])
return n, err
}
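// Minimal usage sketch (assumes the seed signature was already validated
// upstream and dst is any io.Writer; names are illustrative):
//
//	cr, err := NewChunkReader(ctx, ctx.Request().BodyStream(), authData,
//		"us-east-1", secret, date)
//	if err != nil {
//		return err
//	}
//	_, err = io.Copy(dst, cr) // dst receives only the object bytes;
//	                          // chunk headers and signatures are stripped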
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// This part is the same for all chunks,
// only the previous signature and hash of current chunk changes
func getStringToSignPrefix(date time.Time, region string) string {
credentialScope := fmt.Sprintf("%s/%s/%s/%s",
date.Format("20060102"),
region,
awsS3Service,
awsV4Request)
return fmt.Sprintf("%s\n%s\n%s",
streamPayloadAlgo,
date.Format("20060102T150405Z"),
credentialScope)
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// signature For each chunk, you calculate the signature using the following
// string to sign. For the first chunk, you use the seed-signature as the
// previous signature.
func getChunkStringToSign(prefix, prevSig string, chunkHash []byte) string {
return fmt.Sprintf("%s\n%s\n%s\n%s",
prefix,
prevSig,
zeroLenSig,
hex.EncodeToString(chunkHash))
}
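// Putting the two together, each chunk's string to sign has this shape
// (one field per line, values illustrative):
//
//	AWS4-HMAC-SHA256-PAYLOAD
//	20130524T000000Z
//	20130524/us-east-1/s3/aws4_request
//	<previous chunk's signature (hex)>
//	<zeroLenSig, the SHA-256 of an empty string>
//	<hex SHA-256 of the current chunk's data>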
// The provided p should have all of the previous chunk data and trailer
// consumed already. The positioning here is expected that p[0] starts the
// new chunk size with the ";chunk-signature=" following. The only exception
// is if we started consuming the trailer, but hit the end of the read buffer.
// In this case, parseAndRemoveChunkInfo is called with skipcheck=true to
// finish consuming the final trailer bytes.
// This parses the chunk metadata in situ without allocating an extra buffer.
// It will just read and validate the chunk metadata and then move the
// following chunk data to overwrite the metadata in the provided buffer.
func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
n := len(p)
if !cr.skipcheck && cr.parsedSig != "" {
chunkhash := cr.chunkHash.Sum(nil)
cr.chunkHash.Reset()
sigstr := getChunkStringToSign(cr.strToSignPrefix, cr.prevSig, chunkhash)
cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))
if cr.currentChunkSize != 0 && cr.prevSig != cr.parsedSig {
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
}
if cr.trailerExpected != 0 {
if len(p) < len(chunkHdrDelim) {
// This is the special case where we need to consume the
// trailer, but instead hit the end of the buffer. The
// subsequent call will finish consuming the trailer.
cr.chunkDataLeft = 0
cr.trailerExpected -= len(p)
cr.skipcheck = true
return 0, nil
}
// move data up to remove trailer
copy(p, p[cr.trailerExpected:])
n -= cr.trailerExpected
}
cr.skipcheck = false
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
cr.currentChunkSize = chunkSize
cr.parsedSig = sig
if err == errskipHeader {
cr.chunkDataLeft = 0
return 0, nil
}
if err != nil {
return 0, err
}
if chunkSize == 0 {
return 0, io.EOF
}
cr.trailerExpected = len(chunkHdrDelim)
// move data up to remove chunk header
copy(p, p[bufOffset:n])
n -= bufOffset
// if remaining buffer larger than chunk data,
// parse next header in buffer
if int64(n) > chunkSize {
cr.chunkDataLeft = 0
cr.chunkHash.Write(p[:chunkSize])
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
if (chunkSize + int64(n)) > math.MaxInt {
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
return n + int(chunkSize), err
}
cr.chunkDataLeft = chunkSize - int64(n)
cr.chunkHash.Write(p[:n])
return n, nil
}
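For orientation, the aws-chunked framing this parser walks looks like the following, per the AWS streaming-signature docs linked above (hex sizes, 64-hex-char signatures; a zero-size chunk terminates the stream, and any requested trailing checksum plus x-amz-trailer-signature lines sit between the zero chunk and the final blank line):

	<hex-size>;chunk-signature=<64-hex-sig>\r\n
	<chunk data>\r\n
	<hex-size>;chunk-signature=<64-hex-sig>\r\n
	<chunk data>\r\n
	0;chunk-signature=<64-hex-sig>\r\n
	\r\n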
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
// Task 3: Calculate Signature
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html#signing-request-intro
func getSigningKey(secret, region string, date time.Time) []byte {
dateKey := hmac256([]byte(awsV4+secret), []byte(date.Format(yyyymmdd)))
dateRegionKey := hmac256(dateKey, []byte(region))
dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
return signingKey
}
func hmac256(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
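A short sketch of how these two primitives pair up (signExample is hypothetical; the secret and string to sign are placeholders):

func signExample(secret, region, strToSign string) string {
	// derive the scoped signing key once per day/region/service
	key := getSigningKey(secret, region, time.Now().UTC())
	// seal the string to sign with a single HMAC over that key
	return hex.EncodeToString(hmac256(key, []byte(strToSign)))
}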
var (
specialValues = map[payloadType]bool{
payloadTypeUnsigned: true,
payloadTypeStreamingUnsignedTrailer: true,
payloadTypeStreamingSigned: true,
payloadTypeStreamingSignedTrailer: true,
payloadTypeStreamingEcdsa: true,
payloadTypeStreamingEcdsaTrailer: true,
}
errInvalidChunkFormat = errors.New("invalid chunk header format")
errskipHeader = errors.New("skip to next header")
)
func (pt payloadType) isValid() bool {
return pt == payloadTypeUnsigned ||
pt == payloadTypeStreamingUnsignedTrailer ||
pt == payloadTypeStreamingSigned ||
pt == payloadTypeStreamingSignedTrailer ||
pt == payloadTypeStreamingEcdsa ||
pt == payloadTypeStreamingEcdsaTrailer
}
type checksumType string
const (
checksumTypeCrc32 checksumType = "x-amz-checksum-crc32"
checksumTypeCrc32c checksumType = "x-amz-checksum-crc32c"
checksumTypeSha1 checksumType = "x-amz-checksum-sha1"
checksumTypeSha256 checksumType = "x-amz-checksum-sha256"
checksumTypeCrc64nvme checksumType = "x-amz-checksum-crc64nvme"
maxHeaderSize = 1024
)
func (c checksumType) isValid() bool {
return c == checksumTypeCrc32 ||
c == checksumTypeCrc32c ||
c == checksumTypeSha1 ||
c == checksumTypeSha256 ||
c == checksumTypeCrc64nvme
}
// Extracts and validates the checksum type from the 'X-Amz-Trailer' header
func ExtractChecksumType(ctx *fiber.Ctx) (checksumType, error) {
trailer := ctx.Get("X-Amz-Trailer")
chType := checksumType(strings.ToLower(trailer))
if chType != "" && !chType.isValid() {
debuglogger.Logf("invalid value for 'X-Amz-Trailer': %v", chType)
return "", s3err.GetAPIError(s3err.ErrTrailerHeaderNotSupported)
}
return chType, nil
}
// IsSpecialPayload checks for special authorization types
func IsSpecialPayload(str string) bool {
return specialValues[payloadType(str)]
}
// Checks if the provided string is the unsigned payload trailer type
func IsUnsignedStreamingPayload(str string) bool {
return payloadType(str) == payloadTypeStreamingUnsignedTrailer
}
// IsStreamingPayload checks for streaming/unsigned authorization types
func IsStreamingPayload(str string) bool {
pt := payloadType(str)
return pt == payloadTypeStreamingUnsignedTrailer ||
pt == payloadTypeStreamingSigned ||
pt == payloadTypeStreamingSignedTrailer
}
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
decContLengthStr := ctx.Get("X-Amz-Decoded-Content-Length")
if decContLengthStr == "" {
debuglogger.Logf("missing required header 'X-Amz-Decoded-Content-Length'")
return nil, s3err.GetAPIError(s3err.ErrMissingContentLength)
}
decContLength, err := strconv.ParseInt(decContLengthStr, 10, 64)
if err != nil {
//TODO: not sure if InvalidRequest should be returned in this case
debuglogger.Logf("invalid value for 'X-Amz-Decoded-Content-Length': %v", decContLengthStr)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if decContLength > maxObjSizeLimit {
debuglogger.Logf("the object size exceeds the allowed limit: (size): %v, (limit): %v", decContLength, maxObjSizeLimit)
return nil, s3err.GetAPIError(s3err.ErrEntityTooLarge)
}
contentSha256 := payloadType(ctx.Get("X-Amz-Content-Sha256"))
if !contentSha256.isValid() {
//TODO: Add proper APIError
debuglogger.Logf("invalid value for 'X-Amz-Content-Sha256': %v", contentSha256)
return nil, fmt.Errorf("invalid x-amz-content-sha256: %v", string(contentSha256))
}
checksumType, err := ExtractChecksumType(ctx)
if err != nil {
return nil, err
}
if contentSha256 != payloadTypeStreamingSigned && checksumType == "" {
debuglogger.Logf("empty value for required trailer header 'X-Amz-Trailer': %v", checksumType)
return nil, s3err.GetAPIError(s3err.ErrTrailerHeaderNotSupported)
}
switch contentSha256 {
case payloadTypeStreamingUnsignedTrailer:
return NewUnsignedChunkReader(r, checksumType)
case payloadTypeStreamingSignedTrailer:
return NewSignedChunkReader(r, authdata, region, secret, date, checksumType)
case payloadTypeStreamingSigned:
return NewSignedChunkReader(r, authdata, region, secret, date, "")
// return not supported for:
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER
default:
debuglogger.Logf("unsupported chunk reader algorithm: %v", contentSha256)
return nil, getPayloadTypeNotSupportedErr(contentSha256)
}
}
// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
stashLen := len(cr.stash)
if cr.stash != nil {
tmp := make([]byte, maxHeaderSize)
copy(tmp, cr.stash)
copy(tmp[len(cr.stash):], header)
header = tmp
cr.stash = nil
}
semicolonIndex := bytes.Index(header, []byte(chunkHdrStr))
if semicolonIndex == -1 {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
cr.trailerExpected = 0
return 0, "", 0, errskipHeader
}
sigIndex := semicolonIndex + len(chunkHdrStr)
sigEndIndex := bytes.Index(header[sigIndex:], []byte(chunkHdrDelim))
if sigEndIndex == -1 {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
cr.trailerExpected = 0
return 0, "", 0, errskipHeader
}
chunkSizeBytes := header[:semicolonIndex]
chunkSize, err := strconv.ParseInt(string(chunkSizeBytes), 16, 64)
if err != nil {
return 0, "", 0, errInvalidChunkFormat
}
signature := string(header[sigIndex:(sigIndex + sigEndIndex)])
dataStartOffset := sigIndex + sigEndIndex + len(chunkHdrDelim)
return chunkSize, signature, dataStartOffset - stashLen, nil
}
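Where NewChunkReader slots into request handling, as a hypothetical sketch (wrapBody is not in the source): the raw request body is swapped for a decoding reader before the backend ever sees it.

func wrapBody(ctx *fiber.Ctx, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
	var body io.Reader = bytes.NewReader(ctx.Body())
	// only chunk-encoded payload types need the decoding wrapper
	if IsStreamingPayload(ctx.Get("X-Amz-Content-Sha256")) {
		return NewChunkReader(ctx, body, authdata, region, secret, date)
	}
	return body, nil
}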

View File

@@ -1,65 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"github.com/gofiber/fiber/v2"
)
// Region, StartTime, IsRoot, Account, AccessKey context locals
// are set to default values in middlewares.SetDefaultValues
// to avoid nil interface conversions
type ContextKey string
const (
ContextKeyRegion ContextKey = "region"
ContextKeyStartTime ContextKey = "start-time"
ContextKeyIsRoot ContextKey = "is-root"
ContextKeyRootAccessKey ContextKey = "root-access-key"
ContextKeyAccount ContextKey = "account"
ContextKeyAuthenticated ContextKey = "authenticated"
ContextKeyPublicBucket ContextKey = "public-bucket"
ContextKeyParsedAcl ContextKey = "parsed-acl"
ContextKeySkipResBodyLog ContextKey = "skip-res-body-log"
ContextKeyBodyReader ContextKey = "body-reader"
)
func (ck ContextKey) Values() []ContextKey {
return []ContextKey{
ContextKeyRegion,
ContextKeyStartTime,
ContextKeyIsRoot,
ContextKeyRootAccessKey,
ContextKeyAccount,
ContextKeyAuthenticated,
ContextKeyPublicBucket,
ContextKeyParsedAcl,
ContextKeySkipResBodyLog,
ContextKeyBodyReader,
}
}
func (ck ContextKey) Set(ctx *fiber.Ctx, val any) {
ctx.Locals(string(ck), val)
}
func (ck ContextKey) IsSet(ctx *fiber.Ctx) bool {
val := ctx.Locals(string(ck))
return val != nil
}
func (ck ContextKey) Get(ctx *fiber.Ctx) any {
return ctx.Locals(string(ck))
}
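For context, a minimal sketch of how these typed keys were used, so raw string keys never leak into handlers (the helper names setRegion and regionOf are illustrative):

func setRegion(ctx *fiber.Ctx, region string) {
	ContextKeyRegion.Set(ctx, region)
}

func regionOf(ctx *fiber.Ctx) string {
	// the type assertion guards against the nil interface
	// conversion mentioned above
	if region, ok := ContextKeyRegion.Get(ctx).(string); ok {
		return region
	}
	return ""
}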

View File

@@ -1,180 +0,0 @@
// Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
// Jean-loup Gailly Mark Adler
// jloup@gzip.org madler@alumni.caltech.edu
// Original implementation is from
// https://github.com/vimeo/go-util/blob/8cd4c737f091d9317f72b25df78ce6cf869f7d30/crc32combine/crc32combine.go
// extended for crc64 support.
// Following is ported from C to Go in 2016 by Justin Ruggles, with minimal alteration.
// Used uint for unsigned long. Used uint32 for input arguments in order to match
// the Go hash/crc32 package. zlib CRC32 combine (https://github.com/madler/zlib)
package utils
import (
"hash/crc64"
)
const crc64NVME = 0x9a6c_9329_ac4b_c9b5
var crc64NVMETable = crc64.MakeTable(crc64NVME)
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
var sum uint64
for vec != 0 {
if vec&1 != 0 {
sum ^= mat[0]
}
vec >>= 1
mat = mat[1:]
}
return sum
}
func gf2MatrixSquare(square, mat []uint64) {
if len(square) != len(mat) {
panic("square matrix size mismatch")
}
for n := range mat {
square[n] = gf2MatrixTimes(mat, mat[n])
}
}
// crc32Combine returns the combined CRC-32 hash value of the two passed CRC-32
// hash values crc1 and crc2. poly represents the generator polynomial
// and len2 specifies the byte length that the crc2 hash covers.
func crc32Combine(poly uint32, crc1, crc2 uint32, len2 int64) uint32 {
// degenerate case (also disallow negative lengths)
if len2 <= 0 {
return crc1
}
even := make([]uint64, 32) // even-power-of-two zeros operator
odd := make([]uint64, 32) // odd-power-of-two zeros operator
// put operator for one zero bit in odd
odd[0] = uint64(poly) // CRC-32 polynomial
row := uint64(1)
for n := 1; n < 32; n++ {
odd[n] = row
row <<= 1
}
// put operator for two zero bits in even
gf2MatrixSquare(even, odd)
// put operator for four zero bits in odd
gf2MatrixSquare(odd, even)
// apply len2 zeros to crc1 (first square will put the operator for one
// zero byte, eight zero bits, in even)
crc1n := uint64(crc1)
for {
// apply zeros operator for this bit of len2
gf2MatrixSquare(even, odd)
if len2&1 != 0 {
crc1n = gf2MatrixTimes(even, crc1n)
}
len2 >>= 1
// if no more bits set, then done
if len2 == 0 {
break
}
// another iteration of the loop with odd and even swapped
gf2MatrixSquare(odd, even)
if len2&1 != 0 {
crc1n = gf2MatrixTimes(odd, crc1n)
}
len2 >>= 1
// if no more bits set, then done
if len2 == 0 {
break
}
}
// return combined crc
crc1n ^= uint64(crc2)
return uint32(crc1n)
}
// crc64Combine returns the combined CRC-64 hash value of the two passed CRC-64
// hash values crc1 and crc2. poly represents the generator polynomial
// and len2 specifies the byte length that the crc2 hash covers.
func crc64Combine(poly uint64, crc1, crc2 uint64, len2 int64) uint64 {
// degenerate case (also disallow negative lengths)
if len2 <= 0 {
return crc1
}
even := make([]uint64, 64) // even-power-of-two zeros operator
odd := make([]uint64, 64) // odd-power-of-two zeros operator
// put operator for one zero bit in odd
odd[0] = poly // CRC-64 polynomial
row := uint64(1)
for n := 1; n < 64; n++ {
odd[n] = row
row <<= 1
}
// put operator for two zero bits in even
gf2MatrixSquare(even, odd)
// put operator for four zero bits in odd
gf2MatrixSquare(odd, even)
// apply len2 zeros to crc1 (first square will put the operator for one
// zero byte, eight zero bits, in even)
crc1n := crc1
for {
// apply zeros operator for this bit of len2
gf2MatrixSquare(even, odd)
if len2&1 != 0 {
crc1n = gf2MatrixTimes(even, crc1n)
}
len2 >>= 1
// if no more bits set, then done
if len2 == 0 {
break
}
// another iteration of the loop with odd and even swapped
gf2MatrixSquare(odd, even)
if len2&1 != 0 {
crc1n = gf2MatrixTimes(odd, crc1n)
}
len2 >>= 1
// if no more bits set, then done
if len2 == 0 {
break
}
}
// return combined crc
crc1n ^= crc2
return crc1n
}

View File

@@ -1,57 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"hash/crc32"
"hash/crc64"
"testing"
)
func TestCRC32Combine(t *testing.T) {
data := []byte("The quick brown fox jumps over the lazy dog")
mid := len(data) / 2
part1 := data[:mid]
part2 := data[mid:]
var poly uint32 = crc32.IEEE
tab := crc32.MakeTable(poly)
crc1 := crc32.Checksum(part1, tab)
crc2 := crc32.Checksum(part2, tab)
combined := crc32Combine(poly, crc1, crc2, int64(len(part2)))
full := crc32.Checksum(data, tab)
if combined != full {
t.Errorf("crc32Combine failed: got %08x, want %08x", combined, full)
}
}
func TestCRC64Combine(t *testing.T) {
data := []byte("The quick brown fox jumps over the lazy dog")
mid := len(data) / 2
part1 := data[:mid]
part2 := data[mid:]
var poly uint64 = crc64NVME
tab := crc64NVMETable
crc1 := crc64.Checksum(part1, tab)
crc2 := crc64.Checksum(part2, tab)
combined := crc64Combine(poly, crc1, crc2, int64(len(part2)))
full := crc64.Checksum(data, tab)
if combined != full {
t.Errorf("crc64Combine failed: got %016x, want %016x", combined, full)
}
}

View File

@@ -16,18 +16,13 @@ package utils
import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"hash"
"hash/crc32"
"hash/crc64"
"io"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
)
@@ -36,21 +31,11 @@ type HashType string
const (
// HashTypeMd5 generates MD5 checksum for the data stream
HashTypeMd5 HashType = "md5"
// HashTypeSha256 generates SHA256 Base64-Encoded checksum for the data stream
HashTypeSha256 HashType = "sha256"
// HashTypeSha256Hex generates SHA256 hex encoded checksum for the data stream
HashTypeSha256Hex HashType = "sha256-hex"
// HashTypeSha1 generates SHA1 Base64-Encoded checksum for the data stream
HashTypeSha1 HashType = "sha1"
// HashTypeCRC32 generates CRC32 Base64-Encoded checksum for the data stream
HashTypeCRC32 HashType = "crc32"
// HashTypeCRC32C generates CRC32C Base64-Encoded checksum for the data stream
HashTypeCRC32C HashType = "crc32c"
// HashTypeCRC64NVME generates CRC64NVME Base64-Encoded checksum for the data stream
HashTypeCRC64NVME HashType = "crc64nvme"
HashTypeMd5 = "md5"
// HashTypeSha256 generates SHA256 checksum for the data stream
HashTypeSha256 = "sha256"
// HashTypeNone is a no-op checksum for the data stream
HashTypeNone HashType = "none"
HashTypeNone = "none"
)
// HashReader is an io.Reader that calculates the checksum
@@ -77,18 +62,8 @@ func NewHashReader(r io.Reader, expectedSum string, ht HashType) (*HashReader, e
switch ht {
case HashTypeMd5:
hash = md5.New()
case HashTypeSha256Hex:
hash = sha256.New()
case HashTypeSha256:
hash = sha256.New()
case HashTypeSha1:
hash = sha1.New()
case HashTypeCRC32:
hash = crc32.NewIEEE()
case HashTypeCRC32C:
hash = crc32.New(crc32.MakeTable(crc32.Castagnoli))
case HashTypeCRC64NVME:
hash = crc64.New(crc64NVMETable)
case HashTypeNone:
hash = noop{}
default:
@@ -113,40 +88,15 @@ func (hr *HashReader) Read(p []byte) (int, error) {
if errors.Is(readerr, io.EOF) && hr.sum != "" {
switch hr.hashType {
case HashTypeMd5:
sum := hr.Sum()
sum := base64.StdEncoding.EncodeToString(hr.hash.Sum(nil))
if sum != hr.sum {
return n, s3err.GetAPIError(s3err.ErrInvalidDigest)
}
case HashTypeSha256Hex:
sum := hr.Sum()
case HashTypeSha256:
sum := hex.EncodeToString(hr.hash.Sum(nil))
if sum != hr.sum {
return n, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch)
}
case HashTypeCRC32:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc32)
}
case HashTypeCRC32C:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc32c)
}
case HashTypeSha1:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmSha1)
}
case HashTypeSha256:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmSha256)
}
case HashTypeCRC64NVME:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc64nvme)
}
default:
return n, errInvalidHashType
}
@@ -154,38 +104,20 @@ func (hr *HashReader) Read(p []byte) (int, error) {
return n, readerr
}
func (hr *HashReader) SetReader(r io.Reader) {
hr.r = r
}
// Sum returns the checksum hash of the data read so far
func (hr *HashReader) Sum() string {
switch hr.hashType {
case HashTypeMd5:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeSha256Hex:
return hex.EncodeToString(hr.hash.Sum(nil))
case HashTypeCRC32:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeCRC32C:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeSha1:
return Base64SumString(hr.hash.Sum(nil))
return Md5SumString(hr.hash.Sum(nil))
case HashTypeSha256:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeCRC64NVME:
return Base64SumString(hr.hash.Sum(nil))
return hex.EncodeToString(hr.hash.Sum(nil))
default:
return ""
}
}
func (hr *HashReader) Type() HashType {
return hr.hashType
}
// Base64SumString converts the hash bytes to the b64 encoded string checksum value
func Base64SumString(b []byte) string {
// Md5SumString converts the hash bytes to the string checksum value
func Md5SumString(b []byte) string {
return base64.StdEncoding.EncodeToString(b)
}
@@ -196,161 +128,3 @@ func (n noop) Sum(b []byte) []byte { return []byte{} }
func (n noop) Reset() {}
func (n noop) Size() int { return 0 }
func (n noop) BlockSize() int { return 1 }
// IsChecksumComposable tests if the final full object crc can be calculated
// based on the part crc values.
func IsChecksumComposable(algo types.ChecksumAlgorithm) bool {
switch algo {
case types.ChecksumAlgorithmCrc32, types.ChecksumAlgorithmCrc32c, types.ChecksumAlgorithmCrc64nvme:
return true
default:
return false
}
}
// AddCRCChecksum calculates the composite CRC checksum after adding the part crc.
// Only CRC32, CRC32C, and CRC64NVME are supported. The input checksums must be base64-encoded strings.
func AddCRCChecksum(algo types.ChecksumAlgorithm, crc, partCrc string, partLen int64) (string, error) {
switch algo {
case types.ChecksumAlgorithmCrc32:
data, err := base64.StdEncoding.DecodeString(partCrc)
if err != nil {
return "", fmt.Errorf("base64 decode partCrc: %w", err)
}
if len(data) != 4 {
return "", fmt.Errorf("invalid crc32 part checksum length: %d", len(data))
}
currentCRC, err := base64.StdEncoding.DecodeString(crc)
if err != nil {
return "", fmt.Errorf("base64 decode crc: %w", err)
}
if len(currentCRC) != 4 {
return "", fmt.Errorf("invalid crc32 checksum length: %d", len(currentCRC))
}
currentVal := uint32(currentCRC[0])<<24 | uint32(currentCRC[1])<<16 | uint32(currentCRC[2])<<8 | uint32(currentCRC[3])
val := uint32(data[0])<<24 | uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
composite := crc32Combine(crc32.IEEE, currentVal, val, partLen)
out := []byte{
byte(composite >> 24),
byte(composite >> 16),
byte(composite >> 8),
byte(composite),
}
return base64.StdEncoding.EncodeToString(out), nil
case types.ChecksumAlgorithmCrc32c:
data, err := base64.StdEncoding.DecodeString(partCrc)
if err != nil {
return "", fmt.Errorf("base64 decode partCrc: %w", err)
}
if len(data) != 4 {
return "", fmt.Errorf("invalid crc32 part checksum length: %d", len(data))
}
currentCRC, err := base64.StdEncoding.DecodeString(crc)
if err != nil {
return "", fmt.Errorf("base64 decode crc: %w", err)
}
if len(currentCRC) != 4 {
return "", fmt.Errorf("invalid crc32 checksum length: %d", len(currentCRC))
}
currentVal := uint32(currentCRC[0])<<24 | uint32(currentCRC[1])<<16 | uint32(currentCRC[2])<<8 | uint32(currentCRC[3])
val := uint32(data[0])<<24 | uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
composite := crc32Combine(crc32.Castagnoli, currentVal, val, partLen)
// Convert composite to big-endian bytes
out := []byte{
byte(composite >> 24),
byte(composite >> 16),
byte(composite >> 8),
byte(composite),
}
return base64.StdEncoding.EncodeToString(out), nil
case types.ChecksumAlgorithmCrc64nvme:
data, err := base64.StdEncoding.DecodeString(partCrc)
if err != nil {
return "", fmt.Errorf("base64 decode partCrc: %w", err)
}
if len(data) != 8 {
return "", fmt.Errorf("invalid crc64 part checksum length: %d", len(data))
}
currentCRC, err := base64.StdEncoding.DecodeString(crc)
if err != nil {
return "", fmt.Errorf("base64 decode crc: %w", err)
}
if len(currentCRC) != 8 {
return "", fmt.Errorf("invalid crc64 checksum length: %d", len(currentCRC))
}
currentVal := uint64(currentCRC[0])<<56 | uint64(currentCRC[1])<<48 | uint64(currentCRC[2])<<40 | uint64(currentCRC[3])<<32 |
uint64(currentCRC[4])<<24 | uint64(currentCRC[5])<<16 | uint64(currentCRC[6])<<8 | uint64(currentCRC[7])
val := uint64(data[0])<<56 | uint64(data[1])<<48 | uint64(data[2])<<40 | uint64(data[3])<<32 |
uint64(data[4])<<24 | uint64(data[5])<<16 | uint64(data[6])<<8 | uint64(data[7])
composite := crc64Combine(crc64NVME, currentVal, val, partLen)
out := []byte{
byte(composite >> 56), byte(composite >> 48), byte(composite >> 40), byte(composite >> 32),
byte(composite >> 24), byte(composite >> 16), byte(composite >> 8), byte(composite),
}
return base64.StdEncoding.EncodeToString(out), nil
default:
return "", fmt.Errorf("composite checksum not supported for algorithm: %v", algo)
}
}
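A sketch of folding per-part CRCs into a running full-object CRC, as a CompleteMultipartUpload might (rollUpCRC is a hypothetical helper; CRC32 stands in for any of the three supported algorithms):

func rollUpCRC(partSums []string, partLens []int64) (string, error) {
	if len(partSums) == 0 || len(partSums) != len(partLens) {
		return "", fmt.Errorf("mismatched or empty part lists")
	}
	full := partSums[0]
	for i := 1; i < len(partSums); i++ {
		// fold each subsequent part into the running checksum
		next, err := AddCRCChecksum(types.ChecksumAlgorithmCrc32, full, partSums[i], partLens[i])
		if err != nil {
			return "", err
		}
		full = next
	}
	return full, nil
}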
// NewCompositeChecksumReader initializes a composite checksum
// processor, which decodes and validates the provided
// checksums and returns the final checksum derived from
// all previously processed part checksums.
//
// The supported checksum types are:
// - CRC32
// - CRC32C
// - SHA1
// - SHA256
func NewCompositeChecksumReader(ht HashType) (*CompositeChecksumReader, error) {
var hasher hash.Hash
switch ht {
case HashTypeSha256:
hasher = sha256.New()
case HashTypeSha1:
hasher = sha1.New()
case HashTypeCRC32:
hasher = crc32.NewIEEE()
case HashTypeCRC32C:
hasher = crc32.New(crc32.MakeTable(crc32.Castagnoli))
case HashTypeNone:
hasher = noop{}
default:
return nil, errInvalidHashType
}
return &CompositeChecksumReader{
hasher: hasher,
}, nil
}
type CompositeChecksumReader struct {
hasher hash.Hash
}
// Decodes the checksum and writes it into the hasher
func (ccr *CompositeChecksumReader) Process(checksum string) error {
data, err := base64.StdEncoding.DecodeString(checksum)
if err != nil {
return fmt.Errorf("base64 decode: %w", err)
}
_, err = ccr.hasher.Write(data)
if err != nil {
return fmt.Errorf("hash write: %w", err)
}
return nil
}
// Returns the base64 encoded composite checksum
func (ccr *CompositeChecksumReader) Sum() string {
return Base64SumString(ccr.hasher.Sum(nil))
}
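A usage sketch, assuming the AWS-style convention of suffixing composite checksums with the part count (compositeOf is illustrative, not part of the source):

func compositeOf(partSums []string) (string, error) {
	ccr, err := NewCompositeChecksumReader(HashTypeCRC32)
	if err != nil {
		return "", err
	}
	for _, sum := range partSums { // each entry is a base64 part checksum
		if err := ccr.Process(sum); err != nil {
			return "", err
		}
	}
	// checksum-of-checksums plus "-<parts>" suffix
	return fmt.Sprintf("%s-%d", ccr.Sum(), len(partSums)), nil
}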

View File

@@ -1,120 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"encoding/base64"
"hash/crc32"
"hash/crc64"
"testing"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
func TestAddCRCChecksum_CRC32(t *testing.T) {
data := []byte("this is a test buffer for crc32")
mid := len(data) / 2
part1 := data[:mid]
part2 := data[mid:]
crc1 := crc32.Checksum(part1, crc32.IEEETable)
crc2 := crc32.Checksum(part2, crc32.IEEETable)
crcFull := crc32.Checksum(data, crc32.IEEETable)
crc1b := []byte{byte(crc1 >> 24), byte(crc1 >> 16), byte(crc1 >> 8), byte(crc1)}
crc2b := []byte{byte(crc2 >> 24), byte(crc2 >> 16), byte(crc2 >> 8), byte(crc2)}
crc1b64 := base64.StdEncoding.EncodeToString(crc1b)
crc2b64 := base64.StdEncoding.EncodeToString(crc2b)
combined, err := AddCRCChecksum(types.ChecksumAlgorithmCrc32, crc1b64, crc2b64, int64(len(part2)))
if err != nil {
t.Fatalf("AddCRCChecksum failed: %v", err)
}
combinedBytes, err := base64.StdEncoding.DecodeString(combined)
if err != nil {
t.Fatalf("base64 decode failed: %v", err)
}
combinedVal := uint32(combinedBytes[0])<<24 | uint32(combinedBytes[1])<<16 | uint32(combinedBytes[2])<<8 | uint32(combinedBytes[3])
if combinedVal != crcFull {
t.Errorf("CRC32 combine mismatch: got %x, want %x", combinedVal, crcFull)
}
}
func TestAddCRCChecksum_CRC32c(t *testing.T) {
data := []byte("this is a test buffer for crc32c")
mid := len(data) / 2
part1 := data[:mid]
part2 := data[mid:]
castagnoli := crc32.MakeTable(crc32.Castagnoli)
crc1 := crc32.Checksum(part1, castagnoli)
crc2 := crc32.Checksum(part2, castagnoli)
crcFull := crc32.Checksum(data, castagnoli)
crc1b := []byte{byte(crc1 >> 24), byte(crc1 >> 16), byte(crc1 >> 8), byte(crc1)}
crc2b := []byte{byte(crc2 >> 24), byte(crc2 >> 16), byte(crc2 >> 8), byte(crc2)}
crc1b64 := base64.StdEncoding.EncodeToString(crc1b)
crc2b64 := base64.StdEncoding.EncodeToString(crc2b)
combined, err := AddCRCChecksum(types.ChecksumAlgorithmCrc32c, crc1b64, crc2b64, int64(len(part2)))
if err != nil {
t.Fatalf("AddCRCChecksum failed: %v", err)
}
combinedBytes, err := base64.StdEncoding.DecodeString(combined)
if err != nil {
t.Fatalf("base64 decode failed: %v", err)
}
combinedVal := uint32(combinedBytes[0])<<24 | uint32(combinedBytes[1])<<16 | uint32(combinedBytes[2])<<8 | uint32(combinedBytes[3])
if combinedVal != crcFull {
t.Errorf("CRC32c combine mismatch: got %x, want %x", combinedVal, crcFull)
}
}
func TestAddCRCChecksum_CRC64NVME(t *testing.T) {
data := []byte("this is a test buffer for crc64nvme")
mid := len(data) / 2
part1 := data[:mid]
part2 := data[mid:]
table := crc64NVMETable
crc1 := crc64.Checksum(part1, table)
crc2 := crc64.Checksum(part2, table)
crcFull := crc64.Checksum(data, table)
crc1b := []byte{
byte(crc1 >> 56), byte(crc1 >> 48), byte(crc1 >> 40), byte(crc1 >> 32),
byte(crc1 >> 24), byte(crc1 >> 16), byte(crc1 >> 8), byte(crc1),
}
crc2b := []byte{
byte(crc2 >> 56), byte(crc2 >> 48), byte(crc2 >> 40), byte(crc2 >> 32),
byte(crc2 >> 24), byte(crc2 >> 16), byte(crc2 >> 8), byte(crc2),
}
crc1b64 := base64.StdEncoding.EncodeToString(crc1b)
crc2b64 := base64.StdEncoding.EncodeToString(crc2b)
combined, err := AddCRCChecksum(types.ChecksumAlgorithmCrc64nvme, crc1b64, crc2b64, int64(len(part2)))
if err != nil {
t.Fatalf("AddCRCChecksum failed: %v", err)
}
combinedBytes, err := base64.StdEncoding.DecodeString(combined)
if err != nil {
t.Fatalf("base64 decode failed: %v", err)
}
combinedVal := uint64(combinedBytes[0])<<56 | uint64(combinedBytes[1])<<48 | uint64(combinedBytes[2])<<40 | uint64(combinedBytes[3])<<32 |
uint64(combinedBytes[4])<<24 | uint64(combinedBytes[5])<<16 | uint64(combinedBytes[6])<<8 | uint64(combinedBytes[7])
if combinedVal != crcFull {
t.Errorf("CRC64NVME combine mismatch: got %x, want %x", combinedVal, crcFull)
}
}

55
s3api/utils/logger.go Normal file
View File

@@ -0,0 +1,55 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"fmt"
"log"
"github.com/gofiber/fiber/v2"
)
func LogCtxDetails(ctx *fiber.Ctx, respBody []byte) {
isDebug, ok := ctx.Locals("isDebug").(bool)
_, notLogReqBody := ctx.Locals("logReqBody").(bool)
_, notLogResBody := ctx.Locals("logResBody").(bool)
if isDebug && ok {
// Log request body
if !notLogReqBody {
fmt.Println()
log.Printf("Request Body: %s", ctx.Request().Body())
}
// Log path parameters
fmt.Println()
log.Println("Path parameters: ")
for key, val := range ctx.AllParams() {
log.Printf("%s: %s", key, val)
}
// Log response headers
fmt.Println()
log.Println("Response Headers: ")
ctx.Response().Header.VisitAll(func(key, val []byte) {
log.Printf("%s: %s", key, val)
})
// Log response body
if !notLogResBody && len(respBody) > 0 {
fmt.Println()
log.Printf("Response body %s", ctx.Response().Body())
}
}
}

View File

@@ -1,24 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
func IsObjectNameValid(name string) bool {
switch clean(name) {
case "", ".", "..", "/":
return false
}
return isObjectLocal(name)
}

View File

@@ -1,171 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// code modified from golang std library src/internal/filepathlite/path.go
// to support path separator '/' for all platforms.
package utils
import (
"strings"
)
const separator = '/'
// isObjectLocal checks if the given path would result in an object
// that is local to the bucket.
func isObjectLocal(path string) bool {
if path == "" || path == "." {
return true
}
path = strings.Join([]string{".", path}, string(separator))
hasDots := false
for p := path; p != ""; {
var part string
part, p, _ = strings.Cut(p, "/")
if part == "." || part == ".." {
hasDots = true
break
}
}
if hasDots {
path = clean(path)
}
if path == ".." || strings.HasPrefix(path, "../") {
return false
}
return true
}
func clean(path string) string {
originalPath := path
if path == "" {
return originalPath + "."
}
rooted := isPathSeparator(path[0])
// Invariants:
// reading from path; r is index of next byte to process.
// writing to buf; w is index of next byte to write.
// dotdot is index in buf where .. must stop, either because
// it is the leading slash or it is a leading ../../.. prefix.
n := len(path)
out := lazybuf{path: path, volAndPath: originalPath, volLen: 0}
r, dotdot := 0, 0
if rooted {
out.append(separator)
r, dotdot = 1, 1
}
for r < n {
switch {
case isPathSeparator(path[r]):
// empty path element
r++
case path[r] == '.' && (r+1 == n || isPathSeparator(path[r+1])):
// . element
r++
case path[r] == '.' && path[r+1] == '.' && (r+2 == n || isPathSeparator(path[r+2])):
// .. element: remove to last separator
r += 2
switch {
case out.w > dotdot:
// can backtrack
out.w--
for out.w > dotdot && !isPathSeparator(out.index(out.w)) {
out.w--
}
case !rooted:
// cannot backtrack, but not rooted, so append .. element.
if out.w > 0 {
out.append(separator)
}
out.append('.')
out.append('.')
dotdot = out.w
}
default:
// real path element.
// add slash if needed
if rooted && out.w != 1 || !rooted && out.w != 0 {
out.append(separator)
}
// copy element
for ; r < n && !isPathSeparator(path[r]); r++ {
out.append(path[r])
}
}
}
// Turn empty string into "."
if out.w == 0 {
out.append('.')
}
return FromSlash(out.string())
}
func isPathSeparator(c uint8) bool {
return c == '/'
}
func FromSlash(path string) string {
if separator == '/' {
return path
}
return replaceStringByte(path, '/', separator)
}
func replaceStringByte(s string, old, new byte) string {
if strings.IndexByte(s, old) == -1 {
return s
}
n := []byte(s)
for i := range n {
if n[i] == old {
n[i] = new
}
}
return string(n)
}
// A lazybuf is a lazily constructed path buffer.
// It supports append, reading previously appended bytes,
// and retrieving the final string. It does not allocate a buffer
// to hold the output until that output diverges from s.
type lazybuf struct {
path string
buf []byte
w int
volAndPath string
volLen int
}
func (b *lazybuf) index(i int) byte {
if b.buf != nil {
return b.buf[i]
}
return b.path[i]
}
func (b *lazybuf) append(c byte) {
if b.buf == nil {
if b.w < len(b.path) && b.path[b.w] == c {
b.w++
return
}
b.buf = make([]byte, len(b.path))
copy(b.buf, b.path[:b.w])
}
b.buf[b.w] = c
b.w++
}
func (b *lazybuf) string() string {
if b.buf == nil {
return b.volAndPath[:b.volLen+b.w]
}
return b.volAndPath[:b.volLen] + string(b.buf[:b.w])
}

View File

@@ -1,64 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils_test
import (
"testing"
"github.com/versity/versitygw/s3api/utils"
)
func TestIsObjectNameValid(t *testing.T) {
tests := []struct {
name string
input string
want bool
}{
// valid names
{"simple file", "file.txt", true},
{"nested file", "dir/file.txt", true},
{"absolute nested file", "/dir/file.txt", true},
{"trailing slash", "dir/", true},
{"slash prefix", "/file.txt", true}, // treated as local after joined with bucket
{"dot slash prefix", "./file.txt", true},
// invalid names
{"dot dot only", "..", false},
{"dot only", ".", false},
{"dot slash", "./", false},
{"dot slash dot dot", "./..", false},
{"cleans to dot", "./../.", false},
{"empty", "", false},
{"file escapes 1", "../file.txt", false},
{"file escapes 2", "dir/../../file.txt", false},
{"file escapes 3", "../../../file.txt", false},
{"dir escapes 1", "../dir/", false},
{"dir escapes 2", "dir/../../dir/", false},
{"dir escapes 3", "../../../dir/", false},
{"dot escapes 1", "../.", false},
{"dot escapes 2", "dir/../../.", false},
{"dot escapes 3", "../../../.", false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := utils.IsObjectNameValid(tt.input)
if got != tt.want {
t.Errorf("%v: IsObjectNameValid(%q) = %v, want %v",
tt.name, tt.input, got, tt.want)
}
})
}
}

View File

@@ -180,7 +180,7 @@ func ParsePresignedURIParts(ctx *fiber.Ctx) (AuthData, error) {
return a, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
}
if ContextKeyRegion.Get(ctx) != creds[2] {
if ctx.Locals("region") != creds[2] {
return a, s3err.APIError{
Code: "SignatureDoesNotMatch",
Description: fmt.Sprintf("Credential should be scoped to a valid Region, not %v", creds[2]),

View File

@@ -1,533 +0,0 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"bufio"
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"math"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3err"
)
// chunked uploads described in:
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
const (
chunkHdrDelim = "\r\n"
zeroLenSig = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
awsV4 = "AWS4"
awsS3Service = "s3"
awsV4Request = "aws4_request"
trailerSignatureHeader = "x-amz-trailer-signature"
streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
streamPayloadTrailerAlgo = "AWS4-HMAC-SHA256-TRAILER"
)
// ChunkReader reads from chunked upload request body, and returns
// object data stream
type ChunkReader struct {
r io.Reader
signingKey []byte
prevSig string
parsedSig string
chunkDataLeft int64
trailer checksumType
trailerSig string
parsedChecksum string
stash []byte
chunkHash hash.Hash
checksumHash hash.Hash
isEOF bool
isFirstHeader bool
region string
date time.Time
}
// NewChunkReader reads from request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time, chType checksumType) (io.Reader, error) {
chRdr := &ChunkReader{
r: r,
signingKey: getSigningKey(secret, region, date),
// the authdata.Signature is validated in the auth-reader,
// so we can use that here without any other checks
prevSig: authdata.Signature,
chunkHash: sha256.New(),
isFirstHeader: true,
date: date,
region: region,
trailer: chType,
}
if chType != "" {
checksumHasher, err := getHasher(chType)
if err != nil {
debuglogger.Logf("failed to initialize hash calculator: %v", err)
return nil, err
}
chRdr.checksumHash = checksumHasher
}
if chType == "" {
debuglogger.Infof("initializing signed chunk reader")
} else {
debuglogger.Infof("initializing signed chunk reader with '%v' trailing checksum", chType)
}
return chRdr, nil
}
// Read satisfies the io.Reader for this type
func (cr *ChunkReader) Read(p []byte) (int, error) {
n, err := cr.r.Read(p)
if err != nil && err != io.EOF {
return 0, err
}
cr.isEOF = err == io.EOF
if cr.chunkDataLeft < int64(n) {
chunkSize := cr.chunkDataLeft
if chunkSize > 0 {
cr.chunkHash.Write(p[:chunkSize])
if cr.checksumHash != nil {
cr.checksumHash.Write(p[:chunkSize])
}
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
n += int(chunkSize)
return n, err
}
cr.chunkDataLeft -= int64(n)
cr.chunkHash.Write(p[:n])
if cr.checksumHash != nil {
cr.checksumHash.Write(p[:n])
}
return n, err
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// This part is the same for all chunks;
// only the previous signature and the hash of the current chunk change
func (cr *ChunkReader) getStringToSignPrefix(algo string) string {
credentialScope := fmt.Sprintf("%s/%s/%s/%s",
cr.date.Format("20060102"),
cr.region,
awsS3Service,
awsV4Request)
return fmt.Sprintf("%s\n%s\n%s",
algo,
cr.date.Format("20060102T150405Z"),
credentialScope)
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// signature For each chunk, you calculate the signature using the following
// string to sign. For the first chunk, you use the seed-signature as the
// previous signature.
func (cr *ChunkReader) getChunkStringToSign() string {
prefix := cr.getStringToSignPrefix(streamPayloadAlgo)
chunkHash := cr.chunkHash.Sum(nil)
strToSign := fmt.Sprintf("%s\n%s\n%s\n%s",
prefix,
cr.prevSig,
zeroLenSig,
hex.EncodeToString(chunkHash))
debuglogger.PrintInsideHorizontalBorders(debuglogger.Purple, "STRING TO SIGN", strToSign, 64)
return strToSign
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html#example-signature-calculations-trailing-header
// Builds the final chunk trailing signature string to sign
func (cr *ChunkReader) getTrailerChunkStringToSign() string {
trailer := fmt.Sprintf("%v:%v\n", cr.trailer, cr.parsedChecksum)
hsh := sha256.Sum256([]byte(trailer))
sig := hex.EncodeToString(hsh[:])
prefix := cr.getStringToSignPrefix(streamPayloadTrailerAlgo)
strToSign := fmt.Sprintf("%s\n%s\n%s",
prefix,
cr.prevSig,
sig,
)
debuglogger.PrintInsideHorizontalBorders(debuglogger.Purple, "TRAILER STRING TO SIGN", strToSign, 64)
return strToSign
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html#example-signature-calculations-trailing-header
// Calculates and validates the final chunk trailer signature
func (cr *ChunkReader) verifyTrailerSignature() error {
strToSign := cr.getTrailerChunkStringToSign()
sig := hex.EncodeToString(hmac256(cr.signingKey, []byte(strToSign)))
if sig != cr.trailerSig {
debuglogger.Logf("incorrect trailing signature: (calculated): %v, (got): %v", sig, cr.trailerSig)
return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
return nil
}
// Verifies the object checksum
func (cr *ChunkReader) verifyChecksum() error {
checksumHash := cr.checksumHash.Sum(nil)
checksum := base64.StdEncoding.EncodeToString(checksumHash)
if checksum != cr.parsedChecksum {
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(cr.trailer), "x-amz-checksum-")))
debuglogger.Logf("incorrect trailing checksum: (calculated): %v, (got): %v", checksum, cr.parsedChecksum)
return s3err.GetChecksumBadDigestErr(algo)
}
return nil
}
// Calculates and verifies the chunk signature
func (cr *ChunkReader) checkSignature() error {
sigstr := cr.getChunkStringToSign()
cr.chunkHash.Reset()
cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))
if cr.prevSig != cr.parsedSig {
debuglogger.Logf("incorrect signature: (calculated): %v, (got) %v", cr.prevSig, cr.parsedSig)
return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
cr.parsedSig = ""
return nil
}
// The provided p should have all of the previous chunk data and trailer
// consumed already. The expectation here is that p[0] starts the
// new chunk size with the ";chunk-signature=" following. The only exception
// is if we started consuming the trailer, but hit the end of the read buffer.
// In this case, parseAndRemoveChunkInfo is called with skipcheck=true to
// finish consuming the final trailer bytes.
// This parses the chunk metadata in situ without allocating an extra buffer.
// It will just read and validate the chunk metadata and then move the
// following chunk data to overwrite the metadata in the provided buffer.
func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
n := len(p)
if cr.parsedSig != "" {
err := cr.checkSignature()
if err != nil {
return 0, err
}
}
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
if err == errskipHeader {
cr.chunkDataLeft = 0
return 0, nil
}
if err != nil {
debuglogger.Logf("failed to parse chunk headers: %v", err)
return 0, err
}
cr.parsedSig = sig
// If we hit the final chunk, calculate and validate the final
// chunk signature and finish reading
if chunkSize == 0 {
debuglogger.Infof("final chunk parsed:\nchunk size: %v\nsignature: %v\nbuffer offset: %v", chunkSize, sig, bufOffset)
cr.chunkHash.Reset()
err := cr.checkSignature()
if err != nil {
return 0, err
}
if cr.trailer != "" {
debuglogger.Infof("final chunk trailers parsed:\nchecksum: %v\ntrailing signature: %v", cr.parsedChecksum, cr.trailerSig)
err := cr.verifyChecksum()
if err != nil {
return 0, err
}
err = cr.verifyTrailerSignature()
if err != nil {
return 0, err
}
}
return 0, io.EOF
}
debuglogger.Infof("chunk headers parsed:\nchunk size: %v\nsignature: %v\nbuffer offset: %v", chunkSize, sig, bufOffset)
// move data up to remove chunk header
copy(p, p[bufOffset:n])
n -= bufOffset
// if remaining buffer larger than chunk data,
// parse next header in buffer
if int64(n) > chunkSize {
cr.chunkDataLeft = 0
cr.chunkHash.Write(p[:chunkSize])
if cr.checksumHash != nil {
cr.checksumHash.Write(p[:chunkSize])
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
if (chunkSize + int64(n)) > math.MaxInt {
debuglogger.Logf("exceeding the limit of maximum integer allowed: (value): %v, (limit): %v", chunkSize+int64(n), math.MaxInt)
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
return n + int(chunkSize), err
}
cr.chunkDataLeft = chunkSize - int64(n)
cr.chunkHash.Write(p[:n])
if cr.checksumHash != nil {
cr.checksumHash.Write(p[:n])
}
return n, nil
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
// Task 3: Calculate Signature
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html#signing-request-intro
func getSigningKey(secret, region string, date time.Time) []byte {
dateKey := hmac256([]byte(awsV4+secret), []byte(date.Format(yyyymmdd)))
dateRegionKey := hmac256(dateKey, []byte(region))
dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
debuglogger.Infof("signing key: %s", hex.EncodeToString(signingKey))
return signingKey
}
func hmac256(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
var (
errInvalidChunkFormat = errors.New("invalid chunk header format")
errskipHeader = errors.New("skip to next header")
)
const (
maxHeaderSize = 1024
)
// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
stashLen := len(cr.stash)
if stashLen > maxHeaderSize {
debuglogger.Logf("the stash length exceeds the maximum allowed chunk header size: (stash len): %v, (header limit): %v", stashLen, maxHeaderSize)
return 0, "", 0, errInvalidChunkFormat
}
if cr.stash != nil {
debuglogger.Logf("recovering the stash: (stash len): %v", stashLen)
tmp := make([]byte, stashLen+len(header))
copy(tmp, cr.stash)
copy(tmp[len(cr.stash):], header)
header = tmp
cr.stash = nil
}
rdr := bufio.NewReader(bytes.NewReader(header))
// After the first chunk, each chunk header should start
// with "\r\n"
if !cr.isFirstHeader {
err := readAndSkip(rdr, '\r', '\n')
if err != nil {
debuglogger.Logf("failed to read chunk header first 2 bytes: (should be): \\r\\n, (got): %q", header[:2])
return cr.handleRdrErr(err, header)
}
}
// read and parse the chunk size
chunkSizeStr, err := readAndTrim(rdr, ';')
if err != nil {
debuglogger.Logf("failed to read chunk size: %v", err)
return cr.handleRdrErr(err, header)
}
chunkSize, err := strconv.ParseInt(chunkSizeStr, 16, 64)
if err != nil {
debuglogger.Logf("failed to parse chunk size: (size): %v, (err): %v", chunkSizeStr, err)
return 0, "", 0, errInvalidChunkFormat
}
// read the chunk signature
err = readAndSkip(rdr, 'c', 'h', 'u', 'n', 'k', '-', 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', '=')
if err != nil {
debuglogger.Logf("failed to read 'chunk-signature=': %v", err)
return cr.handleRdrErr(err, header)
}
sig, err := readAndTrim(rdr, '\r')
if err != nil {
debuglogger.Logf("failed to read '\\r', after chunk signature: %v", err)
return cr.handleRdrErr(err, header)
}
// read and parse the final chunk trailer and checksum
if chunkSize == 0 {
if cr.trailer != "" {
err = readAndSkip(rdr, '\n')
if err != nil {
debuglogger.Logf("failed to read \\n before the trailer: %v", err)
return cr.handleRdrErr(err, header)
}
// parse and validate the trailing header
trailer, err := readAndTrim(rdr, ':')
if err != nil {
debuglogger.Logf("failed to read trailer prefix: %v", err)
return cr.handleRdrErr(err, header)
}
if trailer != string(cr.trailer) {
debuglogger.Logf("incorrect trailer prefix: (expected): %v, (got): %v", cr.trailer, trailer)
return 0, "", 0, errInvalidChunkFormat
}
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(trailer, "x-amz-checksum-")))
// parse the checksum
checksum, err := readAndTrim(rdr, '\r')
if err != nil {
debuglogger.Logf("failed to read checksum value: %v", err)
return cr.handleRdrErr(err, header)
}
if !IsValidChecksum(checksum, algo) {
debuglogger.Logf("invalid checksum value: %v", checksum)
return 0, "", 0, s3err.GetInvalidTrailingChecksumHeaderErr(trailer)
}
err = readAndSkip(rdr, '\n')
if err != nil {
debuglogger.Logf("failed to read \\n after checksum: %v", err)
return cr.handleRdrErr(err, header)
}
// parse the trailing signature
trailerSigPrefix, err := readAndTrim(rdr, ':')
if err != nil {
debuglogger.Logf("failed to read trailing signature prefix: %v", err)
return cr.handleRdrErr(err, header)
}
if trailerSigPrefix != trailerSignatureHeader {
debuglogger.Logf("invalid trailing signature prefix: (expected): %v, (got): %v", trailerSignatureHeader, trailerSigPrefix)
return 0, "", 0, errInvalidChunkFormat
}
trailerSig, err := readAndTrim(rdr, '\r')
if err != nil {
debuglogger.Logf("failed to read trailing signature: %v", err)
return cr.handleRdrErr(err, header)
}
cr.trailerSig = trailerSig
cr.parsedChecksum = checksum
}
// "\r\n\r\n" is followed after the last chunk
err = readAndSkip(rdr, '\n', '\r', '\n')
if err != nil {
debuglogger.Logf("failed to read \\n\\r\\n at the end of chunk header: %v", err)
return cr.handleRdrErr(err, header)
}
return 0, sig, 0, nil
}
err = readAndSkip(rdr, '\n')
if err != nil {
debuglogger.Logf("failed to read \\n at the end of chunk header: %v", err)
return cr.handleRdrErr(err, header)
}
// find the index of chunk ending: '\r\n'
// skip the first 2 bytes as it is the starting '\r\n'
// the first chunk doesn't contain the starting '\r\n', but
// trimming the first 2 bytes is harmless there as well.
ind := bytes.Index(header[2:], []byte{'\r', '\n'})
cr.isFirstHeader = false
// the offset is the found index + 4 - the stash length
// where:
// ind is the index of '\r\n'
// 4 specifies the trimmed 2 bytes plus 2 to shift the index at the end of '\r\n'
offset := ind + 4 - stashLen
return chunkSize, sig, offset, nil
}
// Stashes the header in cr.stash and returns "errskipHeader"
func (cr *ChunkReader) stashAndSkipHeader(header []byte) (int64, string, int, error) {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
debuglogger.Logf("stashing the header: (header length): %v", len(header))
return 0, "", 0, errskipHeader
}
// Returns "errInvalidChunkFormat" if the passed err is "io.EOF" and cr.rdr EOF is reached
// calls "cr.stashAndSkipHeader" if the passed err is "io.EOF" and cr.isEOF is false
// Returns the error otherwise
func (cr *ChunkReader) handleRdrErr(err error, header []byte) (int64, string, int, error) {
if err == io.EOF {
if cr.isEOF {
debuglogger.Logf("incomplete chunk encoding, EOF reached")
return 0, "", 0, errInvalidChunkFormat
}
return cr.stashAndSkipHeader(header)
}
return 0, "", 0, err
}
// reads data from the "rdr" and validates the passed data bytes
func readAndSkip(rdr *bufio.Reader, data ...byte) error {
for _, d := range data {
b, err := rdr.ReadByte()
if err != nil {
return err
}
if b != d {
return errMalformedEncoding
}
}
return nil
}
// reads string by "delim" and trims the delimiter at the end
func readAndTrim(r *bufio.Reader, delim byte) (string, error) {
str, err := r.ReadString(delim)
if err != nil {
return "", err
}
return strings.TrimSuffix(str, string(delim)), nil
}

View File

@@ -1,252 +0,0 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"bufio"
"bytes"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"hash"
"hash/crc32"
"hash/crc64"
"io"
"math/bits"
"strconv"
"strings"
"github.com/versity/versitygw/s3api/debuglogger"
)
var (
trailerDelim = []byte{'\n', '\r', '\n'}
errMalformedEncoding = errors.New("malformed chunk encoding")
)
type UnsignedChunkReader struct {
reader *bufio.Reader
checksumType checksumType
expectedChecksum string
hasher hash.Hash
stash []byte
offset int
}
func NewUnsignedChunkReader(r io.Reader, ct checksumType) (*UnsignedChunkReader, error) {
hasher, err := getHasher(ct)
if err != nil {
debuglogger.Logf("failed to initialize hash calculator: %v", err)
return nil, err
}
debuglogger.Infof("initializing unsigned chunk reader")
return &UnsignedChunkReader{
reader: bufio.NewReader(r),
checksumType: ct,
stash: make([]byte, 0),
hasher: hasher,
}, nil
}
func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
// First read any stashed data
if len(ucr.stash) != 0 {
debuglogger.Infof("recovering the stash: (stash length): %v", len(ucr.stash))
n := copy(p, ucr.stash)
ucr.offset += n
if n < len(ucr.stash) {
ucr.stash = ucr.stash[n:]
ucr.offset = 0
return n, nil
}
}
for {
// Read the chunk size
chunkSize, err := ucr.extractChunkSize()
if err != nil {
return 0, err
}
if chunkSize == 0 {
// Stop parsing payloads once the 0-sized final chunk is reached
break
}
rdr := io.TeeReader(ucr.reader, ucr.hasher)
payload := make([]byte, chunkSize)
// Read and cache the payload
_, err = io.ReadFull(rdr, payload)
if err != nil {
debuglogger.Logf("failed to read chunk data: %v", err)
return 0, err
}
// Skip the trailing "\r\n"
if err := ucr.readAndSkip('\r', '\n'); err != nil {
debuglogger.Logf("failed to read trailing \\r\\n after chunk data: %v", err)
return 0, err
}
// Copy the payload into the io.Reader buffer
n := copy(p[ucr.offset:], payload)
ucr.offset += n
if int64(n) < chunkSize {
// stash the remaining data
ucr.stash = payload[n:]
debuglogger.Infof("stashing the remaining data: (stash length): %v", len(ucr.stash))
dataRead := ucr.offset
ucr.offset = 0
return dataRead, nil
}
}
// Read and validate trailers
if err := ucr.readTrailer(); err != nil {
debuglogger.Logf("failed to read trailer: %v", err)
return 0, err
}
return ucr.offset, io.EOF
}
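The unsigned framing this reader consumes is the same chunked layout minus the chunk-signature parameter, with the checksum delivered as a trailing header after the zero-size chunk:

	<hex-size>\r\n
	<chunk data>\r\n
	...
	0\r\n
	x-amz-checksum-crc32:<base64 checksum>\r\n
	\r\n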
// Reads and validates the bytes provided from the underlying io.Reader
func (ucr *UnsignedChunkReader) readAndSkip(data ...byte) error {
for _, d := range data {
b, err := ucr.reader.ReadByte()
if err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if b != d {
return errMalformedEncoding
}
}
return nil
}
// Extracts the chunk size from the payload
func (ucr *UnsignedChunkReader) extractChunkSize() (int64, error) {
line, err := ucr.reader.ReadString('\n')
if err != nil {
debuglogger.Logf("failed to parse chunk size: %v", err)
return 0, errMalformedEncoding
}
line = strings.TrimSpace(line)
chunkSize, err := strconv.ParseInt(line, 16, 64)
if err != nil {
debuglogger.Logf("failed to convert chunk size: %v", err)
return 0, errMalformedEncoding
}
debuglogger.Infof("chunk size extracted: %v", chunkSize)
return chunkSize, nil
}
// Reads and validates the trailer at the end
func (ucr *UnsignedChunkReader) readTrailer() error {
var trailerBuffer bytes.Buffer
for {
v, err := ucr.reader.ReadByte()
if err != nil {
debuglogger.Logf("failed to read byte: %v", err)
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if v != '\r' {
trailerBuffer.WriteByte(v)
continue
}
var tmp [3]byte
_, err = io.ReadFull(ucr.reader, tmp[:])
if err != nil {
debuglogger.Logf("failed to read chunk ending: \\n\\r\\n: %v", err)
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if !bytes.Equal(tmp[:], trailerDelim) {
debuglogger.Logf("incorrect trailer delimiter: (expected): \\n\\r\\n, (got): %q", tmp[:])
return errMalformedEncoding
}
break
}
// Parse the trailer
trailerHeader := trailerBuffer.String()
trailerHeader = strings.TrimSpace(trailerHeader)
trailerHeaderParts := strings.Split(trailerHeader, ":")
if len(trailerHeaderParts) != 2 {
debuglogger.Logf("invalid trailer header parts: %v", trailerHeaderParts)
return errMalformedEncoding
}
if trailerHeaderParts[0] != string(ucr.checksumType) {
debuglogger.Logf("invalid checksum type: %v", trailerHeaderParts[0])
//TODO: handle the error
return errMalformedEncoding
}
ucr.expectedChecksum = trailerHeaderParts[1]
debuglogger.Infof("parsed the trailing header:\n%v:%v", trailerHeaderParts[0], trailerHeaderParts[1])
// Validate checksum
return ucr.validateChecksum()
}
// Validates the computed payload checksum against the trailing checksum
func (ucr *UnsignedChunkReader) validateChecksum() error {
csum := ucr.hasher.Sum(nil)
checksum := base64.StdEncoding.EncodeToString(csum)
if checksum != ucr.expectedChecksum {
debuglogger.Logf("incorrect checksum: (expected): %v, (got): %v", ucr.expectedChecksum, checksum)
return fmt.Errorf("actual checksum: %v, expected checksum: %v", checksum, ucr.expectedChecksum)
}
return nil
}
// Returns the hash calculator based on the checksum type provided
func getHasher(ct checksumType) (hash.Hash, error) {
switch ct {
case checksumTypeCrc32:
return crc32.NewIEEE(), nil
case checksumTypeCrc32c:
return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil
case checksumTypeCrc64nvme:
table := crc64.MakeTable(bits.Reverse64(0xad93d23594c93659))
return crc64.New(table), nil
case checksumTypeSha1:
return sha1.New(), nil
case checksumTypeSha256:
return sha256.New(), nil
default:
return nil, errors.New("unsupported checksum type")
}
}
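// Example (not part of the original file): a minimal sketch of wiring the
// reader up to decode an unsigned-chunked request body. The function name,
// body source, and destination writer are assumptions for illustration.
func exampleDecodeChunkedBody(body io.Reader, dst io.Writer) error {
rdr, err := NewUnsignedChunkReader(body, checksumTypeCrc32)
if err != nil {
return err
}
// io.Copy drives Read until the zero-sized chunk and trailer are
// consumed; the trailing checksum is validated before io.EOF is
// surfaced as a normal end of stream.
_, err = io.Copy(dst, rdr)
return err
}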

View File

@@ -16,8 +16,6 @@ package utils
import (
"bytes"
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
@@ -29,9 +27,9 @@ import (
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/encoding/httpbinding"
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
@@ -44,14 +42,14 @@ var (
func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
metadata = make(map[string]string)
headers.DisableNormalizing()
for key, value := range headers.AllInOrder() {
headers.VisitAllInOrder(func(key, value []byte) {
hKey := string(key)
if strings.HasPrefix(strings.ToLower(hKey), "x-amz-meta-") {
trimmedKey := hKey[11:]
headerValue := string(value)
metadata[trimmedKey] = headerValue
}
}
})
headers.EnableNormalizing()
return
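// For example (illustrative values): an incoming header
// "X-Amz-Meta-Color: blue" is matched case-insensitively, the 11-byte
// "x-amz-meta-" prefix is trimmed, and metadata["Color"] = "blue" is
// returned with the key's original casing preserved.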
@@ -66,20 +64,18 @@ func createHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength
body = bytes.NewReader(req.Body())
}
uri := ctx.OriginalURL()
httpReq, err := http.NewRequest(string(req.Header.Method()), uri, body)
httpReq, err := http.NewRequest(string(req.Header.Method()), string(ctx.Context().RequestURI()), body)
if err != nil {
return nil, errors.New("error in creating an http request")
}
// Set the request headers
for key, value := range req.Header.All() {
req.Header.VisitAll(func(key, value []byte) {
keyStr := string(key)
if includeHeader(keyStr, signedHdrs) {
httpReq.Header.Add(keyStr, string(value))
}
}
})
// make sure all headers in the signed headers are present
for _, header := range signedHdrs {
@@ -121,10 +117,11 @@ func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, cont
body = bytes.NewReader(req.Body())
}
uri, _, _ := strings.Cut(ctx.OriginalURL(), "?")
uri := string(ctx.Request().URI().Path())
uri = httpbinding.EscapePath(uri, false)
isFirst := true
for key, value := range ctx.Request().URI().QueryArgs().All() {
ctx.Request().URI().QueryArgs().VisitAll(func(key, value []byte) {
_, ok := signedQueryArgs[string(key)]
if !ok {
escapeValue := url.QueryEscape(string(value))
@@ -135,19 +132,19 @@ func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, cont
uri += fmt.Sprintf("&%s=%s", key, escapeValue)
}
}
}
})
httpReq, err := http.NewRequest(string(req.Header.Method()), uri, body)
if err != nil {
return nil, errors.New("error in creating an http request")
}
// Set the request headers
for key, value := range req.Header.All() {
req.Header.VisitAll(func(key, value []byte) {
keyStr := string(key)
if includeHeader(keyStr, signedHdrs) {
httpReq.Header.Add(keyStr, string(value))
}
}
})
// Check if Content-Length in signed headers
// If content length is non 0, then the header will be included
@@ -175,17 +172,9 @@ func ParseUint(str string) (int32, error) {
if str == "" {
return 1000, nil
}
num, err := strconv.ParseInt(str, 10, 32)
num, err := strconv.ParseUint(str, 10, 16)
if err != nil {
debuglogger.Logf("invalid intager provided: %v\n", err)
return 1000, fmt.Errorf("invalid int: %w", err)
}
if num < 0 {
debuglogger.Logf("negative intager provided: %v\n", num)
return 1000, fmt.Errorf("negative uint: %v", num)
}
if num > 1000 {
num = 1000
return 1000, s3err.GetAPIError(s3err.ErrInvalidMaxKeys)
}
return int32(num), nil
}
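// Illustrative behavior of the ParseUint variant built on strconv.ParseUint
// (values are examples):
//
//	ParseUint("")     -> 1000, nil (default when the param is absent)
//	ParseUint("200")  -> 200, nil
//	ParseUint("1500") -> 1000, ErrInvalidMaxKeys
//	ParseUint("-5")   -> 1000, "invalid int" error (negatives are rejected)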
@@ -201,27 +190,17 @@ func SetResponseHeaders(ctx *fiber.Ctx, headers []CustomHeader) {
}
}
// Streams the response body in chunks
func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser, bodysize int) {
// SetBodyStream will call Close() on the reader when the stream is done
// since rdr is a ReadCloser
ctx.Context().SetBodyStream(rdr, bodysize)
}
func IsValidBucketName(bucket string) bool {
if len(bucket) < 3 || len(bucket) > 63 {
debuglogger.Logf("bucket name length should be in 3-63 range, got: %v\n", len(bucket))
return false
}
// Must contain only digits, lowercase letters, dots, and hyphens.
// Must start and end with a digit or lowercase letter.
if !bucketNameRegexp.MatchString(bucket) {
debuglogger.Logf("invalid bucket name: %v\n", bucket)
return false
}
// Must not be formatted as an IP address
if bucketNameIpRegexp.MatchString(bucket) {
debuglogger.Logf("bucket name is an ip address: %v\n", bucket)
return false
}
return true
@@ -269,62 +248,35 @@ func ParseDeleteObjects(objs []types.ObjectIdentifier) (result []string) {
return
}
func FilterObjectAttributes(attrs map[s3response.ObjectAttributes]struct{}, output s3response.GetObjectAttributesResponse) s3response.GetObjectAttributesResponse {
// These properties shouldn't appear in the final response body
output.LastModified = nil
output.VersionId = nil
output.DeleteMarker = nil
if _, ok := attrs[s3response.ObjectAttributesEtag]; !ok {
func FilterObjectAttributes(attrs map[types.ObjectAttributes]struct{}, output s3response.GetObjectAttributesResult) s3response.GetObjectAttributesResult {
if _, ok := attrs[types.ObjectAttributesEtag]; !ok {
output.ETag = nil
}
if _, ok := attrs[s3response.ObjectAttributesObjectParts]; !ok {
if _, ok := attrs[types.ObjectAttributesObjectParts]; !ok {
output.ObjectParts = nil
}
if _, ok := attrs[s3response.ObjectAttributesObjectSize]; !ok {
if _, ok := attrs[types.ObjectAttributesObjectSize]; !ok {
output.ObjectSize = nil
}
if _, ok := attrs[s3response.ObjectAttributesStorageClass]; !ok {
if _, ok := attrs[types.ObjectAttributesStorageClass]; !ok {
output.StorageClass = ""
}
if _, ok := attrs[s3response.ObjectAttributesChecksum]; !ok {
output.Checksum = nil
}
return output
}
func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]struct{}, error) {
attrs := map[s3response.ObjectAttributes]struct{}{}
var err error
for key, value := range ctx.Request().Header.All() {
func ParseObjectAttributes(ctx *fiber.Ctx) map[types.ObjectAttributes]struct{} {
attrs := map[types.ObjectAttributes]struct{}{}
ctx.Request().Header.VisitAll(func(key, value []byte) {
if string(key) == "X-Amz-Object-Attributes" {
if len(value) == 0 {
break
}
oattrs := strings.Split(string(value), ",")
for _, a := range oattrs {
attr := s3response.ObjectAttributes(a)
if !attr.IsValid() {
debuglogger.Logf("invalid object attribute: %v\n", attr)
err = s3err.GetAPIError(s3err.ErrInvalidObjectAttributes)
break
}
attrs[attr] = struct{}{}
attrs[types.ObjectAttributes(a)] = struct{}{}
}
}
}
})
if err != nil {
return nil, err
}
if len(attrs) == 0 {
debuglogger.Logf("empty get object attributes")
return nil, s3err.GetAPIError(s3err.ErrObjectAttributesInvalidHeader)
}
return attrs, nil
return attrs
}
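// For example (illustrative): a request carrying the header
// "X-Amz-Object-Attributes: ETag,ObjectSize" yields a set containing
// those two attributes; the variant returning an error additionally
// rejects unknown attribute names and an empty attribute list.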
type objLockCfg struct {
@@ -339,7 +291,6 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
objLockDate := ctx.Get("X-Amz-Object-Lock-Retain-Until-Date")
if (objLockDate != "" && objLockModeHdr == "") || (objLockDate == "" && objLockModeHdr != "") {
debuglogger.Logf("one of 2 required params is missing: (lock date): %v, (lock mode): %v\n", objLockDate, objLockModeHdr)
return nil, s3err.GetAPIError(s3err.ErrObjectLockInvalidHeaders)
}
@@ -347,11 +298,9 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
if objLockDate != "" {
rDate, err := time.Parse(time.RFC3339, objLockDate)
if err != nil {
debuglogger.Logf("failed to parse retain until date: %v\n", err)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if rDate.Before(time.Now()) {
debuglogger.Logf("expired retain until date: %v\n", rDate.Format(time.RFC3339))
return nil, s3err.GetAPIError(s3err.ErrPastObjectLockRetainDate)
}
retainUntilDate = rDate
@@ -362,15 +311,13 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
if objLockMode != "" &&
objLockMode != types.ObjectLockModeCompliance &&
objLockMode != types.ObjectLockModeGovernance {
debuglogger.Logf("invalid object lock mode: %v\n", objLockMode)
return nil, s3err.GetAPIError(s3err.ErrInvalidObjectLockMode)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
legalHold := types.ObjectLockLegalHoldStatus(legalHoldHdr)
if legalHold != "" && legalHold != types.ObjectLockLegalHoldStatusOff && legalHold != types.ObjectLockLegalHoldStatusOn {
debuglogger.Logf("invalid object lock legal hold status: %v\n", legalHold)
return nil, s3err.GetAPIError(s3err.ErrInvalidLegalHoldStatus)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
return &objLockCfg{
@@ -389,278 +336,6 @@ func IsValidOwnership(val types.ObjectOwnership) bool {
case types.ObjectOwnershipObjectWriter:
return true
default:
debuglogger.Logf("invalid object ownership: %v\n", val)
return false
}
}
type ChecksumValues map[types.ChecksumAlgorithm]string
// Headers concatenates the checksum algorithms, prefixing each
// with 'x-amz-checksum-'
// e.g.
// "x-amz-checksum-crc64nvme, x-amz-checksum-sha1"
func (cv ChecksumValues) Headers() string {
result := ""
isFirst := true
for key := range cv {
if !isFirst {
result += ", "
}
isFirst = false
result += fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(key)))
}
return result
}
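// For example (illustrative): for a ChecksumValues holding CRC32 and SHA1
// entries, Headers returns "x-amz-checksum-crc32, x-amz-checksum-sha1";
// map iteration order is unspecified, so the ordering may vary.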
func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, ChecksumValues, error) {
sdkAlgorithm := types.ChecksumAlgorithm(strings.ToUpper(ctx.Get("X-Amz-Sdk-Checksum-Algorithm")))
err := IsChecksumAlgorithmValid(sdkAlgorithm)
if err != nil {
debuglogger.Logf("invalid checksum algorithm: %v\n", sdkAlgorithm)
return "", nil, err
}
checksums := ChecksumValues{}
var hdrErr error
// Parse and validate checksum headers
for key, value := range ctx.Request().Header.All() {
// Skip `X-Amz-Checksum-Type` as it's a special header
if !strings.HasPrefix(string(key), "X-Amz-Checksum-") || string(key) == "X-Amz-Checksum-Type" {
continue
}
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(key), "X-Amz-Checksum-")))
err := IsChecksumAlgorithmValid(algo)
if err != nil {
debuglogger.Logf("invalid checksum header: %s\n", key)
hdrErr = s3err.GetAPIError(s3err.ErrInvalidChecksumHeader)
break
}
checksums[algo] = string(value)
}
if hdrErr != nil {
return sdkAlgorithm, nil, hdrErr
}
if len(checksums) > 1 {
debuglogger.Logf("multiple checksum headers provided: %v\n", checksums.Headers())
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
}
for al, val := range checksums {
if !IsValidChecksum(val, al) {
return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(al))))
}
// Error if the checksum header's algorithm differs
// from x-amz-sdk-checksum-algorithm
if sdkAlgorithm != "" && sdkAlgorithm != al {
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
}
sdkAlgorithm = al
}
return sdkAlgorithm, checksums, nil
}
var checksumLengths = map[types.ChecksumAlgorithm]int{
types.ChecksumAlgorithmCrc32: 4,
types.ChecksumAlgorithmCrc32c: 4,
types.ChecksumAlgorithmCrc64nvme: 8,
types.ChecksumAlgorithmSha1: 20,
types.ChecksumAlgorithmSha256: 32,
}
func IsValidChecksum(checksum string, algorithm types.ChecksumAlgorithm) bool {
decoded, err := base64.StdEncoding.DecodeString(checksum)
if err != nil {
debuglogger.Logf("failed to parse checksum base64: %v\n", err)
return false
}
expectedLength, exists := checksumLengths[algorithm]
if !exists {
debuglogger.Logf("unknown checksum algorithm: %v\n", algorithm)
return false
}
isValid := len(decoded) == expectedLength
if !isValid {
debuglogger.Logf("decoded checksum length: (expected): %v, (got): %v\n", expectedLength, len(decoded))
}
return isValid
}
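// For example (illustrative values): a CRC32 digest is 4 bytes, so its
// base64 form is 8 characters and IsValidChecksum("sOO8/Q==",
// types.ChecksumAlgorithmCrc32) passes, while a 20-byte SHA1 digest
// offered as a CRC32 checksum fails the length check.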
func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
alg = types.ChecksumAlgorithm(strings.ToUpper(string(alg)))
if alg != "" &&
alg != types.ChecksumAlgorithmCrc32 &&
alg != types.ChecksumAlgorithmCrc32c &&
alg != types.ChecksumAlgorithmSha1 &&
alg != types.ChecksumAlgorithmSha256 &&
alg != types.ChecksumAlgorithmCrc64nvme {
debuglogger.Logf("invalid checksum algorithm: %v\n", alg)
return s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm)
}
return nil
}
// Validates the provided checksum type
func IsChecksumTypeValid(t types.ChecksumType) error {
if t != "" &&
t != types.ChecksumTypeComposite &&
t != types.ChecksumTypeFullObject {
debuglogger.Logf("invalid checksum type: %v\n", t)
return s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type")
}
return nil
}
type checksumTypeSchema map[types.ChecksumType]struct{}
type checksumSchema map[types.ChecksumAlgorithm]checksumTypeSchema
// A table defining the checksum algorithm/type support
var checksumMap checksumSchema = checksumSchema{
types.ChecksumAlgorithmCrc32: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
types.ChecksumTypeFullObject: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmCrc32c: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
types.ChecksumTypeFullObject: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmSha1: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmSha256: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmCrc64nvme: checksumTypeSchema{
types.ChecksumTypeFullObject: struct{}{},
"": struct{}{},
},
// Both could be empty
"": checksumTypeSchema{
"": struct{}{},
},
}
// Checks if checksum type and algorithm are supported together
func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType) error {
typeSchema := checksumMap[algo]
_, ok := typeSchema[t]
if !ok {
debuglogger.Logf("checksum type and algorithm mismatch: (type): %v, (algorithm): %v\n", t, algo)
return s3err.GetChecksumSchemaMismatchErr(algo, t)
}
return nil
}
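// Illustrative outcomes of the schema above:
//
//	checkChecksumTypeAndAlgo(Crc32, Composite)     -> nil
//	checkChecksumTypeAndAlgo(Sha256, FullObject)   -> schema mismatch error
//	checkChecksumTypeAndAlgo(Crc64nvme, Composite) -> schema mismatch error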
// Parses and validates the x-amz-checksum-algorithm and x-amz-checksum-type headers
func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, types.ChecksumType, error) {
algo := types.ChecksumAlgorithm(ctx.Get("x-amz-checksum-algorithm"))
if err := IsChecksumAlgorithmValid(algo); err != nil {
return "", "", err
}
chType := types.ChecksumType(ctx.Get("x-amz-checksum-type"))
if err := IsChecksumTypeValid(chType); err != nil {
return "", "", err
}
// Require a checksum algorithm whenever a
// checksum type is specified
if chType != "" && algo == "" {
debuglogger.Logf("checksum type can only be used with checksum algorithm: (type): %v\n", chType)
return algo, chType, s3err.GetAPIError(s3err.ErrChecksumTypeWithAlgo)
}
// Verify if the checksum type is supported for
// the provided checksum algorithm
if err := checkChecksumTypeAndAlgo(algo, chType); err != nil {
return algo, chType, err
}
// x-amz-checksum-type defaults to COMPOSITE
// if x-amz-checksum-algorithm is set except
// for the CRC64NVME algorithm: it defaults to FULL_OBJECT
if algo != "" && chType == "" {
if algo == types.ChecksumAlgorithmCrc64nvme {
chType = types.ChecksumTypeFullObject
} else {
chType = types.ChecksumTypeComposite
}
}
return algo, chType, nil
}
// TagLimit specifies the allowed tag count in a tag set
type TagLimit int
const (
// Tag limit for bucket tagging
TagLimitBucket TagLimit = 50
// Tag limit for object tagging
TagLimitObject TagLimit = 10
)
// Parses and validates tagging
func ParseTagging(data []byte, limit TagLimit) (map[string]string, error) {
var tagging s3response.TaggingInput
err := xml.Unmarshal(data, &tagging)
if err != nil {
debuglogger.Logf("invalid taggging: %s", data)
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
}
tLen := len(tagging.TagSet.Tags)
if tLen > int(limit) {
switch limit {
case TagLimitObject:
debuglogger.Logf("bucket tagging length exceeds %v: %v", limit, tLen)
return nil, s3err.GetAPIError(s3err.ErrObjectTaggingLimited)
case TagLimitBucket:
debuglogger.Logf("object tagging length exceeds %v: %v", limit, tLen)
return nil, s3err.GetAPIError(s3err.ErrBucketTaggingLimited)
}
}
tagSet := make(map[string]string, tLen)
for _, tag := range tagging.TagSet.Tags {
// validate tag key
if len(tag.Key) == 0 || len(tag.Key) > 128 {
debuglogger.Logf("tag key should 0 < tag.Key <= 128, key: %v", tag.Key)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagKey)
}
// validate tag value
if len(tag.Value) > 256 {
debuglogger.Logf("invalid long tag value: (length): %v, (value): %v", len(tag.Value), tag.Value)
return nil, s3err.GetAPIError(s3err.ErrInvalidTagValue)
}
// make sure there are no duplicate keys
_, ok := tagSet[tag.Key]
if ok {
debuglogger.Logf("duplicate tag key: %v", tag.Key)
return nil, s3err.GetAPIError(s3err.ErrDuplicateTagKey)
}
tagSet[tag.Key] = tag.Value
}
return tagSet, nil
}
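// For illustration, ParseTagging consumes an XML document shaped roughly
// like the following (element names are inferred from the S3 Tagging
// format; s3response.TaggingInput itself is not shown in this diff):
//
//	<Tagging>
//	  <TagSet>
//	    <Tag><Key>env</Key><Value>prod</Value></Tag>
//	  </TagSet>
//	</Tagging>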

Some files were not shown because too many files have changed in this diff.