Mirror of https://github.com/versity/versitygw.git (synced 2026-01-24 20:12:01 +00:00)

Compare commits: 1 commit, `v0.14...proxy-test`, commit `27a8aa66d9`
Deleted ignore file (46 lines)

@@ -1,46 +0,0 @@

```gitignore
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
cmd/versitygw/versitygw
/versitygw

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Go workspace file
go.work

# ignore IntelliJ directories
.idea

# auto generated VERSION file
VERSION

# build output
/versitygw.spec
/versitygw.spec.in
*.tar
*.tar.gz
**/rand.data
/profile.txt

dist/

# Release config files
/.github

# Docker configuration files
*Dockerfile
/docker-compose.yml

# read files
/LICENSE
/NOTICE
/CODE_OF_CONDUCT.md
/README.md
```
**.env.dev** (8 lines deleted)

@@ -1,8 +0,0 @@

```env
POSIX_PORT=7071
PROXY_PORT=7070
ACCESS_KEY_ID=user
SECRET_ACCESS_KEY=pass
IAM_DIR=.
SETUP_DIR=.
AZ_ACCOUNT_NAME=devstoreaccount1
AZ_ACCOUNT_KEY=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
```
**.github/workflows/docker.yaml** (vendored, 45 lines deleted)

@@ -1,45 +0,0 @@

```yaml
name: Publish Docker image

on:
  release:
    types: [published]

jobs:
  push_to_registries:
    name: Push Docker image to multiple registries
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - name: Check out the repo
        uses: actions/checkout@v4

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            versity/versitygw
            ghcr.io/${{ github.repository }}

      - name: Build and push Docker images
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
```
**.github/workflows/system.yml** (vendored, 42 lines deleted)

@@ -1,42 +0,0 @@

```yaml
name: system tests
on: pull_request
jobs:
  build:
    name: RunTests
    runs-on: ubuntu-latest
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v3

      - name: Install ShellCheck
        run: sudo apt-get install shellcheck

      - name: Run ShellCheck
        run: shellcheck -S warning ./tests/*.sh

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: 'stable'
        id: go

      - name: Get Dependencies
        run: |
          go get -v -t -d ./...

      - name: Install BATS
        run: |
          git clone https://github.com/bats-core/bats-core.git
          cd bats-core && ./install.sh $HOME

      - name: Build and Run
        run: |
          make testbin
          export AWS_ACCESS_KEY_ID=user
          export AWS_SECRET_ACCESS_KEY=pass
          aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile versity
          aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile versity
          export VERSITY_EXE=./versitygw
          mkdir /tmp/gw
          VERSITYGW_TEST_ENV=$GITHUB_WORKSPACE/tests/.env.default $HOME/bin/bats ./tests/s3_bucket_tests.sh
          VERSITYGW_TEST_ENV=$GITHUB_WORKSPACE/tests/.env.default $HOME/bin/bats ./tests/posix_tests.sh
```
**.gitignore** (vendored, 10 lines changed)

@@ -25,9 +25,6 @@ go.work

```gitignore
# ignore IntelliJ directories
.idea

# ignore VS code directories
.vscode

# auto generated VERSION file
VERSION
```

@@ -39,10 +36,3 @@ VERSION

```gitignore
/profile.txt

dist/

# secrets file for local github-actions testing
.secrets

# env files for testing
.env*
!.env.default
```
GoReleaser configuration

@@ -9,10 +9,6 @@ builds:

```yaml
    # windows is untested, we can start doing windows releases
    # if someone is interested in taking on testing
    # - windows
    env:
      # disable cgo to fix glibc issues: https://github.com/golang/go/issues/58550
      # once we need to enable this, we will need to do per distro releases
      - CGO_ENABLED=0
    main: ./cmd/versitygw
    binary: ./cmd/versitygw
    id: versitygw
```

@@ -27,7 +23,6 @@ archives:

```yaml
    # this name template makes the OS and Arch compatible with the results of uname.
    name_template: >-
      {{ .ProjectName }}_
      v{{ .Version }}_
      {{- title .Os }}_
      {{- if eq .Arch "amd64" }}x86_64
      {{- else if eq .Arch "386" }}i386
```
**Dockerfile** (25 lines deleted)

@@ -1,25 +0,0 @@

```dockerfile
FROM golang:latest

WORKDIR /app

COPY go.mod ./
RUN go mod download

COPY ./ ./

WORKDIR /app/cmd/versitygw
ENV CGO_ENABLED=0
RUN go build -o versitygw

FROM alpine:latest

# These arguments can be overridden when building the image
ARG IAM_DIR=/tmp/vgw
ARG SETUP_DIR=/tmp/vgw

RUN mkdir -p $IAM_DIR
RUN mkdir -p $SETUP_DIR

COPY --from=0 /app/cmd/versitygw/versitygw /app/versitygw

ENTRYPOINT [ "/app/versitygw" ]
```

@@ -1,17 +0,0 @@

```dockerfile
FROM golang:1.20

WORKDIR /app

COPY go.mod ./
RUN go mod download

COPY ./ ./

ARG IAM_DIR=/tmp/vgw
ARG SETUP_DIR=/tmp/vgw

RUN mkdir -p $IAM_DIR
RUN mkdir -p $SETUP_DIR

RUN go get github.com/githubnemo/CompileDaemon
RUN go install github.com/githubnemo/CompileDaemon
```
**Makefile** (20 lines changed)

@@ -74,23 +74,3 @@ dist: $(BIN).spec

```make
	rm -f VERSION
	rm -f $(BIN).spec
	gzip -f $(TARFILE)

# Creates and runs S3 gateway instance in a docker container
.PHONY: up-posix
up-posix:
	docker compose --env-file .env.dev up posix

# Creates and runs S3 gateway proxy instance in a docker container
.PHONY: up-proxy
up-proxy:
	docker compose --env-file .env.dev up proxy

# Creates and runs S3 gateway to azurite instance in a docker container
.PHONY: up-azurite
up-azurite:
	docker compose --env-file .env.dev up azurite azuritegw

# Creates and runs both S3 gateway and proxy server instances in docker containers
.PHONY: up-app
up-app:
	docker compose --env-file .env.dev up
```
**README.md**

@@ -8,18 +8,13 @@

```markdown
[](https://github.com/versity/versitygw/blob/main/LICENSE)

**Current status:** Ready for general testing, Issue reports welcome.

**News:**<br>
* New performance analysis article [https://github.com/versity/versitygw/wiki/Performance](https://github.com/versity/versitygw/wiki/Performance)

**Current status:** Beta: Most clients functional, work in progress for more test coverage. Issue reports welcome.

See project [documentation](https://github.com/versity/versitygw/wiki) on the wiki.

* Share filesystem directory via S3 protocol
* Proxy S3 requests to S3 storage
* Simple to deploy S3 server with a single command
* Protocol compatibility in `posix` allows common access to files via posix or S3
* Protocol compatibility allows common access to files via posix or S3

The Versity Gateway is a simple-to-use tool for seamless inline translation between AWS S3 object commands and storage systems. It bridges the gap between S3-reliant applications and other storage systems, enabling enhanced compatibility and integration while offering exceptional scalability.
```
**auth/iam.go** (40 lines changed)

@@ -16,7 +16,6 @@ package auth

```go
import (
	"errors"
	"fmt"
	"time"
)
```

@@ -44,25 +43,18 @@

```go
var ErrNoSuchUser = errors.New("user not found")

type Opts struct {
	Dir                string
	LDAPServerURL      string
	LDAPBindDN         string
	LDAPPassword       string
	LDAPQueryBase      string
	LDAPObjClasses     string
	LDAPAccessAtr      string
	LDAPSecretAtr      string
	LDAPRoleAtr        string
	S3Access           string
	S3Secret           string
	S3Region           string
	S3Bucket           string
	S3Endpoint         string
	S3DisableSSlVerfiy bool
	S3Debug            bool
	CacheDisable       bool
	CacheTTL           int
	CachePrune         int
	Dir                string
	LDAPServerURL      string
	LDAPBindDN         string
	LDAPPassword       string
	LDAPQueryBase      string
	LDAPObjClasses     string
	LDAPAccessAtr      string
	LDAPSecretAtr      string
	LDAPRoleAtr        string
	CacheDisable       bool
	CacheTTL           int
	CachePrune         int
}

func New(o *Opts) (IAMService, error) {
```

@@ -72,20 +64,12 @@ func New(o *Opts) (IAMService, error) {

```go
	switch {
	case o.Dir != "":
		svc, err = NewInternal(o.Dir)
		fmt.Printf("initializing internal IAM with %q\n", o.Dir)
	case o.LDAPServerURL != "":
		svc, err = NewLDAPService(o.LDAPServerURL, o.LDAPBindDN, o.LDAPPassword,
			o.LDAPQueryBase, o.LDAPAccessAtr, o.LDAPSecretAtr, o.LDAPRoleAtr,
			o.LDAPObjClasses)
		fmt.Printf("initializing LDAP IAM with %q\n", o.LDAPServerURL)
	case o.S3Endpoint != "":
		svc, err = NewS3(o.S3Access, o.S3Secret, o.S3Region, o.S3Bucket,
			o.S3Endpoint, o.S3DisableSSlVerfiy, o.S3Debug)
		fmt.Printf("initializing S3 IAM with '%v/%v'\n",
			o.S3Endpoint, o.S3Bucket)
	default:
		// if no iam options selected, default to the single user mode
		fmt.Println("No IAM service configured, enabling single account mode")
		return IAMServiceSingle{}, nil
	}
```
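The `New` constructor selects an IAM backend from whichever option group is populated: `Dir` enables the internal file-backed service, `LDAPServerURL` the LDAP service, `S3Endpoint` the S3-backed service, with single-account mode as the fallback. A minimal wiring sketch, assuming the `auth` package as shown in this diff (endpoint, bucket, and credential values are placeholders, and `ListUserAccounts` is assumed to be part of the `IAMService` interface, as the S3 implementation below suggests):

```go
package main

import (
	"fmt"
	"log"

	"github.com/versity/versitygw/auth"
)

func main() {
	// Populating the S3* fields selects the S3-backed IAM service in
	// auth.New's switch; leaving every group empty would fall through
	// to the single-account default.
	svc, err := auth.New(&auth.Opts{
		S3Access:   "iamadmin", // placeholder credentials
		S3Secret:   "iamsecret",
		S3Region:   "us-east-1",
		S3Bucket:   "iam-accounts",
		S3Endpoint: "https://s3.example.com",
	})
	if err != nil {
		log.Fatal(err)
	}

	accounts, err := svc.ListUserAccounts()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(accounts)
}
```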
**auth/iam_internal.go**

@@ -270,7 +270,7 @@ func (s *IAMServiceInternal) storeIAM(update UpdateAcctFunc) error {

```go
		// reset retries on successful read
		retries = 0

		err = os.Remove(fname)
		err = os.Remove(iamFile)
		if errors.Is(err, fs.ErrNotExist) {
			// racing with someone else updating
			// keep retrying after backoff
```
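The `fname` to `iamFile` rename above sits inside `storeIAM`'s lock-free update loop: the writer removes the IAM file to claim the update, and `fs.ErrNotExist` on the `Remove` means another writer won the race, so it backs off and retries. A standalone sketch of that delete-as-lock pattern (the retry cap and backoff duration are illustrative assumptions, not the gateway's actual values):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"time"
)

// updateFile applies update() with delete-as-lock semantics: whoever
// successfully removes the current file owns the update; a loser sees
// fs.ErrNotExist on the Remove and retries after a backoff.
func updateFile(name string, update func([]byte) ([]byte, error)) error {
	for retries := 0; retries < 10; retries++ {
		data, err := os.ReadFile(name)
		if err != nil {
			return err
		}

		err = os.Remove(name)
		if errors.Is(err, fs.ErrNotExist) {
			// racing with someone else updating;
			// keep retrying after backoff
			time.Sleep(10 * time.Millisecond)
			continue
		}
		if err != nil {
			return err
		}

		newData, err := update(data)
		if err != nil {
			return err
		}
		return os.WriteFile(name, newData, 0o600)
	}
	return errors.New("too many retries")
}

func main() {
	const name = "accounts.json"
	if err := os.WriteFile(name, []byte("{}"), 0o600); err != nil {
		fmt.Println(err)
		return
	}
	err := updateFile(name, func(b []byte) ([]byte, error) {
		// append a trailing newline as a stand-in mutation
		return append(b, '\n'), nil
	})
	fmt.Println(err)
}
```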
Deleted file (263 lines, package `auth`): S3-backed IAM service

@@ -1,263 +0,0 @@

```go
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"sort"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/smithy-go"
)

// IAMServiceS3 stores user accounts in an S3 object
// The endpoint, credentials, bucket, and region are provided
// from cli configuration.
// The object format and name is the same as the internal IAM service:
// coming from iAMConfig and iamFile in iam_internal.

type IAMServiceS3 struct {
	access        string
	secret        string
	region        string
	bucket        string
	endpoint      string
	sslSkipVerify bool
	debug         bool
	client        *s3.Client
}

var _ IAMService = &IAMServiceS3{}

func NewS3(access, secret, region, bucket, endpoint string, sslSkipVerify, debug bool) (*IAMServiceS3, error) {
	if access == "" {
		return nil, fmt.Errorf("must provide s3 IAM service access key")
	}
	if secret == "" {
		return nil, fmt.Errorf("must provide s3 IAM service secret key")
	}
	if region == "" {
		return nil, fmt.Errorf("must provide s3 IAM service region")
	}
	if bucket == "" {
		return nil, fmt.Errorf("must provide s3 IAM service bucket")
	}
	if endpoint == "" {
		return nil, fmt.Errorf("must provide s3 IAM service endpoint")
	}

	i := &IAMServiceS3{
		access:        access,
		secret:        secret,
		region:        region,
		bucket:        bucket,
		endpoint:      endpoint,
		sslSkipVerify: sslSkipVerify,
		debug:         debug,
	}

	cfg, err := i.getConfig()
	if err != nil {
		return nil, fmt.Errorf("init s3 IAM: %v", err)
	}

	i.client = s3.NewFromConfig(cfg)
	return i, nil
}

func (s *IAMServiceS3) CreateAccount(account Account) error {
	conf, err := s.getAccounts()
	if err != nil {
		return err
	}

	_, ok := conf.AccessAccounts[account.Access]
	if ok {
		return fmt.Errorf("account already exists")
	}
	conf.AccessAccounts[account.Access] = account

	return s.storeAccts(conf)
}

func (s *IAMServiceS3) GetUserAccount(access string) (Account, error) {
	conf, err := s.getAccounts()
	if err != nil {
		return Account{}, err
	}

	acct, ok := conf.AccessAccounts[access]
	if !ok {
		return Account{}, ErrNoSuchUser
	}

	return acct, nil
}

func (s *IAMServiceS3) DeleteUserAccount(access string) error {
	conf, err := s.getAccounts()
	if err != nil {
		return err
	}

	_, ok := conf.AccessAccounts[access]
	if !ok {
		return fmt.Errorf("account does not exist")
	}
	delete(conf.AccessAccounts, access)

	return s.storeAccts(conf)
}

func (s *IAMServiceS3) ListUserAccounts() ([]Account, error) {
	conf, err := s.getAccounts()
	if err != nil {
		return nil, err
	}

	keys := make([]string, 0, len(conf.AccessAccounts))
	for k := range conf.AccessAccounts {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var accs []Account
	for _, k := range keys {
		accs = append(accs, Account{
			Access:    k,
			Secret:    conf.AccessAccounts[k].Secret,
			Role:      conf.AccessAccounts[k].Role,
			UserID:    conf.AccessAccounts[k].UserID,
			GroupID:   conf.AccessAccounts[k].GroupID,
			ProjectID: conf.AccessAccounts[k].ProjectID,
		})
	}

	return accs, nil
}

// ResolveEndpoint is used for on prem or non-aws endpoints
func (s *IAMServiceS3) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
	return aws.Endpoint{
		PartitionID:       "aws",
		URL:               s.endpoint,
		SigningRegion:     s.region,
		HostnameImmutable: true,
	}, nil
}

func (s *IAMServiceS3) Shutdown() error {
	return nil
}

func (s *IAMServiceS3) getConfig() (aws.Config, error) {
	creds := credentials.NewStaticCredentialsProvider(s.access, s.secret, "")

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: s.sslSkipVerify},
	}
	client := &http.Client{Transport: tr}

	opts := []func(*config.LoadOptions) error{
		config.WithRegion(s.region),
		config.WithCredentialsProvider(creds),
		config.WithHTTPClient(client),
	}

	if s.endpoint != "" {
		opts = append(opts,
			config.WithEndpointResolverWithOptions(s))
	}

	if s.debug {
		opts = append(opts,
			config.WithClientLogMode(aws.LogSigning|aws.LogRetries|aws.LogRequest|aws.LogResponse|aws.LogRequestEventMessage|aws.LogResponseEventMessage))
	}

	return config.LoadDefaultConfig(context.Background(), opts...)
}

func (s *IAMServiceS3) getAccounts() (iAMConfig, error) {
	obj := iamFile

	out, err := s.client.GetObject(context.Background(), &s3.GetObjectInput{
		Bucket: &s.bucket,
		Key:    &obj,
	})
	if err != nil {
		// if the error is object not exists,
		// init empty accounts struct and return that
		var nsk *types.NoSuchKey
		if errors.As(err, &nsk) {
			return iAMConfig{}, nil
		}
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) {
			if apiErr.ErrorCode() == "NotFound" {
				return iAMConfig{}, nil
			}
		}

		// all other errors, return the error
		return iAMConfig{}, fmt.Errorf("get %v: %w", obj, err)
	}

	defer out.Body.Close()

	b, err := io.ReadAll(out.Body)
	if err != nil {
		return iAMConfig{}, fmt.Errorf("read %v: %w", obj, err)
	}

	conf, err := parseIAM(b)
	if err != nil {
		return iAMConfig{}, fmt.Errorf("parse iam data: %w", err)
	}

	return conf, nil
}

func (s *IAMServiceS3) storeAccts(conf iAMConfig) error {
	b, err := json.Marshal(conf)
	if err != nil {
		return fmt.Errorf("failed to serialize iam: %w", err)
	}

	obj := iamFile
	uploader := manager.NewUploader(s.client)
	upinfo := &s3.PutObjectInput{
		Body:   bytes.NewReader(b),
		Bucket: &s.bucket,
		Key:    &obj,
	}
	_, err = uploader.Upload(context.Background(), upinfo)
	if err != nil {
		return fmt.Errorf("store accounts in %v: %w", iamFile, err)
	}

	return nil
}
```
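Taken as a whole, the deleted service keeps the entire account database in one JSON object (`iamFile`) in a bucket: every mutation is a get-modify-put of that object through `getAccounts` and `storeAccts`. One apparent trade-off, unlike the internal service's remove-and-retry scheme above, is that concurrent writers can overwrite each other's updates, since plain S3 offers no compare-and-swap here. A hypothetical round-trip under that model (endpoint, bucket, and account values are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/versity/versitygw/auth"
)

func main() {
	// Placeholder endpoint and credentials; sslSkipVerify and debug off.
	svc, err := auth.NewS3("iamadmin", "iamsecret", "us-east-1",
		"iam-accounts", "https://s3.example.com", false, false)
	if err != nil {
		log.Fatal(err)
	}

	// Each call re-reads the backing object, mutates the account map,
	// and writes the whole object back.
	err = svc.CreateAccount(auth.Account{
		Access: "alice",
		Secret: "alicesecret",
		Role:   "user",
	})
	if err != nil {
		log.Fatal(err)
	}

	acct, err := svc.GetUserAccount("alice")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(acct.Access, acct.Role)
}
```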
Deleted file (986 lines, package `azure`): Azure Blob backend

@@ -1,986 +0,0 @@

```go
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package azure

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"math"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
)

// When getting container metadata with GetProperties method the sdk returns
// the first letter capital, when accessing the metadata after listing the containers
// it returns the first letter lower
type aclKey string

const aclKeyCapital aclKey = "Acl"
const aclKeyLower aclKey = "acl"

type Azure struct {
	backend.BackendUnsupported

	client         *azblob.Client
	sharedkeyCreds *azblob.SharedKeyCredential
	defaultCreds   *azidentity.DefaultAzureCredential
	serviceURL     string
	sasToken       string
}

var _ backend.Backend = &Azure{}

func New(accountName, accountKey, serviceURL, sasToken string) (*Azure, error) {
	url := serviceURL
	if serviceURL == "" && accountName != "" {
		// if not otherwise specified, use the typical form:
		// http(s)://<account>.blob.core.windows.net/
		url = fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
	}

	if sasToken != "" {
		client, err := azblob.NewClientWithNoCredential(url+"?"+sasToken, nil)
		if err != nil {
			return nil, fmt.Errorf("init client: %w", err)
		}
		return &Azure{client: client, serviceURL: serviceURL, sasToken: sasToken}, nil
	}

	if accountName == "" {
		// if account name not provided, try to get from env var
		accountName = os.Getenv("AZURE_CLIENT_ID")
	}

	if accountName == "" || accountKey == "" {
		cred, err := azidentity.NewDefaultAzureCredential(nil)
		if err != nil {
			return nil, fmt.Errorf("init default credentials: %w", err)
		}
		client, err := azblob.NewClient(url, cred, nil)
		if err != nil {
			return nil, fmt.Errorf("init client: %w", err)
		}
		return &Azure{client: client, serviceURL: url, defaultCreds: cred}, nil
	}

	cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return nil, fmt.Errorf("init credentials: %w", err)
	}

	client, err := azblob.NewClientWithSharedKeyCredential(url, cred, nil)
	if err != nil {
		return nil, fmt.Errorf("init client: %w", err)
	}

	return &Azure{client: client, serviceURL: url, sharedkeyCreds: cred}, nil
}

func (az *Azure) Shutdown() {}

func (az *Azure) String() string {
	return "Azure Blob Gateway"
}

func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, acl []byte) error {
	meta := map[string]*string{
		string(aclKeyCapital): backend.GetStringPtr(string(acl)),
	}
	_, err := az.client.CreateContainer(ctx, *input.Bucket, &container.CreateOptions{Metadata: meta})
	return azureErrToS3Err(err)
}

func (az *Azure) ListBuckets(ctx context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
	pager := az.client.NewListContainersPager(nil)

	var buckets []s3response.ListAllMyBucketsEntry
	var result s3response.ListAllMyBucketsResult

	for pager.More() {
		resp, err := pager.NextPage(ctx)
		if err != nil {
			return result, azureErrToS3Err(err)
		}
		for _, v := range resp.ContainerItems {
			buckets = append(buckets, s3response.ListAllMyBucketsEntry{
				Name: *v.Name,
				// TODO: using modification date here instead of creation, is that ok?
				CreationDate: *v.Properties.LastModified,
			})
		}
	}

	result.Buckets.Bucket = buckets
	result.Owner.ID = owner

	return result, nil
}

func (az *Azure) HeadBucket(ctx context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
	client, err := az.getContainerClient(*input.Bucket)
	if err != nil {
		return nil, err
	}

	_, err = client.GetProperties(ctx, nil)
	if err != nil {
		return nil, azureErrToS3Err(err)
	}

	return &s3.HeadBucketOutput{}, nil
}

func (az *Azure) DeleteBucket(ctx context.Context, input *s3.DeleteBucketInput) error {
	_, err := az.client.DeleteContainer(ctx, *input.Bucket, nil)
	return azureErrToS3Err(err)
}

func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, error) {
	tags, err := parseTags(po.Tagging)
	if err != nil {
		return "", err
	}

	uploadResp, err := az.client.UploadStream(ctx, *po.Bucket, *po.Key, po.Body, &blockblob.UploadStreamOptions{
		Metadata: parseMetadata(po.Metadata),
		Tags:     tags,
	})
	if err != nil {
		return "", azureErrToS3Err(err)
	}

	return string(*uploadResp.ETag), nil
}

func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
	var opts *azblob.DownloadStreamOptions
	if *input.Range != "" {
		offset, count, err := parseRange(*input.Range)
		if err != nil {
			return nil, err
		}
		opts = &azblob.DownloadStreamOptions{
			Range: blob.HTTPRange{
				Count:  count,
				Offset: offset,
			},
		}
	}
	blobDownloadResponse, err := az.client.DownloadStream(ctx, *input.Bucket, *input.Key, opts)
	if err != nil {
		return nil, azureErrToS3Err(err)
	}
	defer blobDownloadResponse.Body.Close()

	_, err = io.Copy(writer, blobDownloadResponse.Body)
	if err != nil {
		return nil, fmt.Errorf("copy data: %w", err)
	}

	var tagcount int32
	if blobDownloadResponse.TagCount != nil {
		tagcount = int32(*blobDownloadResponse.TagCount)
	}

	return &s3.GetObjectOutput{
		AcceptRanges:    input.Range,
		ContentLength:   blobDownloadResponse.ContentLength,
		ContentEncoding: blobDownloadResponse.ContentEncoding,
		ContentType:     blobDownloadResponse.ContentType,
		ETag:            (*string)(blobDownloadResponse.ETag),
		LastModified:    blobDownloadResponse.LastModified,
		Metadata:        parseAzMetadata(blobDownloadResponse.Metadata),
		TagCount:        &tagcount,
		ContentRange:    blobDownloadResponse.ContentRange,
	}, nil
}

func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
	client, err := az.getBlobClient(*input.Bucket, *input.Key)
	if err != nil {
		return nil, err
	}

	resp, err := client.GetProperties(ctx, nil)
	if err != nil {
		return nil, azureErrToS3Err(err)
	}

	return &s3.HeadObjectOutput{
		AcceptRanges:       resp.AcceptRanges,
		ContentLength:      resp.ContentLength,
		ContentType:        resp.ContentType,
		ContentEncoding:    resp.ContentEncoding,
		ContentLanguage:    resp.ContentLanguage,
		ContentDisposition: resp.ContentDisposition,
		ETag:               (*string)(resp.ETag),
		LastModified:       resp.LastModified,
		Metadata:           parseAzMetadata(resp.Metadata),
		Expires:            resp.ExpiresOn,
	}, nil
}

func (az *Azure) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
	pager := az.client.NewListBlobsFlatPager(*input.Bucket, &azblob.ListBlobsFlatOptions{
		Marker:     input.Marker,
		MaxResults: input.MaxKeys,
		Prefix:     input.Prefix,
	})

	var objects []types.Object
	var nextMarker *string
	var isTruncated bool
	var maxKeys int32 = math.MaxInt32

	if input.MaxKeys != nil {
		maxKeys = *input.MaxKeys
	}

Pager:
	for pager.More() {
		resp, err := pager.NextPage(ctx)
		if err != nil {
			return nil, azureErrToS3Err(err)
		}

		for _, v := range resp.Segment.BlobItems {
			if nextMarker == nil && *resp.NextMarker != "" {
				nextMarker = resp.NextMarker
				isTruncated = true
			}
			if len(objects) >= int(maxKeys) {
				break Pager
			}
			objects = append(objects, types.Object{
				ETag:         (*string)(v.Properties.ETag),
				Key:          v.Name,
				LastModified: v.Properties.LastModified,
				Size:         v.Properties.ContentLength,
				StorageClass: types.ObjectStorageClass(*v.Properties.AccessTier),
			})
		}
	}

	// TODO: generate common prefixes when appropriate

	return &s3.ListObjectsOutput{
		Contents:    objects,
		Marker:      input.Marker,
		MaxKeys:     input.MaxKeys,
		Name:        input.Bucket,
		NextMarker:  nextMarker,
		Prefix:      input.Prefix,
		IsTruncated: &isTruncated,
	}, nil
}

func (az *Azure) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
	pager := az.client.NewListBlobsFlatPager(*input.Bucket, &azblob.ListBlobsFlatOptions{
		Marker:     input.ContinuationToken,
		MaxResults: input.MaxKeys,
		Prefix:     input.Prefix,
	})

	var objects []types.Object
	var nextMarker *string
	var isTruncated bool
	var maxKeys int32 = math.MaxInt32

	if input.MaxKeys != nil {
		maxKeys = *input.MaxKeys
	}

Pager:
	for pager.More() {
		resp, err := pager.NextPage(ctx)
		if err != nil {
			return nil, azureErrToS3Err(err)
		}
		for _, v := range resp.Segment.BlobItems {
			if nextMarker == nil && *resp.NextMarker != "" {
				nextMarker = resp.NextMarker
				isTruncated = true
			}
			if len(objects) >= int(maxKeys) {
				break Pager
			}
			nextMarker = resp.NextMarker
			objects = append(objects, types.Object{
				ETag:         (*string)(v.Properties.ETag),
				Key:          v.Name,
				LastModified: v.Properties.LastModified,
				Size:         v.Properties.ContentLength,
				StorageClass: types.ObjectStorageClass(*v.Properties.AccessTier),
			})
		}
	}

	// TODO: generate common prefixes when appropriate

	return &s3.ListObjectsV2Output{
		Contents:              objects,
		ContinuationToken:     input.ContinuationToken,
		MaxKeys:               input.MaxKeys,
		Name:                  input.Bucket,
		NextContinuationToken: nextMarker,
		Prefix:                input.Prefix,
		IsTruncated:           &isTruncated,
	}, nil
}

func (az *Azure) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) error {
	_, err := az.client.DeleteBlob(ctx, *input.Bucket, *input.Key, nil)
	return azureErrToS3Err(err)
}

func (az *Azure) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error) {
	delResult, errs := []types.DeletedObject{}, []types.Error{}
	for _, obj := range input.Delete.Objects {
		err := az.DeleteObject(ctx, &s3.DeleteObjectInput{
			Bucket: input.Bucket,
			Key:    obj.Key,
		})
		if err == nil {
			delResult = append(delResult, types.DeletedObject{Key: obj.Key})
		} else {
			serr, ok := err.(s3err.APIError)
			if ok {
				errs = append(errs, types.Error{
					Key:     obj.Key,
					Code:    &serr.Code,
					Message: &serr.Description,
				})
			} else {
				errs = append(errs, types.Error{
					Key:     obj.Key,
					Code:    backend.GetStringPtr("InternalError"),
					Message: backend.GetStringPtr(err.Error()),
				})
			}
		}
	}

	return s3response.DeleteObjectsResult{
		Deleted: delResult,
		Error:   errs,
	}, nil
}

func (az *Azure) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
	containerClient, err := az.getContainerClient(*input.Bucket)
	if err != nil {
		return nil, err
	}

	res, err := containerClient.GetProperties(ctx, &container.GetPropertiesOptions{})
	if err != nil {
		return nil, azureErrToS3Err(err)
	}

	dstContainerAcl, err := getAclFromMetadata(res.Metadata, aclKeyCapital)
	if err != nil {
		return nil, err
	}

	err = auth.VerifyACL(*dstContainerAcl, *input.ExpectedBucketOwner, types.PermissionWrite, false)
	if err != nil {
		return nil, err
	}

	if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource && isMetaSame(res.Metadata, input.Metadata) {
		return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
	}

	tags, err := parseTags(input.Tagging)
	if err != nil {
		return nil, err
	}

	client, err := az.getBlobClient(*input.Bucket, *input.Key)
	if err != nil {
		return nil, err
	}

	resp, err := client.CopyFromURL(ctx, az.serviceURL+"/"+*input.CopySource, &blob.CopyFromURLOptions{
		BlobTags: tags,
		Metadata: parseMetadata(input.Metadata),
	})
	if err != nil {
		return nil, azureErrToS3Err(err)
	}

	return &s3.CopyObjectOutput{
		CopyObjectResult: &types.CopyObjectResult{
			ETag:         (*string)(resp.ETag),
			LastModified: resp.LastModified,
		},
	}, nil
}

func (az *Azure) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
	client, err := az.getBlobClient(bucket, object)
	if err != nil {
		return err
	}

	_, err = client.SetTags(ctx, tags, nil)
	if err != nil {
		return azureErrToS3Err(err)
	}

	return nil
}

func (az *Azure) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
	client, err := az.getBlobClient(bucket, object)
	if err != nil {
		return nil, err
	}

	tags, err := client.GetTags(ctx, nil)
	if err != nil {
		return nil, azureErrToS3Err(err)
	}

	return parseAzTags(tags.BlobTagSet), nil
}

func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
	client, err := az.getBlobClient(bucket, object)
	if err != nil {
		return err
	}

	_, err = client.SetTags(ctx, map[string]string{}, nil)
	if err != nil {
		return azureErrToS3Err(err)
	}

	return nil
}

func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
	// Multipart upload starts with UploadPart action so there is no
	// correlating function for creating multipart uploads.
	// TODO: since azure only allows for a single multipart upload
	// for an object name at a time, we need to send an error back to
	// the client if there is already an outstanding upload in progress
	// for this object.
	// Alternatively, is there something we can do with upload ids to
	// keep concurrent uploads unique still? I haven't found an efficient
	// way to rename final objects.
	return &s3.CreateMultipartUploadOutput{
		Bucket:   input.Bucket,
		Key:      input.Key,
		UploadId: input.Key,
	}, nil
}

// Each part is translated into an uncommitted block in a newly created blob in staging area
func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (etag string, err error) {
	client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
	if err != nil {
		return "", err
	}

	// TODO: request streamable version of StageBlock()
	// (*blockblob.Client).StageBlock does not have a streamable
	// version of this function at this time, so we need to cache
	// the body in memory to create an io.ReadSeekCloser
	rdr, err := getReadSeekCloser(input.Body)
	if err != nil {
		return "", err
	}

	// block id serves as etag here
	etag = blockIDInt32ToBase64(*input.PartNumber)
	_, err = client.StageBlock(ctx, etag, rdr, nil)
	if err != nil {
		return "", parseMpError(err)
	}

	return etag, nil
}

func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
	client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
	if err != nil {
		return s3response.CopyObjectResult{}, nil
	}

	//TODO: handle block copy by range
	//TODO: the action returns not implemented on azurite, maybe in production this will work?
	// UploadId here is the source block id
	_, err = client.StageBlockFromURL(ctx, *input.UploadId, *input.CopySource, nil)
	if err != nil {
		return s3response.CopyObjectResult{}, parseMpError(err)
	}

	return s3response.CopyObjectResult{}, nil
}

// Lists all uncommitted parts from the blob
func (az *Azure) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
	client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
	if err != nil {
		return s3response.ListPartsResult{}, nil
	}

	resp, err := client.GetBlockList(ctx, blockblob.BlockListTypeUncommitted, nil)
	if err != nil {
		return s3response.ListPartsResult{}, parseMpError(err)
	}
	var partNumberMarker int
	var nextPartNumberMarker int
	var maxParts int32 = math.MaxInt32
	var isTruncated bool

	if *input.PartNumberMarker != "" {
		partNumberMarker, err = strconv.Atoi(*input.PartNumberMarker)
		if err != nil {
			return s3response.ListPartsResult{}, s3err.GetAPIError(s3err.ErrInvalidPartNumberMarker)
		}
	}
	if input.MaxParts != nil {
		maxParts = *input.MaxParts
	}

	parts := []s3response.Part{}
	for _, el := range resp.BlockList.UncommittedBlocks {
		partNumber, err := decodeBlockId(*el.Name)
		if err != nil {
			return s3response.ListPartsResult{}, err
		}
		if partNumberMarker != 0 && partNumberMarker < partNumber {
			continue
		}
		if len(parts) >= int(maxParts) {
			nextPartNumberMarker = partNumber
			isTruncated = true
			break
		}
		parts = append(parts, s3response.Part{
			Size:         *el.Size,
			ETag:         *el.Name,
			PartNumber:   partNumber,
			LastModified: time.Now().Format(backend.RFC3339TimeFormat),
		})
	}
	return s3response.ListPartsResult{
		Bucket:               *input.Bucket,
		Key:                  *input.Key,
		Parts:                parts,
		NextPartNumberMarker: nextPartNumberMarker,
		PartNumberMarker:     partNumberMarker,
		IsTruncated:          isTruncated,
		MaxParts:             int(maxParts),
	}, nil
}

// Lists all block blobs, which have uncommitted blocks
func (az *Azure) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
	client, err := az.getContainerClient(*input.Bucket)
	if err != nil {
		return s3response.ListMultipartUploadsResult{}, err
	}
	pager := client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
		Include: container.ListBlobsInclude{UncommittedBlobs: true},
		Marker:  input.KeyMarker,
		Prefix:  input.Prefix,
	})

	var maxUploads int32
	if input.MaxUploads != nil {
		maxUploads = *input.MaxUploads
	}
	isTruncated := false
	nextKeyMarker := ""
	uploads := []s3response.Upload{}
	breakFlag := false

	for pager.More() {
		resp, err := pager.NextPage(ctx)
		if err != nil {
			return s3response.ListMultipartUploadsResult{}, azureErrToS3Err(err)
		}
		for _, el := range resp.Segment.BlobItems {
			if el.Properties.AccessTier == nil {
				if len(uploads) >= int(*input.MaxUploads) && maxUploads != 0 {
					breakFlag = true
					nextKeyMarker = *el.Name
					isTruncated = true
					break
				}
				uploads = append(uploads, s3response.Upload{
					Key:       *el.Name,
					Initiated: el.Properties.CreationTime.Format(backend.RFC3339TimeFormat),
				})
			}
		}
		if breakFlag {
			break
		}
	}
	return s3response.ListMultipartUploadsResult{
		Uploads:       uploads,
		Bucket:        *input.Bucket,
		KeyMarker:     *input.KeyMarker,
		NextKeyMarker: nextKeyMarker,
		MaxUploads:    int(maxUploads),
		Prefix:        *input.Prefix,
		IsTruncated:   isTruncated,
		Delimiter:     *input.Delimiter,
	}, nil
}

// Deletes the block blob with committed/uncommitted blocks
func (az *Azure) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
	// TODO: need to verify this blob has uncommitted blocks?
	_, err := az.client.DeleteBlob(ctx, *input.Bucket, *input.Key, nil)
	if err != nil {
		return parseMpError(err)
	}
	return nil
}

// Commits all the uncommitted blocks inside the block blob
// And moves the block blob from staging area into the blobs list
// It indicates the end of the multipart upload
func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
	client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
	if err != nil {
		return nil, err
	}
	blockIds := []string{}
	for _, el := range input.MultipartUpload.Parts {
		blockIds = append(blockIds, *el.ETag)
	}
	resp, err := client.CommitBlockList(ctx, blockIds, nil)
	if err != nil {
		return nil, parseMpError(err)
	}

	return &s3.CompleteMultipartUploadOutput{
		Bucket: input.Bucket,
		Key:    input.Key,
		ETag:   (*string)(resp.ETag),
	}, nil
}

func (az *Azure) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
	client, err := az.getContainerClient(bucket)
	if err != nil {
		return err
	}
	meta := map[string]*string{
		string(aclKeyCapital): backend.GetStringPtr(string(data)),
	}
	_, err = client.SetMetadata(ctx, &container.SetMetadataOptions{
		Metadata: meta,
	})
	if err != nil {
		return azureErrToS3Err(err)
	}
	return nil
}

func (az *Azure) GetBucketAcl(ctx context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
	client, err := az.getContainerClient(*input.Bucket)
	if err != nil {
		return nil, err
	}
	props, err := client.GetProperties(ctx, nil)
	if err != nil {
		return nil, azureErrToS3Err(err)
	}

	aclPtr, ok := props.Metadata[string(aclKeyCapital)]
	if !ok {
		return nil, s3err.GetAPIError(s3err.ErrInternalError)
	}

	return []byte(*aclPtr), nil
}

func (az *Azure) ChangeBucketOwner(ctx context.Context, bucket, newOwner string) error {
	client, err := az.getContainerClient(bucket)
	if err != nil {
		return err
	}
	props, err := client.GetProperties(ctx, nil)
	if err != nil {
		return azureErrToS3Err(err)
	}

	acl, err := getAclFromMetadata(props.Metadata, aclKeyCapital)
	if err != nil {
		return err
	}

	acl.Owner = newOwner

	newAcl, err := json.Marshal(acl)
	if err != nil {
		return fmt.Errorf("marshal acl: %w", err)
	}

	err = az.PutBucketAcl(ctx, bucket, newAcl)
	if err != nil {
		return err
	}

	return nil
}

// The action actually returns the containers owned by the user who initialized the gateway
// TODO: Not sure if there's a way to list all the containers and owners?
func (az *Azure) ListBucketsAndOwners(ctx context.Context) (buckets []s3response.Bucket, err error) {
	pager := az.client.NewListContainersPager(nil)

	for pager.More() {
		resp, err := pager.NextPage(ctx)
		if err != nil {
			return buckets, azureErrToS3Err(err)
		}
		for _, v := range resp.ContainerItems {
			acl, err := getAclFromMetadata(v.Metadata, aclKeyLower)
			if err != nil {
				return buckets, err
			}

			buckets = append(buckets, s3response.Bucket{
				Name:  *v.Name,
				Owner: acl.Owner,
			})
		}
	}
	return buckets, nil
}

func (az *Azure) getContainerURL(cntr string) string {
	return fmt.Sprintf("%v/%v", az.serviceURL, cntr)
}

func (az *Azure) getBlobURL(cntr, blb string) string {
	return fmt.Sprintf("%v/%v", az.getContainerURL(cntr), blb)
}

func (az *Azure) getBlobClient(cntr, blb string) (*blob.Client, error) {
	blobURL := az.getBlobURL(cntr, blb)
	if az.defaultCreds != nil {
		return blob.NewClient(blobURL, az.defaultCreds, nil)
	}
	if az.sasToken != "" {
		return blob.NewClientWithNoCredential(blobURL+"?"+az.sasToken, nil)
	}
	return blob.NewClientWithSharedKeyCredential(blobURL, az.sharedkeyCreds, nil)
}

func (az *Azure) getContainerClient(cntr string) (*container.Client, error) {
	containerURL := az.getContainerURL(cntr)
	if az.defaultCreds != nil {
		return container.NewClient(containerURL, az.defaultCreds, nil)
	}
	if az.sasToken != "" {
		return container.NewClientWithNoCredential(containerURL+"?"+az.sasToken, nil)
	}
	return container.NewClientWithSharedKeyCredential(containerURL, az.sharedkeyCreds, nil)
}

func (az *Azure) getBlockBlobClient(cntr, blb string) (*blockblob.Client, error) {
	blobURL := az.getBlobURL(cntr, blb)
	if az.defaultCreds != nil {
		return blockblob.NewClient(blobURL, az.defaultCreds, nil)
	}
	if az.sasToken != "" {
		return blockblob.NewClientWithNoCredential(blobURL+"?"+az.sasToken, nil)
	}
	return blockblob.NewClientWithSharedKeyCredential(blobURL, az.sharedkeyCreds, nil)
}

func parseMetadata(m map[string]string) map[string]*string {
	if m == nil {
		return nil
	}

	meta := make(map[string]*string)

	for k, v := range m {
		val := v
		meta[k] = &val
	}
	return meta
}

func parseAzMetadata(m map[string]*string) map[string]string {
	if m == nil {
		return nil
	}

	meta := make(map[string]string)

	for k, v := range m {
		meta[k] = *v
	}
	return meta
}

func parseTags(tagstr *string) (map[string]string, error) {
	tagsStr := getString(tagstr)
	tags := make(map[string]string)

	if tagsStr != "" {
		tagParts := strings.Split(tagsStr, "&")
		for _, prt := range tagParts {
			p := strings.Split(prt, "=")
			if len(p) != 2 {
				return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
			}
			tags[p[0]] = p[1]
		}
	}

	return tags, nil
}

func parseAzTags(tagSet []*blob.Tags) map[string]string {
	tags := map[string]string{}
	for _, tag := range tagSet {
		tags[*tag.Key] = *tag.Value
	}

	return tags
}

func getString(str *string) string {
	if str == nil {
		return ""
	}
	return *str
}

// Converts io.Reader into io.ReadSeekCloser
func getReadSeekCloser(input io.Reader) (io.ReadSeekCloser, error) {
	var buffer bytes.Buffer
	_, err := io.Copy(&buffer, input)
	if err != nil {
		return nil, err
	}

	return streaming.NopCloser(bytes.NewReader(buffer.Bytes())), nil
}

// Creates a new Base64 encoded block id from a 32 bit integer
func blockIDInt32ToBase64(blockID int32) string {
	binaryBlockID := &[4]byte{} // All block IDs are 4 bytes long
	binary.LittleEndian.PutUint32(binaryBlockID[:], uint32(blockID))
	return base64.StdEncoding.EncodeToString(binaryBlockID[:])
}

// Decodes Base64 encoded string to integer
func decodeBlockId(blockID string) (int, error) {
	slice, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil {
		return 0, nil
	}

	return int(binary.LittleEndian.Uint32(slice)), nil
}

func parseRange(rg string) (offset, count int64, err error) {
	rangeKv := strings.Split(rg, "=")

	if len(rangeKv) < 2 {
		return 0, 0, s3err.GetAPIError(s3err.ErrInvalidRange)
	}

	bRange := strings.Split(rangeKv[1], "-")
	if len(bRange) < 1 || len(bRange) > 2 {
		return 0, 0, s3err.GetAPIError(s3err.ErrInvalidRange)
	}

	offset, err = strconv.ParseInt(bRange[0], 10, 64)
	if err != nil {
		return 0, 0, s3err.GetAPIError(s3err.ErrInvalidRange)
	}

	if len(bRange) == 1 || bRange[1] == "" {
		return offset, count, nil
	}

	count, err = strconv.ParseInt(bRange[1], 10, 64)
	if err != nil {
		return 0, 0, s3err.GetAPIError(s3err.ErrInvalidRange)
	}

	if count < offset {
		return 0, 0, s3err.GetAPIError(s3err.ErrInvalidRange)
	}

	return offset, count - offset + 1, nil
}

func getAclFromMetadata(meta map[string]*string, key aclKey) (*auth.ACL, error) {
	aclPtr, ok := meta[string(key)]
	if !ok {
		return nil, s3err.GetAPIError(s3err.ErrInternalError)
	}

	var acl auth.ACL
	err := json.Unmarshal([]byte(*aclPtr), &acl)
	if err != nil {
		return nil, fmt.Errorf("unmarshal acl: %w", err)
	}

	return &acl, nil
}

func isMetaSame(azMeta map[string]*string, awsMeta map[string]string) bool {
	if len(azMeta) != len(awsMeta)+1 {
		return false
	}

	for key, val := range azMeta {
		if key == string(aclKeyCapital) || key == string(aclKeyLower) {
			continue
		}
		awsVal, ok := awsMeta[key]
		if !ok || awsVal != *val {
			return false
		}
	}

	return true
}
```
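`UploadPart` above encodes the S3 part number as the Azure block ID and hands that same string back as the part's ETag, which is what lets `CompleteMultipartUpload` rebuild the block list directly from client-supplied ETags. A standalone, runnable sketch of that round-trip (function names here are illustrative; also note the deleted `decodeBlockId` returns `0, nil` on a base64 failure, silently swallowing the error, while this sketch propagates it):

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// encodeBlockID maps an S3 part number to an Azure block ID: a 4-byte
// little-endian value, base64-encoded. The gateway reuses this string
// as the part's ETag.
func encodeBlockID(partNumber int32) string {
	var b [4]byte
	binary.LittleEndian.PutUint32(b[:], uint32(partNumber))
	return base64.StdEncoding.EncodeToString(b[:])
}

// decodeBlockID recovers the part number from a block ID, propagating
// decode failures instead of masking them.
func decodeBlockID(blockID string) (int32, error) {
	b, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil {
		return 0, err
	}
	return int32(binary.LittleEndian.Uint32(b)), nil
}

func main() {
	id := encodeBlockID(7)
	n, err := decodeBlockID(id)
	fmt.Println(id, n, err) // BwAAAA== 7 <nil>
}
```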
Deleted file (63 lines, package `azure`): Azure-to-S3 error mapping

@@ -1,63 +0,0 @@

```go
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package azure

import (
	"errors"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/versity/versitygw/s3err"
)

// Parses azure ResponseError into AWS APIError
func azureErrToS3Err(apiErr error) error {
	var azErr *azcore.ResponseError
	if !errors.As(apiErr, &azErr) {
		return apiErr
	}

	return azErrToS3err(azErr)
}

func azErrToS3err(azErr *azcore.ResponseError) s3err.APIError {
	switch azErr.ErrorCode {
	case "ContainerAlreadyExists":
		return s3err.GetAPIError(s3err.ErrBucketAlreadyExists)
	case "InvalidResourceName", "ContainerNotFound":
		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
	case "BlobNotFound":
		return s3err.GetAPIError(s3err.ErrNoSuchKey)
	case "TagsTooLarge":
		return s3err.GetAPIError(s3err.ErrInvalidTag)
	case "Requested Range Not Satisfiable":
		return s3err.GetAPIError(s3err.ErrInvalidRange)
	}
	return s3err.APIError{
		Code:           azErr.ErrorCode,
		Description:    azErr.RawResponse.Status,
		HTTPStatusCode: azErr.StatusCode,
	}
}

func parseMpError(mpErr error) error {
	err := azureErrToS3Err(mpErr)

	serr, ok := err.(s3err.APIError)
	if !ok || serr.Code != "NoSuchKey" {
		return mpErr
	}

	return s3err.GetAPIError(s3err.ErrNoSuchUpload)
}
```
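Every backend method above funnels raw SDK failures through this one translation point. A property worth noting: a nil error passes through unchanged, because `errors.As` fails on nil, so call sites can translate unconditionally. A minimal sketch of such a call site, assuming it lives inside the same `azure` package (the function name is hypothetical; it mirrors the pattern used throughout azure.go):

```go
// deleteBlob shows the translate-at-the-boundary pattern: the SDK error
// is mapped once, on return. "ContainerNotFound" becomes
// s3err.ErrNoSuchBucket, "BlobNotFound" becomes s3err.ErrNoSuchKey,
// and a nil error stays nil.
func deleteBlob(ctx context.Context, az *Azure, bucket, key string) error {
	_, err := az.client.DeleteBlob(ctx, bucket, key, nil)
	return azureErrToS3Err(err)
}
```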
**backend/backend.go**

@@ -15,7 +15,6 @@

```go
package backend

import (
	"bufio"
	"context"
	"fmt"
	"io"
```

@@ -23,7 +22,6 @@ import (

```go
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
	"github.com/versity/versitygw/s3select"
)

//go:generate moq -out ../s3api/controllers/backend_moq_test.go -pkg controllers . Backend
```

@@ -35,7 +33,7 @@ type Backend interface {

```go
	ListBuckets(_ context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error)
	HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
	GetBucketAcl(context.Context, *s3.GetBucketAclInput) ([]byte, error)
	CreateBucket(_ context.Context, _ *s3.CreateBucketInput, defaultACL []byte) error
	CreateBucket(context.Context, *s3.CreateBucketInput) error
	PutBucketAcl(_ context.Context, bucket string, data []byte) error
	DeleteBucket(context.Context, *s3.DeleteBucketInput) error
```

@@ -63,12 +61,7 @@ type Backend interface {

```go
	// special case object operations
	RestoreObject(context.Context, *s3.RestoreObjectInput) error
	SelectObjectContent(ctx context.Context, input *s3.SelectObjectContentInput) func(w *bufio.Writer)

	// bucket tagging operations
	GetBucketTagging(_ context.Context, bucket string) (map[string]string, error)
	PutBucketTagging(_ context.Context, bucket string, tags map[string]string) error
	DeleteBucketTagging(_ context.Context, bucket string) error
	SelectObjectContent(context.Context, *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error)

	// object tags operations
	GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error)
```

@@ -100,7 +93,7 @@ func (BackendUnsupported) HeadBucket(context.Context, *s3.

```go
func (BackendUnsupported) GetBucketAcl(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CreateBucket(context.Context, *s3.CreateBucketInput, []byte) error {
func (BackendUnsupported) CreateBucket(context.Context, *s3.CreateBucketInput) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketAcl(_ context.Context, bucket string, data []byte) error {
```

@@ -169,29 +162,8 @@ func (BackendUnsupported) PutObjectAcl(context.Context, *s3.PutObjectAclInput) e

```go
func (BackendUnsupported) RestoreObject(context.Context, *s3.RestoreObjectInput) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) SelectObjectContent(ctx context.Context, input *s3.SelectObjectContentInput) func(w *bufio.Writer) {
	return func(w *bufio.Writer) {
		var getProgress s3select.GetProgress
		progress := input.RequestProgress
		if progress != nil && *progress.Enabled {
			getProgress = func() (bytesScanned int64, bytesProcessed int64) {
				return -1, -1
			}
		}
		mh := s3select.NewMessageHandler(ctx, w, getProgress)
		apiErr := s3err.GetAPIError(s3err.ErrNotImplemented)
		mh.FinishWithError(apiErr.Code, apiErr.Description)
	}
}

func (BackendUnsupported) GetBucketTagging(_ context.Context, bucket string) (map[string]string, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketTagging(_ context.Context, bucket string, tags map[string]string) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucketTagging(_ context.Context, bucket string) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) SelectObjectContent(context.Context, *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error) {
	return s3response.SelectObjectContentResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
```
@@ -146,10 +146,6 @@ func (p *Posix) ListBuckets(_ context.Context, owner string, isAdmin bool) (s3re
|
||||
}
|
||||
|
||||
func (p *Posix) HeadBucket(_ context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
|
||||
if input.Bucket == nil {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
|
||||
}
|
||||
|
||||
_, err := os.Lstat(*input.Bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -161,12 +157,9 @@ func (p *Posix) HeadBucket(_ context.Context, input *s3.HeadBucketInput) (*s3.He
|
||||
return &s3.HeadBucketOutput{}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) CreateBucket(_ context.Context, input *s3.CreateBucketInput, acl []byte) error {
|
||||
if input.Bucket == nil {
|
||||
return s3err.GetAPIError(s3err.ErrInvalidBucketName)
|
||||
}
|
||||
|
||||
func (p *Posix) CreateBucket(_ context.Context, input *s3.CreateBucketInput) error {
|
||||
bucket := *input.Bucket
|
||||
owner := string(input.ObjectOwnership)
|
||||
|
||||
err := os.Mkdir(bucket, 0777)
|
||||
if err != nil && os.IsExist(err) {
|
||||
@@ -176,7 +169,13 @@ func (p *Posix) CreateBucket(_ context.Context, input *s3.CreateBucketInput, acl
|
||||
return fmt.Errorf("mkdir bucket: %w", err)
|
||||
}
|
||||
|
||||
if err := xattr.Set(bucket, aclkey, acl); err != nil {
|
||||
acl := auth.ACL{ACL: "private", Owner: owner, Grantees: []auth.Grantee{}}
|
||||
jsonACL, err := json.Marshal(acl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal acl: %w", err)
|
||||
}
|
||||
|
||||
if err := xattr.Set(bucket, aclkey, jsonACL); err != nil {
|
||||
return fmt.Errorf("set acl: %w", err)
|
||||
}
|
||||
|
||||
@@ -184,10 +183,6 @@ func (p *Posix) CreateBucket(_ context.Context, input *s3.CreateBucketInput, acl
|
||||
}
|
||||
|
||||
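CreateBucket now seeds every new bucket with a private ACL, marshaled to JSON and stored in the bucket directory's extended attributes. For context, a minimal sketch of reading that ACL back, assuming the same aclkey constant used above; the helper name is hypothetical:

// readBucketACL (hypothetical) fetches the JSON ACL written by
// CreateBucket from the bucket directory's xattr and unmarshals it
// into the gateway's auth.ACL type.
func readBucketACL(bucket string) (auth.ACL, error) {
    var acl auth.ACL
    b, err := xattr.Get(bucket, aclkey)
    if err != nil {
        return acl, fmt.Errorf("get acl: %w", err)
    }
    if err := json.Unmarshal(b, &acl); err != nil {
        return acl, fmt.Errorf("unmarshal acl: %w", err)
    }
    return acl, nil
}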
func (p *Posix) DeleteBucket(_ context.Context, input *s3.DeleteBucketInput) error {
    if input.Bucket == nil {
        return s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }

    names, err := os.ReadDir(*input.Bucket)
    if errors.Is(err, fs.ErrNotExist) {
        return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -217,13 +212,6 @@ func (p *Posix) DeleteBucket(_ context.Context, input *s3.DeleteBucketInput) err
}

func (p *Posix) CreateMultipartUpload(_ context.Context, mpu *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
    if mpu.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if mpu.Key == nil {
        return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
    }

    bucket := *mpu.Bucket
    object := *mpu.Key

@@ -281,19 +269,6 @@ func (p *Posix) CreateMultipartUpload(_ context.Context, mpu *s3.CreateMultipart
}

func (p *Posix) CompleteMultipartUpload(_ context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
    if input.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if input.Key == nil {
        return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
    }
    if input.UploadId == nil {
        return nil, s3err.GetAPIError(s3err.ErrNoSuchUpload)
    }
    if input.MultipartUpload == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
    }

    bucket := *input.Bucket
    object := *input.Key
    uploadID := *input.UploadId
@@ -319,7 +294,7 @@ func (p *Posix) CompleteMultipartUpload(_ context.Context, input *s3.CompleteMul
    partsize := int64(0)
    var totalsize int64
    for i, p := range parts {
        partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *p.PartNumber))
        partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", p.PartNumber))
        fi, err := os.Lstat(partPath)
        if err != nil {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
@@ -351,7 +326,7 @@ func (p *Posix) CompleteMultipartUpload(_ context.Context, input *s3.CompleteMul
    defer f.cleanup()

    for _, p := range parts {
        pf, err := os.Open(filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *p.PartNumber)))
        pf, err := os.Open(filepath.Join(objdir, uploadID, fmt.Sprintf("%v", p.PartNumber)))
        if err != nil {
            return nil, fmt.Errorf("open part %v: %v", p.PartNumber, err)
        }
@@ -537,16 +512,6 @@ func mkdirAll(path string, perm os.FileMode, bucket, object string) error {
}

func (p *Posix) AbortMultipartUpload(_ context.Context, mpu *s3.AbortMultipartUploadInput) error {
    if mpu.Bucket == nil {
        return s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if mpu.Key == nil {
        return s3err.GetAPIError(s3err.ErrNoSuchKey)
    }
    if mpu.UploadId == nil {
        return s3err.GetAPIError(s3err.ErrNoSuchUpload)
    }

    bucket := *mpu.Bucket
    object := *mpu.Key
    uploadID := *mpu.UploadId
@@ -577,12 +542,6 @@ func (p *Posix) AbortMultipartUpload(_ context.Context, mpu *s3.AbortMultipartUp
}

func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
    var lmu s3response.ListMultipartUploadsResult

    if mpu.Bucket == nil {
        return lmu, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }

    bucket := *mpu.Bucket
    var delimiter string
    if mpu.Delimiter != nil {
@@ -593,6 +552,8 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl
        prefix = *mpu.Prefix
    }

    var lmu s3response.ListMultipartUploadsResult

    _, err := os.Stat(bucket)
    if errors.Is(err, fs.ErrNotExist) {
        return lmu, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -665,16 +626,12 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl
        }
    }

    maxUploads := 0
    if mpu.MaxUploads != nil {
        maxUploads = int(*mpu.MaxUploads)
    }
    if (uploadIDMarker != "" && !uploadIdMarkerFound) || (keyMarker != "" && keyMarkerInd == -1) {
        return s3response.ListMultipartUploadsResult{
            Bucket: bucket,
            Delimiter: delimiter,
            KeyMarker: keyMarker,
            MaxUploads: maxUploads,
            MaxUploads: int(mpu.MaxUploads),
            Prefix: prefix,
            UploadIDMarker: uploadIDMarker,
            Uploads: []s3response.Upload{},
@@ -686,18 +643,18 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl
    })

    for i := keyMarkerInd + 1; i < len(uploads); i++ {
        if maxUploads == 0 {
        if mpu.MaxUploads == 0 {
            break
        }
        if keyMarker != "" && uploadIDMarker != "" && uploads[i].UploadID < uploadIDMarker {
            continue
        }
        if i != len(uploads)-1 && len(resultUpds) == maxUploads {
        if i != len(uploads)-1 && len(resultUpds) == int(mpu.MaxUploads) {
            return s3response.ListMultipartUploadsResult{
                Bucket: bucket,
                Delimiter: delimiter,
                KeyMarker: keyMarker,
                MaxUploads: maxUploads,
                MaxUploads: int(mpu.MaxUploads),
                NextKeyMarker: resultUpds[i-1].Key,
                NextUploadIDMarker: resultUpds[i-1].UploadID,
                IsTruncated: true,
@@ -714,7 +671,7 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl
        Bucket: bucket,
        Delimiter: delimiter,
        KeyMarker: keyMarker,
        MaxUploads: maxUploads,
        MaxUploads: int(mpu.MaxUploads),
        Prefix: prefix,
        UploadIDMarker: uploadIDMarker,
        Uploads: resultUpds,
@@ -722,29 +679,13 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl
}

func (p *Posix) ListParts(_ context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
    var lpr s3response.ListPartsResult

    if input.Bucket == nil {
        return lpr, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if input.Key == nil {
        return lpr, s3err.GetAPIError(s3err.ErrNoSuchKey)
    }
    if input.UploadId == nil {
        return lpr, s3err.GetAPIError(s3err.ErrNoSuchUpload)
    }

    bucket := *input.Bucket
    object := *input.Key
    uploadID := *input.UploadId
    stringMarker := ""
    if input.PartNumberMarker != nil {
        stringMarker = *input.PartNumberMarker
    }
    maxParts := 0
    if input.MaxParts != nil {
        maxParts = int(*input.MaxParts)
    }
    stringMarker := *input.PartNumberMarker
    maxParts := int(input.MaxParts)

    var lpr s3response.ListPartsResult

    var partNumberMarker int
    if stringMarker != "" {
@@ -836,21 +777,11 @@ func (p *Posix) ListParts(_ context.Context, input *s3.ListPartsInput) (s3respon
}

func (p *Posix) UploadPart(_ context.Context, input *s3.UploadPartInput) (string, error) {
    if input.Bucket == nil {
        return "", s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if input.Key == nil {
        return "", s3err.GetAPIError(s3err.ErrNoSuchKey)
    }

    bucket := *input.Bucket
    object := *input.Key
    uploadID := *input.UploadId
    part := input.PartNumber
    length := int64(0)
    if input.ContentLength != nil {
        length = *input.ContentLength
    }
    length := input.ContentLength
    r := input.Body

    _, err := os.Stat(bucket)
@@ -872,7 +803,7 @@ func (p *Posix) UploadPart(_ context.Context, input *s3.UploadPartInput) (string
        return "", fmt.Errorf("stat uploadid: %w", err)
    }

    partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part))
    partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", part))

    f, err := openTmpFile(filepath.Join(bucket, objdir),
        bucket, partPath, length)
@@ -902,13 +833,6 @@ func (p *Posix) UploadPart(_ context.Context, input *s3.UploadPartInput) (string
}

func (p *Posix) UploadPartCopy(_ context.Context, upi *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
    if upi.Bucket == nil {
        return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if upi.Key == nil {
        return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
    }

    _, err := os.Stat(*upi.Bucket)
    if errors.Is(err, fs.ErrNotExist) {
        return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -928,7 +852,7 @@ func (p *Posix) UploadPartCopy(_ context.Context, upi *s3.UploadPartCopyInput) (
        return s3response.CopyObjectResult{}, fmt.Errorf("stat uploadid: %w", err)
    }

    partPath := filepath.Join(objdir, *upi.UploadId, fmt.Sprintf("%v", *upi.PartNumber))
    partPath := filepath.Join(objdir, *upi.UploadId, fmt.Sprintf("%v", upi.PartNumber))

    substrs := strings.SplitN(*upi.CopySource, "/", 2)
    if len(substrs) != 2 {
@@ -1014,13 +938,6 @@ func (p *Posix) UploadPartCopy(_ context.Context, upi *s3.UploadPartCopyInput) (
}

func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, error) {
    if po.Bucket == nil {
        return "", s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if po.Key == nil {
        return "", s3err.GetAPIError(s3err.ErrNoSuchKey)
    }

    tagsStr := getString(po.Tagging)
    tags := make(map[string]string)
    _, err := os.Stat(*po.Bucket)
@@ -1047,13 +964,9 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e

    name := filepath.Join(*po.Bucket, *po.Key)

    contentLength := int64(0)
    if po.ContentLength != nil {
        contentLength = *po.ContentLength
    }
    if strings.HasSuffix(*po.Key, "/") {
        // object is directory
        if contentLength != 0 {
        if po.ContentLength != 0 {
            // posix directories can't contain data, send error
            // if request has a data payload associated with a
            // directory object
@@ -1082,7 +995,7 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e
    }

    f, err := openTmpFile(filepath.Join(*po.Bucket, metaTmpDir),
        *po.Bucket, *po.Key, contentLength)
        *po.Bucket, *po.Key, po.ContentLength)
    if err != nil {
        return "", fmt.Errorf("open temp file: %w", err)
    }
@@ -1126,13 +1039,6 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e
}

func (p *Posix) DeleteObject(_ context.Context, input *s3.DeleteObjectInput) error {
    if input.Bucket == nil {
        return s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if input.Key == nil {
        return s3err.GetAPIError(s3err.ErrNoSuchKey)
    }

    bucket := *input.Bucket
    object := *input.Key

@@ -1225,16 +1131,6 @@ func (p *Posix) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput)
}

func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
    if input.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if input.Key == nil {
        return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
    }
    if input.Range == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidRange)
    }

    bucket := *input.Bucket
    _, err := os.Stat(bucket)
    if errors.Is(err, fs.ErrNotExist) {
@@ -1296,17 +1192,15 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
        return nil, fmt.Errorf("get object tags: %w", err)
    }

    tagCount := int32(len(tags))

    return &s3.GetObjectOutput{
        AcceptRanges: &acceptRange,
        ContentLength: &length,
        ContentLength: length,
        ContentEncoding: &contentEncoding,
        ContentType: &contentType,
        ETag: &etag,
        LastModified: backend.GetTimePtr(fi.ModTime()),
        Metadata: userMetaData,
        TagCount: &tagCount,
        TagCount: int32(len(tags)),
        ContentRange: &contentRange,
    }, nil
}
@@ -1341,28 +1235,20 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
        return nil, fmt.Errorf("get object tags: %w", err)
    }

    tagCount := int32(len(tags))

    return &s3.GetObjectOutput{
        AcceptRanges: &acceptRange,
        ContentLength: &length,
        ContentLength: length,
        ContentEncoding: &contentEncoding,
        ContentType: &contentType,
        ETag: &etag,
        LastModified: backend.GetTimePtr(fi.ModTime()),
        Metadata: userMetaData,
        TagCount: &tagCount,
        TagCount: int32(len(tags)),
        ContentRange: &contentRange,
    }, nil
}

func (p *Posix) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
    if input.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if input.Key == nil {
        return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
    }
    bucket := *input.Bucket
    object := *input.Key

@@ -1392,10 +1278,8 @@ func (p *Posix) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.He
        etag = ""
    }

    size := fi.Size()

    return &s3.HeadObjectOutput{
        ContentLength: &size,
        ContentLength: fi.Size(),
        ContentType: &contentType,
        ContentEncoding: &contentEncoding,
        ETag: &etag,
@@ -1405,18 +1289,6 @@ func (p *Posix) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.He
}

func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
    if input.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if input.Key == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
    }
    if input.CopySource == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidCopySource)
    }
    if input.ExpectedBucketOwner == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
    }
    srcBucket, srcObject, ok := strings.Cut(*input.CopySource, "/")
    if !ok {
        return nil, s3err.GetAPIError(s3err.ErrInvalidCopySource)
@@ -1489,16 +1361,7 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
        }
    }

    contentLength := fInfo.Size()

    etag, err := p.PutObject(ctx,
        &s3.PutObjectInput{
            Bucket: &dstBucket,
            Key: &dstObject,
            Body: f,
            ContentLength: &contentLength,
            Metadata: meta,
        })
    etag, err := p.PutObject(ctx, &s3.PutObjectInput{Bucket: &dstBucket, Key: &dstObject, Body: f, ContentLength: fInfo.Size(), Metadata: meta})
    if err != nil {
        return nil, err
    }
@@ -1517,26 +1380,11 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
}

func (p *Posix) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
    if input.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    bucket := *input.Bucket
    prefix := ""
    if input.Prefix != nil {
        prefix = *input.Prefix
    }
    marker := ""
    if input.Marker != nil {
        marker = *input.Marker
    }
    delim := ""
    if input.Delimiter != nil {
        delim = *input.Delimiter
    }
    maxkeys := int32(0)
    if input.MaxKeys != nil {
        maxkeys = *input.MaxKeys
    }
    prefix := *input.Prefix
    marker := *input.Marker
    delim := *input.Delimiter
    maxkeys := input.MaxKeys

    _, err := os.Stat(bucket)
    if errors.Is(err, fs.ErrNotExist) {
@@ -1557,9 +1405,9 @@ func (p *Posix) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.
        CommonPrefixes: results.CommonPrefixes,
        Contents: results.Objects,
        Delimiter: &delim,
        IsTruncated: &results.Truncated,
        IsTruncated: results.Truncated,
        Marker: &marker,
        MaxKeys: &maxkeys,
        MaxKeys: maxkeys,
        Name: &bucket,
        NextMarker: &results.NextMarker,
        Prefix: &prefix,
@@ -1618,46 +1466,21 @@ func fileToObj(bucket string) backend.GetObjFunc {
        return types.Object{}, fmt.Errorf("get fileinfo: %w", err)
    }

    size := fi.Size()

    return types.Object{
        ETag: &etag,
        Key: &path,
        LastModified: backend.GetTimePtr(fi.ModTime()),
        Size: &size,
        Size: fi.Size(),
    }, nil
    }
}

func (p *Posix) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
    if input.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    bucket := *input.Bucket
    prefix := ""
    if input.Prefix != nil {
        prefix = *input.Prefix
    }
    marker := ""
    if input.ContinuationToken != nil {
        if input.StartAfter != nil {
            if *input.StartAfter > *input.ContinuationToken {
                marker = *input.StartAfter
            } else {
                marker = *input.ContinuationToken
            }
        } else {
            marker = *input.ContinuationToken
        }
    }
    delim := ""
    if input.Delimiter != nil {
        delim = *input.Delimiter
    }
    maxkeys := int32(0)
    if input.MaxKeys != nil {
        maxkeys = *input.MaxKeys
    }
    prefix := *input.Prefix
    marker := *input.ContinuationToken
    delim := *input.Delimiter
    maxkeys := input.MaxKeys

    _, err := os.Stat(bucket)
    if errors.Is(err, fs.ErrNotExist) {
@@ -1668,25 +1491,23 @@ func (p *Posix) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (
    }

    fileSystem := os.DirFS(bucket)
    results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
    results, err := backend.Walk(fileSystem, prefix, delim, marker, int32(maxkeys),
        fileToObj(bucket), []string{metaTmpDir})
    if err != nil {
        return nil, fmt.Errorf("walk %v: %w", bucket, err)
    }

    count := int32(len(results.Objects))

    return &s3.ListObjectsV2Output{
        CommonPrefixes: results.CommonPrefixes,
        Contents: results.Objects,
        Delimiter: &delim,
        IsTruncated: &results.Truncated,
        IsTruncated: results.Truncated,
        ContinuationToken: &marker,
        MaxKeys: &maxkeys,
        MaxKeys: int32(maxkeys),
        Name: &bucket,
        NextContinuationToken: &results.NextMarker,
        Prefix: &prefix,
        KeyCount: &count,
        KeyCount: int32(len(results.Objects)),
    }, nil
}

@@ -1707,9 +1528,6 @@ func (p *Posix) PutBucketAcl(_ context.Context, bucket string, data []byte) erro
}

func (p *Posix) GetBucketAcl(_ context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
    if input.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    _, err := os.Stat(*input.Bucket)
    if errors.Is(err, fs.ErrNotExist) {
        return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1728,57 +1546,6 @@ func (p *Posix) GetBucketAcl(_ context.Context, input *s3.GetBucketAclInput) ([]
    return b, nil
}

func (p *Posix) PutBucketTagging(_ context.Context, bucket string, tags map[string]string) error {
    _, err := os.Stat(bucket)
    if errors.Is(err, fs.ErrNotExist) {
        return s3err.GetAPIError(s3err.ErrNoSuchBucket)
    }
    if err != nil {
        return fmt.Errorf("stat bucket: %w", err)
    }

    if tags == nil {
        err = xattr.Remove(bucket, "user."+tagHdr)
        if err != nil {
            return fmt.Errorf("remove tags: %w", err)
        }
        return nil
    }

    b, err := json.Marshal(tags)
    if err != nil {
        return fmt.Errorf("marshal tags: %w", err)
    }

    err = xattr.Set(bucket, "user."+tagHdr, b)
    if err != nil {
        return fmt.Errorf("set tags: %w", err)
    }

    return nil
}

func (p *Posix) GetBucketTagging(_ context.Context, bucket string) (map[string]string, error) {
    _, err := os.Stat(bucket)
    if errors.Is(err, fs.ErrNotExist) {
        return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
    }
    if err != nil {
        return nil, fmt.Errorf("stat bucket: %w", err)
    }

    tags, err := p.getXattrTags(bucket, "")
    if err != nil {
        return nil, err
    }

    return tags, nil
}

func (p *Posix) DeleteBucketTagging(ctx context.Context, bucket string) error {
    return p.PutBucketTagging(ctx, bucket, nil)
}

func (p *Posix) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
    _, err := os.Stat(bucket)
    if errors.Is(err, fs.ErrNotExist) {

@@ -17,6 +17,7 @@ package s3proxy
import (
    "context"
    "crypto/tls"
    "fmt"
    "net/http"

    "github.com/aws/aws-sdk-go-v2/aws"
@@ -25,10 +26,16 @@ import (
    "github.com/aws/aws-sdk-go-v2/credentials"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/smithy-go/middleware"
    "github.com/versity/versitygw/auth"
)

func (s *S3Proxy) getClientWithCtx(ctx context.Context) (*s3.Client, error) {
    cfg, err := s.getConfig(ctx, s.access, s.secret)
func (s *S3be) getClientFromCtx(ctx context.Context) (*s3.Client, error) {
    acct, ok := ctx.Value("account").(auth.Account)
    if !ok {
        return nil, fmt.Errorf("invalid account in context")
    }

    cfg, err := s.getConfig(ctx, acct.Access, acct.Secret)
    if err != nil {
        return nil, err
    }
@@ -36,7 +43,7 @@ func (s *S3Proxy) getClientWithCtx(ctx context.Context) (*s3.Client, error) {
    return s3.NewFromConfig(cfg), nil
}
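The client is now built per request from whatever account authentication placed on the context, instead of from gateway-wide credentials. A minimal sketch of the producing side, under the assumption that an auth middleware stores an auth.Account under the plain string key "account" exactly as the type assertion above expects (the helper is illustrative; a typed context key would avoid collisions but would have to match the lookup):

// withAccount (hypothetical) is what the auth layer would call to
// make per-request credentials visible to getClientFromCtx.
func withAccount(ctx context.Context, acct auth.Account) context.Context {
    return context.WithValue(ctx, "account", acct)
}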
func (s *S3Proxy) getConfig(ctx context.Context, access, secret string) (aws.Config, error) {
func (s *S3be) getConfig(ctx context.Context, access, secret string) (aws.Config, error) {
    creds := credentials.NewStaticCredentialsProvider(access, secret, "")

    tr := &http.Transport{
@@ -69,7 +76,7 @@ func (s *S3Proxy) getConfig(ctx context.Context, access, secret string) (aws.Con
}

// ResolveEndpoint is used for on prem or non-aws endpoints
func (s *S3Proxy) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
func (s *S3be) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
    return aws.Endpoint{
        PartitionID: "aws",
        URL: s.endpoint,

@@ -16,37 +16,21 @@ package s3proxy

import (
    "context"
    "crypto/sha256"
    "encoding/base64"
    "encoding/hex"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"
    "strconv"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
    awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
    "github.com/aws/smithy-go"
    "github.com/versity/versitygw/auth"
    "github.com/versity/versitygw/backend"
    "github.com/versity/versitygw/s3err"
    "github.com/versity/versitygw/s3response"
)

const aclKey string = "versitygwAcl"

type S3Proxy struct {
type S3be struct {
    backend.BackendUnsupported

    client *s3.Client

    access string
    secret string
    endpoint string
    awsRegion string
    disableChecksum bool
@@ -54,28 +38,25 @@ type S3Proxy struct {
    debug bool
}

func New(access, secret, endpoint, region string, disableChecksum, sslSkipVerify, debug bool) (*S3Proxy, error) {
    s := &S3Proxy{
        access: access,
        secret: secret,
func New(endpoint, region string, disableChecksum, sslSkipVerify, debug bool) *S3be {
    return &S3be{
        endpoint: endpoint,
        awsRegion: region,
        disableChecksum: disableChecksum,
        sslSkipVerify: sslSkipVerify,
        debug: debug,
    }
    client, err := s.getClientWithCtx(context.Background())
    if err != nil {
        return nil, err
    }
    s.client = client
    return s, nil
}
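Note that the constructor loses both the credential pair and its error return: with no cached client to dial at startup, New is now infallible and every operation resolves a client lazily from the request. A hedged construction sketch follows; the endpoint and flag values are illustrative:

// newProxyBackend (hypothetical gateway wiring): only connection-level
// settings are fixed at startup; credentials arrive with each
// request's context.
func newProxyBackend() *S3be {
    return New("https://upstream.example.com:9000", "us-east-1",
        false, // disableChecksum
        false, // sslSkipVerify
        false, // debug
    )
}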
func (s *S3Proxy) ListBuckets(ctx context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
    output, err := s.client.ListBuckets(ctx, &s3.ListBucketsInput{})
func (s *S3be) ListBuckets(ctx context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return s3response.ListAllMyBucketsResult{}, handleError(err)
        return s3response.ListAllMyBucketsResult{}, err
    }

    output, err := client.ListBuckets(ctx, &s3.ListBucketsInput{})
    if err != nil {
        return s3response.ListAllMyBucketsResult{}, err
    }

    var buckets []s3response.ListAllMyBucketsEntry
@@ -88,7 +69,8 @@ func (s *S3Proxy) ListBuckets(ctx context.Context, owner string, isAdmin bool) (

    return s3response.ListAllMyBucketsResult{
        Owner: s3response.CanonicalUser{
            ID: *output.Owner.ID,
            ID:          *output.Owner.ID,
            DisplayName: *output.Owner.DisplayName,
        },
        Buckets: s3response.ListAllMyBucketsList{
            Bucket: buckets,
@@ -96,56 +78,76 @@ func (s *S3Proxy) ListBuckets(ctx context.Context, owner string, isAdmin bool) (
    }, nil
}

func (s *S3Proxy) HeadBucket(ctx context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
    out, err := s.client.HeadBucket(ctx, input)
    return out, handleError(err)
}

func (s *S3Proxy) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, acl []byte) error {
    _, err := s.client.CreateBucket(ctx, input)
func (s *S3be) HeadBucket(ctx context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return handleError(err)
        return nil, err
    }

    var tagSet []types.Tag
    tagSet = append(tagSet, types.Tag{
        Key: backend.GetStringPtr(aclKey),
        Value: backend.GetStringPtr(base64Encode(acl)),
    })

    _, err = s.client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
        Bucket: input.Bucket,
        Tagging: &types.Tagging{
            TagSet: tagSet,
        },
    })
    return handleError(err)
    return client.HeadBucket(ctx, input)
}

func (s *S3Proxy) DeleteBucket(ctx context.Context, input *s3.DeleteBucketInput) error {
    _, err := s.client.DeleteBucket(ctx, input)
    return handleError(err)
}

func (s *S3Proxy) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
    out, err := s.client.CreateMultipartUpload(ctx, input)
    return out, handleError(err)
}

func (s *S3Proxy) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
    out, err := s.client.CompleteMultipartUpload(ctx, input)
    return out, handleError(err)
}

func (s *S3Proxy) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
    _, err := s.client.AbortMultipartUpload(ctx, input)
    return handleError(err)
}

func (s *S3Proxy) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
    output, err := s.client.ListMultipartUploads(ctx, input)
func (s *S3be) CreateBucket(ctx context.Context, input *s3.CreateBucketInput) error {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return s3response.ListMultipartUploadsResult{}, handleError(err)
        return err
    }

    _, err = client.CreateBucket(ctx, input)
    return err
}

func (s *S3be) DeleteBucket(ctx context.Context, input *s3.DeleteBucketInput) error {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return err
    }

    _, err = client.DeleteBucket(ctx, input)
    return err
}

func (s *S3be) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return nil, err
    }

    return client.CreateMultipartUpload(ctx, input)
}

func (s *S3be) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return nil, err
    }

    return client.CompleteMultipartUpload(ctx, input)
}

func (s *S3be) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return err
    }

    _, err = client.AbortMultipartUpload(ctx, input)
    return err
}

const (
    iso8601Format = "20060102T150405Z"
)

func (s *S3be) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return s3response.ListMultipartUploadsResult{}, err
    }

    output, err := client.ListMultipartUploads(ctx, input)
    if err != nil {
        return s3response.ListMultipartUploadsResult{}, err
    }

    var uploads []s3response.Upload
@@ -162,7 +164,7 @@ func (s *S3Proxy) ListMultipartUploads(ctx context.Context, input *s3.ListMultip
            DisplayName: *u.Owner.DisplayName,
        },
        StorageClass: string(u.StorageClass),
        Initiated: u.Initiated.Format(backend.RFC3339TimeFormat),
        Initiated: u.Initiated.Format(iso8601Format),
    })
}

@@ -182,26 +184,31 @@ func (s *S3Proxy) ListMultipartUploads(ctx context.Context, input *s3.ListMultip
        Delimiter: *output.Delimiter,
        Prefix: *output.Prefix,
        EncodingType: string(output.EncodingType),
        MaxUploads: int(*output.MaxUploads),
        IsTruncated: *output.IsTruncated,
        MaxUploads: int(output.MaxUploads),
        IsTruncated: output.IsTruncated,
        Uploads: uploads,
        CommonPrefixes: cps,
    }, nil
}

func (s *S3Proxy) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
    output, err := s.client.ListParts(ctx, input)
func (s *S3be) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return s3response.ListPartsResult{}, handleError(err)
        return s3response.ListPartsResult{}, err
    }

    output, err := client.ListParts(ctx, input)
    if err != nil {
        return s3response.ListPartsResult{}, err
    }

    var parts []s3response.Part
    for _, p := range output.Parts {
        parts = append(parts, s3response.Part{
            PartNumber: int(*p.PartNumber),
            LastModified: p.LastModified.Format(backend.RFC3339TimeFormat),
            PartNumber: int(p.PartNumber),
            LastModified: p.LastModified.Format(iso8601Format),
            ETag: *p.ETag,
            Size: *p.Size,
            Size: p.Size,
        })
    }
    pnm, err := strconv.Atoi(*output.PartNumberMarker)
@@ -231,29 +238,35 @@ func (s *S3Proxy) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3re
        StorageClass: string(output.StorageClass),
        PartNumberMarker: pnm,
        NextPartNumberMarker: npmn,
        MaxParts: int(*output.MaxParts),
        IsTruncated: *output.IsTruncated,
        MaxParts: int(output.MaxParts),
        IsTruncated: output.IsTruncated,
        Parts: parts,
    }, nil
}

func (s *S3Proxy) UploadPart(ctx context.Context, input *s3.UploadPartInput) (etag string, err error) {
    // streaming backend is not seekable,
    // use unsigned payload for streaming ops
    output, err := s.client.UploadPart(ctx, input, s3.WithAPIOptions(
        v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware,
    ))
func (s *S3be) UploadPart(ctx context.Context, input *s3.UploadPartInput) (etag string, err error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return "", handleError(err)
        return "", err
    }

    output, err := client.UploadPart(ctx, input)
    if err != nil {
        return "", err
    }

    return *output.ETag, nil
}

func (s *S3Proxy) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
    output, err := s.client.UploadPartCopy(ctx, input)
func (s *S3be) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return s3response.CopyObjectResult{}, handleError(err)
        return s3response.CopyObjectResult{}, err
    }

    output, err := client.UploadPartCopy(ctx, input)
    if err != nil {
        return s3response.CopyObjectResult{}, err
    }

    return s3response.CopyObjectResult{
@@ -262,28 +275,38 @@ func (s *S3Proxy) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyIn
    }, nil
}

func (s *S3Proxy) PutObject(ctx context.Context, input *s3.PutObjectInput) (string, error) {
    // streaming backend is not seekable,
    // use unsigned payload for streaming ops
    output, err := s.client.PutObject(ctx, input, s3.WithAPIOptions(
        v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware,
    ))
func (s *S3be) PutObject(ctx context.Context, input *s3.PutObjectInput) (string, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return "", handleError(err)
        return "", err
    }

    output, err := client.PutObject(ctx, input)
    if err != nil {
        return "", err
    }

    return *output.ETag, nil
}

func (s *S3Proxy) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
    out, err := s.client.HeadObject(ctx, input)
    return out, handleError(err)
func (s *S3be) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return nil, err
    }

    return client.HeadObject(ctx, input)
}

func (s *S3Proxy) GetObject(ctx context.Context, input *s3.GetObjectInput, w io.Writer) (*s3.GetObjectOutput, error) {
    output, err := s.client.GetObject(ctx, input)
func (s *S3be) GetObject(ctx context.Context, input *s3.GetObjectInput, w io.Writer) (*s3.GetObjectOutput, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return nil, handleError(err)
        return nil, err
    }

    output, err := client.GetObject(ctx, input)
    if err != nil {
        return nil, err
    }
    defer output.Body.Close()

@@ -295,39 +318,61 @@ func (s *S3Proxy) GetObject(ctx context.Context, input *s3.GetObjectInput, w io.
    return output, nil
}

func (s *S3Proxy) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
    out, err := s.client.GetObjectAttributes(ctx, input)
    return out, handleError(err)
}

func (s *S3Proxy) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
    out, err := s.client.CopyObject(ctx, input)
    return out, handleError(err)
}

func (s *S3Proxy) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
    out, err := s.client.ListObjects(ctx, input)
    return out, handleError(err)
}

func (s *S3Proxy) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
    out, err := s.client.ListObjectsV2(ctx, input)
    return out, handleError(err)
}

func (s *S3Proxy) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) error {
    _, err := s.client.DeleteObject(ctx, input)
    return handleError(err)
}

func (s *S3Proxy) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error) {
    if len(input.Delete.Objects) == 0 {
        input.Delete.Objects = []types.ObjectIdentifier{}
func (s *S3be) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return nil, err
    }

    output, err := s.client.DeleteObjects(ctx, input)
    return client.GetObjectAttributes(ctx, input)
}

func (s *S3be) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return s3response.DeleteObjectsResult{}, handleError(err)
        return nil, err
    }

    return client.CopyObject(ctx, input)
}

func (s *S3be) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return nil, err
    }

    return client.ListObjects(ctx, input)
}

func (s *S3be) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return nil, err
    }

    return client.ListObjectsV2(ctx, input)
}

func (s *S3be) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) error {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return err
    }

    _, err = client.DeleteObject(ctx, input)
    return err
}

func (s *S3be) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return s3response.DeleteObjectsResult{}, err
    }

    output, err := client.DeleteObjects(ctx, input)
    if err != nil {
        return s3response.DeleteObjectsResult{}, err
    }

    return s3response.DeleteObjectsResult{
@@ -336,63 +381,71 @@ func (s *S3Proxy) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInpu
    }, nil
}

func (s *S3Proxy) GetBucketAcl(ctx context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
    tagout, err := s.client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
        Bucket: input.Bucket,
    })
func (s *S3be) GetBucketAcl(ctx context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return nil, handleError(err)
        return nil, err
    }

    for _, tag := range tagout.TagSet {
        if *tag.Key == aclKey {
            acl, err := base64Decode(*tag.Value)
            if err != nil {
                return nil, handleError(err)
            }
            return acl, nil
        }
    }

    return []byte{}, nil
}

func (s *S3Proxy) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
    tagout, err := s.client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
        Bucket: &bucket,
    })
    output, err := client.GetBucketAcl(ctx, input)
    if err != nil {
        return handleError(err)
        return nil, err
    }

    var found bool
    for i, tag := range tagout.TagSet {
        if *tag.Key == aclKey {
            tagout.TagSet[i] = types.Tag{
                Key: backend.GetStringPtr(aclKey),
                Value: backend.GetStringPtr(base64Encode(data)),
            }
            found = true
            break
        }
    }
    if !found {
        tagout.TagSet = append(tagout.TagSet, types.Tag{
            Key: backend.GetStringPtr(aclKey),
            Value: backend.GetStringPtr(base64Encode(data)),
    var acl auth.ACL

    acl.Owner = *output.Owner.ID
    for _, el := range output.Grants {
        acl.Grantees = append(acl.Grantees, auth.Grantee{
            Permission: el.Permission,
            Access: *el.Grantee.ID,
        })
    }

    _, err = s.client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
        Bucket: &bucket,
        Tagging: &types.Tagging{
            TagSet: tagout.TagSet,
        },
    })
    return handleError(err)
    return json.Marshal(acl)
}

func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
func (s S3be) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return err
    }

    acl, err := auth.ParseACL(data)
    if err != nil {
        return err
    }

    input := &s3.PutBucketAclInput{
        Bucket: &bucket,
        ACL: acl.ACL,
        AccessControlPolicy: &types.AccessControlPolicy{
            Owner: &types.Owner{
                ID: &acl.Owner,
            },
        },
    }

    for _, el := range acl.Grantees {
        input.AccessControlPolicy.Grants = append(input.AccessControlPolicy.Grants, types.Grant{
            Permission: el.Permission,
            Grantee: &types.Grantee{
                ID: &el.Access,
                Type: types.TypeCanonicalUser,
            },
        })
    }

    _, err = client.PutBucketAcl(ctx, input)
    return err
}

func (s *S3be) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return err
    }

    tagging := &types.Tagging{
        TagSet: []types.Tag{},
    }
@@ -403,21 +456,26 @@ func (s *S3Proxy) PutObjectTagging(ctx context.Context, bucket, object string, t
        })
    }

    _, err := s.client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
    _, err = client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
        Bucket: &bucket,
        Key: &object,
        Tagging: tagging,
    })
    return handleError(err)
    return err
}

func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
    output, err := s.client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
func (s *S3be) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return nil, err
    }

    output, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
        Bucket: &bucket,
        Key: &object,
    })
    if err != nil {
        return nil, handleError(err)
        return nil, err
    }

    tags := make(map[string]string)
@@ -428,118 +486,15 @@ func (s *S3Proxy) GetObjectTagging(ctx context.Context, bucket, object string) (
    return tags, nil
}

func (s *S3Proxy) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
    _, err := s.client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
func (s *S3be) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
    client, err := s.getClientFromCtx(ctx)
    if err != nil {
        return err
    }

    _, err = client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
        Bucket: &bucket,
        Key: &object,
    })
    return handleError(err)
}

func (s *S3Proxy) ChangeBucketOwner(ctx context.Context, bucket, newOwner string) error {
    req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/change-bucket-owner/?bucket=%v&owner=%v", s.endpoint, bucket, newOwner), nil)
    if err != nil {
        return fmt.Errorf("failed to send the request: %w", err)
    }

    signer := v4.NewSigner()

    hashedPayload := sha256.Sum256([]byte{})
    hexPayload := hex.EncodeToString(hashedPayload[:])

    req.Header.Set("X-Amz-Content-Sha256", hexPayload)

    signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: s.access, SecretAccessKey: s.secret}, req, hexPayload, "s3", s.awsRegion, time.Now())
    if signErr != nil {
        return fmt.Errorf("failed to sign the request: %w", err)
    }

    client := http.Client{}

    resp, err := client.Do(req)
    if err != nil {
        return fmt.Errorf("failed to send the request: %w", err)
    }

    if resp.StatusCode > 300 {
        body, err := io.ReadAll(resp.Body)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        return fmt.Errorf(string(body))
    }

    return nil
}

func (s *S3Proxy) ListBucketsAndOwners(ctx context.Context) ([]s3response.Bucket, error) {
    req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/list-buckets", s.endpoint), nil)
    if err != nil {
        return []s3response.Bucket{}, fmt.Errorf("failed to send the request: %w", err)
    }

    signer := v4.NewSigner()

    hashedPayload := sha256.Sum256([]byte{})
    hexPayload := hex.EncodeToString(hashedPayload[:])

    req.Header.Set("X-Amz-Content-Sha256", hexPayload)

    signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: s.access, SecretAccessKey: s.secret}, req, hexPayload, "s3", s.awsRegion, time.Now())
    if signErr != nil {
        return []s3response.Bucket{}, fmt.Errorf("failed to sign the request: %w", err)
    }

    client := http.Client{}

    resp, err := client.Do(req)
    if err != nil {
        return []s3response.Bucket{}, fmt.Errorf("failed to send the request: %w", err)
    }

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return []s3response.Bucket{}, err
    }
    defer resp.Body.Close()

    var buckets []s3response.Bucket
    if err := json.Unmarshal(body, &buckets); err != nil {
        return []s3response.Bucket{}, err
    }

    return buckets, nil
}

func handleError(err error) error {
    if err == nil {
        return nil
    }

    var ae smithy.APIError
    if errors.As(err, &ae) {
        apiErr := s3err.APIError{
            Code: ae.ErrorCode(),
            Description: ae.ErrorMessage(),
        }
        var re *awshttp.ResponseError
        if errors.As(err, &re) {
            apiErr.HTTPStatusCode = re.Response.StatusCode
        }
        return apiErr
    }
    return err
}
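handleError is the seam that kept upstream failures transparent: a smithy API error from the AWS SDK is repackaged as the gateway's own s3err.APIError, HTTP status code included, so callers see the backend's real S3 error instead of a generic one. A small sketch of the intended call pattern; the wrapped operation is illustrative:

// headBucketNormalized (hypothetical) shows the pattern: every SDK
// error is normalized by handleError before leaving the backend;
// a nil error passes through unchanged.
func headBucketNormalized(ctx context.Context, c *s3.Client, in *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
    out, err := c.HeadBucket(ctx, in)
    return out, handleError(err)
}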
func base64Encode(input []byte) string {
|
||||
return base64.StdEncoding.EncodeToString(input)
|
||||
}
|
||||
|
||||
func base64Decode(encoded string) ([]byte, error) {
|
||||
decoded, err := base64.StdEncoding.DecodeString(encoded)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return decoded, nil
|
||||
}
|
||||
|
||||
@@ -415,10 +415,8 @@ func (s *ScoutFS) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.
|
||||
}
|
||||
}
|
||||
|
||||
contentLength := fi.Size()
|
||||
|
||||
return &s3.HeadObjectOutput{
|
||||
ContentLength: &contentLength,
|
||||
ContentLength: fi.Size(),
|
||||
ContentType: &contentType,
|
||||
ContentEncoding: &contentEncoding,
|
||||
ETag: &etag,
|
||||
@@ -509,17 +507,15 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer
|
||||
return nil, fmt.Errorf("get object tags: %w", err)
|
||||
}
|
||||
|
||||
tagCount := int32(len(tags))
|
||||
|
||||
return &s3.GetObjectOutput{
|
||||
AcceptRanges: &acceptRange,
|
||||
ContentLength: &length,
|
||||
ContentLength: length,
|
||||
ContentEncoding: &contentEncoding,
|
||||
ContentType: &contentType,
|
||||
ETag: &etag,
|
||||
LastModified: backend.GetTimePtr(fi.ModTime()),
|
||||
Metadata: userMetaData,
|
||||
TagCount: &tagCount,
|
||||
TagCount: int32(len(tags)),
|
||||
StorageClass: types.StorageClassStandard,
|
||||
}, nil
|
||||
}
|
||||
@@ -546,26 +542,11 @@ func (s *ScoutFS) getXattrTags(bucket, object string) (map[string]string, error)
|
||||
}
|
func (s *ScoutFS) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
    if input.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    bucket := *input.Bucket
    prefix := ""
    if input.Prefix != nil {
        prefix = *input.Prefix
    }
    marker := ""
    if input.Marker != nil {
        marker = *input.Marker
    }
    delim := ""
    if input.Delimiter != nil {
        delim = *input.Delimiter
    }
    maxkeys := int32(0)
    if input.MaxKeys != nil {
        maxkeys = *input.MaxKeys
    }
    prefix := *input.Prefix
    marker := *input.Marker
    delim := *input.Delimiter
    maxkeys := input.MaxKeys

    _, err := os.Stat(bucket)
    if errors.Is(err, fs.ErrNotExist) {
@@ -586,9 +567,9 @@ func (s *ScoutFS) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s
        CommonPrefixes: results.CommonPrefixes,
        Contents: results.Objects,
        Delimiter: &delim,
        IsTruncated: &results.Truncated,
        IsTruncated: results.Truncated,
        Marker: &marker,
        MaxKeys: &maxkeys,
        MaxKeys: maxkeys,
        Name: &bucket,
        NextMarker: &results.NextMarker,
        Prefix: &prefix,
@@ -596,26 +577,11 @@ func (s *ScoutFS) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s
}

func (s *ScoutFS) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
    if input.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    bucket := *input.Bucket
    prefix := ""
    if input.Prefix != nil {
        prefix = *input.Prefix
    }
    marker := ""
    if input.ContinuationToken != nil {
        marker = *input.ContinuationToken
    }
    delim := ""
    if input.Delimiter != nil {
        delim = *input.Delimiter
    }
    maxkeys := int32(0)
    if input.MaxKeys != nil {
        maxkeys = *input.MaxKeys
    }
    prefix := *input.Prefix
    marker := *input.ContinuationToken
    delim := *input.Delimiter
    maxkeys := input.MaxKeys

    _, err := os.Stat(bucket)
    if errors.Is(err, fs.ErrNotExist) {
@@ -636,9 +602,9 @@ func (s *ScoutFS) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input)
        CommonPrefixes: results.CommonPrefixes,
        Contents: results.Objects,
        Delimiter: &delim,
        IsTruncated: &results.Truncated,
        IsTruncated: results.Truncated,
        ContinuationToken: &marker,
        MaxKeys: &maxkeys,
        MaxKeys: int32(maxkeys),
        Name: &bucket,
        NextContinuationToken: &results.NextMarker,
        Prefix: &prefix,
@@ -711,13 +677,11 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
            }
        }

        size := fi.Size()

        return types.Object{
            ETag: &etag,
            Key: &path,
            LastModified: backend.GetTimePtr(fi.ModTime()),
            Size: &size,
            Size: fi.Size(),
            StorageClass: sc,
        }, nil
    }

@@ -55,13 +55,11 @@ func getObj(path string, d fs.DirEntry) (types.Object, error) {
        return types.Object{}, fmt.Errorf("get fileinfo: %w", err)
    }

    size := fi.Size()

    return types.Object{
        ETag: &etag,
        Key: &path,
        LastModified: backend.GetTimePtr(fi.ModTime()),
        Size: &size,
        Size: fi.Size(),
    }, nil
}

@@ -1,74 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package main

import (
    "fmt"

    "github.com/urfave/cli/v2"
    "github.com/versity/versitygw/backend/azure"
)

var (
    azAccount, azKey, azServiceURL, azSASToken string
)

func azureCommand() *cli.Command {
    return &cli.Command{
        Name: "azure",
        Usage: "azure blob storage backend",
        Description: `direct translation from s3 objects to azure blobs`,
        Action: runAzure,
        Flags: []cli.Flag{
            &cli.StringFlag{
                Name: "account",
                Usage: "azure account name",
                EnvVars: []string{"AZ_ACCOUNT_NAME"},
                Aliases: []string{"a"},
                Destination: &azAccount,
            },
            &cli.StringFlag{
                Name: "access-key",
                Usage: "azure account key",
                EnvVars: []string{"AZ_ACCESS_KEY"},
                Aliases: []string{"k"},
                Destination: &azKey,
            },
            &cli.StringFlag{
                Name: "sas-token",
                Usage: "azure blob storage SAS token",
                EnvVars: []string{"AZ_SAS_TOKEN"},
                Aliases: []string{"st"},
                Destination: &azSASToken,
            },
            &cli.StringFlag{
                Name: "url",
                Usage: "azure service URL",
                EnvVars: []string{"AZ_ENDPOINT"},
                Aliases: []string{"u"},
                Destination: &azServiceURL,
            },
        },
    }
}

func runAzure(ctx *cli.Context) error {
    be, err := azure.New(azAccount, azKey, azServiceURL, azSASToken)
    if err != nil {
        return fmt.Errorf("init azure: %w", err)
    }

    return runGateway(ctx.Context, be)
}
@@ -1,104 +0,0 @@
package main

import (
    "context"
    "log"
    "os"
    "path/filepath"
    "sync"
    "testing"

    "github.com/versity/versitygw/backend/posix"
    "github.com/versity/versitygw/integration"
)

const (
    tdir = "tempdir"
)

var (
    wg sync.WaitGroup
)

func initEnv(dir string) {
    // both
    debug = true
    region = "us-east-1"

    // server
    rootUserAccess = "user"
    rootUserSecret = "pass"
    iamDir = dir
    port = "127.0.0.1:7070"

    // client
    awsID = "user"
    awsSecret = "pass"
    endpoint = "http://127.0.0.1:7070"
}

func initPosix(ctx context.Context) {
    path, err := os.Getwd()
    if err != nil {
        log.Fatalf("get current directory: %v", err)
    }

    tempdir := filepath.Join(path, tdir)
    initEnv(tempdir)

    err = os.RemoveAll(tempdir)
    if err != nil {
        log.Fatalf("remove temp directory: %v", err)
    }

    err = os.Mkdir(tempdir, 0755)
    if err != nil {
        log.Fatalf("make temp directory: %v", err)
    }

    be, err := posix.New(tempdir)
    if err != nil {
        log.Fatalf("init posix: %v", err)
    }

    wg.Add(1)
    go func() {
        err = runGateway(ctx, be)
        if err != nil && err != context.Canceled {
            log.Fatalf("run gateway: %v", err)
        }

        err := os.RemoveAll(tempdir)
        if err != nil {
            log.Fatalf("remove temp directory: %v", err)
        }
        wg.Done()
    }()
}

func TestIntegration(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())

    initPosix(ctx)

    opts := []integration.Option{
        integration.WithAccess(awsID),
        integration.WithSecret(awsSecret),
        integration.WithRegion(region),
        integration.WithEndpoint(endpoint),
    }
    if debug {
        opts = append(opts, integration.WithDebug())
    }

    s := integration.NewS3Conf(opts...)

    // replace below with desired test
    err := integration.HeadBucket_non_existing_bucket(s)
    if err != nil {
        t.Error(err)
    }

    cancel()
    wg.Wait()
}
@@ -20,6 +20,7 @@ import (
    "fmt"
    "log"
    "os"
    "strconv"

    "github.com/gofiber/fiber/v2"
    "github.com/urfave/cli/v2"
@@ -43,15 +44,10 @@ var (
    logWebhookURL string
    accessLog string
    debug bool
    quiet bool
    iamDir string
    ldapURL, ldapBindDN, ldapPassword string
    ldapQueryBase, ldapObjClasses string
    ldapAccessAtr, ldapSecAtr, ldapRoleAtr string
    s3IamAccess, s3IamSecret string
    s3IamRegion, s3IamBucket string
    s3IamEndpoint string
    s3IamSslNoVerify, s3IamDebug bool
    iamCacheDisable bool
    iamCacheTTL int
    iamCachePrune int
@@ -75,7 +71,6 @@ func main() {
        posixCommand(),
        scoutfsCommand(),
        s3Command(),
        azureCommand(),
        adminCommand(),
        testCommand(),
    }
@@ -178,12 +173,6 @@ func initFlags() []cli.Flag {
            Usage: "enable debug output",
            Destination: &debug,
        },
        &cli.BoolFlag{
            Name: "quiet",
            Usage: "silence stdout request logging output",
            Destination: &quiet,
            Aliases: []string{"q"},
        },
        &cli.StringFlag{
            Name: "access-log",
            Usage: "enable server access logging to specified file",
@@ -271,42 +260,6 @@ func initFlags() []cli.Flag {
            Usage: "ldap server user role attribute name",
            Destination: &ldapRoleAtr,
        },
        &cli.StringFlag{
            Name: "s3-iam-access",
            Usage: "s3 IAM access key",
            Destination: &s3IamAccess,
        },
        &cli.StringFlag{
            Name: "s3-iam-secret",
            Usage: "s3 IAM secret key",
            Destination: &s3IamSecret,
        },
        &cli.StringFlag{
            Name: "s3-iam-region",
            Usage: "s3 IAM region",
            Destination: &s3IamRegion,
            Value: "us-east-1",
        },
        &cli.StringFlag{
            Name: "s3-iam-bucket",
            Usage: "s3 IAM bucket",
            Destination: &s3IamBucket,
        },
        &cli.StringFlag{
            Name: "s3-iam-endpoint",
            Usage: "s3 IAM endpoint",
            Destination: &s3IamEndpoint,
        },
        &cli.BoolFlag{
            Name: "s3-iam-noverify",
            Usage: "s3 IAM disable ssl verification",
            Destination: &s3IamSslNoVerify,
        },
        &cli.BoolFlag{
            Name: "s3-iam-debug",
            Usage: "s3 IAM debug output",
            Destination: &s3IamDebug,
        },
        &cli.BoolFlag{
            Name: "iam-cache-disable",
            Usage: "disable local iam cache",
@@ -327,12 +280,18 @@ func initFlags() []cli.Flag {
    }
}

func runGateway(ctx context.Context, be backend.Backend) error {
func runGateway(ctx *cli.Context, be backend.Backend) error {
    // int32 max for 32 bit arch
    blimit := int64(2*1024*1024*1024 - 1)
    if strconv.IntSize > 32 {
        // 5GB max for 64 bit arch
        blimit = int64(5 * 1024 * 1024 * 1024)
    }

    app := fiber.New(fiber.Config{
        AppName: "versitygw",
        ServerHeader: "VERSITYGW",
        StreamRequestBody: true,
        DisableKeepalive: true,
        AppName: "versitygw",
        ServerHeader: "VERSITYGW",
        BodyLimit: int(blimit),
    })

    var opts []s3api.Option
@@ -357,9 +316,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
    if admPort == "" {
        opts = append(opts, s3api.WithAdminServer())
    }
    if quiet {
        opts = append(opts, s3api.WithQuiet())
    }

    admApp := fiber.New(fiber.Config{
        AppName: "versitygw",
@@ -384,25 +340,18 @@ func runGateway(ctx context.Context, be backend.Backend) error {
    }

    iam, err := auth.New(&auth.Opts{
        Dir: iamDir,
        LDAPServerURL: ldapURL,
        LDAPBindDN: ldapBindDN,
        LDAPPassword: ldapPassword,
        LDAPQueryBase: ldapQueryBase,
        LDAPObjClasses: ldapObjClasses,
        LDAPAccessAtr: ldapAccessAtr,
        LDAPSecretAtr: ldapSecAtr,
        LDAPRoleAtr: ldapRoleAtr,
        S3Access: s3IamAccess,
        S3Secret: s3IamSecret,
        S3Region: s3IamRegion,
        S3Bucket: s3IamBucket,
        S3Endpoint: s3IamEndpoint,
        S3DisableSSlVerfiy: s3IamSslNoVerify,
        S3Debug: s3IamDebug,
        CacheDisable: iamCacheDisable,
        CacheTTL: iamCacheTTL,
        CachePrune: iamCachePrune,
        Dir: iamDir,
        LDAPServerURL: ldapURL,
        LDAPBindDN: ldapBindDN,
        LDAPPassword: ldapPassword,
        LDAPQueryBase: ldapQueryBase,
        LDAPObjClasses: ldapObjClasses,
        LDAPAccessAtr: ldapAccessAtr,
        LDAPSecretAtr: ldapSecAtr,
        LDAPRoleAtr: ldapRoleAtr,
        CacheDisable: iamCacheDisable,
        CacheTTL: iamCacheTTL,
        CachePrune: iamCachePrune,
    })
    if err != nil {
        return fmt.Errorf("setup iam: %w", err)

@@ -49,5 +49,5 @@ func runPosix(ctx *cli.Context) error {
        return fmt.Errorf("init posix: %v", err)
    }

    return runGateway(ctx.Context, be)
    return runGateway(ctx, be)
}

@@ -15,15 +15,11 @@
package main

import (
    "fmt"

    "github.com/urfave/cli/v2"
    "github.com/versity/versitygw/backend/s3proxy"
)

var (
    s3proxyAccess string
    s3proxySecret string
    s3proxyEndpoint string
    s3proxyRegion string
    s3proxyDisableChecksum bool
@@ -39,22 +35,6 @@ func s3Command() *cli.Command {
to an s3 storage backend service.`,
        Action: runS3,
        Flags: []cli.Flag{
            &cli.StringFlag{
                Name: "access",
                Usage: "s3 proxy server access key id",
                Value: "",
                Required: true,
                Destination: &s3proxyAccess,
                Aliases: []string{"a"},
            },
            &cli.StringFlag{
                Name: "secret",
                Usage: "s3 proxy server secret access key",
                Value: "",
                Required: true,
                Destination: &s3proxySecret,
                Aliases: []string{"s"},
            },
            &cli.StringFlag{
                Name: "endpoint",
                Usage: "s3 service endpoint, default AWS if not specified",
@@ -90,10 +70,7 @@ to an s3 storage backend service.`,
}

func runS3(ctx *cli.Context) error {
    be, err := s3proxy.New(s3proxyAccess, s3proxySecret, s3proxyEndpoint, s3proxyRegion,
    be := s3proxy.New(s3proxyEndpoint, s3proxyRegion,
        s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyDebug)
    if err != nil {
        return fmt.Errorf("init s3 backend: %w", err)
    }
    return runGateway(ctx.Context, be)
    return runGateway(ctx, be)
}

@@ -69,5 +69,5 @@ func runScoutfs(ctx *cli.Context) error {
        return fmt.Errorf("init scoutfs: %v", err)
    }

    return runGateway(ctx.Context, be)
    return runGateway(ctx, be)
}

@@ -2,6 +2,9 @@ package main

import (
    "fmt"
    "math"
    "os"
    "text/tabwriter"

    "github.com/urfave/cli/v2"
    "github.com/versity/versitygw/integration"
@@ -13,6 +16,7 @@ var (
    endpoint string
    prefix string
    dstBucket string
    proxyURL string
    partSize int64
    objSize int64
    concurrency int
@@ -67,7 +71,7 @@ func initTestFlags() []cli.Flag {
}

func initTestCommands() []*cli.Command {
    return append([]*cli.Command{
    return []*cli.Command{
        {
            Name: "full-flow",
            Usage: "Tests the full flow of gateway.",
@@ -118,6 +122,7 @@ func initTestCommands() []*cli.Command {
                Name: "bucket",
                Usage: "Destination bucket name to read/write data",
                Destination: &dstBucket,
                Required: true,
            },
            &cli.Int64Flag{
                Name: "partSize",
@@ -143,6 +148,11 @@ func initTestCommands() []*cli.Command {
                Value: false,
                Destination: &checksumDisable,
            },
            &cli.StringFlag{
                Name: "proxy-url",
                Usage: "S3 proxy server url to compare",
                Destination: &proxyURL,
            },
        },
        Action: func(ctx *cli.Context) error {
            if upload && download {
@@ -152,10 +162,6 @@ func initTestCommands() []*cli.Command {
                return fmt.Errorf("must specify one of upload or download")
            }

            if dstBucket == "" {
                return fmt.Errorf("must specify bucket")
            }

            opts := []integration.Option{
                integration.WithAccess(awsID),
                integration.WithSecret(awsSecret),
@@ -177,9 +183,47 @@ func initTestCommands() []*cli.Command {
            s3conf := integration.NewS3Conf(opts...)

            if upload {
                return integration.TestUpload(s3conf, files, objSize, dstBucket, prefix)
                if proxyURL == "" {
                    integration.TestUpload(s3conf, files, objSize, dstBucket, prefix)
                    return nil
                } else {
                    size, elapsed, err := integration.TestUpload(s3conf, files, objSize, dstBucket, prefix)
                    opts = append(opts, integration.WithEndpoint(proxyURL))
                    proxyS3Conf := integration.NewS3Conf(opts...)
                    proxySize, proxyElapsed, proxyErr := integration.TestUpload(proxyS3Conf, files, objSize, dstBucket, prefix)
                    if err != nil || proxyErr != nil {
                        return nil
                    }

                    printProxyResultsTable([][4]string{
                        {"    #    ", "Total Size", "Time Taken", "Speed(MB/S)"},
                        {"---------", "----------", "----------", "-----------"},
                        {"S3 Server", fmt.Sprint(size), fmt.Sprintf("%v", elapsed), fmt.Sprint(int(math.Ceil(float64(size)/elapsed.Seconds()) / 1048576))},
                        {"S3 Proxy", fmt.Sprint(proxySize), fmt.Sprintf("%v", proxyElapsed), fmt.Sprint(int(math.Ceil(float64(proxySize)/proxyElapsed.Seconds()) / 1048576))},
                    })
                    return nil
                }
            } else {
                return integration.TestDownload(s3conf, files, objSize, dstBucket, prefix)
                if proxyURL == "" {
                    integration.TestDownload(s3conf, files, objSize, dstBucket, prefix)
                    return nil
                } else {
                    size, elapsed, err := integration.TestDownload(s3conf, files, objSize, dstBucket, prefix)
                    opts = append(opts, integration.WithEndpoint(proxyURL))
                    proxyS3Conf := integration.NewS3Conf(opts...)
                    proxySize, proxyElapsed, proxyErr := integration.TestDownload(proxyS3Conf, files, objSize, dstBucket, prefix)
                    if err != nil || proxyErr != nil {
                        return nil
                    }

                    printProxyResultsTable([][4]string{
                        {"    #    ", "Total Size", "Time Taken", "Speed(MB/S)"},
                        {"---------", "----------", "----------", "-----------"},
                        {"S3 server", fmt.Sprint(size), fmt.Sprintf("%v", elapsed), fmt.Sprint(int(math.Ceil(float64(size)/elapsed.Seconds()) / 1048576))},
                        {"S3 proxy", fmt.Sprint(proxySize), fmt.Sprintf("%v", proxyElapsed), fmt.Sprint(int(math.Ceil(float64(proxySize)/proxyElapsed.Seconds()) / 1048576))},
                    })
                    return nil
                }
            }
        },
    },
@@ -211,12 +255,13 @@ func initTestCommands() []*cli.Command {
                Value: false,
                Destination: &checksumDisable,
            },
            &cli.StringFlag{
                Name: "proxy-url",
                Usage: "S3 proxy server url to compare",
                Destination: &proxyURL,
            },
        },
        Action: func(ctx *cli.Context) error {
            if dstBucket == "" {
                return fmt.Errorf("must specify the destination bucket")
            }

            opts := []integration.Option{
                integration.WithAccess(awsID),
                integration.WithSecret(awsSecret),
@@ -233,10 +278,30 @@ func initTestCommands() []*cli.Command {

            s3conf := integration.NewS3Conf(opts...)

            return integration.TestReqPerSec(s3conf, totalReqs, dstBucket)
            if proxyURL == "" {
                _, _, err := integration.TestReqPerSec(s3conf, totalReqs, dstBucket)
                return err
            } else {
                elapsed, rps, err := integration.TestReqPerSec(s3conf, totalReqs, dstBucket)
                opts = append(opts, integration.WithEndpoint(proxyURL))
                s3proxy := integration.NewS3Conf(opts...)
                proxyElapsed, proxyRPS, proxyErr := integration.TestReqPerSec(s3proxy, totalReqs, dstBucket)
                if err != nil || proxyErr != nil {
                    return nil
                }

                printProxyResultsTable([][4]string{
                    {"    #    ", "Total Requests", "Time Taken", "Requests Per Second(Req/Sec)"},
                    {"---------", "--------------", "----------", "----------------------------"},
                    {"S3 Server", fmt.Sprint(totalReqs), fmt.Sprintf("%v", elapsed), fmt.Sprint(rps)},
                    {"S3 Proxy", fmt.Sprint(totalReqs), fmt.Sprintf("%v", proxyElapsed), fmt.Sprint(proxyRPS)},
                })

                return nil
            }
        },
    },
    }, extractIntTests()...)
}
}

type testFunc func(*integration.S3Conf)
@@ -265,30 +330,12 @@ func getAction(tf testFunc) func(*cli.Context) error {
    }
}

func extractIntTests() (commands []*cli.Command) {
    tests := integration.GetIntTests()
    for key, val := range tests {
        k := key
        testFunc := val
        commands = append(commands, &cli.Command{
            Name: k,
            Usage: fmt.Sprintf("Runs %v integration test", key),
            Action: func(ctx *cli.Context) error {
                opts := []integration.Option{
                    integration.WithAccess(awsID),
                    integration.WithSecret(awsSecret),
                    integration.WithRegion(region),
                    integration.WithEndpoint(endpoint),
                }
                if debug {
                    opts = append(opts, integration.WithDebug())
                }

                s := integration.NewS3Conf(opts...)
                err := testFunc(s)
                return err
            },
        })
func printProxyResultsTable(stats [][4]string) {
    w := new(tabwriter.Writer)
    w.Init(os.Stdout, minwidth, tabwidth, padding, padchar, flags)
    for _, elem := range stats {
        fmt.Fprintf(w, "%v\t%v\t%v\t%v\n", elem[0], elem[1], elem[2], elem[3])
    }
    return
    fmt.Fprintln(w)
    w.Flush()
}

@@ -1,38 +0,0 @@
version: "3"
services:
  posix:
    build:
      context: .
      dockerfile: ./Dockerfile.dev
      args:
        - IAM_DIR=${IAM_DIR}
        - SETUP_DIR=${SETUP_DIR}
    volumes:
      - ./:/app
    ports:
      - "${POSIX_PORT}:${POSIX_PORT}"
    command: ["sh", "-c", CompileDaemon -build="go build -C ./cmd/versitygw -o versitygw" -command="./cmd/versitygw/versitygw -p :$POSIX_PORT -a $ACCESS_KEY_ID -s $SECRET_ACCESS_KEY --iam-dir $IAM_DIR posix $SETUP_DIR"]
  proxy:
    build:
      context: .
      dockerfile: ./Dockerfile.dev
    volumes:
      - ./:/app
    ports:
      - "${PROXY_PORT}:${PROXY_PORT}"
    command: ["sh", "-c", CompileDaemon -build="go build -C ./cmd/versitygw -o versitygw" -command="./cmd/versitygw/versitygw -p :$PROXY_PORT s3 -a $ACCESS_KEY_ID -s $SECRET_ACCESS_KEY --endpoint http://posix:$POSIX_PORT"]
  azurite:
    image: mcr.microsoft.com/azure-storage/azurite
    ports:
      - "10000:10000"
      - "10001:10001"
      - "10002:10002"
  azuritegw:
    build:
      context: .
      dockerfile: ./Dockerfile.dev
    volumes:
      - ./:/app
    ports:
      - 7070:7070
    command: ["sh", "-c", CompileDaemon -build="go build -C ./cmd/versitygw -o versitygw" -command="./cmd/versitygw/versitygw -a $ACCESS_KEY_ID -s $SECRET_ACCESS_KEY --iam-dir $IAM_DIR azure -a $AZ_ACCOUNT_NAME -k $AZ_ACCOUNT_KEY --url http://azurite:10000/$AZ_ACCOUNT_NAME"]

73
go.mod
@@ -3,63 +3,54 @@ module github.com/versity/versitygw
go 1.20

require (
    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2
    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0
    github.com/aws/aws-sdk-go-v2 v1.24.1
    github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1
    github.com/aws/smithy-go v1.19.0
    github.com/aws/aws-sdk-go-v2 v1.22.2
    github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1
    github.com/aws/smithy-go v1.16.0
    github.com/go-ldap/ldap/v3 v3.4.6
    github.com/gofiber/fiber/v2 v2.52.0
    github.com/google/uuid v1.6.0
    github.com/nats-io/nats.go v1.32.0
    github.com/gofiber/fiber/v2 v2.50.0
    github.com/google/uuid v1.4.0
    github.com/nats-io/nats.go v1.31.0
    github.com/pkg/xattr v0.4.9
    github.com/segmentio/kafka-go v0.4.47
    github.com/urfave/cli/v2 v2.27.1
    github.com/valyala/fasthttp v1.52.0
    github.com/segmentio/kafka-go v0.4.44
    github.com/urfave/cli/v2 v2.25.7
    github.com/valyala/fasthttp v1.50.0
    github.com/versity/scoutfs-go v0.0.0-20230606232754-0474b14343b9
    golang.org/x/sys v0.17.0
    golang.org/x/sys v0.14.0
)

require (
    github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
    github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
    github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
    github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 // indirect
    github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 // indirect
    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 // indirect
    github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect
    github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/kylelemons/godebug v1.1.0 // indirect
    github.com/nats-io/nkeys v0.4.7 // indirect
    github.com/nats-io/nkeys v0.4.6 // indirect
    github.com/nats-io/nuid v1.0.1 // indirect
    github.com/pierrec/lz4/v4 v4.1.18 // indirect
    github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
    golang.org/x/crypto v0.19.0 // indirect
    golang.org/x/net v0.21.0 // indirect
    golang.org/x/text v0.14.0 // indirect
    github.com/stretchr/testify v1.8.1 // indirect
    golang.org/x/crypto v0.14.0 // indirect
)

require (
    github.com/andybalholm/brotli v1.1.0 // indirect
    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
    github.com/aws/aws-sdk-go-v2/config v1.26.6
    github.com/aws/aws-sdk-go-v2/credentials v1.16.16
    github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
    github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 // indirect
    github.com/andybalholm/brotli v1.0.5 // indirect
    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 // indirect
    github.com/aws/aws-sdk-go-v2/config v1.24.0
    github.com/aws/aws-sdk-go-v2/credentials v1.15.2
    github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 // indirect
    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 // indirect
    github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.2 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.2 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.2 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
    github.com/klauspost/compress v1.17.6 // indirect
    github.com/klauspost/compress v1.17.0 // indirect
    github.com/mattn/go-colorable v0.1.13 // indirect
    github.com/mattn/go-isatty v0.0.20 // indirect
    github.com/mattn/go-isatty v0.0.19 // indirect
    github.com/mattn/go-runewidth v0.0.15 // indirect
    github.com/rivo/uniseg v0.4.4 // indirect
    github.com/russross/blackfriday/v2 v2.1.0 // indirect

154
go.sum
@@ -1,103 +1,85 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 h1:IfFdxTUDiV58iZqPKgyWiz4X4fCxZeQ1pTQPImLYXpY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo=
github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o=
github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4=
github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8=
github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 h1:2MUXyGW6dVaQz6aqycpbdLIH1NMcUI6kW6vQ0RabGYg=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15/go.mod h1:aHbhbR6WEQgHAiRj41EQ2W47yOYwNtIkWTXmcAtYqj8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 h1:5oE2WzJE56/mVveuDZPJESKlg/00AaS2pY2QZcnxg4M=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10/go.mod h1:FHbKWQtRBYUz4vO5WBWjzMD2by126ny5y/1EoaWoLfI=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 h1:L0ai8WICYHozIKK+OtPzVJBugL7culcuM4E4JOpIEm8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10/go.mod h1:byqfyxJBshFk0fF9YmK0M0ugIO8OWjzH2T3bPG4eGuA=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 h1:KOxnQeWy5sXyS37fdKEvAsGHOr9fa/qvwxfJurR/BzE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10/go.mod h1:jMx5INQFYFYB3lQD9W0D8Ohgq6Wnl7NYOJ2TQndbulI=
github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 h1:5XNlsBsEvBZBMO6p82y+sqpWg8j5aBCe+5C2GBFgqBQ=
github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1/go.mod h1:4qXHrG1Ne3VGIMZPCB8OjH/pLFO94sKABIusjh0KWPU=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/aws/aws-sdk-go-v2 v1.22.2 h1:lV0U8fnhAnPz8YcdmZVV60+tr6CakHzqA6P8T46ExJI=
github.com/aws/aws-sdk-go-v2 v1.22.2/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 h1:hHgLiIrTRtddC0AKcJr5s7i/hLgcpTt+q/FKxf1Zayk=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0/go.mod h1:w4I/v3NOWgD+qvs1NPEwhd++1h3XPHFaVxasfY6HlYQ=
github.com/aws/aws-sdk-go-v2/config v1.24.0 h1:4LEk29JO3w+y9dEo/5Tq5QTP7uIEw+KQrKiHOs4xlu4=
github.com/aws/aws-sdk-go-v2/config v1.24.0/go.mod h1:11nNDAuK86kOUHeuEQo8f3CkcV5xuUxvPwFjTZE/PnQ=
github.com/aws/aws-sdk-go-v2/credentials v1.15.2 h1:rKH7khRMxPdD0u3dHecd0Q7NOVw3EUe7AqdkUOkiOGI=
github.com/aws/aws-sdk-go-v2/credentials v1.15.2/go.mod h1:tXM8wmaeAhfC7nZoCxb0FzM/aRaB1m1WQ7x0qlBLq80=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 h1:G5KawTAkyHH6WyKQCdHiW4h3PmAXNJpOgwKg3H7sDRE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3/go.mod h1:hugKmSFnZB+HgNI1sYGT14BUPZkO6alC/e0AWu+0IAQ=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6 h1:IpQbitxCZeC64C1ALz9QZu6AHHWundnU2evQ9xbp5k8=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6/go.mod h1:27jIVQK+al9s0yTo3pkMdahRinbscqSC6zNGfNWXPZc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 h1:AaQsr5vvGR7rmeSWBtTCcw16tT9r51mWijuCQhzLnq8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2/go.mod h1:o1IiRn7CWocIFTXJjGKJDOwxv1ibL53NpcvcqGWyRBA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 h1:UZx8SXZ0YtzRiALzYAWcjb9Y9hZUR7MBKaBQ5ouOjPs=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2/go.mod h1:ipuRpcSaklmxR6C39G187TpBAO132gUfleTGccUPs8c=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 h1:usgqiJtamuGIBj+OvYmMq89+Z1hIKkMJToz1WpoeNUY=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.2 h1:pyVrNAf7Hwz0u39dLKN5t+n0+K/3rMYKuiOoIum3AsU=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.2/go.mod h1:mydrfOb9uiOYCxuCPR8YHQNQyGQwUQ7gPMZGBKbH8NY=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0 h1:CJxo7ZBbaIzmXfV3hjcx36n9V87gJsIUPJflwqEHl3Q=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0/go.mod h1:yjVfjuY4nD1EW9i387Kau+I6V5cBA5YnC/mWNopjZrI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.2 h1:f2LhPofnjcdOQKRtumKjMvIHkfSQ8aH/rwKUDEQ/SB4=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.2/go.mod h1:q+xX0H4OfuWDuBy7y/LDi4v8IBOWuF+vtp8Z6ex+lw4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 h1:h7j73yuAVVjic8pqswh+L/7r2IHP43QwRyOu6zcCDDE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2/go.mod h1:H07AHdK5LSy8F7EJUQhoxyiCNkePoHj2D8P2yGTWafo=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.2 h1:gbIaOzpXixUpoPK+js/bCBK1QBDXM22SigsnzGZio0U=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.2/go.mod h1:p+S7RNbdGN8qgHDSg2SCQJ9FeMAmvcETQiVpeGhYnNM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1 h1:o6MCcX1rJW8Y3g+hvg2xpjF6JR6DftuYhfl3Nc1WV9Q=
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1/go.mod h1:UDtxEWbREX6y4KREapT+jjtjoH0TiVSS6f5nfaY1UaM=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 h1:km+ZNjtLtpXYf42RdaDZnNHm9s7SYAuDGTafy6nd89A=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.1/go.mod h1:aHBr3pvBSD5MbzOvQtYutyPLLRPbl/y9x86XyJJnUXQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 h1:iRFNqZH4a67IqPvK8xxtyQYnyrlsvwmpHOe9r55ggBA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1/go.mod h1:pTy5WM+6sNv2tB24JNKFtn6EvciQ5k40ZJ0pq/Iaxj0=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 h1:txgVXIXWPXyqdiVn92BV6a/rgtpX31HYdsOYj0sVQQQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.1/go.mod h1:VAiJiNaoP1L89STFlEMgmHX1bKixY+FaP+TpRFrmyZ4=
github.com/aws/smithy-go v1.16.0 h1:gJZEH/Fqh+RsvlJ1Zt4tVAtV6bKkp3cC+R6FCZMNzik=
github.com/aws/smithy-go v1.16.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA=
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A=
github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc=
github.com/gofiber/fiber/v2 v2.52.0 h1:S+qXi7y+/Pgvqq4DrSmREGiFwtB7Bu6+QFLuIHYw/UE=
github.com/gofiber/fiber/v2 v2.52.0/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/gofiber/fiber/v2 v2.50.0 h1:ia0JaB+uw3GpNSCR5nvC5dsaxXjRU5OEu36aytx+zGw=
github.com/gofiber/fiber/v2 v2.50.0/go.mod h1:21eytvay9Is7S6z+OgPi7c7n4++tnClWmhpimVHMimw=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/nats-io/nats.go v1.32.0 h1:Bx9BZS+aXYlxW08k8Gd3yR2s73pV5XSoAQUyp1Kwvp0=
github.com/nats-io/nats.go v1.32.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nats.go v1.31.0 h1:/WFBHEc/dOKBF6qf1TZhrdEfTmOZ5JzdJ+Y3m6Y/p7E=
github.com/nats-io/nats.go v1.31.0/go.mod h1:di3Bm5MLsoB4Bx61CBTsxuarI36WbhAwOm8QrW39+i8=
github.com/nats-io/nkeys v0.4.6 h1:IzVe95ru2CT6ta874rt9saQRkWfe2nFj1NtvYSLqMzY=
github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADymtkpts=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -107,19 +89,21 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/segmentio/kafka-go v0.4.44 h1:Vjjksniy0WSTZ7CuVJrz1k04UoZeTc77UV6Yyk6tLY4=
github.com/segmentio/kafka-go v0.4.44/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho=
github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.52.0 h1:wqBQpxH71XW0e2g+Og4dzQM8pk34aFYlA1Ga8db7gU0=
github.com/valyala/fasthttp v1.52.0/go.mod h1:hf5C4QnVMkNXMspnsUlfM3WitlgYflyhHYoKol/szxQ=
github.com/valyala/fasthttp v1.50.0 h1:H7fweIlBm0rXLs2q0XbalvJ6r0CUPFWK3/bB4N13e9M=
github.com/valyala/fasthttp v1.50.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/versity/scoutfs-go v0.0.0-20230606232754-0474b14343b9 h1:ZfmQR01Kk6/kQh6+zlqfBYszVY02fzf9xYrchOY4NFM=
@@ -136,9 +120,8 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -146,9 +129,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -159,14 +141,13 @@ golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -179,17 +160,16 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

|
||||
err error
|
||||
}
|
||||
|
||||
func TestUpload(s *S3Conf, files int, objSize int64, bucket, prefix string) error {
|
||||
func TestUpload(s *S3Conf, files int, objSize int64, bucket, prefix string) (size int64, elapsed time.Duration, err error) {
|
||||
var sg sync.WaitGroup
|
||||
results := make([]prefResult, files)
|
||||
start := time.Now()
|
||||
if objSize == 0 {
|
||||
return fmt.Errorf("must specify object size for upload")
|
||||
return 0, time.Since(start), fmt.Errorf("must specify object size for upload")
|
||||
}
|
||||
|
||||
if objSize > (int64(10000) * s.PartSize) {
|
||||
return fmt.Errorf("object size can not exceed 10000 * chunksize")
|
||||
return 0, time.Since(start), fmt.Errorf("object size can not exceed 10000 * chunksize")
|
||||
}
|
||||
|
||||
runF("performance test: upload objects")
|
||||
@@ -45,13 +45,13 @@ func TestUpload(s *S3Conf, files int, objSize int64, bucket, prefix string) erro
|
||||
}(i)
|
||||
}
|
||||
sg.Wait()
|
||||
elapsed := time.Since(start)
|
||||
elapsed = time.Since(start)
|
||||
|
||||
var tot int64
|
||||
for i, res := range results {
|
||||
if res.err != nil {
|
||||
failF("%v: %v\n", i, res.err)
|
||||
break
|
||||
return 0, time.Since(start), res.err
|
||||
}
|
||||
tot += res.size
|
||||
fmt.Printf("%v: %v in %v (%v MB/s)\n",
|
||||
@@ -63,10 +63,10 @@ func TestUpload(s *S3Conf, files int, objSize int64, bucket, prefix string) erro
|
||||
passF("run upload: %v in %v (%v MB/s)\n",
|
||||
tot, elapsed, int(math.Ceil(float64(tot)/elapsed.Seconds())/1048576))
|
||||
|
||||
return nil
|
||||
return tot, time.Since(start), nil
|
||||
}
|
||||
|
||||
func TestDownload(s *S3Conf, files int, objSize int64, bucket, prefix string) error {
|
||||
func TestDownload(s *S3Conf, files int, objSize int64, bucket, prefix string) (size int64, elapsed time.Duration, err error) {
|
||||
var sg sync.WaitGroup
|
||||
results := make([]prefResult, files)
|
||||
start := time.Now()
|
||||
@@ -86,13 +86,13 @@ func TestDownload(s *S3Conf, files int, objSize int64, bucket, prefix string) er
|
||||
}(i)
|
||||
}
|
||||
sg.Wait()
|
||||
elapsed := time.Since(start)
|
||||
elapsed = time.Since(start)
|
||||
|
||||
var tot int64
|
||||
for i, res := range results {
|
||||
if res.err != nil {
|
||||
failF("%v: %v\n", i, res.err)
|
||||
break
|
||||
return 0, elapsed, err
|
||||
}
|
||||
tot += res.size
|
||||
fmt.Printf("%v: %v in %v (%v MB/s)\n",
|
||||
@@ -104,10 +104,10 @@ func TestDownload(s *S3Conf, files int, objSize int64, bucket, prefix string) er
|
||||
passF("run download: %v in %v (%v MB/s)\n",
|
||||
tot, elapsed, int(math.Ceil(float64(tot)/elapsed.Seconds())/1048576))
|
||||
|
||||
return nil
|
||||
return tot, elapsed, nil
|
||||
}
|
||||
|
||||
func TestReqPerSec(s *S3Conf, totalReqs int, bucket string) error {
|
||||
func TestReqPerSec(s *S3Conf, totalReqs int, bucket string) (time.Duration, int, error) {
|
||||
client := s3.NewFromConfig(s.Config())
|
||||
var wg sync.WaitGroup
|
||||
var resErr error
|
||||
@@ -132,11 +132,11 @@ func TestReqPerSec(s *S3Conf, totalReqs int, bucket string) error {
|
||||
wg.Wait()
|
||||
if resErr != nil {
|
||||
failF("performance test failed with error: %w", resErr)
|
||||
return nil
|
||||
return time.Since(startTime), 0, resErr
|
||||
}
|
||||
elapsedTime := time.Since(startTime)
|
||||
rps := int(float64(totalReqs) / elapsedTime.Seconds())
|
||||
|
||||
passF("Success\nTotal Requests: %d,\nConcurrency Level: %d,\nTime Taken: %s,\nRequests Per Second: %dreq/sec", totalReqs, s.Concurrency, elapsedTime, rps)
|
||||
return nil
|
||||
return elapsedTime, rps, nil
|
||||
}
|
||||
|
||||
@@ -22,36 +22,10 @@ func TestAuthentication(s *S3Conf) {
|
||||
Authentication_signature_error_incorrect_secret_key(s)
|
||||
}
|
||||
|
||||
func TestPresignedAuthentication(s *S3Conf) {
|
||||
PresignedAuth_missing_algo_query_param(s)
|
||||
PresignedAuth_unsupported_algorithm(s)
|
||||
PresignedAuth_missing_credentials_query_param(s)
|
||||
PresignedAuth_malformed_creds_invalid_parts(s)
|
||||
PresignedAuth_malformed_creds_invalid_parts(s)
|
||||
PresignedAuth_creds_incorrect_service(s)
|
||||
PresignedAuth_creds_incorrect_region(s)
|
||||
PresignedAuth_creds_invalid_date(s)
|
||||
PresignedAuth_missing_date_query(s)
|
||||
PresignedAuth_dates_mismatch(s)
|
||||
PresignedAuth_non_existing_access_key_id(s)
|
||||
PresignedAuth_missing_signed_headers_query_param(s)
|
||||
PresignedAuth_missing_expiration_query_param(s)
|
||||
PresignedAuth_invalid_expiration_query_param(s)
|
||||
PresignedAuth_negative_expiration_query_param(s)
|
||||
PresignedAuth_exceeding_expiration_query_param(s)
|
||||
PresignedAuth_expired_request(s)
|
||||
PresignedAuth_incorrect_secret_key(s)
|
||||
PresignedAuth_PutObject_success(s)
|
||||
PresignedAuth_Put_GetObject_with_data(s)
|
||||
PresignedAuth_UploadPart(s)
|
||||
}
|
||||
|
||||
func TestCreateBucket(s *S3Conf) {
|
||||
CreateBucket_invalid_bucket_name(s)
|
||||
CreateBucket_existing_bucket(s)
|
||||
CreateBucket_as_user(s)
|
||||
CreateBucket_default_acl(s)
|
||||
CreateBucket_non_default_acl(s)
|
||||
CreateDeleteBucket_success(s)
|
||||
}
|
||||
|
||||
@@ -72,29 +46,11 @@ func TestDeleteBucket(s *S3Conf) {
|
||||
DeleteBucket_success_status_code(s)
|
||||
}
|
||||
|
||||
func TestPutBucketTagging(s *S3Conf) {
|
||||
PutBucketTagging_non_existing_bucket(s)
|
||||
PutBucketTagging_long_tags(s)
|
||||
PutBucketTagging_success(s)
|
||||
}
|
||||
|
||||
func TestGetBucketTagging(s *S3Conf) {
|
||||
GetBucketTagging_non_existing_bucket(s)
|
||||
GetBucketTagging_success(s)
|
||||
}
|
||||
|
||||
func TestDeleteBucketTagging(s *S3Conf) {
|
||||
DeleteBucketTagging_non_existing_object(s)
|
||||
DeleteBucketTagging_success_status(s)
|
||||
DeleteBucketTagging_success(s)
|
||||
}
|
||||
|
||||
func TestPutObject(s *S3Conf) {
|
||||
PutObject_non_existing_bucket(s)
|
||||
PutObject_special_chars(s)
|
||||
PutObject_invalid_long_tags(s)
|
||||
PutObject_success(s)
|
||||
PutObject_invalid_credentials(s)
|
||||
}
|
||||
|
||||
func TestHeadObject(s *S3Conf) {
|
||||
@@ -121,13 +77,6 @@ func TestListObjects(s *S3Conf) {
|
||||
ListObjects_marker_not_from_obj_list(s)
|
||||
}
|
||||
|
||||
func TestListObjectsV2(s *S3Conf) {
|
||||
ListObjectsV2_start_after(s)
|
||||
ListObjectsV2_both_start_after_and_continuation_token(s)
|
||||
ListObjectsV2_start_after_not_in_list(s)
|
||||
ListObjectsV2_start_after_empty_result(s)
|
||||
}
|
||||
|
||||
func TestDeleteObject(s *S3Conf) {
|
||||
DeleteObject_non_existing_object(s)
|
||||
DeleteObject_success(s)
|
||||
@@ -243,19 +192,14 @@ func TestGetBucketAcl(s *S3Conf) {
|
||||
|
||||
func TestFullFlow(s *S3Conf) {
|
||||
TestAuthentication(s)
|
||||
TestPresignedAuthentication(s)
|
||||
TestCreateBucket(s)
|
||||
TestHeadBucket(s)
|
||||
TestListBuckets(s)
|
||||
TestDeleteBucket(s)
|
||||
TestPutBucketTagging(s)
|
||||
TestGetBucketTagging(s)
|
||||
TestDeleteBucketTagging(s)
|
||||
TestPutObject(s)
|
||||
TestHeadObject(s)
|
||||
TestGetObject(s)
|
||||
TestListObjects(s)
|
||||
TestListObjectsV2(s)
|
||||
TestDeleteObject(s)
|
||||
TestDeleteObjects(s)
|
||||
TestCopyObject(s)
|
||||
@@ -278,167 +222,3 @@ func TestPosix(s *S3Conf) {
|
||||
PutObject_dir_obj_with_data(s)
|
||||
CreateMultipartUpload_dir_obj(s)
|
||||
}
|
||||
|
||||
type IntTests map[string]func(s *S3Conf) error
|
||||
|
||||
func GetIntTests() IntTests {
|
||||
return IntTests{
|
||||
"Authentication_empty_auth_header": Authentication_empty_auth_header,
|
||||
"Authentication_invalid_auth_header": Authentication_invalid_auth_header,
|
||||
"Authentication_unsupported_signature_version": Authentication_unsupported_signature_version,
|
||||
"Authentication_malformed_credentials": Authentication_malformed_credentials,
|
||||
"Authentication_malformed_credentials_invalid_parts": Authentication_malformed_credentials_invalid_parts,
|
||||
"Authentication_credentials_terminated_string": Authentication_credentials_terminated_string,
|
||||
"Authentication_credentials_incorrect_service": Authentication_credentials_incorrect_service,
|
||||
"Authentication_credentials_incorrect_region": Authentication_credentials_incorrect_region,
|
||||
"Authentication_credentials_invalid_date": Authentication_credentials_invalid_date,
|
||||
"Authentication_credentials_future_date": Authentication_credentials_future_date,
|
||||
"Authentication_credentials_past_date": Authentication_credentials_past_date,
|
||||
"Authentication_credentials_non_existing_access_key": Authentication_credentials_non_existing_access_key,
|
||||
"Authentication_invalid_signed_headers": Authentication_invalid_signed_headers,
|
||||
"Authentication_missing_date_header": Authentication_missing_date_header,
|
||||
"Authentication_invalid_date_header": Authentication_invalid_date_header,
|
||||
"Authentication_date_mismatch": Authentication_date_mismatch,
|
||||
"Authentication_incorrect_payload_hash": Authentication_incorrect_payload_hash,
|
||||
"Authentication_incorrect_md5": Authentication_incorrect_md5,
|
||||
"Authentication_signature_error_incorrect_secret_key": Authentication_signature_error_incorrect_secret_key,
|
||||
"PresignedAuth_missing_algo_query_param": PresignedAuth_missing_algo_query_param,
|
||||
"PresignedAuth_unsupported_algorithm": PresignedAuth_unsupported_algorithm,
|
||||
"PresignedAuth_missing_credentials_query_param": PresignedAuth_missing_credentials_query_param,
|
||||
"PresignedAuth_malformed_creds_invalid_parts": PresignedAuth_malformed_creds_invalid_parts,
|
||||
"PresignedAuth_creds_invalid_terminator": PresignedAuth_creds_invalid_terminator,
|
||||
"PresignedAuth_creds_incorrect_service": PresignedAuth_creds_incorrect_service,
|
||||
"PresignedAuth_creds_incorrect_region": PresignedAuth_creds_incorrect_region,
|
||||
"PresignedAuth_creds_invalid_date": PresignedAuth_creds_invalid_date,
|
||||
"PresignedAuth_missing_date_query": PresignedAuth_missing_date_query,
|
||||
"PresignedAuth_dates_mismatch": PresignedAuth_dates_mismatch,
|
||||
"PresignedAuth_non_existing_access_key_id": PresignedAuth_non_existing_access_key_id,
|
||||
"PresignedAuth_missing_signed_headers_query_param": PresignedAuth_missing_signed_headers_query_param,
|
||||
"PresignedAuth_missing_expiration_query_param": PresignedAuth_missing_expiration_query_param,
|
||||
"PresignedAuth_invalid_expiration_query_param": PresignedAuth_invalid_expiration_query_param,
|
||||
"PresignedAuth_negative_expiration_query_param": PresignedAuth_negative_expiration_query_param,
|
||||
"PresignedAuth_exceeding_expiration_query_param": PresignedAuth_exceeding_expiration_query_param,
|
||||
"PresignedAuth_expired_request": PresignedAuth_expired_request,
|
||||
"PresignedAuth_incorrect_secret_key": PresignedAuth_incorrect_secret_key,
|
||||
"PresignedAuth_PutObject_success": PresignedAuth_PutObject_success,
|
||||
"PresignedAuth_Put_GetObject_with_data": PresignedAuth_Put_GetObject_with_data,
|
||||
"PresignedAuth_UploadPart": PresignedAuth_UploadPart,
|
||||
"CreateBucket_invalid_bucket_name": CreateBucket_invalid_bucket_name,
|
||||
"CreateBucket_existing_bucket": CreateBucket_existing_bucket,
|
||||
"CreateBucket_as_user": CreateBucket_as_user,
|
||||
"CreateDeleteBucket_success": CreateDeleteBucket_success,
|
||||
"CreateBucket_default_acl": CreateBucket_default_acl,
|
||||
"CreateBucket_non_default_acl": CreateBucket_non_default_acl,
|
||||
"HeadBucket_non_existing_bucket": HeadBucket_non_existing_bucket,
|
||||
"HeadBucket_success": HeadBucket_success,
|
||||
"ListBuckets_as_user": ListBuckets_as_user,
|
||||
"ListBuckets_as_admin": ListBuckets_as_admin,
|
||||
"ListBuckets_success": ListBuckets_success,
|
||||
"DeleteBucket_non_existing_bucket": DeleteBucket_non_existing_bucket,
|
||||
"DeleteBucket_non_empty_bucket": DeleteBucket_non_empty_bucket,
|
||||
"DeleteBucket_success_status_code": DeleteBucket_success_status_code,
|
||||
"PutBucketTagging_non_existing_bucket": PutBucketTagging_non_existing_bucket,
|
||||
"PutBucketTagging_long_tags": PutBucketTagging_long_tags,
|
||||
"PutBucketTagging_success": PutBucketTagging_success,
|
||||
"GetBucketTagging_non_existing_bucket": GetBucketTagging_non_existing_bucket,
|
||||
"GetBucketTagging_success": GetBucketTagging_success,
|
||||
"DeleteBucketTagging_non_existing_object": DeleteBucketTagging_non_existing_object,
|
||||
"DeleteBucketTagging_success_status": DeleteBucketTagging_success_status,
|
||||
"DeleteBucketTagging_success": DeleteBucketTagging_success,
|
||||
"PutObject_non_existing_bucket": PutObject_non_existing_bucket,
|
||||
"PutObject_special_chars": PutObject_special_chars,
|
||||
"PutObject_invalid_long_tags": PutObject_invalid_long_tags,
|
||||
"PutObject_success": PutObject_success,
|
||||
"HeadObject_non_existing_object": HeadObject_non_existing_object,
|
||||
"HeadObject_success": HeadObject_success,
|
||||
"GetObject_non_existing_key": GetObject_non_existing_key,
|
||||
"GetObject_invalid_ranges": GetObject_invalid_ranges,
|
||||
"GetObject_with_meta": GetObject_with_meta,
|
||||
"GetObject_success": GetObject_success,
|
||||
"GetObject_by_range_success": GetObject_by_range_success,
|
||||
"ListObjects_non_existing_bucket": ListObjects_non_existing_bucket,
|
||||
"ListObjects_with_prefix": ListObjects_with_prefix,
|
||||
"ListObject_truncated": ListObject_truncated,
|
||||
"ListObjects_invalid_max_keys": ListObjects_invalid_max_keys,
|
||||
"ListObjects_max_keys_0": ListObjects_max_keys_0,
|
||||
"ListObjects_delimiter": ListObjects_delimiter,
|
||||
"ListObjects_max_keys_none": ListObjects_max_keys_none,
|
||||
"ListObjects_marker_not_from_obj_list": ListObjects_marker_not_from_obj_list,
|
||||
"ListObjectsV2_start_after": ListObjectsV2_start_after,
|
||||
"ListObjectsV2_both_start_after_and_continuation_token": ListObjectsV2_both_start_after_and_continuation_token,
|
||||
"ListObjectsV2_start_after_not_in_list": ListObjectsV2_start_after_not_in_list,
|
||||
"ListObjectsV2_start_after_empty_result": ListObjectsV2_start_after_empty_result,
|
||||
"DeleteObject_non_existing_object": DeleteObject_non_existing_object,
|
||||
"DeleteObject_success": DeleteObject_success,
|
||||
"DeleteObject_success_status_code": DeleteObject_success_status_code,
|
||||
"DeleteObjects_empty_input": DeleteObjects_empty_input,
|
||||
"DeleteObjects_non_existing_objects": DeleteObjects_non_existing_objects,
|
||||
"DeleteObjects_success": DeleteObjects_success,
|
||||
"CopyObject_non_existing_dst_bucket": CopyObject_non_existing_dst_bucket,
|
||||
"CopyObject_not_owned_source_bucket": CopyObject_not_owned_source_bucket,
|
||||
"CopyObject_copy_to_itself": CopyObject_copy_to_itself,
|
||||
"CopyObject_to_itself_with_new_metadata": CopyObject_to_itself_with_new_metadata,
|
||||
"CopyObject_success": CopyObject_success,
|
||||
"PutObjectTagging_non_existing_object": PutObjectTagging_non_existing_object,
|
||||
"PutObjectTagging_long_tags": PutObjectTagging_long_tags,
|
||||
"PutObjectTagging_success": PutObjectTagging_success,
|
||||
"GetObjectTagging_non_existing_object": GetObjectTagging_non_existing_object,
|
||||
"GetObjectTagging_success": GetObjectTagging_success,
|
||||
"DeleteObjectTagging_non_existing_object": DeleteObjectTagging_non_existing_object,
|
||||
"DeleteObjectTagging_success_status": DeleteObjectTagging_success_status,
|
||||
"DeleteObjectTagging_success": DeleteObjectTagging_success,
|
||||
"CreateMultipartUpload_non_existing_bucket": CreateMultipartUpload_non_existing_bucket,
|
||||
"CreateMultipartUpload_success": CreateMultipartUpload_success,
|
||||
"UploadPart_non_existing_bucket": UploadPart_non_existing_bucket,
|
||||
"UploadPart_invalid_part_number": UploadPart_invalid_part_number,
|
||||
"UploadPart_non_existing_key": UploadPart_non_existing_key,
|
||||
"UploadPart_non_existing_mp_upload": UploadPart_non_existing_mp_upload,
|
||||
"UploadPart_success": UploadPart_success,
|
||||
"UploadPartCopy_non_existing_bucket": UploadPartCopy_non_existing_bucket,
|
||||
"UploadPartCopy_incorrect_uploadId": UploadPartCopy_incorrect_uploadId,
|
||||
"UploadPartCopy_incorrect_object_key": UploadPartCopy_incorrect_object_key,
|
||||
"UploadPartCopy_invalid_part_number": UploadPartCopy_invalid_part_number,
|
||||
"UploadPartCopy_invalid_copy_source": UploadPartCopy_invalid_copy_source,
|
||||
"UploadPartCopy_non_existing_source_bucket": UploadPartCopy_non_existing_source_bucket,
|
||||
"UploadPartCopy_non_existing_source_object_key": UploadPartCopy_non_existing_source_object_key,
|
||||
"UploadPartCopy_success": UploadPartCopy_success,
|
||||
"UploadPartCopy_by_range_invalid_range": UploadPartCopy_by_range_invalid_range,
|
||||
"UploadPartCopy_greater_range_than_obj_size": UploadPartCopy_greater_range_than_obj_size,
|
||||
"UploadPartCopy_by_range_success": UploadPartCopy_by_range_success,
|
||||
"ListParts_incorrect_uploadId": ListParts_incorrect_uploadId,
|
||||
"ListParts_incorrect_object_key": ListParts_incorrect_object_key,
|
||||
"ListParts_success": ListParts_success,
|
||||
"ListMultipartUploads_non_existing_bucket": ListMultipartUploads_non_existing_bucket,
|
||||
"ListMultipartUploads_empty_result": ListMultipartUploads_empty_result,
|
||||
"ListMultipartUploads_invalid_max_uploads": ListMultipartUploads_invalid_max_uploads,
|
||||
"ListMultipartUploads_max_uploads": ListMultipartUploads_max_uploads,
|
||||
"ListMultipartUploads_incorrect_next_key_marker": ListMultipartUploads_incorrect_next_key_marker,
|
||||
"ListMultipartUploads_ignore_upload_id_marker": ListMultipartUploads_ignore_upload_id_marker,
|
||||
"ListMultipartUploads_success": ListMultipartUploads_success,
|
||||
"AbortMultipartUpload_non_existing_bucket": AbortMultipartUpload_non_existing_bucket,
|
||||
"AbortMultipartUpload_incorrect_uploadId": AbortMultipartUpload_incorrect_uploadId,
|
||||
"AbortMultipartUpload_incorrect_object_key": AbortMultipartUpload_incorrect_object_key,
|
||||
"AbortMultipartUpload_success": AbortMultipartUpload_success,
|
||||
"AbortMultipartUpload_success_status_code": AbortMultipartUpload_success_status_code,
|
||||
"CompletedMultipartUpload_non_existing_bucket": CompletedMultipartUpload_non_existing_bucket,
|
||||
"CompleteMultipartUpload_invalid_part_number": CompleteMultipartUpload_invalid_part_number,
|
||||
"CompleteMultipartUpload_invalid_ETag": CompleteMultipartUpload_invalid_ETag,
|
||||
"CompleteMultipartUpload_success": CompleteMultipartUpload_success,
|
||||
"PutBucketAcl_non_existing_bucket": PutBucketAcl_non_existing_bucket,
|
||||
"PutBucketAcl_invalid_acl_canned_and_acp": PutBucketAcl_invalid_acl_canned_and_acp,
|
||||
"PutBucketAcl_invalid_acl_canned_and_grants": PutBucketAcl_invalid_acl_canned_and_grants,
|
||||
"PutBucketAcl_invalid_acl_acp_and_grants": PutBucketAcl_invalid_acl_acp_and_grants,
|
||||
"PutBucketAcl_invalid_owner": PutBucketAcl_invalid_owner,
|
||||
"PutBucketAcl_success_access_denied": PutBucketAcl_success_access_denied,
|
||||
"PutBucketAcl_success_grants": PutBucketAcl_success_grants,
|
||||
"PutBucketAcl_success_canned_acl": PutBucketAcl_success_canned_acl,
|
||||
"PutBucketAcl_success_acp": PutBucketAcl_success_acp,
|
||||
"GetBucketAcl_non_existing_bucket": GetBucketAcl_non_existing_bucket,
|
||||
"GetBucketAcl_access_denied": GetBucketAcl_access_denied,
|
||||
"GetBucketAcl_success": GetBucketAcl_success,
|
||||
"PutObject_overwrite_dir_obj": PutObject_overwrite_dir_obj,
|
||||
"PutObject_overwrite_file_obj": PutObject_overwrite_file_obj,
|
||||
"PutObject_dir_obj_with_data": PutObject_dir_obj_with_data,
|
||||
"CreateMultipartUpload_dir_obj": CreateMultipartUpload_dir_obj,
|
||||
}
|
||||
}
|
||||
|
||||
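The IntTests map above keys every integration test by its name, which lets a caller select and run a single test at runtime. A minimal sketch of such a lookup (the helper name and error text here are illustrative, not part of the gateway):

// runNamedTest looks up one test from the IntTests map and executes it.
func runNamedTest(s *S3Conf, name string) error {
    test, ok := GetIntTests()[name]
    if !ok {
        return fmt.Errorf("unknown test: %v", name)
    }
    return test(s)
}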
integration/tests.go (2048 lines changed): file diff suppressed because it is too large.
@@ -12,7 +12,6 @@ import (
    "io"
    rnd "math/rand"
    "net/http"
    "net/url"
    "os"
    "os/exec"
    "strings"
@@ -61,7 +60,7 @@ func teardown(s *S3Conf, bucket string) error {
    })
    cancel()
    if err != nil {
        return fmt.Errorf("failed to delete object %v: %w", *key, err)
        return fmt.Errorf("failed to delete object %v: %v", *key, err)
    }
    return nil
}
@@ -72,7 +71,7 @@ func teardown(s *S3Conf, bucket string) error {
    out, err := s3client.ListObjectsV2(ctx, in)
    cancel()
    if err != nil {
        return fmt.Errorf("failed to list objects: %w", err)
        return fmt.Errorf("failed to list objects: %v", err)
    }

    for _, item := range out.Contents {
@@ -82,7 +81,7 @@ func teardown(s *S3Conf, bucket string) error {
        }
    }

    if out.IsTruncated != nil && *out.IsTruncated {
    if out.IsTruncated {
        in.ContinuationToken = out.ContinuationToken
    } else {
        break
@@ -97,32 +96,31 @@ func teardown(s *S3Conf, bucket string) error {
    return err
}
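The teardown hunk above reflects an AWS SDK for Go v2 type change: ListObjectsV2Output.IsTruncated is a plain bool on one side of the diff and a *bool on the other, so the pointer form must be nil-guarded before dereferencing. A hedged sketch of the resulting pagination loop (note the SDK documents NextContinuationToken as the token to send for the following page):

// Drain a listing page by page, guarding the *bool before dereference.
for {
    out, err := s3client.ListObjectsV2(ctx, in)
    if err != nil {
        return err
    }
    // ... delete out.Contents here ...
    if out.IsTruncated != nil && *out.IsTruncated {
        in.ContinuationToken = out.NextContinuationToken
    } else {
        break
    }
}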
func actionHandler(s *S3Conf, testName string, handler func(s3client *s3.Client, bucket string) error) error {
func actionHandler(s *S3Conf, testName string, handler func(s3client *s3.Client, bucket string) error) {
    runF(testName)
    bucketName := getBucketName()
    err := setup(s, bucketName)
    if err != nil {
        failF("%v: failed to create a bucket: %v", testName, err)
        return fmt.Errorf("%v: failed to create a bucket: %w", testName, err)
        failF("%v: failed to create a bucket: %v", testName, err.Error())
        return
    }
    client := s3.NewFromConfig(s.Config())
    handlerErr := handler(client, bucketName)
    if handlerErr != nil {
        failF("%v: %v", testName, handlerErr)
        failF("%v: %v", testName, handlerErr.Error())
    }

    err = teardown(s, bucketName)
    if err != nil {
        fmt.Printf(colorRed+"%v: failed to delete the bucket: %v", testName, err)
        if handlerErr == nil {
            return fmt.Errorf("%v: failed to delete the bucket: %w", testName, err)
            failF("%v: failed to delete the bucket: %v", testName, err.Error())
        } else {
            fmt.Printf(colorRed+"%v: failed to delete the bucket: %v", testName, err.Error())
        }
    }
    if handlerErr == nil {
        passF(testName)
    }

    return handlerErr
}

type authConfig struct {
@@ -134,35 +132,20 @@ type authConfig struct {
    date time.Time
}

func authHandler(s *S3Conf, cfg *authConfig, handler func(req *http.Request) error) error {
func authHandler(s *S3Conf, cfg *authConfig, handler func(req *http.Request) error) {
    runF(cfg.testName)
    req, err := createSignedReq(cfg.method, s.endpoint, cfg.path, s.awsID, s.awsSecret, cfg.service, s.awsRegion, cfg.body, cfg.date)
    if err != nil {
        failF("%v: %v", cfg.testName, err)
        return fmt.Errorf("%v: %w", cfg.testName, err)
        failF("%v: %v", cfg.testName, err.Error())
        return
    }

    err = handler(req)
    if err != nil {
        failF("%v: %v", cfg.testName, err)
        return fmt.Errorf("%v: %w", cfg.testName, err)
        failF("%v: %v", cfg.testName, err.Error())
        return
    }
    passF(cfg.testName)
    return nil
}

func presignedAuthHandler(s *S3Conf, testName string, handler func(client *s3.PresignClient) error) error {
    runF(testName)
    clt := s3.NewPresignClient(s3.NewFromConfig(s.Config()))

    err := handler(clt)
    if err != nil {
        failF("%v: %v", testName, err)
        return fmt.Errorf("%v: %w", testName, err)
    }

    passF(testName)
    return nil
}

func createSignedReq(method, endpoint, path, access, secret, service, region string, body []byte, date time.Time) (*http.Request, error) {
@@ -222,15 +205,16 @@ func checkApiErr(err error, apiErr s3err.APIError) error {
    }

        return fmt.Errorf("expected %v, instead got %v", apiErr.Code, ae.ErrorCode())
    } else {
        return fmt.Errorf("expected aws api error, instead got: %v", err.Error())
    }
    return fmt.Errorf("expected aws api error, instead got: %w", err)
}

func checkSdkApiErr(err error, code string) error {
    var ae smithy.APIError
    if errors.As(err, &ae) {
        if ae.ErrorCode() != code {
            return fmt.Errorf("expected %v, instead got %v", code, ae.ErrorCode())
            return fmt.Errorf("expected %v, instead got %v", ae.ErrorCode(), code)
        }
        return nil
    }
@@ -309,7 +293,7 @@ func compareParts(parts1, parts2 []types.Part) bool {
    }

    for i, prt := range parts1 {
        if *prt.PartNumber != *parts2[i].PartNumber {
        if prt.PartNumber != parts2[i].PartNumber {
            return false
        }
        if *prt.ETag != *parts2[i].ETag {
@@ -500,23 +484,20 @@ func uploadParts(client *s3.Client, size, partCount int, bucket, key, uploadId s
            return parts, err
        }
        ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
        pn := int32(partNumber)
        out, err := client.UploadPart(ctx, &s3.UploadPartInput{
            Bucket: &bucket,
            Key: &key,
            UploadId: &uploadId,
            Body: bytes.NewReader(partBuffer),
            PartNumber: &pn,
            PartNumber: int32(partNumber),
        })
        cancel()
        if err != nil {
            return parts, err
        } else {
            parts = append(parts, types.Part{ETag: out.ETag, PartNumber: int32(partNumber)})
            offset += partSize
        }
        parts = append(parts, types.Part{
            ETag: out.ETag,
            PartNumber: &pn,
        })
        offset += partSize
    }

    return parts, err
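The uploadParts hunk shows the same SDK migration for part numbers: UploadPartInput.PartNumber and types.Part.PartNumber are int32 on one side of the diff and *int32 on the other, which is why a fresh local is taken each iteration before its address is stored. A short hedged sketch of the pointer-safe append:

// A new local per loop iteration keeps each recorded part's pointer distinct,
// rather than every part aliasing one reused loop variable.
pn := int32(partNumber)
parts = append(parts, types.Part{ETag: out.ETag, PartNumber: &pn})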
@@ -566,26 +547,3 @@ func genRandString(length int) string {
    }
    return string(result)
}

const (
    credAccess int = iota
    credDate
    credRegion
    credService
    credTerminator
)

func changeAuthCred(uri, newVal string, index int) (string, error) {
    urlParsed, err := url.Parse(uri)
    if err != nil {
        return "", err
    }

    queries := urlParsed.Query()
    creds := strings.Split(queries.Get("X-Amz-Credential"), "/")
    creds[index] = newVal
    queries.Set("X-Amz-Credential", strings.Join(creds, "/"))
    urlParsed.RawQuery = queries.Encode()

    return urlParsed.String(), nil
}
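changeAuthCred rewrites a single slash-separated field of the X-Amz-Credential query parameter, and the cred* constants above name each field's index, letting a negative test corrupt exactly one scope component. An illustrative call (the URL and values are made up):

// Replace the region field of a presigned URL's credential scope.
uri, err := changeAuthCred(
    "http://localhost:7070/b/o?X-Amz-Credential=user/20230101/us-east-1/s3/aws4_request",
    "bad-region", credRegion)
// on success, uri now carries the scope user/20230101/bad-region/s3/aws4_request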
@@ -1,7 +1,6 @@
#!/bin/bash

# make temp dirs
rm -rf /tmp/gw
mkdir /tmp/gw
rm -rf /tmp/covdata
mkdir /tmp/covdata

@@ -4,7 +4,6 @@
package controllers

import (
    "bufio"
    "context"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/versity/versitygw/backend"
@@ -35,7 +34,7 @@ var _ backend.Backend = &BackendMock{}
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
//     panic("mock out the CopyObject method")
// },
// CreateBucketFunc: func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error {
// CreateBucketFunc: func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput) error {
//     panic("mock out the CreateBucket method")
// },
// CreateMultipartUploadFunc: func(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
@@ -44,9 +43,6 @@ var _ backend.Backend = &BackendMock{}
// DeleteBucketFunc: func(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error {
//     panic("mock out the DeleteBucket method")
// },
// DeleteBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) error {
//     panic("mock out the DeleteBucketTagging method")
// },
// DeleteObjectFunc: func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error {
//     panic("mock out the DeleteObject method")
// },
@@ -59,9 +55,6 @@ var _ backend.Backend = &BackendMock{}
// GetBucketAclFunc: func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error) {
//     panic("mock out the GetBucketAcl method")
// },
// GetBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) (map[string]string, error) {
//     panic("mock out the GetBucketTagging method")
// },
// GetObjectFunc: func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
//     panic("mock out the GetObject method")
// },
@@ -101,9 +94,6 @@ var _ backend.Backend = &BackendMock{}
// PutBucketAclFunc: func(contextMoqParam context.Context, bucket string, data []byte) error {
//     panic("mock out the PutBucketAcl method")
// },
// PutBucketTaggingFunc: func(contextMoqParam context.Context, bucket string, tags map[string]string) error {
//     panic("mock out the PutBucketTagging method")
// },
// PutObjectFunc: func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (string, error) {
//     panic("mock out the PutObject method")
// },
@@ -116,7 +106,7 @@ var _ backend.Backend = &BackendMock{}
// RestoreObjectFunc: func(contextMoqParam context.Context, restoreObjectInput *s3.RestoreObjectInput) error {
//     panic("mock out the RestoreObject method")
// },
// SelectObjectContentFunc: func(ctx context.Context, input *s3.SelectObjectContentInput) func(w *bufio.Writer) {
// SelectObjectContentFunc: func(contextMoqParam context.Context, selectObjectContentInput *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error) {
//     panic("mock out the SelectObjectContent method")
// },
// ShutdownFunc: func() {
@@ -151,7 +141,7 @@ type BackendMock struct {
    CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)

    // CreateBucketFunc mocks the CreateBucket method.
    CreateBucketFunc func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error
    CreateBucketFunc func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput) error

    // CreateMultipartUploadFunc mocks the CreateMultipartUpload method.
    CreateMultipartUploadFunc func(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
@@ -159,9 +149,6 @@ type BackendMock struct {
    // DeleteBucketFunc mocks the DeleteBucket method.
    DeleteBucketFunc func(contextMoqParam context.Context, deleteBucketInput *s3.DeleteBucketInput) error

    // DeleteBucketTaggingFunc mocks the DeleteBucketTagging method.
    DeleteBucketTaggingFunc func(contextMoqParam context.Context, bucket string) error

    // DeleteObjectFunc mocks the DeleteObject method.
    DeleteObjectFunc func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error

@@ -174,9 +161,6 @@ type BackendMock struct {
    // GetBucketAclFunc mocks the GetBucketAcl method.
    GetBucketAclFunc func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error)

    // GetBucketTaggingFunc mocks the GetBucketTagging method.
    GetBucketTaggingFunc func(contextMoqParam context.Context, bucket string) (map[string]string, error)

    // GetObjectFunc mocks the GetObject method.
    GetObjectFunc func(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error)

@@ -216,9 +200,6 @@ type BackendMock struct {
    // PutBucketAclFunc mocks the PutBucketAcl method.
    PutBucketAclFunc func(contextMoqParam context.Context, bucket string, data []byte) error

    // PutBucketTaggingFunc mocks the PutBucketTagging method.
    PutBucketTaggingFunc func(contextMoqParam context.Context, bucket string, tags map[string]string) error

    // PutObjectFunc mocks the PutObject method.
    PutObjectFunc func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (string, error)

@@ -232,7 +213,7 @@ type BackendMock struct {
    RestoreObjectFunc func(contextMoqParam context.Context, restoreObjectInput *s3.RestoreObjectInput) error

    // SelectObjectContentFunc mocks the SelectObjectContent method.
    SelectObjectContentFunc func(ctx context.Context, input *s3.SelectObjectContentInput) func(w *bufio.Writer)
    SelectObjectContentFunc func(contextMoqParam context.Context, selectObjectContentInput *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error)

    // ShutdownFunc mocks the Shutdown method.
    ShutdownFunc func()
@@ -284,8 +265,6 @@ type BackendMock struct {
        ContextMoqParam context.Context
        // CreateBucketInput is the createBucketInput argument value.
        CreateBucketInput *s3.CreateBucketInput
        // DefaultACL is the defaultACL argument value.
        DefaultACL []byte
    }
    // CreateMultipartUpload holds details about calls to the CreateMultipartUpload method.
    CreateMultipartUpload []struct {
@@ -301,13 +280,6 @@ type BackendMock struct {
        // DeleteBucketInput is the deleteBucketInput argument value.
        DeleteBucketInput *s3.DeleteBucketInput
    }
    // DeleteBucketTagging holds details about calls to the DeleteBucketTagging method.
    DeleteBucketTagging []struct {
        // ContextMoqParam is the contextMoqParam argument value.
        ContextMoqParam context.Context
        // Bucket is the bucket argument value.
        Bucket string
    }
    // DeleteObject holds details about calls to the DeleteObject method.
    DeleteObject []struct {
        // ContextMoqParam is the contextMoqParam argument value.
@@ -338,13 +310,6 @@ type BackendMock struct {
        // GetBucketAclInput is the getBucketAclInput argument value.
        GetBucketAclInput *s3.GetBucketAclInput
    }
    // GetBucketTagging holds details about calls to the GetBucketTagging method.
    GetBucketTagging []struct {
        // ContextMoqParam is the contextMoqParam argument value.
        ContextMoqParam context.Context
        // Bucket is the bucket argument value.
        Bucket string
    }
    // GetObject holds details about calls to the GetObject method.
    GetObject []struct {
        // ContextMoqParam is the contextMoqParam argument value.
@@ -442,15 +407,6 @@ type BackendMock struct {
        // Data is the data argument value.
        Data []byte
    }
    // PutBucketTagging holds details about calls to the PutBucketTagging method.
    PutBucketTagging []struct {
        // ContextMoqParam is the contextMoqParam argument value.
        ContextMoqParam context.Context
        // Bucket is the bucket argument value.
        Bucket string
        // Tags is the tags argument value.
        Tags map[string]string
    }
    // PutObject holds details about calls to the PutObject method.
    PutObject []struct {
        // ContextMoqParam is the contextMoqParam argument value.
@@ -485,10 +441,10 @@ type BackendMock struct {
    }
    // SelectObjectContent holds details about calls to the SelectObjectContent method.
    SelectObjectContent []struct {
        // Ctx is the ctx argument value.
        Ctx context.Context
        // Input is the input argument value.
        Input *s3.SelectObjectContentInput
        // ContextMoqParam is the contextMoqParam argument value.
        ContextMoqParam context.Context
        // SelectObjectContentInput is the selectObjectContentInput argument value.
        SelectObjectContentInput *s3.SelectObjectContentInput
    }
    // Shutdown holds details about calls to the Shutdown method.
    Shutdown []struct {
@@ -518,12 +474,10 @@ type BackendMock struct {
    lockCreateBucket sync.RWMutex
    lockCreateMultipartUpload sync.RWMutex
    lockDeleteBucket sync.RWMutex
    lockDeleteBucketTagging sync.RWMutex
    lockDeleteObject sync.RWMutex
    lockDeleteObjectTagging sync.RWMutex
    lockDeleteObjects sync.RWMutex
    lockGetBucketAcl sync.RWMutex
    lockGetBucketTagging sync.RWMutex
    lockGetObject sync.RWMutex
    lockGetObjectAcl sync.RWMutex
    lockGetObjectAttributes sync.RWMutex
@@ -537,7 +491,6 @@ type BackendMock struct {
    lockListObjectsV2 sync.RWMutex
    lockListParts sync.RWMutex
    lockPutBucketAcl sync.RWMutex
    lockPutBucketTagging sync.RWMutex
    lockPutObject sync.RWMutex
    lockPutObjectAcl sync.RWMutex
    lockPutObjectTagging sync.RWMutex
@@ -698,23 +651,21 @@ func (mock *BackendMock) CopyObjectCalls() []struct {
}

// CreateBucket calls CreateBucketFunc.
func (mock *BackendMock) CreateBucket(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error {
func (mock *BackendMock) CreateBucket(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput) error {
    if mock.CreateBucketFunc == nil {
        panic("BackendMock.CreateBucketFunc: method is nil but Backend.CreateBucket was just called")
    }
    callInfo := struct {
        ContextMoqParam context.Context
        CreateBucketInput *s3.CreateBucketInput
        DefaultACL []byte
    }{
        ContextMoqParam: contextMoqParam,
        CreateBucketInput: createBucketInput,
        DefaultACL: defaultACL,
    }
    mock.lockCreateBucket.Lock()
    mock.calls.CreateBucket = append(mock.calls.CreateBucket, callInfo)
    mock.lockCreateBucket.Unlock()
    return mock.CreateBucketFunc(contextMoqParam, createBucketInput, defaultACL)
    return mock.CreateBucketFunc(contextMoqParam, createBucketInput)
}
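Every method of this moq-generated mock repeats the shape visible in CreateBucket above: panic if the stub func is unset, record the arguments in a callInfo struct under the method's RWMutex, then delegate to the stub. A minimal hedged usage sketch, using the two-argument form from one side of the diff:

// Configure only the method under test; calls are recorded for assertions.
mock := &BackendMock{
    CreateBucketFunc: func(ctx context.Context, in *s3.CreateBucketInput) error {
        return nil
    },
}
_ = mock.CreateBucket(context.Background(), &s3.CreateBucketInput{})
calls := mock.CreateBucketCalls() // one recorded call, with its arguments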
// CreateBucketCalls gets all the calls that were made to CreateBucket.
@@ -724,12 +675,10 @@ func (mock *BackendMock) CreateBucket(contextMoqParam context.Context, createBuc
func (mock *BackendMock) CreateBucketCalls() []struct {
    ContextMoqParam context.Context
    CreateBucketInput *s3.CreateBucketInput
    DefaultACL []byte
} {
    var calls []struct {
        ContextMoqParam context.Context
        CreateBucketInput *s3.CreateBucketInput
        DefaultACL []byte
    }
    mock.lockCreateBucket.RLock()
    calls = mock.calls.CreateBucket
@@ -809,42 +758,6 @@ func (mock *BackendMock) DeleteBucketCalls() []struct {
    return calls
}

// DeleteBucketTagging calls DeleteBucketTaggingFunc.
func (mock *BackendMock) DeleteBucketTagging(contextMoqParam context.Context, bucket string) error {
    if mock.DeleteBucketTaggingFunc == nil {
        panic("BackendMock.DeleteBucketTaggingFunc: method is nil but Backend.DeleteBucketTagging was just called")
    }
    callInfo := struct {
        ContextMoqParam context.Context
        Bucket string
    }{
        ContextMoqParam: contextMoqParam,
        Bucket: bucket,
    }
    mock.lockDeleteBucketTagging.Lock()
    mock.calls.DeleteBucketTagging = append(mock.calls.DeleteBucketTagging, callInfo)
    mock.lockDeleteBucketTagging.Unlock()
    return mock.DeleteBucketTaggingFunc(contextMoqParam, bucket)
}

// DeleteBucketTaggingCalls gets all the calls that were made to DeleteBucketTagging.
// Check the length with:
//
// len(mockedBackend.DeleteBucketTaggingCalls())
func (mock *BackendMock) DeleteBucketTaggingCalls() []struct {
    ContextMoqParam context.Context
    Bucket string
} {
    var calls []struct {
        ContextMoqParam context.Context
        Bucket string
    }
    mock.lockDeleteBucketTagging.RLock()
    calls = mock.calls.DeleteBucketTagging
    mock.lockDeleteBucketTagging.RUnlock()
    return calls
}

// DeleteObject calls DeleteObjectFunc.
func (mock *BackendMock) DeleteObject(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error {
    if mock.DeleteObjectFunc == nil {
@@ -993,42 +906,6 @@ func (mock *BackendMock) GetBucketAclCalls() []struct {
    return calls
}

// GetBucketTagging calls GetBucketTaggingFunc.
func (mock *BackendMock) GetBucketTagging(contextMoqParam context.Context, bucket string) (map[string]string, error) {
    if mock.GetBucketTaggingFunc == nil {
        panic("BackendMock.GetBucketTaggingFunc: method is nil but Backend.GetBucketTagging was just called")
    }
    callInfo := struct {
        ContextMoqParam context.Context
        Bucket string
    }{
        ContextMoqParam: contextMoqParam,
        Bucket: bucket,
    }
    mock.lockGetBucketTagging.Lock()
    mock.calls.GetBucketTagging = append(mock.calls.GetBucketTagging, callInfo)
    mock.lockGetBucketTagging.Unlock()
    return mock.GetBucketTaggingFunc(contextMoqParam, bucket)
}

// GetBucketTaggingCalls gets all the calls that were made to GetBucketTagging.
// Check the length with:
//
// len(mockedBackend.GetBucketTaggingCalls())
func (mock *BackendMock) GetBucketTaggingCalls() []struct {
    ContextMoqParam context.Context
    Bucket string
} {
    var calls []struct {
        ContextMoqParam context.Context
        Bucket string
    }
    mock.lockGetBucketTagging.RLock()
    calls = mock.calls.GetBucketTagging
    mock.lockGetBucketTagging.RUnlock()
    return calls
}

// GetObject calls GetObjectFunc.
func (mock *BackendMock) GetObject(contextMoqParam context.Context, getObjectInput *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
    if mock.GetObjectFunc == nil {
@@ -1509,46 +1386,6 @@ func (mock *BackendMock) PutBucketAclCalls() []struct {
    return calls
}

// PutBucketTagging calls PutBucketTaggingFunc.
func (mock *BackendMock) PutBucketTagging(contextMoqParam context.Context, bucket string, tags map[string]string) error {
    if mock.PutBucketTaggingFunc == nil {
        panic("BackendMock.PutBucketTaggingFunc: method is nil but Backend.PutBucketTagging was just called")
    }
    callInfo := struct {
        ContextMoqParam context.Context
        Bucket string
        Tags map[string]string
    }{
        ContextMoqParam: contextMoqParam,
        Bucket: bucket,
        Tags: tags,
    }
    mock.lockPutBucketTagging.Lock()
    mock.calls.PutBucketTagging = append(mock.calls.PutBucketTagging, callInfo)
    mock.lockPutBucketTagging.Unlock()
    return mock.PutBucketTaggingFunc(contextMoqParam, bucket, tags)
}

// PutBucketTaggingCalls gets all the calls that were made to PutBucketTagging.
// Check the length with:
//
// len(mockedBackend.PutBucketTaggingCalls())
func (mock *BackendMock) PutBucketTaggingCalls() []struct {
    ContextMoqParam context.Context
    Bucket string
    Tags map[string]string
} {
    var calls []struct {
        ContextMoqParam context.Context
        Bucket string
        Tags map[string]string
    }
    mock.lockPutBucketTagging.RLock()
    calls = mock.calls.PutBucketTagging
    mock.lockPutBucketTagging.RUnlock()
    return calls
}

// PutObject calls PutObjectFunc.
func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (string, error) {
    if mock.PutObjectFunc == nil {
@@ -1702,21 +1539,21 @@ func (mock *BackendMock) RestoreObjectCalls() []struct {
}

// SelectObjectContent calls SelectObjectContentFunc.
func (mock *BackendMock) SelectObjectContent(ctx context.Context, input *s3.SelectObjectContentInput) func(w *bufio.Writer) {
func (mock *BackendMock) SelectObjectContent(contextMoqParam context.Context, selectObjectContentInput *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error) {
    if mock.SelectObjectContentFunc == nil {
        panic("BackendMock.SelectObjectContentFunc: method is nil but Backend.SelectObjectContent was just called")
    }
    callInfo := struct {
        Ctx context.Context
        Input *s3.SelectObjectContentInput
        ContextMoqParam context.Context
        SelectObjectContentInput *s3.SelectObjectContentInput
    }{
        Ctx: ctx,
        Input: input,
        ContextMoqParam: contextMoqParam,
        SelectObjectContentInput: selectObjectContentInput,
    }
    mock.lockSelectObjectContent.Lock()
    mock.calls.SelectObjectContent = append(mock.calls.SelectObjectContent, callInfo)
    mock.lockSelectObjectContent.Unlock()
    return mock.SelectObjectContentFunc(ctx, input)
    return mock.SelectObjectContentFunc(contextMoqParam, selectObjectContentInput)
}

// SelectObjectContentCalls gets all the calls that were made to SelectObjectContent.
@@ -1724,12 +1561,12 @@ func (mock *BackendMock) SelectObjectContent(ctx context.Context, input *s3.Sele
//
// len(mockedBackend.SelectObjectContentCalls())
func (mock *BackendMock) SelectObjectContentCalls() []struct {
    Ctx context.Context
    Input *s3.SelectObjectContentInput
    ContextMoqParam context.Context
    SelectObjectContentInput *s3.SelectObjectContentInput
} {
    var calls []struct {
        Ctx context.Context
        Input *s3.SelectObjectContentInput
        ContextMoqParam context.Context
        SelectObjectContentInput *s3.SelectObjectContentInput
    }
    mock.lockSelectObjectContent.RLock()
    calls = mock.calls.SelectObjectContent
File diff suppressed because it is too large.
@@ -15,7 +15,6 @@
package controllers

import (
    "bufio"
    "context"
    "encoding/json"
    "fmt"
@@ -175,7 +174,6 @@ func TestS3ApiController_GetActions(t *testing.T) {
    now := time.Now()

    app := fiber.New()
    contentLength := int64(1000)
    s3ApiController := S3ApiController{
        be: &BackendMock{
            GetBucketAclFunc: func(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
@@ -196,7 +194,7 @@ func TestS3ApiController_GetActions(t *testing.T) {
                ContentType: getPtr("application/xml"),
                ContentEncoding: getPtr("gzip"),
                ETag: getPtr("98sda7f97sa9df798sd79f8as9df"),
                ContentLength: &contentLength,
                ContentLength: 1000,
                LastModified: &now,
                StorageClass: "storage class",
            }, nil
@@ -343,9 +341,6 @@ func TestS3ApiController_ListActions(t *testing.T) {
            ListObjectsFunc: func(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
                return &s3.ListObjectsOutput{}, nil
            },
            GetBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) (map[string]string, error) {
                return map[string]string{}, nil
            },
        },
    }

@@ -368,9 +363,6 @@ func TestS3ApiController_ListActions(t *testing.T) {
            ListObjectsFunc: func(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
                return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
            },
            GetBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) (map[string]string, error) {
                return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
            },
        },
    }
    appError := fiber.New()
@@ -390,24 +382,6 @@ func TestS3ApiController_ListActions(t *testing.T) {
        wantErr bool
        statusCode int
    }{
        {
            name: "Get-bucket-tagging-non-existing-bucket",
            app: appError,
            args: args{
                req: httptest.NewRequest(http.MethodGet, "/my-bucket?tagging", nil),
            },
            wantErr: false,
            statusCode: 404,
        },
        {
            name: "Get-bucket-tagging-success",
            app: app,
            args: args{
                req: httptest.NewRequest(http.MethodGet, "/my-bucket?tagging", nil),
            },
            wantErr: false,
            statusCode: 200,
        },
        {
            name: "Get-bucket-acl-success",
            app: app,
@@ -516,17 +490,6 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
    </AccessControlPolicy>
    `

    tagBody := `
    <Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    <TagSet>
    <Tag>
    <Key>organization</Key>
    <Value>marketing</Value>
    </Tag>
    </TagSet>
    </Tagging>
    `

    s3ApiController := S3ApiController{
        be: &BackendMock{
            GetBucketAclFunc: func(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
@@ -535,10 +498,7 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
            PutBucketAclFunc: func(context.Context, string, []byte) error {
                return nil
            },
            CreateBucketFunc: func(context.Context, *s3.CreateBucketInput, []byte) error {
                return nil
            },
            PutBucketTaggingFunc: func(contextMoqParam context.Context, bucket string, tags map[string]string) error {
            CreateBucketFunc: func(context.Context, *s3.CreateBucketInput) error {
                return nil
            },
        },
@@ -581,24 +541,6 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
        wantErr bool
        statusCode int
    }{
        {
            name: "Put-bucket-tagging-invalid-body",
            app: app,
            args: args{
                req: httptest.NewRequest(http.MethodPut, "/my-bucket?tagging", nil),
            },
            wantErr: false,
            statusCode: 400,
        },
        {
            name: "Put-bucket-tagging-success",
            app: app,
            args: args{
                req: httptest.NewRequest(http.MethodPut, "/my-bucket?tagging", strings.NewReader(tagBody)),
            },
            wantErr: false,
            statusCode: 200,
        },
        {
            name: "Put-bucket-acl-invalid-acl",
            app: app,
@@ -925,10 +867,10 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
    app := fiber.New()
    s3ApiController := S3ApiController{
        be: &BackendMock{
            DeleteBucketFunc: func(context.Context, *s3.DeleteBucketInput) error {
                return nil
            GetBucketAclFunc: func(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
                return acldata, nil
            },
            DeleteBucketTaggingFunc: func(contextMoqParam context.Context, bucket string) error {
            DeleteBucketFunc: func(context.Context, *s3.DeleteBucketInput) error {
                return nil
            },
        },
@@ -960,15 +902,6 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
            wantErr: false,
            statusCode: 204,
        },
        {
            name: "Delete-bucket-tagging-success",
            app: app,
            args: args{
                req: httptest.NewRequest(http.MethodDelete, "/my-bucket?tagging", nil),
            },
            wantErr: false,
            statusCode: 204,
        },
    }
    for _, tt := range tests {
        resp, err := tt.app.Test(tt.args.req)
@@ -1265,7 +1198,6 @@ func TestS3ApiController_HeadObject(t *testing.T) {
    contentType := "application/xml"
    eTag := "Valid etag"
    lastModifie := time.Now()
    contentLength := int64(64)

    s3ApiController := S3ApiController{
        be: &BackendMock{
@@ -1275,7 +1207,7 @@ func TestS3ApiController_HeadObject(t *testing.T) {
            HeadObjectFunc: func(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
                return &s3.HeadObjectOutput{
                    ContentEncoding: &contentEncoding,
                    ContentLength: &contentLength,
                    ContentLength: 64,
                    ContentType: &contentType,
                    LastModified: &lastModifie,
                    ETag: &eTag,
@@ -1374,8 +1306,8 @@ func TestS3ApiController_CreateActions(t *testing.T) {
            CreateMultipartUploadFunc: func(context.Context, *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
                return &s3.CreateMultipartUploadOutput{}, nil
            },
            SelectObjectContentFunc: func(context.Context, *s3.SelectObjectContentInput) func(w *bufio.Writer) {
                return func(w *bufio.Writer) {}
            SelectObjectContentFunc: func(contextMoqParam context.Context, selectObjectContentInput *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error) {
                return s3response.SelectObjectContentResult{}, nil
            },
        },
    }

@@ -38,7 +38,7 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger) fiber.Handler {
    if ctx.Method() == http.MethodPatch {
        return ctx.Next()
    }
    if len(pathParts) == 2 && pathParts[1] != "" && ctx.Method() == http.MethodPut && !ctx.Request().URI().QueryArgs().Has("acl") && !ctx.Request().URI().QueryArgs().Has("tagging") {
    if len(pathParts) == 2 && pathParts[1] != "" && ctx.Method() == http.MethodPut && !ctx.Request().URI().QueryArgs().Has("acl") {
        if err := auth.IsAdmin(acct, isRoot); err != nil {
            return controllers.SendXMLResponse(ctx, nil, err, &controllers.MetaOpts{Logger: logger, Action: "CreateBucket"})
        }

@@ -18,11 +18,15 @@ import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "math"
    "net/http"
    "strconv"
    "os"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
    "github.com/aws/smithy-go/logging"
    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/auth"
    "github.com/versity/versitygw/s3api/controllers"
@@ -33,6 +37,7 @@ import (

const (
    iso8601Format = "20060102T150405Z"
    YYYYMMDD = "20060102"
)

type RootUserConfig struct {
@@ -44,103 +49,141 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
    acct := accounts{root: root, iam: iam}

    return func(ctx *fiber.Ctx) error {
        // If account is set in context locals, it means it was presigned url case
        _, ok := ctx.Locals("account").(auth.Account)
        if ok {
            return ctx.Next()
        }

        ctx.Locals("region", region)
        ctx.Locals("startTime", time.Now())
        authorization := ctx.Get("Authorization")
        if authorization == "" {
            return sendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty), logger)
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty), &controllers.MetaOpts{Logger: logger})
        }

        authData, err := utils.ParseAuthorization(authorization)
        if err != nil {
            return sendResponse(ctx, err, logger)
        // Check the signature version
        authParts := strings.Split(authorization, ",")
        for i, el := range authParts {
            authParts[i] = strings.TrimSpace(el)
        }

        if authData.Algorithm != "AWS4-HMAC-SHA256" {
            return sendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported), logger)
        if len(authParts) != 3 {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields), &controllers.MetaOpts{Logger: logger})
        }

        if authData.Region != region {
            return sendResponse(ctx, s3err.APIError{
        startParts := strings.Split(authParts[0], " ")

        if startParts[0] != "AWS4-HMAC-SHA256" {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported), &controllers.MetaOpts{Logger: logger})
        }

        credKv := strings.Split(startParts[1], "=")
        if len(credKv) != 2 {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed), &controllers.MetaOpts{Logger: logger})
        }
        // Credential variables validation
        creds := strings.Split(credKv[1], "/")
        if len(creds) != 5 {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed), &controllers.MetaOpts{Logger: logger})
        }
        if creds[4] != "aws4_request" {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureTerminationStr), &controllers.MetaOpts{Logger: logger})
        }
        if creds[3] != "s3" {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureIncorrService), &controllers.MetaOpts{Logger: logger})
        }
        if creds[2] != region {
            return controllers.SendResponse(ctx, s3err.APIError{
                Code: "SignatureDoesNotMatch",
                Description: fmt.Sprintf("Credential should be scoped to a valid Region, not %v", authData.Region),
                Description: fmt.Sprintf("Credential should be scoped to a valid Region, not %v", creds[2]),
                HTTPStatusCode: http.StatusForbidden,
            }, logger)
            }, &controllers.MetaOpts{Logger: logger})
        }

        ctx.Locals("isRoot", authData.Access == root.Access)
        ctx.Locals("isRoot", creds[0] == root.Access)

        account, err := acct.getAccount(authData.Access)
        _, err := time.Parse(YYYYMMDD, creds[1])
        if err != nil {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch), &controllers.MetaOpts{Logger: logger})
        }

        signHdrKv := strings.Split(authParts[1], "=")
        if len(signHdrKv) != 2 {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed), &controllers.MetaOpts{Logger: logger})
        }
        signedHdrs := strings.Split(signHdrKv[1], ";")

        account, err := acct.getAccount(creds[0])
        if err == auth.ErrNoSuchUser {
            return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID), logger)
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID), &controllers.MetaOpts{Logger: logger})
        }
        if err != nil {
            return sendResponse(ctx, err, logger)
            return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
        }
        ctx.Locals("account", account)

        // Check X-Amz-Date header
        date := ctx.Get("X-Amz-Date")
        if date == "" {
            return sendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingDateHeader), logger)
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingDateHeader), &controllers.MetaOpts{Logger: logger})
        }

        // Parse the date and check the date validity
        tdate, err := time.Parse(iso8601Format, date)
        if err != nil {
            return sendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedDate), logger)
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedDate), &controllers.MetaOpts{Logger: logger})
        }

        if date[:8] != authData.Date {
            return sendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch), logger)
        if date[:8] != creds[1] {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch), &controllers.MetaOpts{Logger: logger})
        }

        // Validate the dates difference
        err = utils.ValidateDate(tdate)
        err = validateDate(tdate)
        if err != nil {
            return sendResponse(ctx, err, logger)
            return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
        }

        if utils.IsBigDataAction(ctx) {
            // for streaming PUT actions, authorization is deferred
            // until end of stream due to need to get length and
            // checksum of the stream to validate authorization
            wrapBodyReader(ctx, func(r io.Reader) io.Reader {
                return utils.NewAuthReader(ctx, r, authData, account.Secret, debug)
            })
            return ctx.Next()
        }
        hashPayloadHeader := ctx.Get("X-Amz-Content-Sha256")
        ok := isSpecialPayload(hashPayloadHeader)

        hashPayload := ctx.Get("X-Amz-Content-Sha256")
        if !utils.IsSpecialPayload(hashPayload) {
        if !ok {
            // Calculate the hash of the request payload
            hashedPayload := sha256.Sum256(ctx.Body())
            hexPayload := hex.EncodeToString(hashedPayload[:])

            // Compare the calculated hash with the hash provided
            if hashPayload != hexPayload {
                return sendResponse(ctx, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch), logger)
            if hashPayloadHeader != hexPayload {
                return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch), &controllers.MetaOpts{Logger: logger})
            }
        }

        var contentLength int64
        contentLengthStr := ctx.Get("Content-Length")
        if contentLengthStr != "" {
            contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
            if err != nil {
                return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), logger)
            }
        }

        err = utils.CheckValidSignature(ctx, authData, account.Secret, hashPayload, tdate, contentLength, debug)
        // Create a new http request instance from fasthttp request
        req, err := utils.CreateHttpRequestFromCtx(ctx, signedHdrs)
        if err != nil {
            return sendResponse(ctx, err, logger)
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError), &controllers.MetaOpts{Logger: logger})
        }

        signer := v4.NewSigner()

        signErr := signer.SignHTTP(req.Context(), aws.Credentials{
            AccessKeyID: creds[0],
            SecretAccessKey: account.Secret,
        }, req, hashPayloadHeader, creds[3], region, tdate, func(options *v4.SignerOptions) {
            options.DisableURIPathEscaping = true
            if debug {
                options.LogSigning = true
                options.Logger = logging.NewStandardLogger(os.Stderr)
            }
        })
        if signErr != nil {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError), &controllers.MetaOpts{Logger: logger})
        }

        parts := strings.Split(req.Header.Get("Authorization"), " ")
        if len(parts) < 4 {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields), &controllers.MetaOpts{Logger: logger})
        }
        calculatedSign := strings.Split(parts[3], "=")[1]
        expectedSign := strings.Split(authParts[2], "=")[1]

        if expectedSign != calculatedSign {
            return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch), &controllers.MetaOpts{Logger: logger})
        }

        return ctx.Next()
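The rewritten middleware above verifies a SigV4 request by reconstruction: it parses the Authorization header into algorithm, credential scope, signed headers, and signature, validates each scope field, re-signs an equivalent http.Request with the SDK's v4.Signer using the stored secret, and compares the two signatures. A hedged sketch of the final comparison step (both headers end in "Signature=<hex>"):

// signaturesMatch compares the client's signature with one recomputed locally.
func signaturesMatch(sent, recomputed string) bool {
    sentParts := strings.Split(sent, "Signature=")
    calcParts := strings.Split(recomputed, "Signature=")
    if len(sentParts) != 2 || len(calcParts) != 2 {
        return false
    }
    return sentParts[1] == calcParts[1]
}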
@@ -164,6 +207,39 @@ func (a accounts) getAccount(access string) (auth.Account, error) {
    return a.iam.GetUserAccount(access)
}

func sendResponse(ctx *fiber.Ctx, err error, logger s3log.AuditLogger) error {
    return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
func isSpecialPayload(str string) bool {
    specialValues := map[string]bool{
        "UNSIGNED-PAYLOAD": true,
        "STREAMING-UNSIGNED-PAYLOAD-TRAILER": true,
        "STREAMING-AWS4-HMAC-SHA256-PAYLOAD": true,
        "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER": true,
        "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD": true,
        "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER": true,
    }

    return specialValues[str]
}

func validateDate(date time.Time) error {
    now := time.Now().UTC()
    diff := date.Unix() - now.Unix()

    // Checks the dates difference to be less than a minute
    if math.Abs(float64(diff)) > 60 {
        if diff > 0 {
            return s3err.APIError{
                Code: "SignatureDoesNotMatch",
                Description: fmt.Sprintf("Signature not yet current: %s is still later than %s", date.Format(iso8601Format), now.Format(iso8601Format)),
                HTTPStatusCode: http.StatusForbidden,
            }
        } else {
            return s3err.APIError{
                Code: "SignatureDoesNotMatch",
                Description: fmt.Sprintf("Signature expired: %s is now earlier than %s", date.Format(iso8601Format), now.Format(iso8601Format)),
                HTTPStatusCode: http.StatusForbidden,
            }
        }
    }

    return nil
}
|
||||
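To make the skew window above concrete, a minimal sketch (not part of the diff) exercising validateDate; the sixty-second tolerance is taken directly from the function:

// Sketch: validateDate accepts timestamps within one minute of server time.
func exampleValidateDateSkew() {
	now := time.Now().UTC()
	fmt.Println(validateDate(now))                       // <nil>
	fmt.Println(validateDate(now.Add(30 * time.Second))) // <nil>, inside the window
	fmt.Println(validateDate(now.Add(-2 * time.Minute))) // "Signature expired" APIError
	fmt.Println(validateDate(now.Add(2 * time.Minute)))  // "Signature not yet current" APIError
}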
@@ -1,31 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
	"io"

	"github.com/gofiber/fiber/v2"
)

func wrapBodyReader(ctx *fiber.Ctx, wr func(io.Reader) io.Reader) {
	r, ok := ctx.Locals("body-reader").(io.Reader)
	if !ok {
		r = ctx.Request().BodyStream()
	}

	r = wr(r)
	ctx.Locals("body-reader", r)
}
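wrapBodyReader is what lets several middlewares stack readers over the same request body: each call wraps whatever reader the previous middleware stored under the "body-reader" local. A hedged sketch of two layers (io.LimitReader and bufio are illustrative choices here, not what the gateway actually installs):

wrapBodyReader(ctx, func(r io.Reader) io.Reader {
	return io.LimitReader(r, 5<<30) // first layer sees the raw body stream
})
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
	return bufio.NewReader(r) // second layer sees the LimitReader
})
body, _ := ctx.Locals("body-reader").(io.Reader) // the fully layered reader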
@@ -1,61 +0,0 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
	"io"
	"time"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3log"
)

// ProcessChunkedBody initializes the chunked upload stream if the
// request appears to be a chunked upload
func ProcessChunkedBody(root RootUserConfig, iam auth.IAMService, logger s3log.AuditLogger, region string) fiber.Handler {
	return func(ctx *fiber.Ctx) error {
		decodedLength := ctx.Get("X-Amz-Decoded-Content-Length")
		if decodedLength == "" {
			return ctx.Next()
		}
		// TODO: validate content length

		authData, err := utils.ParseAuthorization(ctx.Get("Authorization"))
		if err != nil {
			return sendResponse(ctx, err, logger)
		}

		acct := ctx.Locals("account").(auth.Account)
		amzdate := ctx.Get("X-Amz-Date")
		date, _ := time.Parse(iso8601Format, amzdate)

		if utils.IsBigDataAction(ctx) {
			var err error
			wrapBodyReader(ctx, func(r io.Reader) io.Reader {
				var cr *utils.ChunkReader
				cr, err = utils.NewChunkReader(ctx, r, authData, region, acct.Secret, date)
				return cr
			})
			if err != nil {
				return sendResponse(ctx, err, logger)
			}
			return ctx.Next()
		}

		return ctx.Next()
	}
}
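For reference, a request only enters this path when it carries X-Amz-Decoded-Content-Length. A sketch of the header set a SigV4 streaming upload typically sends (values are illustrative; names and the byte counts follow the AWS sigv4-streaming example):

req.Header.Set("Content-Encoding", "aws-chunked")
req.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
req.Header.Set("X-Amz-Decoded-Content-Length", "66560") // object bytes, excluding chunk framing
req.Header.Set("Content-Length", "66824")               // wire bytes, including chunk framing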
@@ -16,11 +16,10 @@ package middlewares

import (
	"crypto/md5"
	"io"
	"encoding/base64"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3api/controllers"
	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3log"
)
@@ -32,20 +31,8 @@ func VerifyMD5Body(logger s3log.AuditLogger) fiber.Handler {
		return ctx.Next()
	}

	if utils.IsBigDataAction(ctx) {
		var err error
		wrapBodyReader(ctx, func(r io.Reader) io.Reader {
			r, err = utils.NewHashReader(r, incomingSum, utils.HashTypeMd5)
			return r
		})
		if err != nil {
			return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
		}
		return ctx.Next()
	}

	sum := md5.Sum(ctx.Body())
	calculatedSum := utils.Md5SumString(sum[:])
	calculatedSum := base64.StdEncoding.EncodeToString(sum[:])

	if incomingSum != calculatedSum {
		return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest), &controllers.MetaOpts{Logger: logger})
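The change above inlines base64 in place of the Md5SumString helper; both produce the RFC 1864 form of Content-MD5. A sketch of the client-side computation this middleware checks against:

sum := md5.Sum([]byte("hello world"))
contentMD5 := base64.StdEncoding.EncodeToString(sum[:]) // "XrY7u+Ae7tCTyyK7j1rNww=="
req.Header.Set("Content-MD5", contentMD5)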
@@ -1,69 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
	"io"
	"time"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/auth"
	"github.com/versity/versitygw/s3api/utils"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3log"
)

func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.AuditLogger, region string, debug bool) fiber.Handler {
	acct := accounts{root: root, iam: iam}

	return func(ctx *fiber.Ctx) error {
		if ctx.Query("X-Amz-Signature") == "" {
			return ctx.Next()
		}

		ctx.Locals("region", region)
		ctx.Locals("startTime", time.Now())

		authData, err := utils.ParsePresignedURIParts(ctx)
		if err != nil {
			return sendResponse(ctx, err, logger)
		}

		ctx.Locals("isRoot", authData.Access == root.Access)
		account, err := acct.getAccount(authData.Access)
		if err == auth.ErrNoSuchUser {
			return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID), logger)
		}
		if err != nil {
			return sendResponse(ctx, err, logger)
		}
		ctx.Locals("account", account)

		if utils.IsBigDataAction(ctx) {
			wrapBodyReader(ctx, func(r io.Reader) io.Reader {
				return utils.NewPresignedAuthReader(ctx, r, authData, account.Secret, debug)
			})

			return ctx.Next()
		}

		err = utils.CheckPresignedSignature(ctx, authData, account.Secret, debug)
		if err != nil {
			return sendResponse(ctx, err, logger)
		}

		return ctx.Next()
	}
}
@@ -32,7 +32,6 @@ type S3ApiServer struct {
	router *S3ApiRouter
	port   string
	cert   *tls.Certificate
	quiet  bool
	debug  bool
}

@@ -49,16 +48,12 @@ func New(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, po
	}

	// Logging middlewares
	if !server.quiet {
		app.Use(logger.New())
	}
	app.Use(logger.New())
	app.Use(middlewares.DecodeURL(l))
	app.Use(middlewares.RequestLogger(server.debug))

	// Authentication middlewares
	app.Use(middlewares.VerifyPresignedV4Signature(root, iam, l, region, server.debug))
	app.Use(middlewares.VerifyV4Signature(root, iam, l, region, server.debug))
	app.Use(middlewares.ProcessChunkedBody(root, iam, l, region))
	app.Use(middlewares.VerifyMD5Body(l))
	app.Use(middlewares.AclParser(be, l))

@@ -85,11 +80,6 @@ func WithDebug() Option {
	return func(s *S3ApiServer) { s.debug = true }
}

// WithQuiet silences default logging output
func WithQuiet() Option {
	return func(s *S3ApiServer) { s.quiet = true }
}

func (sa *S3ApiServer) Serve() (err error) {
	if sa.cert != nil {
		return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)
@@ -1,278 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
	"time"
	"unicode"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/smithy-go/logging"
	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3err"
)

const (
	iso8601Format = "20060102T150405Z"
	yyyymmdd      = "20060102"
)

// AuthReader is an io.Reader that validates the request authorization
// once the underlying reader returns io.EOF. This is needed for streaming
// data requests where the data size and checksum are not known until
// the data is completely read.
type AuthReader struct {
	ctx    *fiber.Ctx
	auth   AuthData
	secret string
	size   int
	r      *HashReader
	debug  bool
}

// NewAuthReader initializes an io.Reader that will verify the request
// v4 auth when the underlying reader returns io.EOF. This postpones the
// authorization check until the reader is consumed. So it is important that
// the consumer of this reader checks for the auth errors while reading.
func NewAuthReader(ctx *fiber.Ctx, r io.Reader, auth AuthData, secret string, debug bool) *AuthReader {
	var hr *HashReader
	hashPayload := ctx.Get("X-Amz-Content-Sha256")
	if !IsSpecialPayload(hashPayload) {
		hr, _ = NewHashReader(r, "", HashTypeSha256)
	} else {
		hr, _ = NewHashReader(r, "", HashTypeNone)
	}

	return &AuthReader{
		ctx:    ctx,
		r:      hr,
		auth:   auth,
		secret: secret,
		debug:  debug,
	}
}
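Since the signature check is deferred until io.EOF, the consumer of this reader must not ignore read errors. A minimal consumption sketch (store is a hypothetical backend write, not a gateway function):

ar := utils.NewAuthReader(ctx, body, authData, secret, false)
buf := make([]byte, 32*1024)
for {
	n, err := ar.Read(buf)
	store(buf[:n]) // hypothetical
	if err == io.EOF {
		break // stream fully read and signature verified
	}
	if err != nil {
		return err // may be ErrSignatureDoesNotMatch or ErrContentSHA256Mismatch
	}
}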
// Read allows *AuthReader to be used as an io.Reader
func (ar *AuthReader) Read(p []byte) (int, error) {
	n, err := ar.r.Read(p)
	ar.size += n

	if errors.Is(err, io.EOF) {
		verr := ar.validateSignature()
		if verr != nil {
			return n, verr
		}
	}

	return n, err
}

func (ar *AuthReader) validateSignature() error {
	date := ar.ctx.Get("X-Amz-Date")
	if date == "" {
		return s3err.GetAPIError(s3err.ErrMissingDateHeader)
	}

	hashPayload := ar.ctx.Get("X-Amz-Content-Sha256")
	if !IsSpecialPayload(hashPayload) {
		hexPayload := ar.r.Sum()

		// Compare the calculated hash with the hash provided
		if hashPayload != hexPayload {
			return s3err.GetAPIError(s3err.ErrContentSHA256Mismatch)
		}
	}

	// Parse the date and check the date validity
	tdate, err := time.Parse(iso8601Format, date)
	if err != nil {
		return s3err.GetAPIError(s3err.ErrMalformedDate)
	}

	return CheckValidSignature(ar.ctx, ar.auth, ar.secret, hashPayload, tdate, int64(ar.size), ar.debug)
}

const (
	service = "s3"
)

// CheckValidSignature validates the ctx v4 auth signature
func CheckValidSignature(ctx *fiber.Ctx, auth AuthData, secret, checksum string, tdate time.Time, contentLen int64, debug bool) error {
	signedHdrs := strings.Split(auth.SignedHeaders, ";")

	// Create a new http request instance from fasthttp request
	req, err := createHttpRequestFromCtx(ctx, signedHdrs, contentLen)
	if err != nil {
		return fmt.Errorf("create http request from context: %w", err)
	}

	signer := v4.NewSigner()

	signErr := signer.SignHTTP(req.Context(),
		aws.Credentials{
			AccessKeyID:     auth.Access,
			SecretAccessKey: secret,
		},
		req, checksum, service, auth.Region, tdate,
		func(options *v4.SignerOptions) {
			options.DisableURIPathEscaping = true
			if debug {
				options.LogSigning = true
				options.Logger = logging.NewStandardLogger(os.Stderr)
			}
		})
	if signErr != nil {
		return fmt.Errorf("sign generated http request: %w", signErr)
	}

	genAuth, err := ParseAuthorization(req.Header.Get("Authorization"))
	if err != nil {
		return err
	}

	if auth.Signature != genAuth.Signature {
		return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
	}

	return nil
}
// AuthData is the parsed authorization data from the header
|
||||
type AuthData struct {
|
||||
Algorithm string
|
||||
Access string
|
||||
Region string
|
||||
SignedHeaders string
|
||||
Signature string
|
||||
Date string
|
||||
}
|
||||
|
||||
// ParseAuthorization returns the parsed fields for the aws v4 auth header
|
||||
// example authorization string from aws docs:
|
||||
// Authorization: AWS4-HMAC-SHA256
|
||||
// Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,
|
||||
// SignedHeaders=host;range;x-amz-date,
|
||||
// Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024
|
||||
func ParseAuthorization(authorization string) (AuthData, error) {
|
||||
a := AuthData{}
|
||||
|
||||
// authorization must start with:
|
||||
// Authorization: <ALGORITHM>
|
||||
// followed by key=value pairs separated by ","
|
||||
authParts := strings.SplitN(authorization, " ", 2)
|
||||
for i, el := range authParts {
|
||||
if strings.Contains(el, " ") {
|
||||
authParts[i] = removeSpace(el)
|
||||
}
|
||||
}
|
||||
|
||||
if len(authParts) < 2 {
|
||||
return a, s3err.GetAPIError(s3err.ErrMissingFields)
|
||||
}
|
||||
|
||||
algo := authParts[0]
|
||||
|
||||
kvData := authParts[1]
|
||||
kvPairs := strings.Split(kvData, ",")
|
||||
// we are expecting at least Credential, SignedHeaders, and Signature
|
||||
// key value pairs here
|
||||
if len(kvPairs) < 3 {
|
||||
return a, s3err.GetAPIError(s3err.ErrMissingFields)
|
||||
}
|
||||
|
||||
var access, region, signedHeaders, signature, date string
|
||||
|
||||
for _, kv := range kvPairs {
|
||||
keyValue := strings.Split(kv, "=")
|
||||
if len(keyValue) != 2 {
|
||||
switch {
|
||||
case strings.HasPrefix(kv, "Credential"):
|
||||
return a, s3err.GetAPIError(s3err.ErrCredMalformed)
|
||||
case strings.HasPrefix(kv, "SignedHeaders"):
|
||||
return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
|
||||
}
|
||||
return a, s3err.GetAPIError(s3err.ErrMissingFields)
|
||||
}
|
||||
key := strings.TrimSpace(keyValue[0])
|
||||
value := strings.TrimSpace(keyValue[1])
|
||||
|
||||
switch key {
|
||||
case "Credential":
|
||||
creds := strings.Split(value, "/")
|
||||
if len(creds) != 5 {
|
||||
return a, s3err.GetAPIError(s3err.ErrCredMalformed)
|
||||
}
|
||||
if creds[3] != "s3" {
|
||||
return a, s3err.GetAPIError(s3err.ErrSignatureIncorrService)
|
||||
}
|
||||
if creds[4] != "aws4_request" {
|
||||
return a, s3err.GetAPIError(s3err.ErrSignatureTerminationStr)
|
||||
}
|
||||
_, err := time.Parse(yyyymmdd, creds[1])
|
||||
if err != nil {
|
||||
return a, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
|
||||
}
|
||||
access = creds[0]
|
||||
date = creds[1]
|
||||
region = creds[2]
|
||||
case "SignedHeaders":
|
||||
signedHeaders = value
|
||||
case "Signature":
|
||||
signature = value
|
||||
}
|
||||
}
|
||||
|
||||
return AuthData{
|
||||
Algorithm: algo,
|
||||
Access: access,
|
||||
Region: region,
|
||||
SignedHeaders: signedHeaders,
|
||||
Signature: signature,
|
||||
Date: date,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func removeSpace(str string) string {
|
||||
var b strings.Builder
|
||||
b.Grow(len(str))
|
||||
for _, ch := range str {
|
||||
if !unicode.IsSpace(ch) {
|
||||
b.WriteRune(ch)
|
||||
}
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
var (
|
||||
specialValues = map[string]bool{
|
||||
"UNSIGNED-PAYLOAD": true,
|
||||
"STREAMING-UNSIGNED-PAYLOAD-TRAILER": true,
|
||||
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD": true,
|
||||
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER": true,
|
||||
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD": true,
|
||||
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER": true,
|
||||
}
|
||||
)
|
||||
|
||||
// IsSpecialPayload checks for streaming/unsigned authorization types
|
||||
func IsSpecialPayload(str string) bool {
|
||||
return specialValues[str]
|
||||
}
|
||||
@@ -1,130 +0,0 @@
package utils

import (
	"net"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/gofiber/fiber/v2"
	"github.com/valyala/fasthttp/fasthttputil"
)

func TestAuthParse(t *testing.T) {
	vectors := []struct {
		name    string // name of test string
		authstr string // Authorization string
		algo    string
		sig     string
	}{
		{
			name:    "restic",
			authstr: "AWS4-HMAC-SHA256 Credential=user/20240116/us-east-1/s3/aws4_request,SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length,Signature=d5199fc7f3aa35dd3d400427be2ae4c98bfad390785280cbb9eea015b51e12ac",
			algo:    "AWS4-HMAC-SHA256",
			sig:     "d5199fc7f3aa35dd3d400427be2ae4c98bfad390785280cbb9eea015b51e12ac",
		},
		{
			name:    "aws example",
			authstr: "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, SignedHeaders=host;range;x-amz-date, Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024",
			algo:    "AWS4-HMAC-SHA256",
			sig:     "fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024",
		},
		{
			name:    "s3browser",
			authstr: "AWS4-HMAC-SHA256 Credential=access_key/20240206/us-east-1/s3/aws4_request,SignedHeaders=host;user-agent;x-amz-content-sha256;x-amz-date, Signature=37a35d96998d786113ad420c57c22c5433f6aca74f88f26566caa047fc3601c6",
			algo:    "AWS4-HMAC-SHA256",
			sig:     "37a35d96998d786113ad420c57c22c5433f6aca74f88f26566caa047fc3601c6",
		},
	}

	for _, v := range vectors {
		t.Run(v.name, func(t *testing.T) {
			data, err := ParseAuthorization(v.authstr)
			if err != nil {
				t.Fatal(err)
			}
			if data.Algorithm != v.algo {
				t.Errorf("algo got %v, expected %v", data.Algorithm, v.algo)
			}
			if data.Signature != v.sig {
				t.Errorf("signature got %v, expected %v", data.Signature, v.sig)
			}
		})
	}
}

// 2024/02/06 21:03:28 Request headers:
// 2024/02/06 21:03:28 Host: 172.21.0.160:11000
// 2024/02/06 21:03:28 User-Agent: S3 Browser/11.5.7 (https://s3browser.com)
// 2024/02/06 21:03:28 Authorization: AWS4-HMAC-SHA256 Credential=access_key/20240206/us-east-1/s3/aws4_request,SignedHeaders=host;user-agent;x-amz-content-sha256;x-amz-date, Signature=37a35d96998d786113ad420c57c22c5433f6aca74f88f26566caa047fc3601c6
// 2024/02/06 21:03:28 X-Amz-Content-Sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
// 2024/02/06 21:03:28 X-Amz-Date: 20240206T210328Z
func Test_Client_UserAgent(t *testing.T) {
	signedHdrs := []string{"host", "user-agent", "x-amz-content-sha256", "x-amz-date"}
	access := "access_key"
	secret := "secret_key"
	region := "us-east-1"
	host := "172.21.0.160:11000"
	agent := "S3 Browser/11.5.7 (https://s3browser.com)"
	expectedSig := "37a35d96998d786113ad420c57c22c5433f6aca74f88f26566caa047fc3601c6"
	dateStr := "20240206T210328Z"

	app := fiber.New(fiber.Config{DisableStartupMessage: true})

	tdate, err := time.Parse(iso8601Format, dateStr)
	if err != nil {
		t.Fatal(err)
	}

	app.Get("/", func(c *fiber.Ctx) error {
		req, err := createHttpRequestFromCtx(c, signedHdrs, int64(c.Request().Header.ContentLength()))
		if err != nil {
			t.Fatal(err)
		}

		req.Host = host
		req.Header.Add("X-Amz-Content-Sha256", zeroLenSig)

		signer := v4.NewSigner()

		signErr := signer.SignHTTP(req.Context(),
			aws.Credentials{
				AccessKeyID:     access,
				SecretAccessKey: secret,
			},
			req, zeroLenSig, service, region, tdate,
			func(options *v4.SignerOptions) {
				options.DisableURIPathEscaping = true
			})
		if signErr != nil {
			t.Fatalf("sign generated http request: %v", signErr)
		}

		genAuth, err := ParseAuthorization(req.Header.Get("Authorization"))
		if err != nil {
			return err
		}

		if genAuth.Signature != expectedSig {
			t.Errorf("SIG: %v\nexpected: %v\n", genAuth.Signature, expectedSig)
		}

		return c.Send(c.Request().Header.UserAgent())
	})

	ln := fasthttputil.NewInmemoryListener()
	go func() {
		err := app.Listener(ln)
		if err != nil {
			panic(err)
		}
	}()

	c := fiber.AcquireClient()
	c.UserAgent = agent
	a := c.Get("http://example.com")
	a.HostClient.Dial = func(_ string) (net.Conn, error) { return ln.Dial() }
	a.String()
	fiber.ReleaseClient(c)
}
@@ -1,269 +0,0 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"strconv"
	"time"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3err"
)

// chunked uploads described in:
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html

const (
	chunkHdrStr       = ";chunk-signature="
	chunkHdrDelim     = "\r\n"
	zeroLenSig        = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	awsV4             = "AWS4"
	awsS3Service      = "s3"
	awsV4Request      = "aws4_request"
	streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
)
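The wire format those constants describe, per the AWS sigv4-streaming example (two data chunks plus the zero-length terminator; the signatures are the sample values from the AWS documentation):

const exampleChunkedBody = "10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648\r\n" +
	"<65536 bytes of object data>\r\n" +
	"400;chunk-signature=0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497\r\n" +
	"<1024 bytes of object data>\r\n" +
	"0;chunk-signature=b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9\r\n\r\n"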
// ChunkReader reads from chunked upload request body, and returns
// object data stream
type ChunkReader struct {
	r                io.Reader
	signingKey       []byte
	prevSig          string
	parsedSig        string
	currentChunkSize int64
	chunkDataLeft    int64
	trailerExpected  int
	stash            []byte
	chunkHash        hash.Hash
	strToSignPrefix  string
	skipcheck        bool
}

// NewChunkReader reads from request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (*ChunkReader, error) {
	return &ChunkReader{
		r:          r,
		signingKey: getSigningKey(secret, region, date),
		// the authdata.Signature is validated in the auth-reader,
		// so we can use that here without any other checks
		prevSig:         authdata.Signature,
		chunkHash:       sha256.New(),
		strToSignPrefix: getStringToSignPrefix(date, region),
	}, nil
}

// Read satisfies the io.Reader for this type
func (cr *ChunkReader) Read(p []byte) (int, error) {
	n, err := cr.r.Read(p)
	if err != nil && err != io.EOF {
		return n, err
	}

	if cr.chunkDataLeft < int64(n) {
		chunkSize := cr.chunkDataLeft
		if chunkSize > 0 {
			cr.chunkHash.Write(p[:chunkSize])
		}
		n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
		n += int(chunkSize)
		return n, err
	}

	cr.chunkDataLeft -= int64(n)
	cr.chunkHash.Write(p[:n])
	return n, err
}

// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// This part is the same for all chunks,
// only the previous signature and hash of current chunk changes
func getStringToSignPrefix(date time.Time, region string) string {
	credentialScope := fmt.Sprintf("%s/%s/%s/%s",
		date.Format("20060102"),
		region,
		awsS3Service,
		awsV4Request)

	return fmt.Sprintf("%s\n%s\n%s",
		streamPayloadAlgo,
		date.Format("20060102T150405Z"),
		credentialScope)
}

// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// For each chunk, you calculate the signature using the following
// string to sign. For the first chunk, you use the seed-signature as the
// previous signature.
func getChunkStringToSign(prefix, prevSig string, chunkHash []byte) string {
	return fmt.Sprintf("%s\n%s\n%s\n%s",
		prefix,
		prevSig,
		zeroLenSig,
		hex.EncodeToString(chunkHash))
}

// The provided p should have all of the previous chunk data and trailer
// consumed already. The positioning here is expected that p[0] starts the
// new chunk size with the ";chunk-signature=" following. The only exception
// is if we started consuming the trailer, but hit the end of the read buffer.
// In this case, parseAndRemoveChunkInfo is called with skipcheck=true to
// finish consuming the final trailer bytes.
// This parses the chunk metadata in situ without allocating an extra buffer.
// It will just read and validate the chunk metadata and then move the
// following chunk data to overwrite the metadata in the provided buffer.
func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
	n := len(p)

	if !cr.skipcheck && cr.parsedSig != "" {
		chunkhash := cr.chunkHash.Sum(nil)
		cr.chunkHash.Reset()

		sigstr := getChunkStringToSign(cr.strToSignPrefix, cr.prevSig, chunkhash)
		cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))

		if cr.currentChunkSize != 0 && cr.prevSig != cr.parsedSig {
			return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
		}
	}

	if cr.trailerExpected != 0 {
		if len(p) < len(chunkHdrDelim) {
			// This is the special case where we need to consume the
			// trailer, but instead hit the end of the buffer. The
			// subsequent call will finish consuming the trailer.
			cr.chunkDataLeft = 0
			cr.trailerExpected -= len(p)
			cr.skipcheck = true
			return 0, nil
		}
		// move data up to remove trailer
		copy(p, p[cr.trailerExpected:])
		n -= cr.trailerExpected
	}

	cr.skipcheck = false

	chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
	cr.currentChunkSize = chunkSize
	cr.parsedSig = sig
	if err == errskipHeader {
		cr.chunkDataLeft = 0
		return 0, nil
	}
	if err != nil {
		return 0, err
	}
	if chunkSize == 0 {
		return 0, io.EOF
	}

	cr.trailerExpected = len(chunkHdrDelim)

	// move data up to remove chunk header
	copy(p, p[bufOffset:n])
	n -= bufOffset

	// if remaining buffer larger than chunk data,
	// parse next header in buffer
	if int64(n) > chunkSize {
		cr.chunkDataLeft = 0
		cr.chunkHash.Write(p[:chunkSize])
		n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
		return n + int(chunkSize), err
	} else {
		cr.chunkDataLeft = chunkSize - int64(n)
		cr.chunkHash.Write(p[:n])
	}

	return n, nil
}

// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
// Task 3: Calculate Signature
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html#signing-request-intro
func getSigningKey(secret, region string, date time.Time) []byte {
	dateKey := hmac256([]byte(awsV4+secret), []byte(date.Format(yyyymmdd)))
	dateRegionKey := hmac256(dateKey, []byte(region))
	dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
	signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
	return signingKey
}
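A worked sketch of the derivation chain getSigningKey implements, using the example credentials from the AWS SigV4 documentation (stringToSign here stands in for a value built per the docs above):

// kSigning = HMAC(HMAC(HMAC(HMAC("AWS4"+secret, date), region), "s3"), "aws4_request")
key := getSigningKey("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", "us-east-1",
	time.Date(2013, 5, 24, 0, 0, 0, 0, time.UTC))
sig := hex.EncodeToString(hmac256(key, []byte(stringToSign))) // final hex signature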
func hmac256(key []byte, data []byte) []byte {
	hash := hmac.New(sha256.New, key)
	hash.Write(data)
	return hash.Sum(nil)
}

var (
	errInvalidChunkFormat = errors.New("invalid chunk header format")
	errskipHeader         = errors.New("skip to next header")
)

const (
	maxHeaderSize = 1024
)

// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
	if cr.stash != nil {
		tmp := make([]byte, maxHeaderSize)
		copy(tmp, cr.stash)
		copy(tmp[len(cr.stash):], header)
		header = tmp
		cr.stash = nil
	}

	semicolonIndex := bytes.Index(header, []byte(chunkHdrStr))
	if semicolonIndex == -1 {
		cr.stash = make([]byte, len(header))
		copy(cr.stash, header)
		cr.trailerExpected = 0
		return 0, "", 0, errskipHeader
	}

	sigIndex := semicolonIndex + len(chunkHdrStr)
	sigEndIndex := bytes.Index(header[sigIndex:], []byte(chunkHdrDelim))
	if sigEndIndex == -1 {
		cr.stash = make([]byte, len(header))
		copy(cr.stash, header)
		cr.trailerExpected = 0
		return 0, "", 0, errskipHeader
	}

	chunkSizeBytes := header[:semicolonIndex]
	chunkSize, err := strconv.ParseInt(string(chunkSizeBytes), 16, 64)
	if err != nil {
		return 0, "", 0, errInvalidChunkFormat
	}

	signature := string(header[sigIndex:(sigIndex + sigEndIndex)])
	dataStartOffset := sigIndex + sigEndIndex + len(chunkHdrDelim)

	return chunkSize, signature, dataStartOffset, nil
}
@@ -1,130 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"hash"
	"io"

	"github.com/versity/versitygw/s3err"
)

// HashType identifies the checksum algorithm to be used
type HashType string

const (
	// HashTypeMd5 generates MD5 checksum for the data stream
	HashTypeMd5 = "md5"
	// HashTypeSha256 generates SHA256 checksum for the data stream
	HashTypeSha256 = "sha256"
	// HashTypeNone is a no-op checksum for the data stream
	HashTypeNone = "none"
)

// HashReader is an io.Reader that calculates the checksum
// as the data is read
type HashReader struct {
	hashType HashType
	hash     hash.Hash
	r        io.Reader
	sum      string
}

var (
	errInvalidHashType = errors.New("unsupported or invalid checksum type")
)

// NewHashReader initializes an io.Reader from an underlying io.Reader that
// calculates the checksum while the reader is being read from. If the
// sum provided is not "", the reader will return an error when the underlying
// reader returns io.EOF if the checksum does not match the provided expected
// checksum. If the provided sum is "", then the Sum() method can still
// be used to get the current checksum for the data read so far.
func NewHashReader(r io.Reader, expectedSum string, ht HashType) (*HashReader, error) {
	var hash hash.Hash
	switch ht {
	case HashTypeMd5:
		hash = md5.New()
	case HashTypeSha256:
		hash = sha256.New()
	case HashTypeNone:
		hash = noop{}
	default:
		return nil, errInvalidHashType
	}

	return &HashReader{
		hash:     hash,
		r:        r,
		sum:      expectedSum,
		hashType: ht,
	}, nil
}

// Read allows *HashReader to be used as an io.Reader
func (hr *HashReader) Read(p []byte) (int, error) {
	n, readerr := hr.r.Read(p)
	_, err := hr.hash.Write(p[:n])
	if err != nil {
		return n, err
	}
	if errors.Is(readerr, io.EOF) && hr.sum != "" {
		switch hr.hashType {
		case HashTypeMd5:
			sum := base64.StdEncoding.EncodeToString(hr.hash.Sum(nil))
			if sum != hr.sum {
				return n, s3err.GetAPIError(s3err.ErrInvalidDigest)
			}
		case HashTypeSha256:
			sum := hex.EncodeToString(hr.hash.Sum(nil))
			if sum != hr.sum {
				return n, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch)
			}
		default:
			return n, errInvalidHashType
		}
	}
	return n, readerr
}

// Sum returns the checksum hash of the data read so far
func (hr *HashReader) Sum() string {
	switch hr.hashType {
	case HashTypeMd5:
		return Md5SumString(hr.hash.Sum(nil))
	case HashTypeSha256:
		return hex.EncodeToString(hr.hash.Sum(nil))
	default:
		return ""
	}
}

// Md5SumString converts the hash bytes to the string checksum value
func Md5SumString(b []byte) string {
	return base64.StdEncoding.EncodeToString(b)
}

type noop struct{}

func (n noop) Write(p []byte) (int, error) { return len(p), nil }
func (n noop) Sum(b []byte) []byte         { return []byte{} }
func (n noop) Reset()                      {}
func (n noop) Size() int                   { return 0 }
func (n noop) BlockSize() int              { return 1 }
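A hedged usage sketch: streaming a body into a destination writer while verifying a client-supplied Content-MD5 (base64, per RFC 1864); the mismatch surfaces as the s3err at io.EOF:

hr, err := utils.NewHashReader(body, expectedMD5, utils.HashTypeMd5)
if err != nil {
	return err // the invalid-hash-type error for an unknown HashType
}
if _, err := io.Copy(dst, hr); err != nil {
	return err // ErrInvalidDigest when the checksum does not match
}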
@@ -1,243 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/smithy-go/logging"
	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3err"
)

const (
	unsignedPayload string = "UNSIGNED-PAYLOAD"
)

// PresignedAuthReader is an io.Reader that validates presigned request authorization
// once the underlying reader returns io.EOF. This is needed for streaming
// data requests where the data size is not known until
// the data is completely read.
type PresignedAuthReader struct {
	ctx    *fiber.Ctx
	auth   AuthData
	secret string
	r      io.Reader
	debug  bool
}

func NewPresignedAuthReader(ctx *fiber.Ctx, r io.Reader, auth AuthData, secret string, debug bool) *PresignedAuthReader {
	return &PresignedAuthReader{
		ctx:    ctx,
		r:      r,
		auth:   auth,
		secret: secret,
		debug:  debug,
	}
}

// Read allows *PresignedAuthReader to be used as an io.Reader
func (pr *PresignedAuthReader) Read(p []byte) (int, error) {
	n, err := pr.r.Read(p)

	if errors.Is(err, io.EOF) {
		cerr := CheckPresignedSignature(pr.ctx, pr.auth, pr.secret, pr.debug)
		if cerr != nil {
			return n, cerr
		}
	}

	return n, err
}

// CheckPresignedSignature validates presigned request signature
func CheckPresignedSignature(ctx *fiber.Ctx, auth AuthData, secret string, debug bool) error {
	signedHdrs := strings.Split(auth.SignedHeaders, ";")

	var contentLength int64
	var err error
	contentLengthStr := ctx.Get("Content-Length")
	if contentLengthStr != "" {
		contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
		if err != nil {
			return s3err.GetAPIError(s3err.ErrInvalidRequest)
		}
	}

	// Create a new http request instance from fasthttp request
	req, err := createPresignedHttpRequestFromCtx(ctx, signedHdrs, contentLength)
	if err != nil {
		return fmt.Errorf("create http request from context: %w", err)
	}

	date, _ := time.Parse(iso8601Format, auth.Date)

	signer := v4.NewSigner()
	uri, _, signErr := signer.PresignHTTP(ctx.Context(), aws.Credentials{
		AccessKeyID:     auth.Access,
		SecretAccessKey: secret,
	}, req, unsignedPayload, service, auth.Region, date, func(options *v4.SignerOptions) {
		options.DisableURIPathEscaping = true
		if debug {
			options.LogSigning = true
			options.Logger = logging.NewStandardLogger(os.Stderr)
		}
	})
	if signErr != nil {
		return fmt.Errorf("presign generated http request: %w", signErr)
	}

	urlParts, err := url.Parse(uri)
	if err != nil {
		return fmt.Errorf("parse presigned url: %w", err)
	}

	signature := urlParts.Query().Get("X-Amz-Signature")
	if signature != auth.Signature {
		return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
	}

	return nil
}

// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
//
// # ParsePresignedURIParts parses and validates request URL query parameters
//
// ?X-Amz-Algorithm=AWS4-HMAC-SHA256
// &X-Amz-Credential=access-key-id/20130721/us-east-1/s3/aws4_request
// &X-Amz-Date=20130721T201207Z
// &X-Amz-Expires=86400
// &X-Amz-SignedHeaders=host
// &X-Amz-Signature=1e68ad45c1db540284a4a1eca3884c293ba1a0ff63ab9db9a15b5b29dfa02cd8
func ParsePresignedURIParts(ctx *fiber.Ctx) (AuthData, error) {
	a := AuthData{}

	// Get and verify algorithm query parameter
	algo := ctx.Query("X-Amz-Algorithm")
	if algo == "" {
		return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
	}
	if algo != "AWS4-HMAC-SHA256" {
		return a, s3err.GetAPIError(s3err.ErrInvalidQuerySignatureAlgo)
	}

	// Parse and validate credentials query parameter
	credsQuery := ctx.Query("X-Amz-Credential")
	if credsQuery == "" {
		return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
	}

	creds := strings.Split(credsQuery, "/")
	if len(creds) != 5 {
		return a, s3err.GetAPIError(s3err.ErrCredMalformed)
	}
	if creds[3] != "s3" {
		return a, s3err.GetAPIError(s3err.ErrSignatureIncorrService)
	}
	if creds[4] != "aws4_request" {
		return a, s3err.GetAPIError(s3err.ErrSignatureTerminationStr)
	}
	_, err := time.Parse(yyyymmdd, creds[1])
	if err != nil {
		return a, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
	}

	// Parse and validate Date query param
	date := ctx.Query("X-Amz-Date")
	if date == "" {
		return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
	}

	tdate, err := time.Parse(iso8601Format, date)
	if err != nil {
		return a, s3err.GetAPIError(s3err.ErrMalformedDate)
	}

	if date[:8] != creds[1] {
		return a, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
	}

	if ctx.Locals("region") != creds[2] {
		return a, s3err.APIError{
			Code:           "SignatureDoesNotMatch",
			Description:    fmt.Sprintf("Credential should be scoped to a valid Region, not %v", creds[2]),
			HTTPStatusCode: http.StatusForbidden,
		}
	}

	signature := ctx.Query("X-Amz-Signature")
	if signature == "" {
		return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
	}

	signedHdrs := ctx.Query("X-Amz-SignedHeaders")
	if signedHdrs == "" {
		return a, s3err.GetAPIError(s3err.ErrInvalidQueryParams)
	}

	// Validate X-Amz-Expires query param and check if request is expired
	err = validateExpiration(ctx.Query("X-Amz-Expires"), tdate)
	if err != nil {
		return a, err
	}

	a.Signature = signature
	a.Access = creds[0]
	a.Algorithm = algo
	a.Region = creds[2]
	a.SignedHeaders = signedHdrs
	a.Date = date

	return a, nil
}

func validateExpiration(str string, date time.Time) error {
	if str == "" {
		return s3err.GetAPIError(s3err.ErrInvalidQueryParams)
	}

	exp, err := strconv.Atoi(str)
	if err != nil {
		return s3err.GetAPIError(s3err.ErrMalformedExpires)
	}

	if exp < 0 {
		return s3err.GetAPIError(s3err.ErrNegativeExpires)
	}

	if exp > 604800 {
		return s3err.GetAPIError(s3err.ErrMaximumExpires)
	}

	now := time.Now()
	passed := int(now.Sub(date).Seconds())

	if passed > exp {
		return s3err.GetAPIError(s3err.ErrExpiredPresignRequest)
	}

	return nil
}
@@ -1,100 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"testing"
	"time"

	"github.com/versity/versitygw/s3err"
)

func Test_validateExpiration(t *testing.T) {
	type args struct {
		str  string
		date time.Time
	}
	tests := []struct {
		name string
		args args
		err  error
	}{
		{
			name: "empty-expiration",
			args: args{
				str:  "",
				date: time.Now(),
			},
			err: s3err.GetAPIError(s3err.ErrInvalidQueryParams),
		},
		{
			name: "invalid-expiration",
			args: args{
				str:  "invalid_expiration",
				date: time.Now(),
			},
			err: s3err.GetAPIError(s3err.ErrMalformedExpires),
		},
		{
			name: "negative-expiration",
			args: args{
				str:  "-320",
				date: time.Now(),
			},
			err: s3err.GetAPIError(s3err.ErrNegativeExpires),
		},
		{
			name: "exceeding-expiration",
			args: args{
				str:  "6048000",
				date: time.Now(),
			},
			err: s3err.GetAPIError(s3err.ErrMaximumExpires),
		},
		{
			name: "expired value",
			args: args{
				str:  "200",
				date: time.Now().AddDate(0, 0, -1),
			},
			err: s3err.GetAPIError(s3err.ErrExpiredPresignRequest),
		},
		{
			name: "valid expiration",
			args: args{
				str:  "300",
				date: time.Now(),
			},
			err: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateExpiration(tt.args.str, tt.args.date)
			// Check for nil case
			if tt.err == nil && err != nil {
				t.Errorf("Expected nil error, got: %v", err)
				return
			} else if tt.err == nil && err == nil {
				// Both are nil, no need for further comparison
				return
			}

			if err.Error() != tt.err.Error() {
				t.Errorf("Expected error: %v, got: %v", tt.err, err)
			}
		})
	}
}
@@ -1,48 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"reflect"
	"unsafe"
)

// This is a hack to replace the default IgnoredHeaders in the aws-sdk-go-v2
// internal/v4 package. Some AWS applications
// (e.g. AWS Java SDK v1, Athena JDBC driver, s3 browser) sign the requests
// including the User-Agent header. The aws sdk doesn't allow directly
// modifying the ignored header list. Below is a hack to replace this list
// with our own.

type Rule interface {
	IsValid(value string) bool
}
type Rules []Rule

//go:linkname __ignoredHeaders github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4.IgnoredHeaders
var __ignoredHeaders unsafe.Pointer

func init() {
	// Avoids "go.info.github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4.IgnoredHeaders:
	// relocation target go.info.github.com/xxx/xxx/xxx.Rules not defined"
	var ignoredHeaders = (*Rules)(unsafe.Pointer(&__ignoredHeaders))

	// clear the map, and set just the ignored headers we want
	reflect.ValueOf((*ignoredHeaders)[0]).FieldByName("Rule").Elem().Clear()
	reflect.ValueOf((*ignoredHeaders)[0]).FieldByName("Rule").Elem().SetMapIndex(
		reflect.ValueOf("Authorization"), reflect.ValueOf(struct{}{}))
	reflect.ValueOf((*ignoredHeaders)[0]).FieldByName("Rule").Elem().SetMapIndex(
		reflect.ValueOf("Expect"), reflect.ValueOf(struct{}{}))
}
@@ -18,12 +18,10 @@ import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/gofiber/fiber/v2"
	"github.com/valyala/fasthttp"
@@ -51,16 +49,10 @@ func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]strin
	return
}

func createHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength int64) (*http.Request, error) {
func CreateHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string) (*http.Request, error) {
	req := ctx.Request()
	var body io.Reader
	if IsBigDataAction(ctx) {
		body = req.BodyStream()
	} else {
		body = bytes.NewReader(req.Body())
	}

	httpReq, err := http.NewRequest(string(req.Header.Method()), string(ctx.Context().RequestURI()), body)
	httpReq, err := http.NewRequest(string(req.Header.Method()), string(ctx.Context().RequestURI()), bytes.NewReader(req.Body()))
	if err != nil {
		return nil, errors.New("error in creating an http request")
	}
@@ -77,68 +69,6 @@ func createHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength
	// If content length is non 0, then the header will be included
	if !includeHeader("Content-Length", signedHdrs) {
		httpReq.ContentLength = 0
	} else {
		httpReq.ContentLength = contentLength
	}

	// Set the Host header
	httpReq.Host = string(req.Header.Host())

	return httpReq, nil
}

var (
	signedQueryArgs = map[string]bool{
		"X-Amz-Algorithm":     true,
		"X-Amz-Credential":    true,
		"X-Amz-Date":          true,
		"X-Amz-SignedHeaders": true,
		"X-Amz-Signature":     true,
	}
)

func createPresignedHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string, contentLength int64) (*http.Request, error) {
	req := ctx.Request()
	var body io.Reader
	if IsBigDataAction(ctx) {
		body = req.BodyStream()
	} else {
		body = bytes.NewReader(req.Body())
	}

	uri := string(ctx.Request().URI().Path())
	isFirst := true

	ctx.Request().URI().QueryArgs().VisitAll(func(key, value []byte) {
		_, ok := signedQueryArgs[string(key)]
		if !ok {
			if isFirst {
				uri += fmt.Sprintf("?%s=%s", key, value)
				isFirst = false
			} else {
				uri += fmt.Sprintf("&%s=%s", key, value)
			}
		}
	})

	httpReq, err := http.NewRequest(string(req.Header.Method()), uri, body)
	if err != nil {
		return nil, errors.New("error in creating an http request")
	}
	// Set the request headers
	req.Header.VisitAll(func(key, value []byte) {
		keyStr := string(key)
		if includeHeader(keyStr, signedHdrs) {
			httpReq.Header.Add(keyStr, string(value))
		}
	})

	// Check if Content-Length in signed headers
	// If content length is non 0, then the header will be included
	if !includeHeader("Content-Length", signedHdrs) {
		httpReq.ContentLength = 0
	} else {
		httpReq.ContentLength = contentLength
	}

	// Set the Host header
@@ -201,35 +131,3 @@ func includeHeader(hdr string, signedHdrs []string) bool {
	}
	return false
}

func IsBigDataAction(ctx *fiber.Ctx) bool {
	if ctx.Method() == http.MethodPut && len(strings.Split(ctx.Path(), "/")) >= 3 {
		if !ctx.Request().URI().QueryArgs().Has("tagging") && ctx.Get("X-Amz-Copy-Source") == "" && !ctx.Request().URI().QueryArgs().Has("acl") {
			return true
		}
	}
	return false
}
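To summarize which requests IsBigDataAction treats as streaming object writes (a sketch; paths follow the /{bucket}/{object} REST layout):

// PUT /bucket/key                     -> true  (plain object upload)
// PUT /bucket/key?tagging             -> false (tagging subresource)
// PUT /bucket/key?acl                 -> false (ACL subresource)
// PUT /bucket/key + X-Amz-Copy-Source -> false (server-side copy, no data body)
// PUT /bucket                         -> false (fewer than three path segments)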
func ValidateDate(date time.Time) error {
	now := time.Now().UTC()
	diff := date.Unix() - now.Unix()

	// Check that the dates differ by less than a minute
	if diff > 60 {
		return s3err.APIError{
			Code:           "SignatureDoesNotMatch",
			Description:    fmt.Sprintf("Signature not yet current: %s is still later than %s", date.Format(iso8601Format), now.Format(iso8601Format)),
			HTTPStatusCode: http.StatusForbidden,
		}
	}
	if diff < -60 {
		return s3err.APIError{
			Code:           "SignatureDoesNotMatch",
			Description:    fmt.Sprintf("Signature expired: %s is now earlier than %s", date.Format(iso8601Format), now.Format(iso8601Format)),
			HTTPStatusCode: http.StatusForbidden,
		}
	}

	return nil
}

@@ -55,7 +55,7 @@ func TestCreateHttpRequestFromCtx(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := createHttpRequestFromCtx(tt.args.ctx, []string{"X-Amz-Mfa"}, 0)
			got, err := CreateHttpRequestFromCtx(tt.args.ctx, []string{"X-Amz-Mfa"})
			if (err != nil) != tt.wantErr {
				t.Errorf("CreateHttpRequestFromCtx() error = %v, wantErr %v", err, tt.wantErr)
				return
s3response/README.txt (new file, 6 lines)
@@ -0,0 +1,6 @@
https://doc.s3.amazonaws.com/2006-03-01/AmazonS3.xsd

see https://blog.aqwari.net/xml-schema-go/

go install aqwari.net/xml/cmd/xsdgen@latest
xsdgen -o s3api_xsd_generated.go -pkg s3response AmazonS3.xsd

s3response/s3api_xsd_generated.go (new file, 1007 lines)
File diff suppressed because it is too large
@@ -16,7 +16,6 @@ package s3response
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
)
|
||||
@@ -74,7 +73,7 @@ type ListMultipartUploadsResult struct {
|
||||
CommonPrefixes []CommonPrefix
|
||||
}
|
||||
|
||||
// Upload describes in progress multipart upload
|
||||
// Upload desribes in progress multipart upload
|
||||
type Upload struct {
|
||||
Key string
|
||||
UploadID string `xml:"UploadId"`
|
||||
@@ -108,8 +107,7 @@ type TagSet struct {
|
||||
}
|
||||
|
||||
type Tagging struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging" json:"-"`
|
||||
TagSet TagSet `xml:"TagSet"`
|
||||
TagSet TagSet `xml:"TagSet"`
|
||||
}
|
||||

type DeleteObjects struct {
@@ -141,58 +139,3 @@ type Bucket struct {
	Name  string `json:"name"`
	Owner string `json:"owner"`
}

type ListAllMyBucketsResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult" json:"-"`
	Owner   CanonicalUser
	Buckets ListAllMyBucketsList
}

type ListAllMyBucketsEntry struct {
	Name         string
	CreationDate time.Time
}

type ListAllMyBucketsList struct {
	Bucket []ListAllMyBucketsEntry
}

type CanonicalUser struct {
	ID          string
	DisplayName string
}

type CopyObjectResult struct {
	XMLName      xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
	LastModified time.Time
	ETag         string
}

type AccessControlPolicy struct {
	XMLName           xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlPolicy" json:"-"`
	Owner             CanonicalUser
	AccessControlList AccessControlList
}

type AccessControlList struct {
	Grant []Grant
}

type Grant struct {
	Grantee    Grantee
	Permission string
}

// Set the following to encode correctly:
//
//	Grantee: s3response.Grantee{
//		Xsi:  "http://www.w3.org/2001/XMLSchema-instance",
//		Type: "CanonicalUser",
//	},
type Grantee struct {
	XMLName     xml.Name `xml:"Grantee"`
	Xsi         string   `xml:"xmlns:xsi,attr,omitempty"`
	Type        string   `xml:"xsi:type,attr,omitempty"`
	ID          string
	DisplayName string
}

@@ -1,358 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package s3select

import (
	"bufio"
	"context"
	"encoding/binary"
	"encoding/xml"
	"fmt"
	"hash/crc32"
	"sync"
	"sync/atomic"
	"time"
)

// Protocol definition for messages can be found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html

var (
	// From protocol def:
	// Enum indicating the header value type.
	// For Amazon S3 Select, this is always 7.
	headerValueType = byte(7)
)

func intToTwoBytes(i int) []byte {
	return []byte{byte(i >> 8), byte(i)}
}

func generateHeader(messages ...string) []byte {
	var header []byte

	for i, message := range messages {
		if i%2 == 1 {
			header = append(header, headerValueType)
			header = append(header, intToTwoBytes(len(message))...)
		} else {
			header = append(header, byte(len(message)))
		}
		header = append(header, message...)
	}

	return header
}
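
// Editor's worked example of the encoding above: keys are written as
// [1-byte length][bytes] and values as [type byte 7][2-byte big-endian
// length][bytes], so generateHeader(":event-type", "Cont") produces
// 0x0b ":event-type" 0x07 0x00 0x04 "Cont" — 19 bytes in total.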

func generateOctetHeader(message string) []byte {
	return generateHeader(
		":message-type",
		"event",
		":content-type",
		"application/octet-stream",
		":event-type",
		message)
}

func generateTextHeader(message string) []byte {
	return generateHeader(
		":message-type",
		"event",
		":content-type",
		"text/xml",
		":event-type",
		message)
}

func generateNoContentHeader(message string) []byte {
	return generateHeader(
		":message-type",
		"event",
		":event-type",
		message)
}

const (
	// 4 bytes total byte len +
	// 4 bytes headers bytes len +
	// 4 bytes prelude CRC
	preludeLen = 12
	// CRC is uint32
	msgCrcLen = 4
)

var (
	recordsHeader       = generateOctetHeader("Records")
	continuationHeader  = generateNoContentHeader("Cont")
	continuationMessage = genMessage(continuationHeader, []byte{})
	progressHeader      = generateTextHeader("Progress")
	statsHeader         = generateTextHeader("Stats")
	endHeader           = generateNoContentHeader("End")
	endMessage          = genMessage(endHeader, []byte{})
)

func uintToBytes(n uint32) []byte {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, n)
	return b
}

func generatePrelude(msgLen int, headerLen int) []byte {
	prelude := make([]byte, 0, preludeLen)

	// 4 bytes total byte len
	prelude = append(prelude, uintToBytes(uint32(msgLen+headerLen+preludeLen+msgCrcLen))...)
	// 4 bytes headers bytes len
	prelude = append(prelude, uintToBytes(uint32(headerLen))...)
	// 4 bytes prelude CRC
	prelude = append(prelude, uintToBytes(crc32.ChecksumIEEE(prelude))...)

	return prelude
}
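
// Editor's framing sketch: putting the pieces above together, every message
// on the wire is laid out as
//
//	[4B total length][4B header length][4B prelude CRC][headers][payload][4B message CRC]
//
// e.g. the "Cont" continuation message has a 41-byte header block and an
// empty payload, so its total-length field is 12 + 41 + 0 + 4 = 57.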

const (
	maxHeaderSize  = 1024 * 1024
	maxMessageSize = 5 * 1024 * 1024 * 1024
)

func genMessage(header, payload []byte) []byte {
	var msg []byte
	// below is always true since the size is validated
	// in the send record
	if len(header) <= maxHeaderSize && len(payload) <= maxMessageSize {
		msglen := preludeLen + len(header) + len(payload) + msgCrcLen
		msg = make([]byte, 0, msglen)
	}

	msg = append(msg, generatePrelude(len(payload), len(header))...)
	msg = append(msg, header...)
	msg = append(msg, payload...)
	msg = append(msg, uintToBytes(crc32.ChecksumIEEE(msg))...)

	return msg
}
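
// Editor's note on the guard above: when the size check fails, msg simply
// stays nil and the appends below still succeed — the if block only
// pre-allocates capacity, it does not enforce the limits. The actual limit is
// enforced earlier, in SendRecord, before genMessage is reached.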

func genRecordsMessage(payload []byte) []byte {
	return genMessage(recordsHeader, payload)
}

type progress struct {
	XMLName        xml.Name `xml:"Progress"`
	BytesScanned   int64    `xml:"BytesScanned"`
	BytesProcessed int64    `xml:"BytesProcessed"`
	BytesReturned  int64    `xml:"BytesReturned"`
}

func genProgressMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte {
	progress := progress{
		BytesScanned:   bytesScanned,
		BytesProcessed: bytesProcessed,
		BytesReturned:  bytesReturned,
	}

	xmlData, _ := xml.MarshalIndent(progress, "", " ")
	payload := []byte(xml.Header + string(xmlData))
	return genMessage(progressHeader, payload)
}

type stats struct {
	XMLName        xml.Name `xml:"Stats"`
	BytesScanned   int64    `xml:"BytesScanned"`
	BytesProcessed int64    `xml:"BytesProcessed"`
	BytesReturned  int64    `xml:"BytesReturned"`
}

func genStatsMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte {
	stats := stats{
		BytesScanned:   bytesScanned,
		BytesProcessed: bytesProcessed,
		BytesReturned:  bytesReturned,
	}

	xmlData, _ := xml.MarshalIndent(stats, "", " ")
	payload := []byte(xml.Header + string(xmlData))
	return genMessage(statsHeader, payload)
}

func genErrorMessage(errorCode, errorMessage string) []byte {
	return genMessage(generateHeader(
		":error-code",
		errorCode,
		":error-message",
		errorMessage,
		":message-type",
		"error",
	), []byte{})
}

// GetProgress is a callback function that periodically retrieves the current
// values for the following if not nil. This is used to send Progress
// messages back to the client.
// BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed).
// BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed).
type GetProgress func() (bytesScanned int64, bytesProcessed int64)

type MessageHandler struct {
	sync.Mutex
	ctx           context.Context
	cancel        context.CancelFunc
	writer        *bufio.Writer
	data          chan []byte
	getProgress   GetProgress
	stopCh        chan bool
	resetCh       chan bool
	bytesReturned int64
}

// NewMessageHandler creates a new MessageHandler instance and starts the event streaming
func NewMessageHandler(ctx context.Context, w *bufio.Writer, getProgressFunc GetProgress) *MessageHandler {
	ctx, cancel := context.WithCancel(ctx)

	mh := &MessageHandler{
		ctx:         ctx,
		cancel:      cancel,
		writer:      w,
		data:        make(chan []byte),
		getProgress: getProgressFunc,
		resetCh:     make(chan bool),
		stopCh:      make(chan bool),
	}

	go mh.sendBackgroundMessages(mh.resetCh, mh.stopCh)
	return mh
}

func (mh *MessageHandler) write(data []byte) error {
	mh.Lock()
	defer mh.Unlock()

	mh.stopCh <- true
	defer func() { mh.resetCh <- true }()

	_, err := mh.writer.Write(data)
	if err != nil {
		return err
	}

	return mh.writer.Flush()
}

const (
	continuationInterval = time.Second
	progressInterval     = time.Minute
)

func (mh *MessageHandler) sendBackgroundMessages(resetCh, stopCh <-chan bool) {
	continuationTicker := time.NewTicker(continuationInterval)
	defer continuationTicker.Stop()

	var progressTicker *time.Ticker
	var progressTickerChan <-chan time.Time
	if mh.getProgress != nil {
		progressTicker = time.NewTicker(progressInterval)
		progressTickerChan = progressTicker.C
		defer progressTicker.Stop()
	}

Loop:
	for {
		select {
		case <-mh.ctx.Done():
			break Loop

		case <-continuationTicker.C:
			err := mh.write(continuationMessage)
			if err != nil {
				mh.cancel()
				break Loop
			}

		case <-resetCh:
			continuationTicker.Reset(continuationInterval)

		case <-stopCh:
			continuationTicker.Stop()

		case <-progressTickerChan:
			var bytesScanned, bytesProcessed int64
			if mh.getProgress != nil {
				bytesScanned, bytesProcessed = mh.getProgress()
			}
			bytesReturned := atomic.LoadInt64(&mh.bytesReturned)
			err := mh.write(genProgressMessage(bytesScanned, bytesProcessed, bytesReturned))
			if err != nil {
				mh.cancel()
				break Loop
			}
		}
	}
}

// SendRecord sends a single Records message
func (mh *MessageHandler) SendRecord(payload []byte) error {
	if mh.ctx.Err() != nil {
		return mh.ctx.Err()
	}

	if len(payload) > maxMessageSize {
		return fmt.Errorf("record max size exceeded")
	}

	err := mh.write(genRecordsMessage(payload))
	if err != nil {
		return err
	}

	atomic.AddInt64(&mh.bytesReturned, int64(len(payload)))
	return nil
}

// Finish terminates message stream with Stats and End message
// generates stats and end message using function args based on:
// BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed).
// BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed).
func (mh *MessageHandler) Finish(bytesScanned, bytesProcessed int64) error {
	if mh.ctx.Err() != nil {
		return mh.ctx.Err()
	}

	bytesReturned := atomic.LoadInt64(&mh.bytesReturned)
	err := mh.write(genStatsMessage(bytesScanned, bytesProcessed, bytesReturned))
	if err != nil {
		return err
	}

	err = mh.write(endMessage)
	if err != nil {
		return err
	}

	mh.cancel()
	return nil
}

// FinishWithError terminates event stream with error
func (mh *MessageHandler) FinishWithError(errorCode, errorMessage string) error {
	if mh.ctx.Err() != nil {
		return mh.ctx.Err()
	}
	err := mh.write(genErrorMessage(errorCode, errorMessage))
	if err != nil {
		return err
	}

	mh.cancel()
	return nil
}
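
// Editor's usage sketch for the handler above (hypothetical caller):
//
//	mh := NewMessageHandler(ctx, bufio.NewWriter(w), nil)
//	for _, rec := range records {
//		if err := mh.SendRecord(rec); err != nil {
//			return mh.FinishWithError("InternalError", err.Error())
//		}
//	}
//	return mh.Finish(bytesScanned, bytesProcessed)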
@@ -1,9 +0,0 @@
AWS_REGION=us-east-1
AWS_PROFILE=versity
VERSITY_EXE=./versitygw
BACKEND=posix
LOCAL_FOLDER=/tmp/gw
AWS_ENDPOINT_URL=http://127.0.0.1:7070
BUCKET_ONE_NAME=versity-gwtest-bucket-one
BUCKET_TWO_NAME=versity-gwtest-bucket-two
RECREATE_BUCKETS=true
@@ -1,9 +0,0 @@
AWS_REGION=us-east-1
AWS_PROFILE=versity
VERSITY_EXE=./versitygw
BACKEND=posix
LOCAL_FOLDER=/tmp/gw
AWS_ENDPOINT_URL=http://127.0.0.1:7070
BUCKET_ONE_NAME=versity-gwtest-bucket-one
BUCKET_TWO_NAME=versity-gwtest-bucket-two
RECREATE_BUCKETS=true
@@ -1,13 +0,0 @@
# Command-Line Tests

Instructions:
1. Build the `versitygw` binary.
2. Create a local AWS profile for connection to S3, and add the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` values above to the profile.
3. Create an environment file (`.env`) similar to the ones in this folder, setting the `AWS_PROFILE` parameter to the name of the profile you created.
4. In the root repo folder, run with `VERSITYGW_TEST_ENV=<env file> tests/s3_bucket_tests.sh`.
5. If running/testing the GitHub workflow locally, create a `.secrets` file, and set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` parameters here to the values of your AWS S3 IAM account.
```
AWS_ACCESS_KEY_ID=<key_id>
AWS_SECRET_ACCESS_KEY=<secret_key>
```
6. To run the workflow locally, install **act** and run with `act -W .github/workflows/system.yml`.
@@ -1,88 +0,0 @@
#!/usr/bin/env bats

source ./tests/setup.sh
source ./tests/util.sh
source ./tests/util_posix.sh

# test that changes to local folders and files are reflected on S3
@test "test_local_creation_deletion" {

  if [[ $RECREATE_BUCKETS != "true" ]]; then
    return
  fi

  local object_name="test-object"

  mkdir "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME"
  local object="$BUCKET_ONE_NAME"/"$object_name"
  touch "$LOCAL_FOLDER"/"$object"

  bucket_exists_remote_and_local "$BUCKET_ONE_NAME" || local bucket_exists_two=$?
  [[ $bucket_exists_two -eq 0 ]] || fail "Failed bucket existence check"
  object_exists_remote_and_local "$object" || local object_exists_two=$?
  [[ $object_exists_two -eq 0 ]] || fail "Failed object existence check"

  rm "$LOCAL_FOLDER"/"$object"
  sleep 1
  object_not_exists_remote_and_local "$object" || local object_deleted=$?
  [[ $object_deleted -eq 0 ]] || fail "Failed object deletion check"

  rmdir "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME"
  sleep 1
  bucket_not_exists_remote_and_local "$BUCKET_ONE_NAME" || local bucket_deleted=$?
  [[ $bucket_deleted -eq 0 ]] || fail "Failed bucket deletion check"
}

# test head-object command
@test "test_head_object" {

  local bucket_name=$BUCKET_ONE_NAME
  local object_name="object-one"

  create_test_files $object_name
  if [ -e "$LOCAL_FOLDER"/"$bucket_name"/$object_name ]; then
    chmod 755 "$LOCAL_FOLDER"/"$bucket_name"/$object_name
  fi
  setup_bucket "$bucket_name" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating bucket"
  put_object "$test_file_folder"/"$object_name" "$bucket_name"/"$object_name" || local result="$?"
  [[ result -eq 0 ]] || fail "Error adding object one"

  chmod 000 "$LOCAL_FOLDER"/"$bucket_name"/$object_name
  sleep 1
  object_is_accessible "$bucket_name" $object_name || local accessible=$?
  [[ $accessible -eq 1 ]] || fail "Object should be inaccessible"

  chmod 755 "$LOCAL_FOLDER"/"$bucket_name"/$object_name
  sleep 1
  object_is_accessible "$bucket_name" $object_name || local accessible_two=$?
  [[ $accessible_two -eq 0 ]] || fail "Object should be accessible"

  delete_object "$bucket_name"/$object_name
  delete_bucket_or_contents "$bucket_name"
  delete_test_files $object_name
}

# check info, accessibility of bucket
@test "test_get_bucket_info" {

  if [ -e "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME" ]; then
    chmod 755 "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME"
    sleep 1
  else
    setup_bucket "$BUCKET_ONE_NAME" || local created=$?
    [[ $created -eq 0 ]] || fail "Error creating bucket"
  fi

  chmod 000 "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME"
  sleep 1
  bucket_is_accessible "$BUCKET_ONE_NAME" || local accessible=$?
  [[ $accessible -eq 1 ]] || fail "Bucket should be inaccessible"

  chmod 755 "$LOCAL_FOLDER"/"$BUCKET_ONE_NAME"
  sleep 1
  bucket_is_accessible "$BUCKET_ONE_NAME" || local accessible_two=$?
  [[ $accessible_two -eq 0 ]] || fail "Bucket should be accessible"

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
}
@@ -1,403 +0,0 @@
#!/usr/bin/env bats

source ./tests/setup.sh
source ./tests/util.sh

# test creation and deletion of bucket on versitygw
@test "test_create_delete_bucket" {

  if [[ $RECREATE_BUCKETS != "true" ]]; then
    return
  fi

  setup_bucket "$BUCKET_ONE_NAME" || local create_result=$?
  [[ $create_result -eq 0 ]] || fail "Failed to create bucket"

  bucket_exists "$BUCKET_ONE_NAME" || local exists_three=$?
  [[ $exists_three -eq 0 ]] || fail "Failed bucket existence check"

  delete_bucket_or_contents "$BUCKET_ONE_NAME" || local delete_result_two=$?
  [[ $delete_result_two -eq 0 ]] || fail "Failed to delete bucket"
}

# test adding and removing an object on versitygw
@test "test_put_object" {

  local object_name="test-object"

  setup_bucket "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"

  create_test_files "$object_name" || local create_result=$?

  object="$BUCKET_ONE_NAME"/$object_name
  put_object "$test_file_folder"/"$object_name" "$object" || local put_object=$?
  [[ $put_object -eq 0 ]] || fail "Failed to add object to bucket"
  object_exists "$object" || local exists_result_one=$?
  [[ $exists_result_one -eq 0 ]] || fail "Object not added to bucket"

  delete_object "$object" || local delete_result=$?
  [[ $delete_result -eq 0 ]] || fail "Failed to delete object"
  object_exists "$object" || local exists_result_two=$?
  [[ $exists_result_two -eq 1 ]] || fail "Object not removed from bucket"

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_test_files "$object_name"
}

# test listing buckets on versitygw
@test "test_list_buckets" {

  setup_bucket "$BUCKET_ONE_NAME" || local setup_result_one=$?
  [[ $setup_result_one -eq 0 ]] || fail "Bucket one setup error"
  setup_bucket "$BUCKET_TWO_NAME" || local setup_result_two=$?
  [[ $setup_result_two -eq 0 ]] || fail "Bucket two setup error"

  list_buckets
  local bucket_one_found=false
  local bucket_two_found=false
  for bucket in "${bucket_array[@]}"; do
    if [ "$bucket" == "$BUCKET_ONE_NAME" ]; then
      bucket_one_found=true
    elif [ "$bucket" == "$BUCKET_TWO_NAME" ]; then
      bucket_two_found=true
    fi
    if [ $bucket_one_found == true ] && [ $bucket_two_found == true ]; then
      break
    fi
  done

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_bucket_or_contents "$BUCKET_TWO_NAME"

  if [ $bucket_one_found != true ] || [ $bucket_two_found != true ]; then
    fail "'$BUCKET_ONE_NAME' and/or '$BUCKET_TWO_NAME' not listed (all buckets: ${bucket_array[*]})"
  fi
}

# test listing a bucket's objects on versitygw
@test "test_list_objects" {

  object_one="test-file-one"
  object_two="test-file-two"

  create_test_files $object_one $object_two
  setup_bucket "$BUCKET_ONE_NAME" || local result_one=$?
  [[ result_one -eq 0 ]] || fail "Error creating bucket"
  put_object "$test_file_folder"/$object_one "$BUCKET_ONE_NAME"/"$object_one" || local result_two=$?
  [[ result_two -eq 0 ]] || fail "Error adding object one"
  put_object "$test_file_folder"/$object_two "$BUCKET_ONE_NAME"/"$object_two" || local result_three=$?
  [[ result_three -eq 0 ]] || fail "Error adding object two"

  list_objects "$BUCKET_ONE_NAME"
  local object_one_found=false
  local object_two_found=false
  for object in "${object_array[@]}"; do
    if [ "$object" == $object_one ]; then
      object_one_found=true
    elif [ "$object" == $object_two ]; then
      object_two_found=true
    fi
  done

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_test_files $object_one $object_two

  if [ $object_one_found != true ] || [ $object_two_found != true ]; then
    fail "$object_one and/or $object_two not listed (all objects: ${object_array[*]})"
  fi
}

# test ability to retrieve bucket ACLs
@test "test_get_bucket_acl" {

  setup_bucket "$BUCKET_ONE_NAME" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating bucket"

  get_bucket_acl "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Error retrieving acl"

  id=$(echo "$acl" | jq '.Owner.ID')
  [[ $id == '"'"$AWS_ACCESS_KEY_ID"'"' ]] || fail "Acl mismatch"

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
}

# test ability to delete multiple objects from bucket
@test "test_delete_objects" {

  local object_one="test-file-one"
  local object_two="test-file-two"

  create_test_files "$object_one" "$object_two" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test files"
  setup_bucket "$BUCKET_ONE_NAME" || local result_one=$?
  [[ $result_one -eq 0 ]] || fail "Error creating bucket"

  put_object "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME"/"$object_one" || local result_two=$?
  [[ $result_two -eq 0 ]] || fail "Error adding object one"
  put_object "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME"/"$object_two" || local result_three=$?
  [[ $result_three -eq 0 ]] || fail "Error adding object two"

  error=$(aws s3api delete-objects --bucket "$BUCKET_ONE_NAME" --delete '{
    "Objects": [
      {"Key": "test-file-one"},
      {"Key": "test-file-two"}
    ]
  }') || local result=$?
  [[ $result -eq 0 ]] || fail "Error deleting objects: $error"

  object_exists "$BUCKET_ONE_NAME"/"$object_one" || local exists_one=$?
  [[ $exists_one -eq 1 ]] || fail "Object one not deleted"
  object_exists "$BUCKET_ONE_NAME"/"$object_two" || local exists_two=$?
  [[ $exists_two -eq 1 ]] || fail "Object two not deleted"

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_test_files "$object_one" "$object_two"
}

# test ability to set and retrieve bucket tags
@test "test-set-get-bucket-tags" {

  local key="test_key"
  local value="test_value"

  setup_bucket "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  get_bucket_tags "$BUCKET_ONE_NAME" || local get_result=$?
  [[ $get_result -eq 0 ]] || fail "Error getting bucket tags"
  tag_set=$(echo "$tags" | jq '.TagSet')
  [[ $tag_set == "[]" ]] || fail "Error: tags not empty"

  put_bucket_tag "$BUCKET_ONE_NAME" $key $value
  get_bucket_tags "$BUCKET_ONE_NAME" || local get_result_two=$?
  [[ $get_result_two -eq 0 ]] || fail "Error getting bucket tags"
  tag_set_key=$(echo "$tags" | jq '.TagSet[0].Key')
  tag_set_value=$(echo "$tags" | jq '.TagSet[0].Value')
  [[ $tag_set_key == '"'$key'"' ]] || fail "Key mismatch"
  [[ $tag_set_value == '"'$value'"' ]] || fail "Value mismatch"

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
}

# test v1 s3api list objects command
@test "test-s3api-list-objects-v1" {

  local object_one="test-file-one"
  local object_two="test-file-two"
  local object_two_data="test data\n"

  create_test_files "$object_one" "$object_two" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test files"
  printf "%s" "$object_two_data" > "$test_file_folder"/"$object_two"
  setup_bucket "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
  put_object "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME"/"$object_one" || local put_object_one=$?
  [[ $put_object_one -eq 0 ]] || fail "Failed to add object $object_one"
  put_object "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME"/"$object_two" || local put_object_two=$?
  [[ $put_object_two -eq 0 ]] || fail "Failed to add object $object_two"

  list_objects_s3api_v1 "$BUCKET_ONE_NAME"
  key_one=$(echo "$objects" | jq '.Contents[0].Key')
  [[ $key_one == '"'$object_one'"' ]] || fail "Object one mismatch"
  size_one=$(echo "$objects" | jq '.Contents[0].Size')
  [[ $size_one -eq 0 ]] || fail "Object one size mismatch"
  key_two=$(echo "$objects" | jq '.Contents[1].Key')
  [[ $key_two == '"'$object_two'"' ]] || fail "Object two mismatch"
  size_two=$(echo "$objects" | jq '.Contents[1].Size')
  [[ $size_two -eq ${#object_two_data} ]] || fail "Object two size mismatch"

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_test_files "$object_one" "$object_two"
}

# test v2 s3api list objects command
@test "test-s3api-list-objects-v2" {

  local object_one="test-file-one"
  local object_two="test-file-two"
  local object_two_data="test data\n"

  create_test_files "$object_one" "$object_two" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test files"
  printf "%s" "$object_two_data" > "$test_file_folder"/"$object_two"
  setup_bucket "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
  put_object "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME"/"$object_one" || local put_object_one=$?
  [[ $put_object_one -eq 0 ]] || fail "Failed to add object $object_one"
  put_object "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME"/"$object_two" || local put_object_two=$?
  [[ $put_object_two -eq 0 ]] || fail "Failed to add object $object_two"

  list_objects_s3api_v2 "$BUCKET_ONE_NAME"
  key_one=$(echo "$objects" | jq '.Contents[0].Key')
  [[ $key_one == '"'$object_one'"' ]] || fail "Object one mismatch"
  size_one=$(echo "$objects" | jq '.Contents[0].Size')
  [[ $size_one -eq 0 ]] || fail "Object one size mismatch"
  key_two=$(echo "$objects" | jq '.Contents[1].Key')
  [[ $key_two == '"'$object_two'"' ]] || fail "Object two mismatch"
  size_two=$(echo "$objects" | jq '.Contents[1].Size')
  [[ $size_two -eq ${#object_two_data} ]] || fail "Object two size mismatch"

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_test_files "$object_one" "$object_two"
}

# test ability to set and retrieve object tags
@test "test-set-get-object-tags" {

  local bucket_file="bucket-file"
  local key="test_key"
  local value="test_value"

  create_test_files "$bucket_file" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test files"
  setup_bucket "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
  local object_path="$BUCKET_ONE_NAME"/"$bucket_file"
  put_object "$test_file_folder"/"$bucket_file" "$object_path" || local put_object=$?
  [[ $put_object -eq 0 ]] || fail "Failed to add object to bucket '$BUCKET_ONE_NAME'"

  get_object_tags "$BUCKET_ONE_NAME" $bucket_file || local get_result=$?
  [[ $get_result -eq 0 ]] || fail "Error getting object tags"
  tag_set=$(echo "$tags" | jq '.TagSet')
  [[ $tag_set == "[]" ]] || fail "Error: tags not empty"

  put_object_tag "$BUCKET_ONE_NAME" $bucket_file $key $value
  get_object_tags "$BUCKET_ONE_NAME" $bucket_file || local get_result_two=$?
  [[ $get_result_two -eq 0 ]] || fail "Error getting object tags"
  tag_set_key=$(echo "$tags" | jq '.TagSet[0].Key')
  tag_set_value=$(echo "$tags" | jq '.TagSet[0].Value')
  [[ $tag_set_key == '"'$key'"' ]] || fail "Key mismatch"
  [[ $tag_set_value == '"'$value'"' ]] || fail "Value mismatch"

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}

# test multi-part upload
@test "test-multi-part-upload" {

  local bucket_file="bucket-file"
  bucket_file_data="test file\n"

  create_test_files "$bucket_file" || local created=$?
  printf "%s" "$bucket_file_data" > "$test_file_folder"/$bucket_file
  [[ $created -eq 0 ]] || fail "Error creating test files"
  setup_bucket "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || upload_result=$?
  [[ $upload_result -eq 0 ]] || fail "Error performing multipart upload"

  copy_file "s3://$BUCKET_ONE_NAME/$bucket_file" "$test_file_folder/$bucket_file-copy"
  copy_data=$(<"$test_file_folder"/$bucket_file-copy)
  [[ $bucket_file_data == "$copy_data" ]] || fail "Data doesn't match"

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}

# test multi-part upload abort
@test "test-multi-part-upload-abort" {

  local bucket_file="bucket-file"
  bucket_file_data="test file\n"

  create_test_files "$bucket_file" || local created=$?
  printf "%s" "$bucket_file_data" > "$test_file_folder"/$bucket_file
  [[ $created -eq 0 ]] || fail "Error creating test files"
  setup_bucket "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  abort_multipart_upload "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || abort_result=$?
  [[ $abort_result -eq 0 ]] || fail "Abort failed"

  object_exists "$BUCKET_ONE_NAME/$bucket_file" || exists=$?
  [[ $exists -eq 1 ]] || fail "Upload file exists after abort"

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}

# test multi-part upload list parts command
@test "test-multipart-upload-list-parts" {

  local bucket_file="bucket-file"
  local bucket_file_data="test file\n"

  create_test_files "$bucket_file" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test files"
  printf "%s" "$bucket_file_data" > "$test_file_folder"/$bucket_file
  setup_bucket "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  list_parts "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 || list_result=$?
  [[ list_result -eq 0 ]] || fail "Listing multipart upload parts failed"

  declare -a parts_map
  for ((i=0;i<4;i++)) {
    local part_number
    local etag
    part_number=$(echo "$parts" | jq ".[$i].PartNumber")
    if [[ $part_number -eq "" ]]; then
      echo "error: blank part number"
      return 1
    fi
    etag=$(echo "$parts" | jq ".[$i].ETag")
    if [[ $etag == "" ]]; then
      echo "error: blank etag"
      return 1
    fi
    parts_map[$part_number]=$etag
  }

  for ((i=0;i<4;i++)) {
    local part_number
    local etag
    part_number=$(echo "$listed_parts" | jq ".Parts[$i].PartNumber")
    etag=$(echo "$listed_parts" | jq ".Parts[$i].ETag")
    if [[ ${parts_map[$part_number]} != "$etag" ]]; then
      echo "error: etags don't match (part number: $part_number, etags ${parts_map[$part_number]},$etag)"
      return 1
    fi
  }

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}

# test listing of active uploads
@test "test-multipart-upload-list-uploads" {

  local bucket_file_one="bucket-file-one"
  local bucket_file_two="bucket-file-two"

  create_test_files "$bucket_file_one" "$bucket_file_two" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test files"
  setup_bucket "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  list_multipart_uploads "$BUCKET_ONE_NAME" "$test_file_folder"/"$bucket_file_one" "$test_file_folder"/"$bucket_file_two"
  [[ $? -eq 0 ]] || fail "failed to list multipart uploads"

  local key_one
  local key_two
  key_one=$(echo "$uploads" | jq '.Uploads[0].Key')
  key_two=$(echo "$uploads" | jq '.Uploads[1].Key')
  key_one=${key_one//\"/}
  key_two=${key_two//\"/}
  echo "$test_file_folder/${bucket_file_one}abc"
  echo "${key_one}abc"
  echo "Length of test_file_folder/bucket_file_one: ${#test_file_folder}/${#bucket_file_one}"
  echo "Length of key_one: ${#key_one}"
  if [[ "$test_file_folder/$bucket_file_one" != *"$key_one" ]]; then
    fail "Key mismatch ($test_file_folder/$bucket_file_one, $key_one)"
  fi
  if [[ "$test_file_folder/$bucket_file_two" != *"$key_two" ]]; then
    fail "Key mismatch ($test_file_folder/$bucket_file_two, $key_two)"
  fi

  delete_bucket_or_contents "$BUCKET_ONE_NAME"
  delete_test_files "$bucket_file_one" "$bucket_file_two"
}
@@ -1,89 +0,0 @@
#!/usr/bin/env bats

setup() {

  if [ "$GITHUB_ACTIONS" != "true" ] && [ -r .secrets ]; then
    source .secrets
  else
    echo "Warning: no secrets file found"
  fi
  if [ -z "$VERSITYGW_TEST_ENV" ]; then
    if [ -r tests/.env ]; then
      source tests/.env
    else
      echo "Warning: no .env file found in tests folder"
    fi
  else
    echo "$VERSITYGW_TEST_ENV"
    # shellcheck source=./.env.default
    source "$VERSITYGW_TEST_ENV"
  fi

  if [ -z "$AWS_ACCESS_KEY_ID" ]; then
    echo "No AWS access key set"
    return 1
  elif [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
    echo "No AWS secret access key set"
    return 1
  elif [ -z "$VERSITY_EXE" ]; then
    echo "No versity executable location set"
    return 1
  elif [ -z "$BACKEND" ]; then
    echo "No backend parameter set (options: 'posix')"
    return 1
  elif [ -z "$AWS_REGION" ]; then
    echo "No AWS region set"
    return 1
  elif [ -z "$AWS_PROFILE" ]; then
    echo "No AWS profile set"
    return 1
  elif [ -z "$LOCAL_FOLDER" ]; then
    echo "No local storage folder set"
    return 1
  elif [ -z "$AWS_ENDPOINT_URL" ]; then
    echo "No AWS endpoint URL set"
    return 1
  elif [ -z "$BUCKET_ONE_NAME" ]; then
    echo "No bucket one name set"
    return 1
  elif [ -z "$BUCKET_TWO_NAME" ]; then
    echo "No bucket two name set"
    return 1
  elif [ -z "$RECREATE_BUCKETS" ]; then
    echo "No recreate buckets parameter set"
    return 1
  elif [[ $RECREATE_BUCKETS != "true" ]] && [[ $RECREATE_BUCKETS != "false" ]]; then
    echo "RECREATE_BUCKETS must be 'true' or 'false'"
    return 1
  fi

  ROOT_ACCESS_KEY="$AWS_ACCESS_KEY_ID" ROOT_SECRET_KEY="$AWS_SECRET_ACCESS_KEY" "$VERSITY_EXE" "$BACKEND" "$LOCAL_FOLDER" &

  export AWS_REGION
  export AWS_PROFILE
  export AWS_ENDPOINT_URL
  export LOCAL_FOLDER
  export BUCKET_ONE_NAME
  export BUCKET_TWO_NAME

  versitygw_pid=$!
  export versitygw_pid
}

fail() {
  echo "$1"
  return 1
}

teardown() {
  if [ -n "$versitygw_pid" ]; then
    if ps -p "$versitygw_pid" > /dev/null; then
      kill "$versitygw_pid"
      wait "$versitygw_pid" || true
    else
      echo "Process with PID $versitygw_pid does not exist."
    fi
  else
    echo "versitygw_pid is not set or empty."
  fi
}
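
# Editor's note: the setup()/teardown() pair above gives every BATS test a
# fresh gateway — setup() launches "$VERSITY_EXE" in the background and records
# versitygw_pid, and teardown() kills and waits on that PID after the test body.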
720
tests/util.sh
@@ -1,720 +0,0 @@
#!/usr/bin/env bats

# create an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
create_bucket() {
  if [ $# -ne 1 ]; then
    echo "create bucket missing bucket name"
    return 1
  fi

  local exit_code=0
  local error
  error=$(aws s3 mb s3://"$1" 2>&1) || exit_code=$?
  if [ $exit_code -ne 0 ]; then
    echo "error creating bucket: $error"
    return 1
  fi
  return 0
}

# delete an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
delete_bucket() {
  if [ $# -ne 1 ]; then
    echo "delete bucket missing bucket name"
    return 1
  fi

  local exit_code=0
  local error
  error=$(aws s3 rb s3://"$1" 2>&1) || exit_code="$?"
  if [ $exit_code -ne 0 ]; then
    if [[ "$error" == *"The specified bucket does not exist"* ]]; then
      return 0
    else
      echo "error deleting bucket: $error"
      return 1
    fi
  fi
  return 0
}

# recursively delete an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
delete_bucket_recursive() {
  if [ $# -ne 1 ]; then
    echo "delete bucket missing bucket name"
    return 1
  fi

  local exit_code=0
  local error
  error=$(aws s3 rb s3://"$1" --force 2>&1) || exit_code="$?"
  if [ $exit_code -ne 0 ]; then
    if [[ "$error" == *"The specified bucket does not exist"* ]]; then
      return 0
    else
      echo "error deleting bucket: $error"
      return 1
    fi
  fi
  return 0
}

# delete contents of a bucket
# param: bucket name
# return 0 for success, 1 for failure
delete_bucket_contents() {
  if [ $# -ne 1 ]; then
    echo "delete bucket missing bucket name"
    return 1
  fi

  local exit_code=0
  local error
  error=$(aws s3 rm s3://"$1" --recursive 2>&1) || exit_code="$?"
  if [ $exit_code -ne 0 ]; then
    echo "error deleting bucket: $error"
    return 1
  fi
  return 0
}

# check if bucket exists
# param: bucket name
# return 0 for true, 1 for false, 2 for error
bucket_exists() {
  if [ $# -ne 1 ]; then
    echo "bucket exists check missing bucket name"
    return 2
  fi

  local exit_code=0
  local error
  error=$(aws s3 ls s3://"$1" 2>&1) || exit_code="$?"
  echo "Exit code: $exit_code, error: $error"
  if [ $exit_code -ne 0 ]; then
    if [[ "$error" == *"The specified bucket does not exist"* ]] || [[ "$error" == *"Access Denied"* ]]; then
      return 1
    else
      echo "error checking if bucket exists: $error"
      return 2
    fi
  fi
  return 0
}

# delete buckets or just the contents depending on RECREATE_BUCKETS parameter
# param: bucket name
# return: 0 for success, 1 for failure
delete_bucket_or_contents() {
  if [ $# -ne 1 ]; then
    echo "delete bucket or contents function requires bucket name"
    return 1
  fi
  if [[ $RECREATE_BUCKETS != "true" ]]; then
    delete_bucket_contents "$1" || local delete_result=$?
    if [[ $delete_result -ne 0 ]]; then
      echo "error deleting bucket contents"
      return 1
    fi
    return 0
  fi
  delete_bucket_recursive "$1" || local delete_result=$?
  if [[ $delete_result -ne 0 ]]; then
    echo "Bucket deletion error"
    return 1
  fi
  return 0
}

# if RECREATE_BUCKETS is set to true create bucket, deleting it if it exists to clear state. If not,
# check to see if it exists and return an error if it does not.
# param: bucket name
# return 0 for success, 1 for failure
setup_bucket() {
  if [ $# -ne 1 ]; then
    echo "bucket creation function requires bucket name"
    return 1
  fi
  local exists_result
  bucket_exists "$1" || exists_result=$?
  if [[ $exists_result -eq 2 ]]; then
    echo "Bucket existence check error"
    return 1
  fi
  if [[ $exists_result -eq 0 ]]; then
    delete_bucket_or_contents "$1" || delete_result=$?
    if [[ delete_result -ne 0 ]]; then
      echo "error deleting bucket or contents"
      return 1
    fi
  fi
  if [[ $RECREATE_BUCKETS != "true" ]]; then
    echo "When RECREATE_BUCKETS isn't set to \"true\", buckets should be pre-created by user"
    return 1
  fi
  local create_result
  create_bucket "$1" || create_result=$?
  if [[ $create_result -ne 0 ]]; then
    echo "Error creating bucket"
    return 1
  fi
  echo "Bucket creation success"
  return 0
}
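
# Typical usage inside a test (the pattern used throughout the BATS files):
#   setup_bucket "$BUCKET_ONE_NAME" || local result=$?
#   [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"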

# check if object exists on S3 via gateway
# param: object path
# return 0 for true, 1 for false, 2 for error
object_exists() {
  if [ $# -ne 1 ]; then
    echo "object exists check missing object name"
    return 2
  fi
  local exit_code=0
  local error
  error=$(aws s3 ls s3://"$1" 2>&1) || exit_code="$?"
  if [ $exit_code -ne 0 ]; then
    if [[ "$error" == "" ]]; then
      return 1
    else
      echo "error checking if object exists: $error"
      return 2
    fi
  fi
  return 0
}

# add object to versitygw
# params: source file, destination copy location
# return 0 for success, 1 for failure
put_object() {
  if [ $# -ne 2 ]; then
    echo "put object command requires source, destination"
    return 1
  fi
  local exit_code=0
  local error
  error=$(aws s3 cp "$1" s3://"$2" 2>&1) || exit_code=$?
  if [ $exit_code -ne 0 ]; then
    echo "error copying object to bucket: $error"
    return 1
  fi
  return 0
}

# add object to versitygw if it doesn't exist
# params: source file, destination copy location
# return 0 for success or already exists, 1 for failure
check_and_put_object() {
  if [ $# -ne 2 ]; then
    echo "check and put object function requires source, destination"
    return 1
  fi
  object_exists "$2" || local exists_result=$?
  if [ "$exists_result" -eq 2 ]; then
    echo "error checking if object exists"
    return 1
  fi
  if [ "$exists_result" -eq 1 ]; then
    put_object "$1" "$2" || local put_result=$?
    if [ "$put_result" -ne 0 ]; then
      echo "error adding object"
      return 1
    fi
  fi
  return 0
}

# delete object from versitygw
# param: object path, including bucket name
# return 0 for success, 1 for failure
delete_object() {
  if [ $# -ne 1 ]; then
    echo "delete object command requires object parameter"
    return 1
  fi
  local exit_code=0
  local error
  error=$(aws s3 rm s3://"$1" 2>&1) || exit_code=$?
  if [ $exit_code -ne 0 ]; then
    echo "error deleting object: $error"
    return 1
  fi
  return 0
}

# list buckets on versitygw
# no params
# export bucket_array (bucket names) on success, return 1 for failure
list_buckets() {
  local exit_code=0
  local output
  output=$(aws s3 ls 2>&1) || exit_code=$?
  if [ $exit_code -ne 0 ]; then
    echo "error listing buckets: $output"
    return 1
  fi

  bucket_array=()
  while IFS= read -r line; do
    bucket_name=$(echo "$line" | awk '{print $NF}')
    bucket_array+=("$bucket_name")
  done <<< "$output"

  export bucket_array
}
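
# Editor's note: awk '{print $NF}' above keeps only the last
# whitespace-separated field of each "aws s3 ls" line, so names containing
# spaces would be truncated; that is fine for the versity-gwtest-* names
# these tests use.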

# list objects on versitygw, in bucket or folder
# param: path of bucket or folder
# export object_array (object names) on success, return 1 for failure
list_objects() {
  if [ $# -ne 1 ]; then
    echo "list objects command requires bucket or folder"
    return 1
  fi
  local exit_code=0
  local output
  output=$(aws s3 ls s3://"$1" 2>&1) || exit_code=$?
  if [ $exit_code -ne 0 ]; then
    echo "error listing objects: $output"
    return 1
  fi

  object_array=()
  while IFS= read -r line; do
    object_name=$(echo "$line" | awk '{print $NF}')
    object_array+=("$object_name")
  done <<< "$output"

  export object_array
}

# check if bucket info can be retrieved
# param: path of bucket or folder
# return 0 for yes, 1 for no, 2 for error
bucket_is_accessible() {
  if [ $# -ne 1 ]; then
    echo "bucket accessibility check missing bucket name"
    return 2
  fi
  local exit_code=0
  local error
  error=$(aws s3api head-bucket --bucket "$1" 2>&1) || exit_code="$?"
  if [ $exit_code -eq 0 ]; then
    return 0
  fi
  if [[ "$error" == *"500"* ]]; then
    return 1
  fi
  echo "Error checking bucket accessibility: $error"
  return 2
}

# check if object info (etag) is accessible
# param: path of object
# return 0 for yes, 1 for no, 2 for error
object_is_accessible() {
  if [ $# -ne 2 ]; then
    echo "object accessibility check missing bucket and/or key"
    return 2
  fi
  local exit_code=0
  object_data=$(aws s3api head-object --bucket "$1" --key "$2" 2>&1) || exit_code="$?"
  if [ $exit_code -ne 0 ]; then
    echo "Error obtaining object data: $object_data"
    return 2
  fi
  etag=$(echo "$object_data" | jq '.ETag')
  if [[ "$etag" == '""' ]]; then
    return 1
  fi
  return 0
}

# get bucket acl
# param: bucket path
# export acl for success, return 1 for error
get_bucket_acl() {
  if [ $# -ne 1 ]; then
    echo "bucket ACL command missing bucket name"
    return 1
  fi
  local exit_code=0
  acl=$(aws s3api get-bucket-acl --bucket "$1" 2>&1) || exit_code="$?"
  if [ $exit_code -ne 0 ]; then
    echo "Error getting bucket ACLs: $acl"
    return 1
  fi
  export acl
}

# add tags to bucket
# params: bucket, key, value
# return: 0 for success, 1 for error
put_bucket_tag() {
  if [ $# -ne 3 ]; then
    echo "bucket tag command missing bucket name, key, value"
    return 1
  fi
  local error
  local result
  error=$(aws s3api put-bucket-tagging --bucket "$1" --tagging "TagSet=[{Key=$2,Value=$3}]") || result=$?
  if [[ $result -ne 0 ]]; then
    echo "Error adding bucket tag: $error"
    return 1
  fi
  return 0
}

# get bucket tags
# params: bucket
# export 'tags' on success, return 1 for error
get_bucket_tags() {
  if [ $# -ne 1 ]; then
    echo "get bucket tag command missing bucket name"
    return 1
  fi
  local result
  tags=$(aws s3api get-bucket-tagging --bucket "$1") || result=$?
  if [[ $result -ne 0 ]]; then
    echo "error getting bucket tags: $tags"
    return 1
  fi
  export tags
}

# add tags to object
# params: object, key, value
# return: 0 for success, 1 for error
put_object_tag() {
  if [ $# -ne 4 ]; then
    echo "object tag command missing object name, file, key, and/or value"
    return 1
  fi
  local error
  local result
  error=$(aws s3api put-object-tagging --bucket "$1" --key "$2" --tagging "TagSet=[{Key=$3,Value=$4}]") || result=$?
  if [[ $result -ne 0 ]]; then
    echo "Error adding object tag: $error"
    return 1
  fi
  return 0
}

# get object tags
# params: bucket
# export 'tags' on success, return 1 for error
get_object_tags() {
  if [ $# -ne 2 ]; then
    echo "get object tag command missing bucket and/or key"
    return 1
  fi
  local result
  tags=$(aws s3api get-object-tagging --bucket "$1" --key "$2") || result=$?
  if [[ $result -ne 0 ]]; then
    echo "error getting object tags: $tags"
    return 1
  fi
  export tags
}

# create test files and export the folder name; use a temp folder when not running in CI
# params: filenames
# export test file folder on success, return 1 for error
create_test_files() {
  if [ $# -lt 1 ]; then
    echo "create test files command missing filename"
    return 1
  fi
  test_file_folder=.
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    test_file_folder=${TMPDIR}versity-gwtest
    mkdir -p "$test_file_folder" || local mkdir_result=$?
    if [[ $mkdir_result -ne 0 ]]; then
      echo "error creating test file folder"
    fi
  fi
  for name in "$@"; do
    touch "$test_file_folder"/"$name" || local touch_result=$?
    if [[ $touch_result -ne 0 ]]; then
      echo "error creating file $name"
    fi
  done
  export test_file_folder
}

# delete test files
# params: filenames
# return: 0 for success, 1 for error
delete_test_files() {
  if [ $# -lt 1 ]; then
    echo "delete test files command missing filenames"
    return 1
  fi
  if [ -z "$test_file_folder" ]; then
    echo "no test file folder defined, not deleting"
    return 1
  fi
  for name in "$@"; do
    rm "$test_file_folder"/"$name" || rm_result=$?
    if [[ $rm_result -ne 0 ]]; then
      echo "error deleting file $name"
    fi
  done
  return 0
}

# list objects in bucket, v1
# param: bucket
# export objects on success, return 1 for failure
list_objects_s3api_v1() {
  if [ $# -ne 1 ]; then
    echo "list objects command missing bucket"
    return 1
  fi
  objects=$(aws s3api list-objects --bucket "$1") || local result=$?
  if [[ $result -ne 0 ]]; then
    echo "error listing objects: $objects"
    return 1
  fi
  export objects
}

# list objects in bucket, v2
# param: bucket
# export objects on success, return 1 for failure
list_objects_s3api_v2() {
  if [ $# -ne 1 ]; then
    echo "list objects command missing bucket and/or path"
    return 1
  fi
  objects=$(aws s3api list-objects-v2 --bucket "$1") || local result=$?
  if [[ $result -ne 0 ]]; then
    echo "error listing objects: $objects"
    return 1
  fi
  export objects
}

# initialize a multipart upload
# params: bucket, key
# return 0 for success, 1 for failure
create_multipart_upload() {
  if [ $# -ne 2 ]; then
    echo "create multipart upload function must have bucket, key"
    return 1
  fi

  local multipart_data
  multipart_data=$(aws s3api create-multipart-upload --bucket "$1" --key "$2") || local created=$?
  if [[ $created -ne 0 ]]; then
    echo "Error creating multipart upload: $multipart_data"
    return 1
  fi

  upload_id=$(echo "$multipart_data" | jq '.UploadId')
  upload_id="${upload_id//\"/}"
  export upload_id
}

# upload a single part of a multipart upload
# params: bucket, key, upload ID, original (unsplit) file name, part number
# return: 0 for success, 1 for failure
upload_part() {
  if [ $# -ne 5 ]; then
    echo "upload multipart part function must have bucket, key, upload ID, file name, part number"
    return 1
  fi
  local etag_json
  etag_json=$(aws s3api upload-part --bucket "$1" --key "$2" --upload-id "$3" --part-number "$5" --body "$4-$(($5-1))") || local uploaded=$?
  if [[ $uploaded -ne 0 ]]; then
    echo "Error uploading part $5: $etag_json"
    return 1
  fi
  etag=$(echo "$etag_json" | jq '.ETag')
  export etag
}

# perform all parts of a multipart upload before completion command
# params: bucket, key, file to split and upload, number of file parts to upload
# return: 0 for success, 1 for failure
multipart_upload_before_completion() {

  if [ $# -ne 4 ]; then
    echo "multipart upload pre-completion command missing bucket, key, file, and/or part count"
    return 1
  fi

  file_size=$(stat -c %s "$3" 2>/dev/null || stat -f %z "$3" 2>/dev/null)
  part_size=$((file_size / $4))
  remainder=$((file_size % $4))
  if [[ remainder -ne 0 ]]; then
    part_size=$((part_size+1))
  fi
  local error
  local split_result
  error=$(split -a 1 -d -b "$part_size" "$3" "$3"-) || split_result=$?
  if [[ $split_result -ne 0 ]]; then
    echo "error splitting file: $error"
    return 1
  fi

  create_multipart_upload "$1" "$2" || create_result=$?
  if [[ $create_result -ne 0 ]]; then
    echo "error creating multipart upload"
    return 1
  fi

  parts="["
  for ((i = 1; i <= $4; i++)); do
    upload_part "$1" "$2" "$upload_id" "$3" "$i" || local upload_result=$?
    if [[ $upload_result -ne 0 ]]; then
      echo "error uploading part $i"
      return 1
    fi
    parts+="{\"ETag\": $etag, \"PartNumber\": $i}"
    if [[ $i -ne $4 ]]; then
      parts+=","
    fi
  done
  parts+="]"

  export parts
}
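
# Editor's note: on success the function above leaves two globals for its
# callers — "$upload_id" from create_multipart_upload, and "$parts", a JSON
# array such as
#   [{"ETag": "\"etag-1\"", "PartNumber": 1}, {"ETag": "\"etag-2\"", "PartNumber": 2}]
# ($etag is interpolated unquoted because the jq output already includes quotes).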
|
||||
# perform a multi-part upload
|
||||
# params: bucket, key, source file location, number of parts
|
||||
# return 0 for success, 1 for failure
|
||||
multipart_upload() {
|
||||
|
||||
if [ $# -ne 4 ]; then
|
||||
echo "multipart upload command missing bucket, key, file, and/or part count"
|
||||
return 1
|
||||
fi
|
||||
|
||||
multipart_upload_before_completion "$1" "$2" "$3" "$4" || result=$?
|
||||
if [[ $result -ne 0 ]]; then
|
||||
echo "error performing pre-completion multipart upload"
|
||||
return 1
|
||||
fi
|
||||
|
||||
error=$(aws s3api complete-multipart-upload --bucket "$1" --key "$2" --upload-id "$upload_id" --multipart-upload '{"Parts": '"$parts"'}') || local completed=$?
|
||||
if [[ $completed -ne 0 ]]; then
|
||||
echo "Error completing upload: $error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||

# run the abort multipart command
# params: bucket, key, upload ID
# return 0 for success, 1 for failure
run_abort_command() {
  if [ $# -ne 3 ]; then
    echo "command to run abort requires bucket, key, upload ID"
    return 1
  fi

  error=$(aws s3api abort-multipart-upload --bucket "$1" --key "$2" --upload-id "$3") || local aborted=$?
  if [[ $aborted -ne 0 ]]; then
    echo "Error aborting upload: $error"
    return 1
  fi
  return 0
}
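
# Usage sketch (hypothetical names; assumes $upload_id was exported by
# create_multipart_upload and the upload has not yet been completed):
#   run_abort_command "versity-test-bucket" "large-file" "$upload_id" || return 1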

# run upload, then abort it
# params: bucket, key, local file location, number of parts to split into before uploading
# return 0 for success, 1 for failure
abort_multipart_upload() {
  if [ $# -ne 4 ]; then
    echo "abort multipart upload command missing bucket, key, file, and/or part count"
    return 1
  fi

  multipart_upload_before_completion "$1" "$2" "$3" "$4" || result=$?
  if [[ $result -ne 0 ]]; then
    echo "error performing pre-completion multipart upload"
    return 1
  fi

  run_abort_command "$1" "$2" "$upload_id"
  return $?
}
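
# A test would typically follow the abort by verifying the upload is gone,
# e.g. (hypothetical names; $test_file is a local file to split and upload):
#   abort_multipart_upload "versity-test-bucket" "rand.data" "$test_file" 4 || return 1
#   remaining=$(aws s3api list-multipart-uploads --bucket "versity-test-bucket")
#   [[ $(echo "$remaining" | jq '.Uploads | length') -eq 0 ]] || return 1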

# copy a file to/from S3
# params: source, destination
# return 0 for success, 1 for failure
copy_file() {
  if [ $# -ne 2 ]; then
    echo "copy file command requires src and dest"
    return 1
  fi

  local result
  error=$(aws s3 cp "$1" "$2") || result=$?
  if [[ $result -ne 0 ]]; then
    echo "error copying file: $error"
    return 1
  fi
  return 0
}
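
# Works in either direction, since `aws s3 cp` accepts local paths and
# s3:// URIs (hypothetical names):
#   copy_file "$test_file" "s3://versity-test-bucket/copied-file" || return 1
#   copy_file "s3://versity-test-bucket/copied-file" "$test_file-copy" || return 1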

# list parts of an unfinished multipart upload
# params: bucket, key, local file location, and parts to split into before upload
# export parts on success, return 1 for error
list_parts() {
  if [ $# -ne 4 ]; then
    echo "list multipart upload parts command missing bucket, key, file, and/or part count"
    return 1
  fi

  multipart_upload_before_completion "$1" "$2" "$3" "$4" || result=$?
  if [[ $result -ne 0 ]]; then
    echo "error performing pre-completion multipart upload"
    return 1
  fi

  listed_parts=$(aws s3api list-parts --bucket "$1" --key "$2" --upload-id "$upload_id") || local listed=$?
  if [[ $listed -ne 0 ]]; then
    echo "Error listing parts: $listed_parts"
    return 1
  fi
  export listed_parts
}
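
# The exported $listed_parts is the raw list-parts JSON; ETags and part
# numbers can be pulled out with jq, e.g. (hypothetical names):
#   list_parts "versity-test-bucket" "rand.data" "$test_file" 4 || return 1
#   first_etag=$(echo "$listed_parts" | jq -r '.Parts[0].ETag')
#   part_count=$(echo "$listed_parts" | jq '.Parts | length')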

# list unfinished multipart uploads
# params: bucket, key one, key two
# export current two uploads on success, return 1 for error
list_multipart_uploads() {
  if [ $# -ne 3 ]; then
    echo "list multipart uploads command requires bucket and two keys"
    return 1
  fi

  create_multipart_upload "$1" "$2" || local create_result=$?
  if [[ $create_result -ne 0 ]]; then
    echo "error creating multipart upload"
    return 1
  fi

  create_multipart_upload "$1" "$3" || local create_result_two=$?
  if [[ $create_result_two -ne 0 ]]; then
    echo "error creating multipart upload two"
    return 1
  fi

  uploads=$(aws s3api list-multipart-uploads --bucket "$1") || local list_result=$?
  if [[ $list_result -ne 0 ]]; then
    echo "error listing uploads: $uploads"
    return 1
  fi
  export uploads
}
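
# The exported $uploads is the raw list-multipart-uploads JSON; the keys of
# the in-progress uploads can be checked with jq, e.g. (hypothetical names):
#   list_multipart_uploads "versity-test-bucket" "key-one" "key-two" || return 1
#   echo "$uploads" | jq -r '.Uploads[].Key'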

@@ -1,97 +0,0 @@
#!/usr/bin/env bats

# check if object exists both on S3 and locally
# param: object path
# return: 0 for yes, 1 for no, 2 for error
object_exists_remote_and_local() {
  if [ $# -ne 1 ]; then
    echo "object existence check requires single name parameter"
    return 2
  fi
  object_exists "$1" || local exist_result=$?
  if [[ $exist_result -eq 2 ]]; then
    echo "Error checking if object exists"
    return 2
  fi
  if [[ $exist_result -eq 1 ]]; then
    echo "Error: object doesn't exist remotely"
    return 1
  fi
  if [[ ! -e "$LOCAL_FOLDER"/"$1" ]]; then
    echo "Error: object doesn't exist locally"
    return 1
  fi
  return 0
}

# check if object doesn't exist both on S3 and locally
# param: object path
# return: 0 for doesn't exist, 1 for still exists, 2 for error
object_not_exists_remote_and_local() {
  if [ $# -ne 1 ]; then
    echo "object non-existence check requires single name parameter"
    return 2
  fi
  object_exists "$1" || local exist_result=$?
  if [[ $exist_result -eq 2 ]]; then
    echo "Error checking if object doesn't exist"
    return 2
  fi
  if [[ $exist_result -eq 0 ]]; then
    echo "Error: object exists remotely"
    return 1
  fi
  if [[ -e "$LOCAL_FOLDER"/"$1" ]]; then
    echo "Error: object exists locally"
    return 1
  fi
  return 0
}

# check if a bucket doesn't exist both on S3 and on gateway
# param: bucket name
# return: 0 for doesn't exist, 1 for does, 2 for error
bucket_not_exists_remote_and_local() {
  if [ $# -ne 1 ]; then
    echo "bucket non-existence check requires single name parameter"
    return 2
  fi
  bucket_exists "$1" || local exist_result=$?
  if [[ $exist_result -eq 2 ]]; then
    echo "Error checking if bucket exists"
    return 2
  fi
  if [[ $exist_result -eq 0 ]]; then
    echo "Error: bucket exists remotely"
    return 1
  fi
  if [[ -e "$LOCAL_FOLDER"/"$1" ]]; then
    echo "Error: bucket exists locally"
    return 1
  fi
  return 0
}

# check if a bucket exists both on S3 and on gateway
# param: bucket name
# return: 0 for yes, 1 for no, 2 for error
bucket_exists_remote_and_local() {
  if [ $# -ne 1 ]; then
    echo "bucket existence check requires single name parameter"
    return 2
  fi
  bucket_exists "$1" || local exist_result=$?
  if [[ $exist_result -eq 2 ]]; then
    echo "Error checking if bucket exists"
    return 2
  fi
  if [[ $exist_result -eq 1 ]]; then
    echo "Error: bucket doesn't exist remotely"
    return 1
  fi
  if [[ ! -e "$LOCAL_FOLDER"/"$1" ]]; then
    echo "Error: bucket doesn't exist locally"
    return 1
  fi
  return 0
}
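
# BATS usage sketch (hypothetical bucket name; assumes $LOCAL_FOLDER points at
# the gateway's backing directory and the bucket was created beforehand):
#   @test "bucket exists remotely and locally" {
#     run bucket_exists_remote_and_local "versity-test-bucket"
#     [ "$status" -eq 0 ]
#   }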