mirror of https://github.com/versity/versitygw.git
synced 2026-01-29 22:42:02 +00:00

Compare commits
38 Commits
f4cf0132e5
ab98dc0c12
0c08f9f1bc
b4fe47310a
bd56f15733
bdcdce4cff
69a2a2a54b
afc8b9f072
2aa223e3d9
cfe367da99
867dadd117
576dfc5884
7322309ea9
6ad3d05c37
1930733cb6
8267a7ad12
0d5cc61064
f1106491f2
d5ecb97edc
f6755cb011
557a8b683a
8f8dbae6d7
fe4c9dff76
714dd6eb86
5d5381e688
a7110c28b6
20cef53fd8
1383a27dea
282ef71867
a896b3660b
0fb6bf6267
ab0feac383
dde30943f1
8d1b5c4339
83136aa40f
3abde8126d
b7cc7feffa
eb4c03c10e
16 .github/workflows/shellcheck.yml (vendored, new file)
@@ -0,0 +1,16 @@
name: shellcheck
on: pull_request
jobs:

  build:
    name: Run shellcheck
    runs-on: ubuntu-latest
    steps:

      - name: Check out code
        uses: actions/checkout@v4

      - name: Run checks
        run: |
          shellcheck --version
          shellcheck -e SC1091 tests/*.sh tests/*/*.sh
87 .github/workflows/system.yml (vendored)

@@ -4,16 +4,60 @@ jobs:
  build:
    name: RunTests
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        include:
          - set: 1
            LOCAL_FOLDER: /tmp/gw1
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-1
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-1
            IAM_TYPE: folder
            USERS_FOLDER: /tmp/iam1
            AWS_ENDPOINT_URL: https://127.0.0.1:7070
            RUN_SET: "s3cmd"
            PORT: 7070
          - set: 2
            LOCAL_FOLDER: /tmp/gw2
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-2
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-2
            IAM_TYPE: folder
            USERS_FOLDER: /tmp/iam2
            AWS_ENDPOINT_URL: https://127.0.0.1:7071
            RUN_SET: "s3"
            PORT: 7071
          - set: 3
            LOCAL_FOLDER: /tmp/gw3
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-3
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-3
            IAM_TYPE: folder
            USERS_FOLDER: /tmp/iam3
            AWS_ENDPOINT_URL: https://127.0.0.1:7072
            RUN_SET: "s3api"
            PORT: 7072
          - set: 4
            LOCAL_FOLDER: /tmp/gw4
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-4
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-4
            IAM_TYPE: folder
            USERS_FOLDER: /tmp/iam4
            AWS_ENDPOINT_URL: https://127.0.0.1:7073
            RUN_SET: "mc"
            PORT: 7073
          - set: 5
            LOCAL_FOLDER: /tmp/gw4
            BUCKET_ONE_NAME: versity-gwtest-bucket-one-4
            BUCKET_TWO_NAME: versity-gwtest-bucket-two-4
            IAM_TYPE: s3
            USERS_BUCKET: versity-gwtest-iam
            AWS_ENDPOINT_URL: https://127.0.0.1:7074
            RUN_SET: "aws-user"
            PORT: 7074

    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v4

      - name: Install ShellCheck and md5
        run: sudo apt-get install shellcheck

      - name: Run ShellCheck
        run: shellcheck -S warning ./tests/*.sh

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
@@ -39,6 +83,27 @@ jobs:
          chmod 755 /usr/local/bin/mc

      - name: Build and run, posix backend
        env:
          LOCAL_FOLDER: ${{ matrix.LOCAL_FOLDER }}
          BUCKET_ONE_NAME: ${{ matrix.BUCKET_ONE_NAME }}
          BUCKET_TWO_NAME: ${{ matrix.BUCKET_TWO_NAME }}
          USERS_FOLDER: ${{ matrix.USERS_FOLDER }}
          USERS_BUCKET: ${{ matrix.USERS_BUCKET }}
          IAM_TYPE: ${{ matrix.IAM_TYPE }}
          AWS_ENDPOINT_URL: ${{ matrix.AWS_ENDPOINT_URL }}
          RUN_SET: ${{ matrix.RUN_SET }}
          PORT: ${{ matrix.PORT }}
          AWS_PROFILE: versity
          VERSITY_EXE: ${{ github.workspace }}/versitygw
          RUN_VERSITYGW: true
          BACKEND: posix
          RECREATE_BUCKETS: true
          CERT: ${{ github.workspace }}/cert.pem
          KEY: ${{ github.workspace }}/versitygw.pem
          S3CMD_CONFIG: tests/s3cfg.local.default
          MC_ALIAS: versity
          LOG_LEVEL: 4
          GOCOVERDIR: ${{ github.workspace }}/cover
        run: |
          make testbin
          export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST
@@ -47,12 +112,12 @@ jobs:
          aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile versity
          aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile versity
          aws configure set aws_region $AWS_REGION --profile versity
          mkdir /tmp/gw
          mkdir $LOCAL_FOLDER
          export WORKSPACE=$GITHUB_WORKSPACE
          openssl genpkey -algorithm RSA -out versitygw.pem -pkeyopt rsa_keygen_bits:2048
          openssl req -new -x509 -key versitygw.pem -out cert.pem -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"
          mkdir cover iam
          VERSITYGW_TEST_ENV=./tests/.env.default ./tests/run_all.sh
          openssl genpkey -algorithm RSA -out $KEY -pkeyopt rsa_keygen_bits:2048
          openssl req -new -x509 -key $KEY -out $CERT -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"
          mkdir $GOCOVERDIR $USERS_FOLDER
          BYPASS_ENV_FILE=true ${{ github.workspace }}/tests/run.sh $RUN_SET

      #- name: Build and run, s3 backend
      #  run: |
README.md

@@ -6,7 +6,7 @@
<a href="https://www.versity.com"><img alt="Versity Software logo image." src="https://github.com/versity/versitygw/blob/assets/assets/logo.svg"></a>
</picture>

[](https://github.com/versity/versitygw/blob/main/LICENSE)
[](https://github.com/versity/versitygw/blob/main/LICENSE) [](https://goreportcard.com/report/github.com/versity/versitygw) [](https://pkg.go.dev/github.com/versity/versitygw)

### Binary release builds
Download [latest release](https://github.com/versity/versitygw/releases)

@@ -22,8 +22,7 @@ Download [latest release](https://github.com/versity/versitygw/releases)
* Simplified interface for adding new storage system support

### News
* New performance (scale up) analysis article [https://github.com/versity/versitygw/wiki/Performance](https://github.com/versity/versitygw/wiki/Performance)
* New performance (scale out) Part 2 analysis article [https://github.com/versity/versitygw/wiki/Performance-Part-2](https://github.com/versity/versitygw/wiki/Performance-Part-2)
Check out latest wiki articles: [https://github.com/versity/versitygw/wiki/Articles](https://github.com/versity/versitygw/wiki/Articles)

### Mailing List
Keep up to date with latest gateway announcements by signing up to the [versitygw mailing list](https://www.versity.com/products/versitygw#signup).
59 auth/iam.go
@@ -48,28 +48,42 @@ type IAMService interface {
    Shutdown() error
}

var ErrNoSuchUser = errors.New("user not found")
var (
    // ErrUserExists is returned when the user already exists
    ErrUserExists = errors.New("user already exists")
    // ErrNoSuchUser is returned when the user does not exist
    ErrNoSuchUser = errors.New("user not found")
)

type Opts struct {
    Dir                string
    LDAPServerURL      string
    LDAPBindDN         string
    LDAPPassword       string
    LDAPQueryBase      string
    LDAPObjClasses     string
    LDAPAccessAtr      string
    LDAPSecretAtr      string
    LDAPRoleAtr        string
    S3Access           string
    S3Secret           string
    S3Region           string
    S3Bucket           string
    S3Endpoint         string
    S3DisableSSlVerfiy bool
    S3Debug            bool
    CacheDisable       bool
    CacheTTL           int
    CachePrune         int
    Dir                    string
    LDAPServerURL          string
    LDAPBindDN             string
    LDAPPassword           string
    LDAPQueryBase          string
    LDAPObjClasses         string
    LDAPAccessAtr          string
    LDAPSecretAtr          string
    LDAPRoleAtr            string
    VaultEndpointURL       string
    VaultSecretStoragePath string
    VaultMountPath         string
    VaultRootToken         string
    VaultRoleId            string
    VaultRoleSecret        string
    VaultServerCert        string
    VaultClientCert        string
    VaultClientCertKey     string
    S3Access               string
    S3Secret               string
    S3Region               string
    S3Bucket               string
    S3Endpoint             string
    S3DisableSSlVerfiy     bool
    S3Debug                bool
    CacheDisable           bool
    CacheTTL               int
    CachePrune             int
}

func New(o *Opts) (IAMService, error) {
@@ -90,6 +104,11 @@ func New(o *Opts) (IAMService, error) {
            o.S3Endpoint, o.S3DisableSSlVerfiy, o.S3Debug)
        fmt.Printf("initializing S3 IAM with '%v/%v'\n",
            o.S3Endpoint, o.S3Bucket)
    case o.VaultEndpointURL != "":
        svc, err = NewVaultIAMService(o.VaultEndpointURL, o.VaultSecretStoragePath,
            o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
            o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
        fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
    default:
        // if no iam options selected, default to the single user mode
        fmt.Println("No IAM service configured, enabling single account mode")
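
For orientation, a minimal sketch of how a caller would select the new Vault-backed IAM service through this switch, assuming a local Vault dev server; the endpoint, storage path, and token values below are placeholders, not values from this changeset:

package main

import (
    "fmt"
    "log"

    "github.com/versity/versitygw/auth"
)

func main() {
    // Setting VaultEndpointURL routes auth.New to NewVaultIAMService;
    // leaving it (and the other service options) empty falls through to
    // single-account mode. All values below are dev-mode placeholders.
    iam, err := auth.New(&auth.Opts{
        VaultEndpointURL:       "http://127.0.0.1:8200",
        VaultSecretStoragePath: "versitygw-users",
        VaultRootToken:         "dev-only-token",
    })
    if err != nil {
        log.Fatalf("setup iam: %v", err)
    }
    defer iam.Shutdown()

    fmt.Println("vault-backed IAM ready")
}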
@@ -70,7 +70,7 @@ func (s *IAMServiceInternal) CreateAccount(account Account) error {

    _, ok := conf.AccessAccounts[account.Access]
    if ok {
        return nil, fmt.Errorf("account already exists")
        return nil, ErrUserExists
    }
    conf.AccessAccounts[account.Access] = account
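
With the sentinel error in place of an ad hoc fmt.Errorf, callers can distinguish a duplicate account from other failures. A short sketch against the IAMService interface; the credentials and the empty-Opts setup are placeholders for illustration:

package main

import (
    "errors"
    "fmt"
    "log"

    "github.com/versity/versitygw/auth"
)

func createUser(iam auth.IAMService, acct auth.Account) {
    err := iam.CreateAccount(acct)
    if errors.Is(err, auth.ErrUserExists) {
        // a duplicate is a conflict the caller can report, not a crash
        fmt.Println("account already exists")
        return
    }
    if err != nil {
        log.Fatalf("create account: %v", err)
    }
}

func main() {
    // Empty Opts selects single-account mode; any configured IAMService works.
    iam, err := auth.New(&auth.Opts{})
    if err != nil {
        log.Fatal(err)
    }
    createUser(iam, auth.Account{Access: "alice", Secret: "s3cr3t"})
}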
@@ -188,6 +188,10 @@ func parseIAM(b []byte) (iAMConfig, error) {
        return iAMConfig{}, fmt.Errorf("failed to parse the config file: %w", err)
    }

    if conf.AccessAccounts == nil {
        conf.AccessAccounts = make(map[string]Account)
    }

    return conf, nil
}
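
The new nil check matters because a config file with no accounts decodes to a nil map, and assigning into a nil map panics at runtime. A small self-contained illustration:

package main

import "fmt"

func main() {
    var accounts map[string]string // nil, as after decoding an empty config

    _, ok := accounts["alice"] // reads from a nil map are safe
    fmt.Println(ok)            // false

    // accounts["alice"] = "x" // would panic: assignment to entry in nil map

    accounts = make(map[string]string) // what parseIAM now guarantees
    accounts["alice"] = "x"            // safe after initialization
    fmt.Println(accounts)
}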
@@ -85,6 +85,13 @@ func NewS3(access, secret, region, bucket, endpoint string, sslSkipVerify, debug
        return nil, fmt.Errorf("init s3 IAM: %v", err)
    }

    if endpoint != "" {
        i.client = s3.NewFromConfig(cfg, func(o *s3.Options) {
            o.BaseEndpoint = &endpoint
        })
        return i, nil
    }

    i.client = s3.NewFromConfig(cfg)
    return i, nil
}
@@ -97,7 +104,7 @@ func (s *IAMServiceS3) CreateAccount(account Account) error {

    _, ok := conf.AccessAccounts[account.Access]
    if ok {
        return fmt.Errorf("account already exists")
        return ErrUserExists
    }
    conf.AccessAccounts[account.Access] = account
@@ -159,16 +166,6 @@ func (s *IAMServiceS3) ListUserAccounts() ([]Account, error) {
    return accs, nil
}

// ResolveEndpoint is used for on prem or non-aws endpoints
func (s *IAMServiceS3) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
    return aws.Endpoint{
        PartitionID:       "aws",
        URL:               s.endpoint,
        SigningRegion:     s.region,
        HostnameImmutable: true,
    }, nil
}

func (s *IAMServiceS3) Shutdown() error {
    return nil
}
@@ -187,11 +184,6 @@ func (s *IAMServiceS3) getConfig() (aws.Config, error) {
        config.WithHTTPClient(client),
    }

    if s.endpoint != "" {
        opts = append(opts,
            config.WithEndpointResolverWithOptions(s))
    }

    if s.debug {
        opts = append(opts,
            config.WithClientLogMode(aws.LogSigning|aws.LogRetries|aws.LogRequest|aws.LogResponse|aws.LogRequestEventMessage|aws.LogResponseEventMessage))
@@ -212,12 +204,12 @@ func (s *IAMServiceS3) getAccounts() (iAMConfig, error) {
        // init empty accounts struct and return that
        var nsk *types.NoSuchKey
        if errors.As(err, &nsk) {
            return iAMConfig{}, nil
            return iAMConfig{AccessAccounts: map[string]Account{}}, nil
        }
        var apiErr smithy.APIError
        if errors.As(err, &apiErr) {
            if apiErr.ErrorCode() == "NotFound" {
                return iAMConfig{}, nil
                return iAMConfig{AccessAccounts: map[string]Account{}}, nil
            }
        }
226 auth/iam_vault.go (new file)

@@ -0,0 +1,226 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "strings"
    "time"

    vault "github.com/hashicorp/vault-client-go"
    "github.com/hashicorp/vault-client-go/schema"
)

type VaultIAMService struct {
    client            *vault.Client
    reqOpts           []vault.RequestOption
    secretStoragePath string
}

var _ IAMService = &VaultIAMService{}

func NewVaultIAMService(endpoint, secretStoragePath, mountPath, rootToken, roleID, roleSecret, serverCert, clientCert, clientCertKey string) (IAMService, error) {
    opts := []vault.ClientOption{
        vault.WithAddress(endpoint),
        // set request timeout to 10 secs
        vault.WithRequestTimeout(10 * time.Second),
    }
    if serverCert != "" {
        tls := vault.TLSConfiguration{}

        tls.ServerCertificate.FromBytes = []byte(serverCert)
        if clientCert != "" {
            if clientCertKey == "" {
                return nil, fmt.Errorf("client certificate and client certificate key should both be specified")
            }

            tls.ClientCertificate.FromBytes = []byte(clientCert)
            tls.ClientCertificateKey.FromBytes = []byte(clientCertKey)
        }

        opts = append(opts, vault.WithTLS(tls))
    }

    client, err := vault.New(opts...)
    if err != nil {
        return nil, fmt.Errorf("init vault client: %w", err)
    }

    reqOpts := []vault.RequestOption{}
    // if mount path is not specified, it defaults to "approle"
    if mountPath != "" {
        reqOpts = append(reqOpts, vault.WithMountPath(mountPath))
    }

    // Authentication
    switch {
    case rootToken != "":
        err := client.SetToken(rootToken)
        if err != nil {
            return nil, fmt.Errorf("root token authentication failure: %w", err)
        }
    case roleID != "":
        if roleSecret == "" {
            return nil, fmt.Errorf("role id and role secret must both be specified")
        }

        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        resp, err := client.Auth.AppRoleLogin(ctx, schema.AppRoleLoginRequest{
            RoleId:   roleID,
            SecretId: roleSecret,
        }, reqOpts...)
        cancel()
        if err != nil {
            return nil, fmt.Errorf("approle authentication failure: %w", err)
        }

        if err := client.SetToken(resp.Auth.ClientToken); err != nil {
            return nil, fmt.Errorf("approle authentication set token failure: %w", err)
        }
    default:
        return nil, fmt.Errorf("vault authentication requires either roleid/rolesecret or root token")
    }

    return &VaultIAMService{
        client:            client,
        reqOpts:           reqOpts,
        secretStoragePath: secretStoragePath,
    }, nil
}

func (vt *VaultIAMService) CreateAccount(account Account) error {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    _, err := vt.client.Secrets.KvV2Write(ctx, vt.secretStoragePath+"/"+account.Access, schema.KvV2WriteRequest{
        Data: map[string]any{
            account.Access: account,
        },
        Options: map[string]interface{}{
            "cas": 0,
        },
    }, vt.reqOpts...)
    cancel()
    if err != nil {
        if strings.Contains(err.Error(), "check-and-set") {
            return ErrUserExists
        }
        return err
    }

    return nil
}

func (vt *VaultIAMService) GetUserAccount(access string) (Account, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    resp, err := vt.client.Secrets.KvV2Read(ctx, vt.secretStoragePath+"/"+access, vt.reqOpts...)
    cancel()
    if err != nil {
        return Account{}, err
    }

    acc, err := parseVaultUserAccount(resp.Data.Data, access)
    if err != nil {
        return Account{}, err
    }

    return acc, nil
}

func (vt *VaultIAMService) DeleteUserAccount(access string) error {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    _, err := vt.client.Secrets.KvV2DeleteMetadataAndAllVersions(ctx, vt.secretStoragePath+"/"+access, vt.reqOpts...)
    cancel()
    if err != nil {
        return err
    }
    return nil
}

func (vt *VaultIAMService) ListUserAccounts() ([]Account, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    resp, err := vt.client.Secrets.KvV2List(ctx, vt.secretStoragePath, vt.reqOpts...)
    cancel()
    if err != nil {
        if vault.IsErrorStatus(err, 404) {
            return []Account{}, nil
        }
        return nil, err
    }

    accs := []Account{}

    for _, acss := range resp.Data.Keys {
        acc, err := vt.GetUserAccount(acss)
        if err != nil {
            return nil, err
        }
        accs = append(accs, acc)
    }

    return accs, nil
}

// the client doesn't have explicit shutdown, as it uses http.Client
func (vt *VaultIAMService) Shutdown() error {
    return nil
}

var errInvalidUser error = errors.New("invalid user account entry in secrets engine")

func parseVaultUserAccount(data map[string]interface{}, access string) (acc Account, err error) {
    usrAcc, ok := data[access].(map[string]interface{})
    if !ok {
        return acc, errInvalidUser
    }

    acss, ok := usrAcc["access"].(string)
    if !ok {
        return acc, errInvalidUser
    }
    secret, ok := usrAcc["secret"].(string)
    if !ok {
        return acc, errInvalidUser
    }
    role, ok := usrAcc["role"].(string)
    if !ok {
        return acc, errInvalidUser
    }
    userIdJson, ok := usrAcc["userID"].(json.Number)
    if !ok {
        return acc, errInvalidUser
    }
    userId, err := userIdJson.Int64()
    if err != nil {
        return acc, errInvalidUser
    }
    groupIdJson, ok := usrAcc["groupID"].(json.Number)
    if !ok {
        return acc, errInvalidUser
    }
    groupId, err := groupIdJson.Int64()
    if err != nil {
        return acc, errInvalidUser
    }

    return Account{
        Access:  acss,
        Secret:  secret,
        Role:    Role(role),
        UserID:  int(userId),
        GroupID: int(groupId),
    }, nil
}
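
Two details of the file above are easy to miss. KvV2Write sets the check-and-set option to 0, which tells the KV v2 engine to accept the write only if the secret does not already exist, so a "check-and-set" failure maps cleanly to ErrUserExists. And parseVaultUserAccount asserts json.Number for the numeric fields because that is how the decoded Vault response represents them. A sketch of the payload shape the parser expects, with placeholder values:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Placeholder shape of one stored account, keyed by its access key,
    // matching what CreateAccount writes and parseVaultUserAccount reads.
    data := map[string]interface{}{
        "AKIDEXAMPLE": map[string]interface{}{
            "access":  "AKIDEXAMPLE",
            "secret":  "secret-key",
            "role":    "user",
            "userID":  json.Number("1000"),
            "groupID": json.Number("1000"),
        },
    }
    fmt.Println(data)
}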
@@ -87,13 +87,13 @@ func TestStandaloneSign(t *testing.T) {

        actual := req.Header.Get("Authorization")
        if e, a := c.ExpSig, actual; e != a {
            t.Errorf("expected %v, but recieved %v", e, a)
            t.Errorf("expected %v, but received %v", e, a)
        }
        if e, a := c.OrigURI, req.URL.Path; e != a {
            t.Errorf("expected %v, but recieved %v", e, a)
            t.Errorf("expected %v, but received %v", e, a)
        }
        if e, a := c.EscapedURI, req.URL.EscapedPath(); e != a {
            t.Errorf("expected %v, but recieved %v", e, a)
            t.Errorf("expected %v, but received %v", e, a)
        }
    }
}

@@ -127,13 +127,13 @@ func TestStandaloneSign_RawPath(t *testing.T) {

        actual := req.Header.Get("Authorization")
        if e, a := c.ExpSig, actual; e != a {
            t.Errorf("expected %v, but recieved %v", e, a)
            t.Errorf("expected %v, but received %v", e, a)
        }
        if e, a := c.OrigURI, req.URL.Path; e != a {
            t.Errorf("expected %v, but recieved %v", e, a)
            t.Errorf("expected %v, but received %v", e, a)
        }
        if e, a := c.EscapedURI, req.URL.EscapedPath(); e != a {
            t.Errorf("expected %v, but recieved %v", e, a)
            t.Errorf("expected %v, but received %v", e, a)
        }
    }
}
@@ -30,7 +30,7 @@ var (
// Any newly created directory is set to provided uid/gid ownership.
// If path is already a directory, MkdirAll does nothing
// and returns nil.
// Any directoy created will be set to provided uid/gid ownership
// Any directory created will be set to provided uid/gid ownership
// if doChown is true.
func MkdirAll(path string, uid, gid int, doChown bool) error {
    // Fast path: if we can tell whether path is a directory or file, stop with success or error.
@@ -24,6 +24,7 @@ import (
    "fmt"
    "io"
    "io/fs"
    "math"
    "os"
    "path/filepath"
    "sort"
@@ -508,6 +509,10 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
    partsize := int64(0)
    var totalsize int64
    for i, part := range parts {
        if part.PartNumber == nil || *part.PartNumber < 1 {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }

        partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
        fullPartPath := filepath.Join(bucket, partObjPath)
        fi, err := os.Lstat(fullPartPath)

@@ -529,7 +534,7 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
        if err != nil {
            etag = ""
        }
        if etag != *parts[i].ETag {
        if parts[i].ETag == nil || etag != *parts[i].ETag {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }
    }
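
The added guards reflect that the AWS SDK for Go v2 models optional request fields as pointers, so both the part number and the ETag must be nil-checked before dereferencing. A sketch of the same validation in isolation:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// partOK mirrors the nil checks added above: a part with a missing or
// non-positive part number, or a missing ETag, is invalid.
func partOK(p types.CompletedPart) bool {
    if p.PartNumber == nil || *p.PartNumber < 1 {
        return false
    }
    return p.ETag != nil
}

func main() {
    n := int32(1)
    etag := "\"d41d8cd98f00b204e9800998ecf8427e\"" // placeholder etag
    fmt.Println(partOK(types.CompletedPart{PartNumber: &n, ETag: &etag})) // true
    fmt.Println(partOK(types.CompletedPart{}))                            // false
}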
@@ -545,6 +550,10 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
    defer f.cleanup()

    for _, part := range parts {
        if part.PartNumber == nil || *part.PartNumber < 1 {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }

        partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
        fullPartPath := filepath.Join(bucket, partObjPath)
        pf, err := os.Open(fullPartPath)
@@ -992,7 +1001,11 @@ func (p *Posix) ListParts(_ context.Context, input *s3.ListPartsInput) (s3respon

    var parts []s3response.Part
    for _, e := range ents {
        pn, _ := strconv.Atoi(e.Name())
        pn, err := strconv.Atoi(e.Name())
        if err != nil {
            // file is not a valid part file
            continue
        }
        if pn <= partNumberMarker {
            continue
        }
@@ -1569,7 +1582,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
    }

    if startOffset+length > objSize+1 {
        return nil, s3err.GetAPIError(s3err.ErrInvalidRange)
        length = objSize - startOffset + 1
    }

    var contentRange string
@@ -1816,6 +1829,11 @@ func (p *Posix) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttr
    parts := []types.ObjectPart{}

    for _, p := range resp.Parts {
        if !(p.PartNumber > 0 && p.PartNumber <= math.MaxInt32) {
            return s3response.GetObjectAttributesResult{},
                s3err.GetAPIError(s3err.ErrInvalidPartNumber)
        }

        partNumber := int32(p.PartNumber)
        size := p.Size
@@ -2506,7 +2524,7 @@ func (p *Posix) PutObjectRetention(_ context.Context, bucket, object, versionId
    }

    switch lockCfg.Mode {
    // Compliance mode can't be overriden
    // Compliance mode can't be overridden
    case types.ObjectLockRetentionModeCompliance:
        return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
    // To override governance mode user should have "s3:BypassGovernanceRetention" permission
@@ -33,6 +33,12 @@ func (s *S3Proxy) getClientWithCtx(ctx context.Context) (*s3.Client, error) {
        return nil, err
    }

    if s.endpoint != "" {
        return s3.NewFromConfig(cfg, func(o *s3.Options) {
            o.BaseEndpoint = &s.endpoint
        }), nil
    }

    return s3.NewFromConfig(cfg), nil
}
@@ -50,11 +56,6 @@ func (s *S3Proxy) getConfig(ctx context.Context, access, secret string) (aws.Con
        config.WithHTTPClient(client),
    }

    if s.endpoint != "" {
        opts = append(opts,
            config.WithEndpointResolverWithOptions(s))
    }

    if s.disableChecksum {
        opts = append(opts,
            config.WithAPIOptions([]func(*middleware.Stack) error{v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware}))

@@ -67,13 +68,3 @@ func (s *S3Proxy) getConfig(ctx context.Context, access, secret string) (aws.Con

    return config.LoadDefaultConfig(ctx, opts...)
}

// ResolveEndpoint is used for on prem or non-aws endpoints
func (s *S3Proxy) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
    return aws.Endpoint{
        PartitionID:       "aws",
        URL:               s.endpoint,
        SigningRegion:     s.awsRegion,
        HostnameImmutable: true,
    }, nil
}
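
The same migration appears in the S3 IAM service earlier in this changeset: the deprecated aws.EndpointResolver plumbing is deleted and the per-client BaseEndpoint option takes its place. A condensed sketch of the resulting pattern, with a placeholder endpoint URL:

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

// newClient sets a custom endpoint via s3.Options.BaseEndpoint, the
// SDK's replacement for config.WithEndpointResolverWithOptions. An empty
// endpoint falls back to the default AWS endpoint resolution.
func newClient(cfg aws.Config, endpoint string) *s3.Client {
    if endpoint != "" {
        return s3.NewFromConfig(cfg, func(o *s3.Options) {
            o.BaseEndpoint = &endpoint
        })
    }
    return s3.NewFromConfig(cfg)
}

func main() {
    _ = newClient(aws.Config{}, "https://s3.example.internal:9000") // placeholder URL
}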
@@ -26,12 +26,14 @@ import (
    "path/filepath"
    "strings"
    "syscall"
    "time"

    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
    "github.com/pkg/xattr"
    "github.com/versity/versitygw/auth"
    "github.com/versity/versitygw/backend"
    "github.com/versity/versitygw/backend/meta"
    "github.com/versity/versitygw/backend/posix"
    "github.com/versity/versitygw/s3err"
)
@@ -47,6 +49,9 @@ type ScoutFS struct {
    rootfd  *os.File
    rootdir string

    // bucket/object metadata storage facility
    meta meta.MetadataStorer

    // glaciermode enables the following behavior:
    // GET object: if file offline, return invalid object state
    // HEAD object: if file offline, set obj storage class to GLACIER

@@ -75,8 +80,13 @@ const (
    metaTmpDir          = ".sgwtmp"
    metaTmpMultipartDir = metaTmpDir + "/multipart"

    tagHdr         = "X-Amz-Tagging"
    metaHdr        = "X-Amz-Meta"
    contentTypeHdr = "content-type"
    contentEncHdr  = "content-encoding"
    emptyMD5       = "d41d8cd98f00b204e9800998ecf8427e"

    etagkey = "user.etag"
    etagkey = "etag"

    objectRetentionKey = "object-retention"
    objectLegalHoldKey = "object-legal-hold"
)

var (

@@ -87,11 +97,12 @@ var (

const (
    // ScoutFS special xattr types

    systemPrefix = "scoutfs.hide."
    onameAttr    = systemPrefix + "objname"
    flagskey     = systemPrefix + "sam_flags"
    stagecopykey = systemPrefix + "sam_stagereq"

    fsBlocksize = 4096
)

const (
@@ -179,18 +190,20 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
        return nil, err
    }

    objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
    objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))

    // check all parts ok
    last := len(parts) - 1
    partsize := int64(0)
    var totalsize int64
    for i, p := range parts {
        if p.PartNumber == nil {
    for i, part := range parts {
        if part.PartNumber == nil || *part.PartNumber < 1 {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }
        partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *p.PartNumber))
        fi, err := os.Lstat(partPath)

        partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
        fullPartPath := filepath.Join(bucket, partObjPath)
        fi, err := os.Lstat(fullPartPath)
        if err != nil {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }

@@ -198,23 +211,25 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
        if i == 0 {
            partsize = fi.Size()
        }

        // partsize must be a multiple of the filesystem blocksize
        // except for last part
        if i < last && partsize%fsBlocksize != 0 {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }

        totalsize += fi.Size()
        // all parts except the last need to be the same size
        if i < last && partsize != fi.Size() {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }
        // non-last part sizes need to be multiples of 4k for move blocks
        // TODO: fallback to no move blocks if not 4k aligned?
        if i == 0 && i < last && fi.Size()%4096 != 0 {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }

        b, err := xattr.Get(partPath, "user.etag")
        b, err := s.meta.RetrieveAttribute(bucket, partObjPath, etagkey)
        etag := string(b)
        if err != nil {
            etag = ""
        }
        if etag != *parts[i].ETag {
        if parts[i].ETag == nil || etag != *parts[i].ETag {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }
    }
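
The size checks above encode the constraint behind scoutfs move-blocks, a metadata-only data move that can only splice block-aligned extents: every non-final part must match the first part's size, and that size must be a multiple of the 4096-byte filesystem block (checking the first part suffices, since the rest must equal it). A standalone sketch of the rule as it stands after this change:

package main

import "fmt"

const fsBlocksize = 4096

// partsOK condenses the validation above for a list of part sizes:
// non-final parts must all equal the first part's size, and that size
// must be a multiple of the filesystem blocksize.
func partsOK(sizes []int64) bool {
    last := len(sizes) - 1
    for i, sz := range sizes {
        if i < last && sz != sizes[0] {
            return false // non-final parts must match the first
        }
        if i == 0 && i < last && sz%fsBlocksize != 0 {
            return false // unaligned non-final part size
        }
    }
    return true
}

func main() {
    mib := int64(1 << 20)
    fmt.Println(partsOK([]int64{8 * mib, 8 * mib, mib}))     // true: 8 MiB is 2048 blocks
    fmt.Println(partsOK([]int64{8*mib + 1, 8*mib + 1, mib})) // false: not block-aligned
}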
@@ -230,10 +245,16 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
    }
    defer f.cleanup()

    for _, p := range parts {
        pf, err := os.Open(filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *p.PartNumber)))
    for _, part := range parts {
        if part.PartNumber == nil || *part.PartNumber < 1 {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }

        partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
        fullPartPath := filepath.Join(bucket, partObjPath)
        pf, err := os.Open(fullPartPath)
        if err != nil {
            return nil, fmt.Errorf("open part %v: %v", *p.PartNumber, err)
            return nil, fmt.Errorf("open part %v: %v", *part.PartNumber, err)
        }

        // scoutfs move data is a metadata only operation that moves the data

@@ -242,13 +263,13 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
        err = moveData(pf, f.f)
        pf.Close()
        if err != nil {
            return nil, fmt.Errorf("move blocks part %v: %v", *p.PartNumber, err)
            return nil, fmt.Errorf("move blocks part %v: %v", *part.PartNumber, err)
        }
    }

    userMetaData := make(map[string]string)
    upiddir := filepath.Join(objdir, uploadID)
    loadUserMetaData(upiddir, userMetaData)
    cType, _ := s.loadUserMetaData(bucket, upiddir, userMetaData)

    objname := filepath.Join(bucket, object)
    dir := filepath.Dir(objname)
@@ -265,7 +286,7 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
    }

    for k, v := range userMetaData {
        err = xattr.Set(objname, "user."+k, []byte(v))
        err = s.meta.StoreAttribute(bucket, object, fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
        if err != nil {
            // cleanup object if returning error
            os.Remove(objname)

@@ -273,10 +294,58 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
        }
    }

    // load and set tagging
    tagging, err := s.meta.RetrieveAttribute(bucket, upiddir, tagHdr)
    if err == nil {
        if err := s.meta.StoreAttribute(bucket, object, tagHdr, tagging); err != nil {
            // cleanup object
            os.Remove(objname)
            return nil, fmt.Errorf("set object tagging: %w", err)
        }
    }
    if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
        return nil, fmt.Errorf("get object tagging: %w", err)
    }

    // set content-type
    if cType != "" {
        if err := s.meta.StoreAttribute(bucket, object, contentTypeHdr, []byte(cType)); err != nil {
            // cleanup object
            os.Remove(objname)
            return nil, fmt.Errorf("set object content type: %w", err)
        }
    }

    // load and set legal hold
    lHold, err := s.meta.RetrieveAttribute(bucket, upiddir, objectLegalHoldKey)
    if err == nil {
        if err := s.meta.StoreAttribute(bucket, object, objectLegalHoldKey, lHold); err != nil {
            // cleanup object
            os.Remove(objname)
            return nil, fmt.Errorf("set object legal hold: %w", err)
        }
    }
    if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
        return nil, fmt.Errorf("get object legal hold: %w", err)
    }

    // load and set retention
    ret, err := s.meta.RetrieveAttribute(bucket, upiddir, objectRetentionKey)
    if err == nil {
        if err := s.meta.StoreAttribute(bucket, object, objectRetentionKey, ret); err != nil {
            // cleanup object
            os.Remove(objname)
            return nil, fmt.Errorf("set object retention: %w", err)
        }
    }
    if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
        return nil, fmt.Errorf("get object retention: %w", err)
    }

    // Calculate s3 compatible md5sum for complete multipart.
    s3MD5 := backend.GetMultipartMD5(parts)

    err = xattr.Set(objname, "user.etag", []byte(s3MD5))
    err = s.meta.StoreAttribute(bucket, object, etagkey, []byte(s3MD5))
    if err != nil {
        // cleanup object if returning error
        os.Remove(objname)
@@ -310,61 +379,104 @@ func (s *ScoutFS) checkUploadIDExists(bucket, object, uploadID string) ([32]byte
    return sum, nil
}

func loadUserMetaData(path string, m map[string]string) (contentType, contentEncoding string) {
    ents, err := xattr.List(path)
// fill out the user metadata map with the metadata for the object
// and return the content type and encoding
func (s *ScoutFS) loadUserMetaData(bucket, object string, m map[string]string) (string, string) {
    ents, err := s.meta.ListAttributes(bucket, object)
    if err != nil || len(ents) == 0 {
        return
        return "", ""
    }
    for _, e := range ents {
        if !isValidMeta(e) {
            continue
        }
        b, err := xattr.Get(path, e)
        if err == errNoData {
            m[strings.TrimPrefix(e, "user.")] = ""
            continue
        }
        b, err := s.meta.RetrieveAttribute(bucket, object, e)
        if err != nil {
            continue
        }
        m[strings.TrimPrefix(e, "user.")] = string(b)
        if b == nil {
            m[strings.TrimPrefix(e, fmt.Sprintf("%v.", metaHdr))] = ""
            continue
        }
        m[strings.TrimPrefix(e, fmt.Sprintf("%v.", metaHdr))] = string(b)
    }

    b, err := xattr.Get(path, "user.content-type")
    var contentType, contentEncoding string
    b, _ := s.meta.RetrieveAttribute(bucket, object, contentTypeHdr)
    contentType = string(b)
    if err != nil {
        contentType = ""
    }
    if contentType != "" {
        m["content-type"] = contentType
        m[contentTypeHdr] = contentType
    }

    b, err = xattr.Get(path, "user.content-encoding")
    b, _ = s.meta.RetrieveAttribute(bucket, object, contentEncHdr)
    contentEncoding = string(b)
    if err != nil {
        contentEncoding = ""
    }
    if contentEncoding != "" {
        m["content-encoding"] = contentEncoding
        m[contentEncHdr] = contentEncoding
    }

    return
    return contentType, contentEncoding
}

func isValidMeta(val string) bool {
    if strings.HasPrefix(val, "user.X-Amz-Meta") {
    if strings.HasPrefix(val, metaHdr) {
        return true
    }
    if strings.EqualFold(val, "user.Expires") {
    if strings.EqualFold(val, "Expires") {
        return true
    }
    return false
}
func (s *ScoutFS) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
    if input.Bucket == nil {
        return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
    }
    if input.Key == nil {
        return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
    }
    bucket := *input.Bucket
    object := *input.Key

    if input.PartNumber != nil {
        uploadId, sum, err := s.retrieveUploadId(bucket, object)
        if err != nil {
            return nil, err
        }

        ents, err := os.ReadDir(filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum), uploadId))
        if errors.Is(err, fs.ErrNotExist) {
            return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
        }
        if err != nil {
            return nil, fmt.Errorf("read parts: %w", err)
        }

        partPath := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum), uploadId, fmt.Sprintf("%v", *input.PartNumber))

        part, err := os.Stat(filepath.Join(bucket, partPath))
        if errors.Is(err, fs.ErrNotExist) {
            return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
        }
        if err != nil {
            return nil, fmt.Errorf("stat part: %w", err)
        }

        b, err := s.meta.RetrieveAttribute(bucket, partPath, etagkey)
        etag := string(b)
        if err != nil {
            etag = ""
        }
        partsCount := int32(len(ents))
        size := part.Size()

        return &s3.HeadObjectOutput{
            LastModified:  backend.GetTimePtr(part.ModTime()),
            ETag:          &etag,
            PartsCount:    &partsCount,
            ContentLength: &size,
        }, nil
    }

    _, err := os.Stat(bucket)
    if errors.Is(err, fs.ErrNotExist) {
        return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -383,9 +495,14 @@ func (s *ScoutFS) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.
    }

    userMetaData := make(map[string]string)
    contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
    contentType, contentEncoding := s.loadUserMetaData(bucket, object, userMetaData)

    b, err := xattr.Get(objPath, etagkey)
    if fi.IsDir() {
        // this is the media type for directories in AWS and Nextcloud
        contentType = "application/x-directory"
    }

    b, err := s.meta.RetrieveAttribute(bucket, object, etagkey)
    etag := string(b)
    if err != nil {
        etag = ""
@@ -424,18 +541,54 @@ func (s *ScoutFS) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.

    contentLength := fi.Size()

    var objectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
    status, err := s.Posix.GetObjectLegalHold(ctx, bucket, object, "")
    if err == nil {
        if *status {
            objectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOn
        } else {
            objectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOff
        }
    }

    var objectLockMode types.ObjectLockMode
    var objectLockRetainUntilDate *time.Time
    retention, err := s.Posix.GetObjectRetention(ctx, bucket, object, "")
    if err == nil {
        var config types.ObjectLockRetention
        if err := json.Unmarshal(retention, &config); err == nil {
            objectLockMode = types.ObjectLockMode(config.Mode)
            objectLockRetainUntilDate = config.RetainUntilDate
        }
    }

    return &s3.HeadObjectOutput{
        ContentLength:   &contentLength,
        ContentType:     &contentType,
        ContentEncoding: &contentEncoding,
        ETag:            &etag,
        LastModified:    backend.GetTimePtr(fi.ModTime()),
        Metadata:        userMetaData,
        StorageClass:    stclass,
        Restore:         &requestOngoing,
        ContentLength:             &contentLength,
        ContentType:               &contentType,
        ContentEncoding:           &contentEncoding,
        ETag:                      &etag,
        LastModified:              backend.GetTimePtr(fi.ModTime()),
        Metadata:                  userMetaData,
        StorageClass:              stclass,
        Restore:                   &requestOngoing,
        ObjectLockLegalHoldStatus: objectLockLegalHoldStatus,
        ObjectLockMode:            objectLockMode,
        ObjectLockRetainUntilDate: objectLockRetainUntilDate,
    }, nil
}

func (s *ScoutFS) retrieveUploadId(bucket, object string) (string, [32]byte, error) {
    sum := sha256.Sum256([]byte(object))
    objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))

    entries, err := os.ReadDir(objdir)
    if err != nil || len(entries) == 0 {
        return "", [32]byte{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
    }

    return entries[0].Name(), sum, nil
}
func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
    bucket := *input.Bucket
    object := *input.Key

@@ -515,9 +668,9 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer

    userMetaData := make(map[string]string)

    contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
    contentType, contentEncoding := s.loadUserMetaData(bucket, object, userMetaData)

    b, err := xattr.Get(objPath, etagkey)
    b, err := s.meta.RetrieveAttribute(bucket, object, etagkey)
    etag := string(b)
    if err != nil {
        etag = ""
@@ -671,14 +824,11 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
        if d.IsDir() {
            // directory object only happens if directory empty
            // check to see if this is a directory object by checking etag
            etagBytes, err := xattr.Get(objPath, etagkey)
            if isNoAttr(err) || errors.Is(err, fs.ErrNotExist) {
                return types.Object{}, backend.ErrSkipObj
            }
            b, err := s.meta.RetrieveAttribute(bucket, path, etagkey)
            if err != nil {
                return types.Object{}, fmt.Errorf("get etag: %w", err)
            }
            etag := string(etagBytes)
            etag := string(b)

            fi, err := d.Info()
            if errors.Is(err, fs.ErrNotExist) {

@@ -698,14 +848,14 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
        }

        // file object, get object info and fill out object data
        etagBytes, err := xattr.Get(objPath, etagkey)
        b, err := s.meta.RetrieveAttribute(bucket, path, etagkey)
        if errors.Is(err, fs.ErrNotExist) {
            return types.Object{}, backend.ErrSkipObj
        }
        if err != nil && !isNoAttr(err) {
        if err != nil {
            return types.Object{}, fmt.Errorf("get etag: %w", err)
        }
        etag := string(etagBytes)
        etag := string(b)

        fi, err := d.Info()
        if errors.Is(err, fs.ErrNotExist) {
@@ -23,7 +23,6 @@ import (
    "os"
    "path/filepath"
    "strconv"
    "syscall"

    "golang.org/x/sys/unix"

@@ -35,7 +34,9 @@ import (
)

func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
    p, err := posix.New(rootdir, meta.XattrMeta{}, posix.PosixOpts{
    metastore := meta.XattrMeta{}

    p, err := posix.New(rootdir, metastore, posix.PosixOpts{
        ChownUID: opts.ChownUID,
        ChownGID: opts.ChownGID,
    })
@@ -52,6 +53,7 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
        Posix:    p,
        rootfd:   f,
        rootdir:  rootdir,
        meta:     metastore,
        chownuid: opts.ChownUID,
        chowngid: opts.ChownGID,
    }, nil

@@ -100,11 +102,6 @@ func (s *ScoutFS) openTmpFile(dir, bucket, obj string, size int64, acct auth.Acc
        gid: gid,
    }

    // falloc is best effort, its fine if this fails
    if size > 0 {
        tmp.falloc()
    }

    if doChown {
        err := f.Chown(uid, gid)
        if err != nil {

@@ -115,14 +112,6 @@ func (s *ScoutFS) openTmpFile(dir, bucket, obj string, size int64, acct auth.Acc
    return tmp, nil
}

func (tmp *tmpfile) falloc() error {
    err := syscall.Fallocate(int(tmp.f.Fd()), 0, 0, tmp.size)
    if err != nil {
        return fmt.Errorf("fallocate: %v", err)
    }
    return nil
}

func (tmp *tmpfile) link() error {
    // We use Linkat/Rename as the atomic operation for object puts. The
    // upload is written to a temp (or unnamed/O_TMPFILE) file to not conflict
@@ -35,37 +35,42 @@ import (
)

var (
    port, admPort                          string
    rootUserAccess                         string
    rootUserSecret                         string
    region                                 string
    admCertFile, admKeyFile                string
    certFile, keyFile                      string
    kafkaURL, kafkaTopic, kafkaKey         string
    natsURL, natsTopic                     string
    eventWebhookURL                        string
    eventConfigFilePath                    string
    logWebhookURL                          string
    accessLog                              string
    healthPath                             string
    debug                                  bool
    pprof                                  string
    quiet                                  bool
    readonly                               bool
    iamDir                                 string
    ldapURL, ldapBindDN, ldapPassword      string
    ldapQueryBase, ldapObjClasses          string
    ldapAccessAtr, ldapSecAtr, ldapRoleAtr string
    s3IamAccess, s3IamSecret               string
    s3IamRegion, s3IamBucket               string
    s3IamEndpoint                          string
    s3IamSslNoVerify, s3IamDebug           bool
    iamCacheDisable                        bool
    iamCacheTTL                            int
    iamCachePrune                          int
    metricsService                         string
    statsdServers                          string
    dogstatsServers                        string
    port, admPort                            string
    rootUserAccess                           string
    rootUserSecret                           string
    region                                   string
    admCertFile, admKeyFile                  string
    certFile, keyFile                        string
    kafkaURL, kafkaTopic, kafkaKey           string
    natsURL, natsTopic                       string
    eventWebhookURL                          string
    eventConfigFilePath                      string
    logWebhookURL                            string
    accessLog                                string
    healthPath                               string
    debug                                    bool
    pprof                                    string
    quiet                                    bool
    readonly                                 bool
    iamDir                                   string
    ldapURL, ldapBindDN, ldapPassword        string
    ldapQueryBase, ldapObjClasses            string
    ldapAccessAtr, ldapSecAtr, ldapRoleAtr   string
    vaultEndpointURL, vaultSecretStoragePath string
    vaultMountPath, vaultRootToken           string
    vaultRoleId, vaultRoleSecret             string
    vaultServerCert, vaultClientCert         string
    vaultClientCertKey                       string
    s3IamAccess, s3IamSecret                 string
    s3IamRegion, s3IamBucket                 string
    s3IamEndpoint                            string
    s3IamSslNoVerify, s3IamDebug             bool
    iamCacheDisable                          bool
    iamCacheTTL                              int
    iamCachePrune                            int
    metricsService                           string
    statsdServers                            string
    dogstatsServers                          string
)

var (
@@ -326,6 +331,60 @@ func initFlags() []cli.Flag {
            EnvVars:     []string{"VGW_IAM_LDAP_ROLE_ATR"},
            Destination: &ldapRoleAtr,
        },
        &cli.StringFlag{
            Name:        "iam-vault-endpoint-url",
            Usage:       "vault server url",
            EnvVars:     []string{"VGW_IAM_VAULT_ENDPOINT_URL"},
            Destination: &vaultEndpointURL,
        },
        &cli.StringFlag{
            Name:        "iam-vault-secret-storage-path",
            Usage:       "vault server secret storage path",
            EnvVars:     []string{"VGW_IAM_VAULT_SECRET_STORAGE_PATH"},
            Destination: &vaultSecretStoragePath,
        },
        &cli.StringFlag{
            Name:        "iam-vault-mount-path",
            Usage:       "vault server mount path",
            EnvVars:     []string{"VGW_IAM_VAULT_MOUNT_PATH"},
            Destination: &vaultMountPath,
        },
        &cli.StringFlag{
            Name:        "iam-vault-root-token",
            Usage:       "vault server root token",
            EnvVars:     []string{"VGW_IAM_VAULT_ROOT_TOKEN"},
            Destination: &vaultRootToken,
        },
        &cli.StringFlag{
            Name:        "iam-vault-role-id",
            Usage:       "vault server user role id",
            EnvVars:     []string{"VGW_IAM_VAULT_ROLE_ID"},
            Destination: &vaultRoleId,
        },
        &cli.StringFlag{
            Name:        "iam-vault-role-secret",
            Usage:       "vault server user role secret",
            EnvVars:     []string{"VGW_IAM_VAULT_ROLE_SECRET"},
            Destination: &vaultRoleSecret,
        },
        &cli.StringFlag{
            Name:        "iam-vault-server_cert",
            Usage:       "vault server TLS certificate",
            EnvVars:     []string{"VGW_IAM_VAULT_SERVER_CERT"},
            Destination: &vaultServerCert,
        },
        &cli.StringFlag{
            Name:        "iam-vault-client_cert",
            Usage:       "vault client TLS certificate",
            EnvVars:     []string{"VGW_IAM_VAULT_CLIENT_CERT"},
            Destination: &vaultClientCert,
        },
        &cli.StringFlag{
            Name:        "iam-vault-client_cert_key",
            Usage:       "vault client TLS certificate key",
            EnvVars:     []string{"VGW_IAM_VAULT_CLIENT_CERT_KEY"},
            Destination: &vaultClientCertKey,
        },
        &cli.StringFlag{
            Name:  "s3-iam-access",
            Usage: "s3 IAM access key",
@@ -501,25 +560,34 @@ func runGateway(ctx context.Context, be backend.Backend) error {
    }

    iam, err := auth.New(&auth.Opts{
        Dir:                iamDir,
        LDAPServerURL:      ldapURL,
        LDAPBindDN:         ldapBindDN,
        LDAPPassword:       ldapPassword,
        LDAPQueryBase:      ldapQueryBase,
        LDAPObjClasses:     ldapObjClasses,
        LDAPAccessAtr:      ldapAccessAtr,
        LDAPSecretAtr:      ldapSecAtr,
        LDAPRoleAtr:        ldapRoleAtr,
        S3Access:           s3IamAccess,
        S3Secret:           s3IamSecret,
        S3Region:           s3IamRegion,
        S3Bucket:           s3IamBucket,
        S3Endpoint:         s3IamEndpoint,
        S3DisableSSlVerfiy: s3IamSslNoVerify,
        S3Debug:            s3IamDebug,
        CacheDisable:       iamCacheDisable,
        CacheTTL:           iamCacheTTL,
        CachePrune:         iamCachePrune,
        Dir:                    iamDir,
        LDAPServerURL:          ldapURL,
        LDAPBindDN:             ldapBindDN,
        LDAPPassword:           ldapPassword,
        LDAPQueryBase:          ldapQueryBase,
        LDAPObjClasses:         ldapObjClasses,
        LDAPAccessAtr:          ldapAccessAtr,
        LDAPSecretAtr:          ldapSecAtr,
        LDAPRoleAtr:            ldapRoleAtr,
        VaultEndpointURL:       vaultEndpointURL,
        VaultSecretStoragePath: vaultSecretStoragePath,
        VaultMountPath:         vaultMountPath,
        VaultRootToken:         vaultRootToken,
        VaultRoleId:            vaultRoleId,
        VaultRoleSecret:        vaultRoleSecret,
        VaultServerCert:        vaultServerCert,
        VaultClientCert:        vaultClientCert,
        VaultClientCertKey:     vaultClientCertKey,
        S3Access:               s3IamAccess,
        S3Secret:               s3IamSecret,
        S3Region:               s3IamRegion,
        S3Bucket:               s3IamBucket,
        S3Endpoint:             s3IamEndpoint,
        S3DisableSSlVerfiy:     s3IamSslNoVerify,
        S3Debug:                s3IamDebug,
        CacheDisable:           iamCacheDisable,
        CacheTTL:               iamCacheTTL,
        CachePrune:             iamCachePrune,
    })
    if err != nil {
        return fmt.Errorf("setup iam: %w", err)
@@ -71,7 +71,7 @@ func generateEventFiltersConfig(ctx *cli.Context) error {
        s3event.EventObjectRestoreCompleted: true,
    }

    configBytes, err := json.Marshal(config)
    configBytes, err := json.MarshalIndent(config, "", " ")
    if err != nil {
        return fmt.Errorf("parse event config: %w", err)
    }
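
The switch from json.Marshal to json.MarshalIndent changes only formatting, not content: the generated filter file becomes human-editable. A small illustration with a placeholder event key (the real generator uses the s3event types):

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    config := map[string]bool{"s3:ObjectRestore:Completed": true} // placeholder key

    compact, _ := json.Marshal(config)
    fmt.Println(string(compact)) // {"s3:ObjectRestore:Completed":true}

    pretty, _ := json.MarshalIndent(config, "", "  ")
    fmt.Println(string(pretty))
    // {
    //   "s3:ObjectRestore:Completed": true
    // }
}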
@@ -153,6 +153,14 @@ ROOT_SECRET_ACCESS_KEY=
# specified, all configured bucket events will be sent to the webhook.
#VGW_EVENT_WEBHOOK_URL=

# Bucket events can be filtered for any of the above event types. The
# VGW_EVENT_FILTER option specifies a config file that contains the event
# filter rules. The event filter rules are used to determine which events are
# sent to the configured event services. Run:
# versitygw utils gen-event-filter-config --path .
# to generate a default rules file "event_config.json" in the current directory.
#VGW_EVENT_FILTER=

#######################
# Debug / Diagnostics #
#######################
@@ -182,20 +190,25 @@ ROOT_SECRET_ACCESS_KEY=
# as a dedicated IAM service.
#VGW_IAM_DIR=

# The ldap options will enable the LDAP IAM service with accounts stored in an
# external LDAP service. The VGW_IAM_LDAP_ACCESS_ATR, VGW_IAM_LDAP_SECRET_ATR,
# and VGW_IAM_LDAP_ROLE_ATR define the LDAP attributes that map to access,
# secret credentials and role respectively. The other options are used to
# connect to the LDAP service.
#VGW_IAM_LDAP_URL=
#VGW_IAM_LDAP_BASE_DN=
#VGW_IAM_LDAP_BIND_DN=
#VGW_IAM_LDAP_BIND_PASS=
#VGW_IAM_LDAP_QUERY_BASE=
#VGW_IAM_LDAP_OBJECT_CLASSES=
#VGW_IAM_LDAP_ACCESS_ATR=
#VGW_IAM_LDAP_SECRET_ATR=
#VGW_IAM_LDAP_ROLE_ATR=
# The Vault options will enable the Vault IAM service with accounts stored in
# the HashiCorp Vault service. The Vault URL is the address and port of the
# Vault server with the format <IP/host>:<port>. A root token can be used for
# testing, but it is recommended to use the role based authentication in
# production. The Vault server certificate, client certificate, and client
# certificate key are optional, and will default to not verifying the server
# certificate and not using client certificates. The Vault server certificate
# is used to verify the Vault server, and the client certificate and key are
# used to authenticate the gateway to the Vault server. See wiki documentation
# for an example of using Vault in dev mode with the gateway.
#VGW_IAM_VAULT_ENDPOINT_URL=
#VGW_IAM_VAULT_SECRET_STORAGE_PATH=
#VGW_IAM_VAULT_MOUNT_PATH=
#VGW_IAM_VAULT_ROOT_TOKEN=
#VGW_IAM_VAULT_ROLE_ID=
#VGW_IAM_VAULT_ROLE_SECRET=
#VGW_IAM_VAULT_SERVER_CERT=
#VGW_IAM_VAULT_CLIENT_CERT=
#VGW_IAM_VAULT_CLIENT_CERT_KEY=
# The VGW_S3 IAM service is similar to the internal IAM service, but instead
# stores the account information JSON encoded in an S3 object. This should use

@@ -210,6 +223,21 @@ ROOT_SECRET_ACCESS_KEY=
#VGW_S3_IAM_BUCKET=
#VGW_S3_IAM_NO_VERIFY=

# The LDAP options will enable the LDAP IAM service with accounts stored in an
# external LDAP service. The VGW_IAM_LDAP_ACCESS_ATR, VGW_IAM_LDAP_SECRET_ATR,
# and VGW_IAM_LDAP_ROLE_ATR define the LDAP attributes that map to access,
# secret credentials and role respectively. The other options are used to
# connect to the LDAP service.
#VGW_IAM_LDAP_URL=
#VGW_IAM_LDAP_BASE_DN=
#VGW_IAM_LDAP_BIND_DN=
#VGW_IAM_LDAP_BIND_PASS=
#VGW_IAM_LDAP_QUERY_BASE=
#VGW_IAM_LDAP_OBJECT_CLASSES=
#VGW_IAM_LDAP_ACCESS_ATR=
#VGW_IAM_LDAP_SECRET_ATR=
#VGW_IAM_LDAP_ROLE_ATR=

###############
# IAM caching #
###############
@@ -228,6 +256,29 @@ ROOT_SECRET_ACCESS_KEY=
|
||||
#VGW_IAM_CACHE_TTL=120
|
||||
#VGW_IAM_CACHE_PRUNE=3600
|
||||
|
||||
###########
|
||||
# Metrics #
|
||||
###########
|
||||
|
||||
# The metrics service name is a tag that is added to all metrics to help
|
||||
# identify the source of the metrics. This is especially useful when multiple
|
||||
# gateways are running. The default is the hostname of the system.
|
||||
#VGW_METRICS_SERVICE_NAME=$HOSTNAME
|
||||
|
||||
# The metrics service will send metrics to the configured statsd servers. The
|
||||
# servers are specified as a comma separated list of host:port pairs. The
|
||||
# default is to not send metrics to any statsd servers. The gateway uses
|
||||
# InfluxDB flavor of statsd metrics tags for the StatsD metrics type.
|
||||
#VGW_METRICS_STATSD_SERVERS=
|
||||
|
||||
# The metrics service will send metrics to the configured dogstatsd servers.
|
||||
# The servers are specified as a comma separated list of host:port pairs. The
|
||||
# default is to not send metrics to any dogstatsd servers. Generally
|
||||
# DataDog recommends installing a local agent to collect metrics and forward
|
||||
# them to the DataDog service. In this case the option value would be the
|
||||
# local agent address: 127.0.0.1:8125.
|
||||
#VGW_METRICS_DOGSTATS_SERVERS=
|
||||
|
||||
######################################
|
||||
# VersityGW Backend Specific Options #
|
||||
######################################
|
||||
|
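Putting the metrics options together, a gateway tagging its metrics and reporting to both a statsd collector and a local DataDog agent might use the following; the service name and addresses are hypothetical:

VGW_METRICS_SERVICE_NAME=gw-east-1
VGW_METRICS_STATSD_SERVERS=10.0.0.5:8125,10.0.0.6:8125
VGW_METRICS_DOGSTATS_SERVERS=127.0.0.1:8125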
52
go.mod
@@ -3,16 +3,18 @@ module github.com/versity/versitygw
go 1.21.0

require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
github.com/aws/aws-sdk-go-v2 v1.27.0
github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3
github.com/DataDog/datadog-go/v5 v5.5.0
github.com/aws/aws-sdk-go-v2 v1.27.2
github.com/aws/aws-sdk-go-v2/service/s3 v1.55.1
github.com/aws/smithy-go v1.20.2
github.com/go-ldap/ldap/v3 v3.4.8
github.com/gofiber/fiber/v2 v2.52.4
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/hashicorp/vault-client-go v0.4.3
github.com/nats-io/nats.go v1.35.0
github.com/pkg/xattr v0.4.9
github.com/segmentio/kafka-go v0.4.47
@@ -20,46 +22,52 @@ require (
github.com/urfave/cli/v2 v2.27.2
github.com/valyala/fasthttp v1.54.0
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
golang.org/x/sys v0.20.0
golang.org/x/sys v0.21.0
)

require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.28.10 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.28.12 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/text v0.15.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
)

require (
github.com/andybalholm/brotli v1.1.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect
github.com/aws/aws-sdk-go-v2/config v1.27.16
github.com/aws/aws-sdk-go-v2/credentials v1.17.16
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.21
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7 // indirect
github.com/aws/aws-sdk-go-v2/config v1.27.18
github.com/aws/aws-sdk-go-v2/credentials v1.17.18
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.9 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect

105
go.sum
@@ -1,9 +1,9 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 h1:1nGuui+4POelzDwI7RG56yfQJHCnKvwfMoU7VsEp+Zg=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 h1:H+U3Gk9zY56G3u872L82bk4thcsy2Gghb9ExT4Zvm1o=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax62XQTd0N4aAqSE5E0DulSpVYK7vc=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM=
@@ -21,42 +21,42 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/aws/aws-sdk-go-v2 v1.27.0 h1:7bZWKoXhzI+mMR/HjdMx8ZCC5+6fY0lS5tr0bbgiLlo=
github.com/aws/aws-sdk-go-v2 v1.27.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
github.com/aws/aws-sdk-go-v2 v1.27.2 h1:pLsTXqX93rimAOZG2FIYraDQstZaaGVVN4tNw65v0h8=
github.com/aws/aws-sdk-go-v2 v1.27.2/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
github.com/aws/aws-sdk-go-v2/config v1.27.16 h1:knpCuH7laFVGYTNd99Ns5t+8PuRjDn4HnnZK48csipM=
github.com/aws/aws-sdk-go-v2/config v1.27.16/go.mod h1:vutqgRhDUktwSge3hrC3nkuirzkJ4E/mLj5GvI0BQas=
github.com/aws/aws-sdk-go-v2/credentials v1.17.16 h1:7d2QxY83uYl0l58ceyiSpxg9bSbStqBC6BeEeHEchwo=
github.com/aws/aws-sdk-go-v2/credentials v1.17.16/go.mod h1:Ae6li/6Yc6eMzysRL2BXlPYvnrLLBg3D11/AmOjw50k=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 h1:dQLK4TjtnlRGb0czOht2CevZ5l6RSyRWAnKeGd7VAFE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3/go.mod h1:TL79f2P6+8Q7dTsILpiVST+AL9lkF6PPGI167Ny0Cjw=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.21 h1:1v8Ii0MRVGYB/sdhkbxrtolCA7Tp+lGh+5OJTs5vmZ8=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.21/go.mod h1:cxdd1rc8yxCjKz28hi30XN1jDXr2DxZvD44vLxTz/bg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 h1:lf/8VTF2cM+N4SLzaYJERKEWAXq8MOMpZfU6wEPWsPk=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7/go.mod h1:4SjkU7QiqK2M9oozyMzfZ/23LmUY+h3oFqhdeP5OMiI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 h1:4OYVp0705xu8yjdyoWix0r9wPIRXnIzzOoUpQVHIJ/g=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7/go.mod h1:vd7ESTEvI76T2Na050gODNmNU7+OyKrIKroYTu4ABiI=
github.com/aws/aws-sdk-go-v2/config v1.27.18 h1:wFvAnwOKKe7QAyIxziwSKjmer9JBMH1vzIL6W+fYuKk=
github.com/aws/aws-sdk-go-v2/config v1.27.18/go.mod h1:0xz6cgdX55+kmppvPm2IaKzIXOheGJhAufacPJaXZ7c=
github.com/aws/aws-sdk-go-v2/credentials v1.17.18 h1:D/ALDWqK4JdY3OFgA2thcPO1c9aYTT5STS/CvnkqY1c=
github.com/aws/aws-sdk-go-v2/credentials v1.17.18/go.mod h1:JuitCWq+F5QGUrmMPsk945rop6bB57jdscu+Glozdnc=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 h1:dDgptDO9dxeFkXy+tEgVkzSClHZje/6JkPW5aZyEvrQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5/go.mod h1:gjvE2KBUgUQhcv89jqxrIxH9GaKs1JbZzWejj/DaHGA=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24 h1:FzNwpVTZDCvm597Ty6mGYvxTolyC1oup0waaKntZI4E=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24/go.mod h1:wM9NElT/Wn6n3CT1eyVcXtfCy8lSVjjQXfdawQbSShc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 h1:cy8ahBJuhtM8GTTSyOkfy6WVPV1IE+SS5/wfXUYuulw=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9/go.mod h1:CZBXGLaJnEZI6EVNcPd7a6B5IC5cA/GkRWtu9fp3S6Y=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 h1:A4SYk07ef04+vxZToz9LWvAXl9LW0NClpPpMsi31cz0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9/go.mod h1:5jJcHuwDagxN+ErjQ3PU3ocf6Ylc/p9x+BLO/+X4iXw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7 h1:/FUtT3xsoHO3cfh+I/kCbcMCN98QZRsiFet/V8QkWSs=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7/go.mod h1:MaCAgWpGooQoCWZnMur97rGn5dp350w2+CeiV5406wE=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9 h1:vHyZxoLVOgrI8GqX7OMHLXp4YYoxeEsrjweXKpye+ds=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9/go.mod h1:z9VXZsWA2BvZNH1dT0ToUYwMu/CR9Skkj/TBX+mceZw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 h1:UXqEWQI0n+q0QixzU0yUUQBZXRd5037qdInTIHFTl98=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9/go.mod h1:xP6Gq6fzGZT8w/ZN+XvGMZ2RU1LeEs7b2yUP5DN8NY4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 h1:Wx0rlZoEJR7JwlSZcHnEa7CNjrSIyVxMFWGAaXy4fJY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9/go.mod h1:aVMHdE0aHO3v+f/iw01fmXV/5DbfQ3Bi9nN7nd9bE9Y=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7 h1:uO5XR6QGBcmPyo2gxofYJLFkcVQ4izOoGDNenlZhTEk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7/go.mod h1:feeeAYfAcwTReM6vbwjEyDmiGho+YgBhaFULuXDW8kc=
github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3 h1:57NtjG+WLims0TxIQbjTqebZUKDM03DfM11ANAekW0s=
github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3/go.mod h1:739CllldowZiPPsDFcJHNF4FXrVxaSGVnZ9Ez9Iz9hc=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 h1:aD7AGQhvPuAxlSUfo0CWU7s6FpkbyykMhGYMvlqTjVs=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.9/go.mod h1:c1qtZUWtygI6ZdvKppzCSXsDOq5I4luJPZ0Ud3juFCA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 h1:Pav5q3cA260Zqez42T9UhIlsd9QeypszRPwC9LdSSsQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3/go.mod h1:9lmoVDVLz/yUZwLaQ676TK02fhCu4+PgRSmMaKR1ozk=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.10 h1:69tpbPED7jKPyzMcrwSvhWcJ9bPnZsZs18NT40JwM0g=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.10/go.mod h1:0Aqn1MnEuitqfsCNyKsdKLhDUOr4txD/g19EfiUqgws=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.11 h1:4vt9Sspk59EZyHCAEMaktHKiq0C09noRTQorXD/qV+s=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.11/go.mod h1:5jHR79Tv+Ccq6rwYh+W7Nptmw++WiFafMfR42XhwNl8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11 h1:o4T+fKxA3gTMcluBNZZXE9DNaMkJuUL1O3mffCUjoJo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11/go.mod h1:84oZdJ+VjuJKs9v1UTC9NaodRZRseOXCTgku+vQJWR8=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.9 h1:TE2i0A9ErH1YfRSvXfCr2SQwfnqsoJT9nPQ9kj0lkxM=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.9/go.mod h1:9TzXX3MehQNGPwCZ3ka4CpwQsoAMWSF48/b+De9rfVM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.55.1 h1:UAxBuh0/8sFJk1qOkvOKewP5sWeWaTPDknbQz0ZkDm0=
github.com/aws/aws-sdk-go-v2/service/s3 v1.55.1/go.mod h1:hWjsYGjVuqCgfoveVcVFPXIWgz0aByzwaxKlN1StKcM=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 h1:gEYM2GSpr4YNWc6hCd5nod4+d4kd9vWIAWrmGuLdlMw=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.11/go.mod h1:gVvwPdPNYehHSP9Rs7q27U1EU+3Or2ZpXvzAYJNh63w=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 h1:iXjh3uaH3vsVcnyZX7MqCoCfcyxIrVE9iOQruRaWPrQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5/go.mod h1:5ZXesEuy/QcO0WUnt+4sDkxhdXRHTu2yG0uCSH8B6os=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.12 h1:M/1u4HBpwLuMtjlxuI2y6HoVLzF5e2mfxHCg7ZVMYmk=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.12/go.mod h1:kcfd+eTdEi/40FIbLq4Hif3XMXnl5b/+t/KTfLt9xIk=
github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
@@ -64,6 +64,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
@@ -80,9 +82,21 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/vault-client-go v0.4.3 h1:zG7STGVgn/VK6rnZc0k8PGbfv2x/sJExRKHSUg3ljWc=
github.com/hashicorp/vault-client-go v0.4.3/go.mod h1:4tDw7Uhq5XOxS1fO+oMtotHL7j4sB9cp0T7U6m4FzDY=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
@@ -111,6 +125,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.35.0 h1:XFNqNM7v5B+MQMKqVGAyHwYhyKb48jrenXNxIU20ULk=
github.com/nats-io/nats.go v1.35.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
@@ -132,6 +148,8 @@ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@@ -139,6 +157,7 @@ github.com/smira/go-statsd v1.3.3 h1:WnMlmGTyMpzto+HvOJWRPoLaLlk5EGfzsnlQBcvj4yI
github.com/smira/go-statsd v1.3.3/go.mod h1:RjdsESPgDODtg1VpVVf9MJrEW2Hw0wtRNbmB1CAhu6A=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -174,8 +193,8 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
@@ -191,8 +210,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -216,8 +235,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -233,8 +252,10 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=

@@ -1379,6 +1379,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
		contentLengthStr = "0"
	}
	bucketOwner := ctx.Get("X-Amz-Expected-Bucket-Owner")
	storageClass := ctx.Get("X-Amz-Storage-Class")

	grants := grantFullControl + grantRead + grantReadACP + granWrite + grantWriteACP

@@ -1882,6 +1883,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
		CopySourceIfUnmodifiedSince: umtime,
		ExpectedBucketOwner:         &acct.Access,
		Metadata:                    metadata,
		StorageClass:                types.StorageClass(storageClass),
	})
	if err == nil {
		return SendXMLResponse(ctx, res.CopyObjectResult, err,

@@ -14,4 +14,7 @@ SECRETS_FILE=./tests/.secrets
MC_ALIAS=versity
LOG_LEVEL=2
GOCOVERDIR=$PWD/cover
USERS_FOLDER=$PWD/iam
#TEST_LOG_FILE=test.log
#VERSITY_LOG_FILE=versity.log
IAM_TYPE=folder
@@ -12,6 +12,7 @@ copy_object() {
  elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
    error=$(aws --no-verify-ssl s3api copy-object --copy-source "$2" --bucket "$3" --key "$4" 2>&1) || exit_code=$?
  elif [[ $1 == 's3cmd' ]]; then
    log 5 "s3cmd ${S3CMD_OPTS[*]} --no-check-certificate cp s3://$2 s3://$3/$4"
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate cp "s3://$2" s3://"$3/$4" 2>&1) || exit_code=$?
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure cp "$MC_ALIAS/$2" "$MC_ALIAS/$3/$4" 2>&1) || exit_code=$?

@@ -5,26 +5,28 @@
# return 0 for success, 1 for failure
create_bucket() {
  if [ $# -ne 2 ]; then
    echo "create bucket missing command type, bucket name"
    log 2 "create bucket missing command type, bucket name"
    return 1
  fi

  local exit_code=0
  local error
  log 6 "create bucket"
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then
    error=$(aws --no-verify-ssl s3api create-bucket --bucket "$2" 2>&1) || exit_code=$?
  elif [[ $1 == "s3cmd" ]]; then
    log 5 "s3cmd ${S3CMD_OPTS[*]} --no-check-certificate mb s3://$2"
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb s3://"$2" 2>&1) || exit_code=$?
  elif [[ $1 == "mc" ]]; then
    error=$(mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    log 2 "invalid command type $1"
    return 1
  fi
  if [ $exit_code -ne 0 ]; then
    echo "error creating bucket: $error"
    log 2 "error creating bucket: $error"
    return 1
  fi
  return 0

@@ -56,7 +56,7 @@ create_multipart_upload_custom() {
|
||||
local multipart_data
|
||||
log 5 "additional create multipart params"
|
||||
for i in "$@"; do
|
||||
log 5 $i
|
||||
log 5 "$i"
|
||||
done
|
||||
log 5 "${*:3}"
|
||||
log 5 "aws --no-verify-ssl s3api create-multipart-upload --bucket $1 --key $2 ${*:3}"
|
||||
|
||||
@@ -6,11 +6,11 @@ delete_bucket_policy() {
|
||||
return 1
|
||||
fi
|
||||
if [[ $1 == 'aws' ]]; then
|
||||
error=$(aws --no-verify-ssl s3api delete-bucket-policy --bucket "$2") || delete_result=$?
|
||||
error=$(aws --no-verify-ssl s3api delete-bucket-policy --bucket "$2" 2>&1) || delete_result=$?
|
||||
elif [[ $1 == 's3cmd' ]]; then
|
||||
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate delpolicy "s3://$2") || delete_result=$?
|
||||
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate delpolicy "s3://$2" 2>&1) || delete_result=$?
|
||||
elif [[ $1 == 'mc' ]]; then
|
||||
error=$(mc --insecure anonymous set none "$MC_ALIAS/$2") || delete_result=$?
|
||||
error=$(mc --insecure anonymous set none "$MC_ALIAS/$2" 2>&1) || delete_result=$?
|
||||
else
|
||||
log 2 "command 'get bucket policy' not implemented for '$1'"
|
||||
return 1
|
||||
|
||||
@@ -6,22 +6,22 @@ delete_object() {
|
||||
return 1
|
||||
fi
|
||||
local exit_code=0
|
||||
local error
|
||||
if [[ $1 == 's3' ]]; then
|
||||
error=$(aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
|
||||
delete_object_error=$(aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
|
||||
error=$(aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
|
||||
delete_object_error=$(aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 's3cmd' ]]; then
|
||||
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm "s3://$2/$3" 2>&1) || exit_code=$?
|
||||
delete_object_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm "s3://$2/$3" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 'mc' ]]; then
|
||||
error=$(mc --insecure rm "$MC_ALIAS/$2/$3" 2>&1) || exit_code=$?
|
||||
delete_object_error=$(mc --insecure rm "$MC_ALIAS/$2/$3" 2>&1) || exit_code=$?
|
||||
else
|
||||
log 2 "invalid command type $1"
|
||||
return 1
|
||||
fi
|
||||
log 5 "delete object exit code: $exit_code"
|
||||
if [ $exit_code -ne 0 ]; then
|
||||
log 2 "error deleting object: $error"
|
||||
log 2 "error deleting object: $delete_object_error"
|
||||
export delete_object_error
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
@@ -34,18 +34,18 @@ delete_object_with_user() {
|
||||
fi
|
||||
local exit_code=0
|
||||
if [[ $1 == 's3' ]]; then
|
||||
error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
|
||||
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
|
||||
error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
|
||||
delete_object_error=$(AWS_ACCESS_KEY_ID="$4" AWS_SECRET_ACCESS_KEY="$5" aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
|
||||
elif [[ $1 == 's3cmd' ]]; then
|
||||
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm --access_key="$4" --secret_key="$5" "s3://$2/$3" 2>&1) || exit_code=$?
|
||||
delete_object_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm --access_key="$4" --secret_key="$5" "s3://$2/$3" 2>&1) || exit_code=$?
|
||||
else
|
||||
log 2 "command 'delete object with user' not implemented for '$1'"
|
||||
return 1
|
||||
fi
|
||||
if [ $exit_code -ne 0 ]; then
|
||||
log 2 "error deleting object: $error"
|
||||
export error
|
||||
log 2 "error deleting object: $delete_object_error"
|
||||
export delete_object_error
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
|
@@ -30,9 +30,8 @@ get_bucket_policy_aws() {
    return 1
  fi
  policy_json=$(aws --no-verify-ssl s3api get-bucket-policy --bucket "$1" 2>&1) || get_result=$?
  if [[ $policy_json == *"InsecureRequestWarning"* ]]; then
    policy_json=$(awk 'NR>2' <<< "$policy_json")
  fi
  policy_json=$(echo "$policy_json" | grep -v "InsecureRequestWarning")
  log 5 "$policy_json"
  if [[ $get_result -ne 0 ]]; then
    if [[ "$policy_json" == *"(NoSuchBucketPolicy)"* ]]; then
      bucket_policy=
@@ -41,7 +40,7 @@ get_bucket_policy_aws() {
      return 1
    fi
  else
    bucket_policy=$(echo "{$policy_json}" | jq -r '.Policy')
    bucket_policy=$(echo "$policy_json" | jq -r '.Policy')
  fi
  export bucket_policy
  return 0

@@ -7,7 +7,6 @@ list_buckets() {
  fi

  local exit_code=0
  local error
  if [[ $1 == 's3' ]]; then
    buckets=$(aws --no-verify-ssl s3 ls 2>&1 s3://) || exit_code=$?
  elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then

@@ -34,15 +34,15 @@ put_object_with_user() {
  fi
  local exit_code=0
  if [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
    error=$(AWS_ACCESS_KEY_ID="$5" AWS_SECRET_ACCESS_KEY="$6" aws --no-verify-ssl s3api put-object --body "$2" --bucket "$3" --key "$4" 2>&1) || exit_code=$?
    put_object_error=$(AWS_ACCESS_KEY_ID="$5" AWS_SECRET_ACCESS_KEY="$6" aws --no-verify-ssl s3api put-object --body "$2" --bucket "$3" --key "$4" 2>&1) || exit_code=$?
  else
    log 2 "'put object with user' command not implemented for '$1'"
    return 1
  fi
  log 5 "put object exit code: $exit_code"
  if [ $exit_code -ne 0 ]; then
    log 2 "error putting object into bucket: $error"
    export error
    log 2 "error putting object into bucket: $put_object_error"
    export put_object_error
    return 1
  fi
  return 0

34
tests/commands/upload_part_copy.sh
Normal file
@@ -0,0 +1,34 @@
#!/usr/bin/env bash

upload_part_copy() {
  if [ $# -ne 5 ]; then
    echo "upload multipart part copy function must have bucket, key, upload ID, file name, part number"
    return 1
  fi
  local etag_json
  echo "$1 $2 $3 $4 $5"
  etag_json=$(aws --no-verify-ssl s3api upload-part-copy --bucket "$1" --key "$2" --upload-id "$3" --part-number "$5" --copy-source "$1/$4-$(($5-1))") || local uploaded=$?
  if [[ $uploaded -ne 0 ]]; then
    echo "Error uploading part $5: $etag_json"
    return 1
  fi
  etag=$(echo "$etag_json" | jq '.CopyPartResult.ETag')
  export etag
}

upload_part_copy_with_range() {
  if [ $# -ne 6 ]; then
    log 2 "upload multipart part copy function must have bucket, key, upload ID, file name, part number, range"
    return 1
  fi
  local etag_json
  log 5 "bucket: $1, key: $2, upload ID: $3, file name: $4, range: $5, copy source range: $6"
  etag_json=$(aws --no-verify-ssl s3api upload-part-copy --bucket "$1" --key "$2" --upload-id "$3" --part-number "$5" --copy-source "$1/$4-$(($5-1))" --copy-source-range "$6" 2>&1) || local uploaded=$?
  if [[ $uploaded -ne 0 ]]; then
    log 2 "Error uploading part $5: $etag_json"
    export upload_part_copy_error=$etag_json
    return 1
  fi
  etag=$(echo "$etag_json" | grep -v "InsecureRequestWarning" | jq '.CopyPartResult.ETag')
  export etag
}
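For orientation, a hypothetical call to these helpers might look like the sketch below; the bucket, key, and part values are invented, and upload_id would come from a prior create-multipart-upload. Note that the copy source is derived as "$1/$4-$(($5-1))", so part 2 copies from the source file suffixed -1:

upload_part_copy "my-bucket" "large-file" "$upload_id" "large-file" 2 || fail "error copying part"
log 5 "copied part 2 with etag: $etag"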
155
tests/env.sh
Normal file
@@ -0,0 +1,155 @@
#!/usr/bin/env bash

check_env_vars() {
  if ! check_universal_vars; then
    log 2 "error checking universal params"
    return 1
  fi
  if [[ $RUN_VERSITYGW == "true" ]]; then
    if ! check_versity_vars; then
      log 2 "error checking versity params"
      return 1
    fi
  fi
  if [[ $RUN_S3CMD == "true" ]]; then
    if [[ -z "$S3CMD_CONFIG" ]]; then
      log 2 "running s3cmd commands requires S3CMD_CONFIG param"
      return 1
    fi
    export S3CMD_CONFIG
  fi
  if [[ $RUN_MC == "true" ]]; then
    if [ -z "$MC_ALIAS" ]; then
      log 2 "running mc tests requires MC_ALIAS param"
      return 1
    fi
    export MC_ALIAS
  fi
  return 0
}

check_universal_vars() {
  if [ -z "$VERSITYGW_TEST_ENV" ]; then
    if [ -r tests/.env ]; then
      source tests/.env
    else
      log 3 "Warning: no .env file found in tests folder"
    fi
  elif [[ $BYPASS_ENV_FILE != "true" ]]; then
    # shellcheck source=./tests/.env.default
    source "$VERSITYGW_TEST_ENV"
  fi
  if [ "$GITHUB_ACTIONS" != "true" ] && [ -r "$SECRETS_FILE" ]; then
    # shellcheck source=./tests/.secrets
    source "$SECRETS_FILE"
  else
    log 3 "Warning: no secrets file found"
  fi
  if [[ -n "$LOG_LEVEL" ]]; then
    export LOG_LEVEL_INT=$LOG_LEVEL
  fi
  if [ -z "$AWS_ACCESS_KEY_ID" ]; then
    log 2 "No AWS access key set"
    return 1
  elif [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
    log 2 "No AWS secret access key set"
    return 1
  elif [ -z "$AWS_PROFILE" ]; then
    log 2 "No AWS profile set"
    return 1
  elif [ -z "$AWS_ENDPOINT_URL" ]; then
    log 2 "No AWS endpoint URL set"
    return 1
  elif [[ $RUN_VERSITYGW != "true" ]] && [[ $RUN_VERSITYGW != "false" ]]; then
    log 2 "RUN_VERSITYGW must be 'true' or 'false'"
    return 1
  elif [ -z "$BUCKET_ONE_NAME" ]; then
    log 2 "No bucket one name set"
    return 1
  elif [ -z "$BUCKET_TWO_NAME" ]; then
    log 2 "No bucket two name set"
    return 1
  elif [ -z "$RECREATE_BUCKETS" ]; then
    log 2 "No recreate buckets parameter set"
    return 1
  elif [[ $RECREATE_BUCKETS != "true" ]] && [[ $RECREATE_BUCKETS != "false" ]]; then
    log 2 "RECREATE_BUCKETS must be 'true' or 'false'"
    return 1
  fi
  export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_PROFILE AWS_ENDPOINT_URL RUN_VERSITYGW \
    BUCKET_ONE_NAME BUCKET_TWO_NAME RECREATE_BUCKETS
  if [[ -n "$TEST_LOG_FILE" ]]; then
    export TEST_LOG_FILE
  fi
  if [[ -n "$VERSITY_LOG_FILE" ]]; then
    export VERSITY_LOG_FILE
  fi
}

check_versity_vars() {
  if [ -z "$LOCAL_FOLDER" ]; then
    log 2 "No local storage folder set"
    return 1
  elif [ -z "$VERSITY_EXE" ]; then
    log 2 "No versity executable location set"
    return 1
  elif [ -z "$BACKEND" ]; then
    log 2 "No backend parameter set (options: 'posix', 's3')"
    return 1
  fi
  export LOCAL_FOLDER VERSITY_EXE BACKEND
  if [ "$BACKEND" == 's3' ]; then
    if [ -z "$AWS_ACCESS_KEY_ID_TWO" ]; then
      log 2 "missing second AWS access key ID for s3 backend"
      return 1
    fi
    if [ -z "$AWS_SECRET_ACCESS_KEY_TWO" ]; then
      log 2 "missing second AWS secret access key for s3 backend"
      return 1
    fi
    export AWS_ACCESS_KEY_ID_TWO AWS_SECRET_ACCESS_KEY_TWO
  fi
  if [[ -r $GOCOVERDIR ]]; then
    export GOCOVERDIR=$GOCOVERDIR
  fi
  if [[ $RUN_USERS == "true" ]]; then
    if ! check_user_vars; then
      log 2 "error setting user vars"
      return 1
    fi
  fi
}

check_user_vars() {
  if [[ -z "$IAM_TYPE" ]]; then
    export IAM_TYPE="folder"
  fi
  if [[ "$IAM_TYPE" == "folder" ]]; then
    if [[ -z "$USERS_FOLDER" ]]; then
      log 2 "if IAM type is folder (or not set), USERS_FOLDER parameter is required"
      return 1
    fi
    if [ ! -d "$USERS_FOLDER" ]; then
      if ! mkdir_error=$(mkdir "$USERS_FOLDER" 2>&1); then
        log 2 "error creating users folder: $mkdir_error"
        return 1
      fi
    fi
    IAM_PARAMS="--iam-dir=$USERS_FOLDER"
    export IAM_PARAMS
    return 0
  fi
  if [[ $IAM_TYPE == "s3" ]]; then
    if [[ -z "$USERS_BUCKET" ]]; then
      log 2 "if IAM type is s3, USERS_BUCKET is required"
      return 1
    fi
    IAM_PARAMS="--s3-iam-access $AWS_ACCESS_KEY_ID --s3-iam-secret $AWS_SECRET_ACCESS_KEY \
      --s3-iam-region us-east-1 --s3-iam-bucket $USERS_BUCKET --s3-iam-endpoint $AWS_ENDPOINT_URL \
      --s3-iam-noverify"
    export IAM_PARAMS
    return 0
  fi
  log 2 "unrecognized IAM_TYPE value: $IAM_TYPE"
  return 1
}
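To tie these checks together, a minimal sketch of how a harness might consume them follows; this is illustrative only, not the documented interface:

check_env_vars || exit 1
# IAM_PARAMS now holds either "--iam-dir=$USERS_FOLDER" or the --s3-iam-* flag set,
# ready to be splatted (unquoted) into the gateway command line
# shellcheck disable=SC2086
log 5 "gateway IAM flags: $IAM_PARAMS"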
50
tests/iam.sh
Normal file
@@ -0,0 +1,50 @@
#!/usr/bin/env bash

get_iam_parameters() {
  if [[ -z "$IAM_TYPE" ]]; then
    export IAM_TYPE="folder"
  fi
  if [[ "$IAM_TYPE" == "folder" ]]; then
    if [[ -z "$USERS_FOLDER" ]]; then
      log 2 "if IAM type is folder (or not set), USERS_FOLDER parameter is required"
      return 1
    fi
    if [ ! -d "$USERS_FOLDER" ]; then
      if ! mkdir_error=$(mkdir "$USERS_FOLDER" 2>&1); then
        log 2 "error creating users folder: $mkdir_error"
        return 1
      fi
    fi
    iam_params="--iam-dir=$USERS_FOLDER"
    export iam_params
    return 0
  fi
  if [[ $IAM_TYPE == "s3" ]]; then
    if [[ -z "$USERS_BUCKET" ]]; then
      log 2 "if IAM type is s3, USERS_BUCKET is required"
      return 1
    fi
    log 4 "$USERS_BUCKET"
    if ! bucket_exists "s3api" "$USERS_BUCKET"; then
      exists_result=$?
      log 4 "bucket doesn't exist"
      if [[ $exists_result == 2 ]]; then
        log 2 "error checking if users bucket exists"
        return 1
      fi
      if ! create_bucket "s3api" "$USERS_BUCKET"; then
        log 2 "error creating bucket"
        return 1
      fi
      log 4 "bucket create successful"
    else
      log 4 "bucket exists"
    fi
    iam_params="--s3-iam-access $AWS_ACCESS_KEY_ID --s3-iam-secret $AWS_SECRET_ACCESS_KEY \
      --s3-iam-region us-east-1 --s3-iam-bucket $USERS_BUCKET --s3-iam-endpoint $AWS_ENDPOINT_URL \
      --s3-iam-noverify"
    export iam_params
    return 0
  fi
  log 2 "unrecognized IAM_TYPE value: $IAM_TYPE"
  return 1
}
@@ -93,13 +93,8 @@ func (c *S3Conf) getCreds() credentials.StaticCredentialsProvider {
	return credentials.NewStaticCredentialsProvider(c.awsID, c.awsSecret, "")
}

func (c *S3Conf) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
	return aws.Endpoint{
		PartitionID:       "aws",
		URL:               c.endpoint,
		SigningRegion:     c.awsRegion,
		HostnameImmutable: true,
	}, nil
func (c *S3Conf) GetClient() *s3.Client {
	return s3.NewFromConfig(c.Config())
}

func (c *S3Conf) Config() aws.Config {
@@ -114,11 +109,6 @@ func (c *S3Conf) Config() aws.Config {
		config.WithHTTPClient(client),
	}

	if c.endpoint != "" && c.endpoint != "aws" {
		opts = append(opts,
			config.WithEndpointResolverWithOptions(c))
	}

	if c.checksumDisable {
		opts = append(opts,
			config.WithAPIOptions([]func(*middleware.Stack) error{v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware}))
@@ -135,11 +125,15 @@ func (c *S3Conf) Config() aws.Config {
		log.Fatalln("error:", err)
	}

	if c.endpoint != "" && c.endpoint != "aws" {
		cfg.BaseEndpoint = &c.endpoint
	}

	return cfg
}

func (c *S3Conf) UploadData(r io.Reader, bucket, object string) error {
	uploader := manager.NewUploader(s3.NewFromConfig(c.Config()))
	uploader := manager.NewUploader(c.GetClient())
	uploader.PartSize = c.PartSize
	uploader.Concurrency = c.Concurrency

@@ -154,7 +148,7 @@ func (c *S3Conf) UploadData(r io.Reader, bucket, object string) error {
}

func (c *S3Conf) DownloadData(w io.WriterAt, bucket, object string) (int64, error) {
	downloader := manager.NewDownloader(s3.NewFromConfig(c.Config()))
	downloader := manager.NewDownloader(c.GetClient())
	downloader.PartSize = c.PartSize
	downloader.Concurrency = c.Concurrency

@@ -3031,16 +3031,20 @@ func GetObject_invalid_ranges(s *S3Conf) error {
	}

	ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
	_, err = s3client.GetObject(ctx, &s3.GetObjectInput{
	resp, err := s3client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: &bucket,
		Key:    &obj,
		Range:  getPtr("bytes=1000000000-999999999999"),
		Range:  getPtr("bytes=1500-999999999999"),
	})
	cancel()
	if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidRange)); err != nil {
	if err != nil {
		return err
	}

	if *resp.ContentLength != dataLength-1500 {
		return fmt.Errorf("expected content-length to be %v, instead got %v", dataLength-1500, *resp.ContentLength)
	}

	ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
	_, err = s3client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: &bucket,

@@ -2,12 +2,15 @@

# levels: 1 - crit, 2 - err, 3 - warn, 4 - info, 5 - debug, 6 - trace

export LOG_LEVEL_INT=4

log() {
  if [[ $# -ne 2 ]]; then
    echo "log function requires level, message"
    return 1
  fi
  if [[ $1 -gt $LOG_LEVEL ]]; then
  # shellcheck disable=SC2153
  if [[ $1 -gt $LOG_LEVEL_INT ]]; then
    return 0
  fi
  log_level=""
@@ -18,9 +21,51 @@ log() {
    4) log_level="INFO";;
    5) log_level="DEBUG";;
    6) log_level="TRACE";;
    *) echo "invalid log level $1"; return 1
  esac
  echo "$log_level $2"
  if [[ -n "$TEST_LOG_FILE" ]]; then
    echo "$2" >> "$TEST_LOG_FILE"
  if [[ "$2" == *"secret"* ]]; then
    log_mask "$log_level" "$2"
    return 0
  fi
}
  log_message "$log_level" "$2"
}

log_mask() {
  if [[ $# -ne 2 ]]; then
    echo "mask and log requires level, string"
    return 1
  fi
  local masked_args=() # Initialize an array to hold the masked arguments

  IFS=' ' read -r -a array <<< "$2"

  mask_next=false
  for arg in "${array[@]}"; do
    if [[ $mask_next == true ]]; then
      masked_args+=("********")
      mask_next=false
    elif [[ "$arg" == --secret_key=* ]]; then
      masked_args+=("--secret_key=********")
    elif [[ "$arg" == --secret=* ]]; then
      masked_args+=("--secret=********")
    else
      if [[ "$arg" == "--secret_key" ]] || [[ "$arg" == "--secret" ]] || [[ "$arg" == "--s3-iam-secret" ]]; then
        mask_next=true
      fi
      masked_args+=("$arg")
    fi
  done
  log_message "$log_level" "${masked_args[*]}"
}

log_message() {
  if [[ $# -ne 2 ]]; then
    echo "log message requires level, message"
    return 1
  fi
  now="$(date "+%Y-%m-%d %H:%M:%S")"
  echo "$now $1 $2"
  if [[ -n "$TEST_LOG_FILE" ]]; then
    echo "$now $1 $2" >> "$TEST_LOG_FILE"
  fi
}

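As a usage sketch (the message below is invented), any logged string containing the word "secret" is routed through log_mask, so credential values never reach the console or TEST_LOG_FILE:

log 5 "creating user with --secret my-secret-value"
# emits something like: 2024-06-04 12:00:00 DEBUG creating user with --secret ********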
15
tests/run.sh
@@ -5,9 +5,12 @@ show_help() {
  echo "Usage: $0 [option...]"
  echo " -h, --help Display this help message and exit"
  echo " -s, --static Don't remove buckets between tests"
  echo " aws Run tests with aws cli"
  echo " aws Run tests with aws (s3api) cli"
  echo " s3api Run tests with s3api cli"
  echo " s3 Run tests with s3 cli"
  echo " s3cmd Run tests with s3cmd utility"
  echo " mc Run tests with mc utility"
  echo " aws-user Run user tests with aws cli"
}

handle_param() {
@@ -19,7 +22,7 @@ handle_param() {
    -s|--static)
      export RECREATE_BUCKETS=false
      ;;
    s3|s3api|aws|s3cmd|mc|user)
    s3|s3api|aws|s3cmd|mc|aws-user)
      set_command_type "$1"
      ;;
    *) # Handle unrecognized options or positional arguments
@@ -51,8 +54,8 @@ while [[ "$#" -gt 0 ]]; do
  shift # past argument or value
done

if [[ -z "$VERSITYGW_TEST_ENV" ]]; then
  echo "Error: VERSITYGW_TEST_ENV parameter must be set"
if [[ -z "$VERSITYGW_TEST_ENV" ]] && [[ $BYPASS_ENV_FILE != "true" ]]; then
  echo "Error: VERSITYGW_TEST_ENV parameter must be set, or BYPASS_ENV_FILE must be set to true"
  exit 1
fi

@@ -86,6 +89,10 @@ case $command_type in
    echo "Running mc tests ..."
    "$HOME"/bin/bats ./tests/test_mc.sh || exit_code=$?
    ;;
  aws-user)
    echo "Running aws user tests ..."
    "$HOME"/bin/bats ./tests/test_user_aws.sh || exit_code=$?
esac

# shellcheck disable=SC2086
exit $exit_code

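Reflecting the options above, typical invocations might look like the following; the env file path is an example:

VERSITYGW_TEST_ENV=./tests/.env.default ./tests/run.sh aws-user
./tests/run.sh -s s3cmd   # keep buckets in place between tests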
@@ -1,11 +1,11 @@
#!/bin/bash

if [[ -z "$VERSITYGW_TEST_ENV" ]]; then
  echo "Error: VERSITYGW_TEST_ENV parameter must be set"
if [[ -z "$VERSITYGW_TEST_ENV" ]] && [[ $BYPASS_ENV_FILE != "true" ]]; then
  echo "Error: VERSITYGW_TEST_ENV parameter must be set, or BYPASS_ENV_FILE must be set to true"
  exit 1
fi

# shellcheck source=./.env.default
# shellcheck source=./tests/.env.default
source "$VERSITYGW_TEST_ENV"
export RECREATE_BUCKETS

@@ -1,20 +1,20 @@
#!/usr/bin/env bash

source ./tests/env.sh
source ./tests/setup_mc.sh
source ./tests/versity.sh

# bats setup function
setup() {
  start_versity || start_result=$?
  if [[ $start_result -ne 0 ]]; then
    echo "error starting versity executable"
  if ! check_env_vars; then
    log 2 "error checking env values"
    return 1
  fi

  check_params || check_result=$?
  if [[ $check_result -ne 0 ]]; then
    echo "parameter check failed"
    return 1
  if [ "$RUN_VERSITYGW" == "true" ]; then
    if ! run_versity_app; then
      log 2 "error starting versity apps"
      return 1
    fi
  fi

  log 4 "Running test $BATS_TEST_NAME"
@@ -32,9 +32,8 @@ setup() {
  fi

  if [[ $RUN_MC == true ]]; then
    check_add_mc_alias || check_result=$?
    if [[ $check_result -ne 0 ]]; then
      echo "mc alias check/add failed"
    if ! check_add_mc_alias; then
      log 2 "mc alias check/add failed"
      return 1
    fi
  fi
@@ -44,33 +43,6 @@ setup() {
    BUCKET_TWO_NAME
}

# make sure required environment variables for tests are defined properly
# return 0 for yes, 1 for no
check_params() {
  if [ -z "$BUCKET_ONE_NAME" ]; then
    echo "No bucket one name set"
    return 1
  elif [ -z "$BUCKET_TWO_NAME" ]; then
    echo "No bucket two name set"
    return 1
  elif [ -z "$RECREATE_BUCKETS" ]; then
    echo "No recreate buckets parameter set"
    return 1
  elif [[ $RECREATE_BUCKETS != "true" ]] && [[ $RECREATE_BUCKETS != "false" ]]; then
    echo "RECREATE_BUCKETS must be 'true' or 'false'"
    return 1
  fi
  if [[ -z "$LOG_LEVEL" ]]; then
    export LOG_LEVEL=2
  else
    export LOG_LEVEL
  fi
  if [[ -n "$TEST_LOG_FILE" ]]; then
    export TEST_LOG_FILE
  fi
  return 0
}

# fail a test
# param: error message
fail() {

@@ -8,8 +8,7 @@ check_for_alias() {
    return 2
  fi
  while IFS= read -r line; do
    error=$(echo "$line" | grep -w "$MC_ALIAS ")
    if [[ $? -eq 0 ]]; then
    if echo "$line" | grep -w "$MC_ALIAS "; then
      return 0
    fi
  done <<< "$aliases"

||||
@@ -28,6 +28,8 @@ source ./tests/commands/put_object_legal_hold.sh
|
||||
source ./tests/commands/put_object_retention.sh
|
||||
source ./tests/commands/select_object_content.sh
|
||||
|
||||
export RUN_USERS=true
|
||||
|
||||
# abort-multipart-upload
|
||||
@test "test_abort_multipart_upload" {
|
||||
local bucket_file="bucket-file"
|
||||
@@ -103,10 +105,10 @@ source ./tests/commands/select_object_content.sh
|
||||
os_name="$(uname)"
|
||||
if [[ "$os_name" == "Darwin" ]]; then
|
||||
now=$(date -u +"%Y-%m-%dT%H:%M:%S")
|
||||
five_seconds_later=$(date -j -v +5S -f "%Y-%m-%dT%H:%M:%S" "$now" +"%Y-%m-%dT%H:%M:%S")
|
||||
five_seconds_later=$(date -j -v +10S -f "%Y-%m-%dT%H:%M:%S" "$now" +"%Y-%m-%dT%H:%M:%S")
|
||||
else
|
||||
now=$(date +"%Y-%m-%dT%H:%M:%S")
|
||||
five_seconds_later=$(date -d "$now 5 seconds" +"%Y-%m-%dT%H:%M:%S")
|
||||
five_seconds_later=$(date -d "$now 10 seconds" +"%Y-%m-%dT%H:%M:%S")
|
||||
fi
|
||||
|
||||
create_test_files "$bucket_file" || fail "error creating test file"
|
||||
@@ -152,7 +154,7 @@ source ./tests/commands/select_object_content.sh
|
||||
get_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder/$bucket_file-copy" || fail "error getting object"
|
||||
compare_files "$test_file_folder/$bucket_file" "$test_file_folder/$bucket_file-copy" || fail "files not equal"
|
||||
|
||||
sleep 2
|
||||
sleep 10
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files $bucket_file
|
||||
@@ -171,6 +173,59 @@ source ./tests/commands/select_object_content.sh
|
||||
test_common_set_get_delete_bucket_tags "aws"
|
||||
}
|
||||
|
||||
# delete-object - tested with bucket cleanup before or after tests
|
||||
|
||||
# delete-object-tagging
|
||||
@test "test_delete_object_tagging" {
|
||||
test_common_delete_object_tagging "aws"
|
||||
}
|
||||
|
||||
# delete-objects
|
||||
@test "test_delete_objects" {
|
||||
local object_one="test-file-one"
|
||||
local object_two="test-file-two"
|
||||
|
||||
create_test_files "$object_one" "$object_two" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating test files"
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || local result_one=$?
|
||||
[[ $result_one -eq 0 ]] || fail "Error creating bucket"
|
||||
|
||||
put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || local result_two=$?
|
||||
[[ $result_two -eq 0 ]] || fail "Error adding object one"
|
||||
put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local result_three=$?
|
||||
[[ $result_three -eq 0 ]] || fail "Error adding object two"
|
||||
|
||||
error=$(aws --no-verify-ssl s3api delete-objects --bucket "$BUCKET_ONE_NAME" --delete '{
|
||||
"Objects": [
|
||||
{"Key": "test-file-one"},
|
||||
{"Key": "test-file-two"}
|
||||
]
|
||||
}') || local result=$?
|
||||
[[ $result -eq 0 ]] || fail "Error deleting objects: $error"
|
||||
|
||||
object_exists "aws" "$BUCKET_ONE_NAME" "$object_one" || local exists_one=$?
|
||||
[[ $exists_one -eq 1 ]] || fail "Object one not deleted"
|
||||
object_exists "aws" "$BUCKET_ONE_NAME" "$object_two" || local exists_two=$?
|
||||
[[ $exists_two -eq 1 ]] || fail "Object two not deleted"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
delete_test_files "$object_one" "$object_two"
|
||||
}
|
||||
|
||||
# get-bucket-acl
|
||||
@test "test_get_bucket_acl" {
|
||||
setup_bucket "aws" "$BUCKET_ONE_NAME" || local created=$?
|
||||
[[ $created -eq 0 ]] || fail "Error creating bucket"
|
||||
|
||||
get_bucket_acl "s3api" "$BUCKET_ONE_NAME" || local result=$?
|
||||
[[ $result -eq 0 ]] || fail "Error retrieving acl"
|
||||
|
||||
id=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq '.Owner.ID')
|
||||
[[ $id == '"'"$AWS_ACCESS_KEY_ID"'"' ]] || fail "Acl mismatch"
|
||||
|
||||
delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
|
||||
}
|
||||
|
||||
#@test "test_get_object_invalid_range" {
|
||||
# bucket_file="bucket_file"
|
||||
#
|
||||
@@ -183,22 +238,18 @@ source ./tests/commands/select_object_content.sh
#  [[ $get_result -ne 0 ]] || fail "Get object with zero range returned no error"
#}

#@test "test_get_object_full_range" {
#  bucket_file="bucket_file"
#
#  create_test_files "$bucket_file" || local created=$?
#  [[ $created -eq 0 ]] || fail "Error creating test files"
#  echo -n "0123456789" > "$test_file_folder/$bucket_file"
#  setup_bucket "s3api" "$BUCKET_ONE_NAME" || local setup_result=$?
#  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"
#  put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error putting object"
#  get_object_with_range "$BUCKET_ONE_NAME" "$bucket_file" "bytes=9-15" "$test_file_folder/$bucket_file-range" || fail "error getting range"
#  cat "$test_file_folder/$bucket_file"
#  cat "$test_file_folder/$bucket_file-range"
#  ls -l "$test_file_folder/$bucket_file"
#  ls -l "$test_file_folder/$bucket_file-range"
#  compare_files "$test_file_folder/$bucket_file" "$test_file_folder/$bucket_file-range" || fail "files not equal"
#}
@test "test_get_object_full_range" {
  bucket_file="bucket_file"

  create_test_files "$bucket_file" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test files"
  echo -n "0123456789" > "$test_file_folder/$bucket_file"
  setup_bucket "s3api" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"
  put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || fail "error putting object"
  get_object_with_range "$BUCKET_ONE_NAME" "$bucket_file" "bytes=9-15" "$test_file_folder/$bucket_file-range" || fail "error getting range"
  [[ "$(cat "$test_file_folder/$bucket_file-range")" == "9" ]] || fail "byte range not copied properly"
}
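# Why "9": the object is ten bytes ("0123456789"), so bytes=9-15 over-runs the
# end and S3 clamps the range to the final byte at offset 9, whose content is
# the character "9". Exactly one byte comes back, hence the single-character
# comparison above.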
@test "test_put_object" {
|
||||
bucket_file="bucket_file"
|
||||
@@ -211,8 +262,8 @@ source ./tests/commands/select_object_content.sh
|
||||
[[ $setup_result_two -eq 0 ]] || fail "Bucket two setup error"
|
||||
put_object "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
error=$(aws --no-verify-ssl s3api copy-object --copy-source "$BUCKET_ONE_NAME/$bucket_file" --key "$bucket_file" --bucket "$BUCKET_TWO_NAME" 2>&1) || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Error copying file: $error"
|
||||
copy_error=$(aws --no-verify-ssl s3api copy-object --copy-source "$BUCKET_ONE_NAME/$bucket_file" --key "$bucket_file" --bucket "$BUCKET_TWO_NAME" 2>&1) || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Error copying file: $copy_error"
|
||||
copy_file "s3://$BUCKET_TWO_NAME/$bucket_file" "$test_file_folder/${bucket_file}_copy" || local copy_result=$?
|
||||
[[ $copy_result -eq 0 ]] || fail "Failed to add object to bucket"
|
||||
compare_files "$test_file_folder/$bucket_file" "$test_file_folder/${bucket_file}_copy" || local compare_result=$?
|
||||
@@ -253,19 +304,6 @@ source ./tests/commands/select_object_content.sh
  test_common_list_objects "aws"
}

# test ability to retrieve bucket ACLs
@test "test_get_bucket_acl" {
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating bucket"

  get_bucket_acl "s3api" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Error retrieving acl"

  id=$(echo "$acl" | grep -v "InsecureRequestWarning" | jq '.Owner.ID')
  [[ $id == '"'"$AWS_ACCESS_KEY_ID"'"' ]] || fail "Acl mismatch"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
}

@test "test_get_object_attributes" {
  bucket_file="bucket_file"
@@ -316,11 +354,13 @@ source ./tests/commands/select_object_content.sh
  echo "fdkljafajkfs" > "$test_file_folder/$bucket_file"
  put_object_with_user "s3api" "$test_file_folder/$bucket_file" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || local put_result=$?
  [[ $put_result -ne 0 ]] || fail "able to overwrite object with hold"
  [[ $error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $error"
  # shellcheck disable=SC2154
  [[ $put_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $put_object_error"

  delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || local delete_result=$?
  [[ $delete_result -ne 0 ]] || fail "able to delete object with hold"
  [[ $error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $error"
  # shellcheck disable=SC2154
  [[ $delete_object_error == *"Object is WORM protected and cannot be overwritten"* ]] || fail "unexpected error message: $delete_object_error"
  put_object_legal_hold "$BUCKET_ONE_NAME" "$bucket_file" "OFF" || fail "error removing legal hold on object"
  delete_object_with_user "s3api" "$BUCKET_ONE_NAME" "$bucket_file" "$username" "$secret_key" || fail "error deleting object after removing legal hold"
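# In the hunk above, the generic "$error" checks are superseded by
# command-specific variables ($put_object_error, $delete_object_error) that
# the helper commands export; SC2154 is disabled because shellcheck cannot see
# assignments made in sourced files.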

@@ -411,37 +451,6 @@ legal_hold_retention_setup() {
#  delete_bucket_or_contents "$BUCKET_ONE_NAME"
#}

# test ability to delete multiple objects from bucket
@test "test_delete_objects" {
  local object_one="test-file-one"
  local object_two="test-file-two"

  create_test_files "$object_one" "$object_two" || local created=$?
  [[ $created -eq 0 ]] || fail "Error creating test files"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local result_one=$?
  [[ $result_one -eq 0 ]] || fail "Error creating bucket"

  put_object "s3api" "$test_file_folder"/"$object_one" "$BUCKET_ONE_NAME" "$object_one" || local result_two=$?
  [[ $result_two -eq 0 ]] || fail "Error adding object one"
  put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local result_three=$?
  [[ $result_three -eq 0 ]] || fail "Error adding object two"

  error=$(aws --no-verify-ssl s3api delete-objects --bucket "$BUCKET_ONE_NAME" --delete '{
    "Objects": [
      {"Key": "test-file-one"},
      {"Key": "test-file-two"}
    ]
  }') || local result=$?
  [[ $result -eq 0 ]] || fail "Error deleting objects: $error"

  object_exists "aws" "$BUCKET_ONE_NAME" "$object_one" || local exists_one=$?
  [[ $exists_one -eq 1 ]] || fail "Object one not deleted"
  object_exists "aws" "$BUCKET_ONE_NAME" "$object_two" || local exists_two=$?
  [[ $exists_two -eq 1 ]] || fail "Object two not deleted"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files "$object_one" "$object_two"
}

#@test "test_select_object_content" {
#  bucket_file="bucket_file"
@@ -479,8 +488,6 @@ legal_hold_retention_setup() {
  put_object "s3api" "$test_file_folder"/"$object_two" "$BUCKET_ONE_NAME" "$object_two" || local copy_result_two=$?
  [[ $copy_result_two -eq 0 ]] || fail "Failed to add object $object_two"

  sleep 1

  list_objects_s3api_v1 "$BUCKET_ONE_NAME"
  key_one=$(echo "$objects" | jq -r '.Contents[0].Key')
  [[ $key_one == "$object_one" ]] || fail "Object one mismatch ($key_one, $object_one)"
@@ -558,6 +565,7 @@ legal_hold_retention_setup() {
      echo "error: blank etag"
      return 1
    fi
    # shellcheck disable=SC2004
    parts_map[$part_number]=$etag
  done
  [[ ${#parts_map[@]} -ne 0 ]] || fail "error loading multipart upload parts to check"
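# parts_map is presumably an array keyed by part number; SC2004 only flags the
# stylistically redundant $ inside an arithmetic index, so the directive above
# is a lint suppression, not a behavior change.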
@@ -593,8 +601,7 @@ legal_hold_retention_setup() {
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  list_multipart_uploads "$BUCKET_ONE_NAME" "$test_file_folder"/"$bucket_file_one" "$test_file_folder"/"$bucket_file_two"
  [[ $? -eq 0 ]] || fail "failed to list multipart uploads"
  list_multipart_uploads "$BUCKET_ONE_NAME" "$test_file_folder"/"$bucket_file_one" "$test_file_folder"/"$bucket_file_two" || fail "failed to list multipart uploads"

  local key_one
  local key_two
@@ -635,6 +642,42 @@ legal_hold_retention_setup() {
  delete_test_files $bucket_file
}

@test "test_multipart_upload_from_bucket_range_too_large" {
  local bucket_file="bucket-file"

  create_large_file "$bucket_file" || fail "error creating file $bucket_file"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  multipart_upload_from_bucket_range "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 "bytes=0-1000000000" || local upload_result=$?
  [[ $upload_result -eq 1 ]] || fail "multipart upload with overly large range should have failed"
  log 5 "error: $upload_part_copy_error"
  [[ $upload_part_copy_error == *"Range specified is not valid"* ]] || [[ $upload_part_copy_error == *"InvalidRange"* ]] || fail "unexpected error: $upload_part_copy_error"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}

@test "test_multipart_upload_from_bucket_range_valid" {
  local bucket_file="bucket-file"

  create_large_file "$bucket_file" || fail "error creating file $bucket_file"
  setup_bucket "aws" "$BUCKET_ONE_NAME" || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  range_max=$((5*1024*1024-1))
  multipart_upload_from_bucket_range "$BUCKET_ONE_NAME" "$bucket_file" "$test_file_folder"/"$bucket_file" 4 "bytes=0-$range_max" || fail "upload failure"

  get_object "s3api" "$BUCKET_ONE_NAME" "$bucket_file-copy" "$test_file_folder/$bucket_file-copy" || fail "error retrieving object after upload"
  if [[ $(uname) == 'Darwin' ]]; then
    object_size=$(stat -f%z "$test_file_folder/$bucket_file-copy")
  else
    object_size=$(stat --format=%s "$test_file_folder/$bucket_file-copy")
  fi
  [[ $object_size -eq $((range_max*4+4)) ]] || fail "object size mismatch ($object_size, $((range_max*4+4)))"

  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
  delete_test_files $bucket_file
}
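# Size-check arithmetic: each of the 4 parts copies bytes 0..range_max
# inclusive, i.e. range_max+1 = 5 MiB per part, so the completed object is
# 4*(range_max+1) = range_max*4+4 bytes (20 MiB exactly).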

@test "test-presigned-url-utf8-chars" {
  test_common_presigned_url_utf8_chars "aws"
}
@@ -691,6 +734,11 @@ legal_hold_retention_setup() {
  delete_bucket_or_contents "aws" "$BUCKET_ONE_NAME"
}

@test "test_head_bucket_invalid_name" {
  head_bucket "aws" "" || local head_result=$?
  [[ $head_result -ne 0 ]] || fail "able to get bucket info for invalid name"
}

@test "test_head_bucket_doesnt_exist" {
  setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"
@@ -726,9 +774,6 @@ legal_hold_retention_setup() {
  [[ $value == "\"$test_value\"" ]] || fail "value mismatch (expected \"$test_value\", actual $value)"
}

@test "test_delete_object_tagging" {
  test_common_delete_object_tagging "aws"
}

@test "test_get_bucket_location" {
  test_common_get_bucket_location "aws"

@@ -62,32 +62,25 @@ test_common_copy_object() {
    fail "copy object test requires command type"
  fi
  local object_name="test-object"
  create_test_files "$object_name" || local create_result=$?
  [[ $create_result -eq 0 ]] || fail "Error creating test file"
  create_test_files "$object_name" || fail "error creating test file"
  echo "test data" > "$test_file_folder/$object_name"

  setup_bucket "$1" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket one"
  setup_bucket "$1" "$BUCKET_TWO_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket two"
  setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "error setting up bucket one"
  setup_bucket "$1" "$BUCKET_TWO_NAME" || fail "error setting up bucket two"

  if [[ $1 == 's3' ]]; then
    copy_object "$1" "$test_file_folder/$object_name" "$BUCKET_ONE_NAME" "$object_name" || local put_result=$?
    copy_object "$1" "$test_file_folder/$object_name" "$BUCKET_ONE_NAME" "$object_name" || fail "failed to copy object to bucket one"
  else
    put_object "$1" "$test_file_folder/$object_name" "$BUCKET_ONE_NAME" "$object_name" || local put_result=$?
    put_object "$1" "$test_file_folder/$object_name" "$BUCKET_ONE_NAME" "$object_name" || fail "failed to put object to bucket one"
  fi
  [[ $put_result -eq 0 ]] || fail "Failed to add object to bucket"
  if [[ $1 == 's3' ]]; then
    copy_object "$1" "s3://$BUCKET_ONE_NAME/$object_name" "$BUCKET_TWO_NAME" "$object_name" || local copy_result_one=$?
    copy_object "$1" "s3://$BUCKET_ONE_NAME/$object_name" "$BUCKET_TWO_NAME" "$object_name" || fail "object not copied to bucket two"
  else
    copy_object "$1" "$BUCKET_ONE_NAME/$object_name" "$BUCKET_TWO_NAME" "$object_name" || local copy_result_one=$?
    copy_object "$1" "$BUCKET_ONE_NAME/$object_name" "$BUCKET_TWO_NAME" "$object_name" || fail "object not copied to bucket two"
  fi
  [[ $copy_result_one -eq 0 ]] || fail "Object not added to bucket"
  get_object "$1" "$BUCKET_TWO_NAME" "$object_name" "$test_file_folder/$object_name-copy" || local get_result=$?
  [[ $get_result -eq 0 ]] || fail "failed to retrieve object"
  get_object "$1" "$BUCKET_TWO_NAME" "$object_name" "$test_file_folder/$object_name-copy" || fail "failed to retrieve object"

  compare_files "$test_file_folder/$object_name" "$test_file_folder/$object_name-copy" || local compare_result=$?
  [[ $compare_result -eq 0 ]] || fail "files not the same"
  compare_files "$test_file_folder/$object_name" "$test_file_folder/$object_name-copy" || fail "files not the same"

  delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
  delete_bucket_or_contents "$1" "$BUCKET_TWO_NAME"
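# Pattern note for the hunk above: "cmd || local rc=$?" leaves rc unset when
# cmd succeeds, so a follow-up "[[ $rc -eq 0 ]]" only works by accident of
# unset-variable expansion and would break under "set -u". Folding the check
# into "cmd || fail ..." (or "if ! cmd; then ...", as in tests/util.sh below)
# is both shorter and safe.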
@@ -274,18 +267,14 @@ test_common_set_get_delete_bucket_tags() {
  local key="test_key"
  local value="test_value"

  setup_bucket "$1" "$BUCKET_ONE_NAME" || local result=$?
  [[ $result -eq 0 ]] || fail "Failed to create bucket '$BUCKET_ONE_NAME'"
  setup_bucket "$1" "$BUCKET_ONE_NAME" || fail "Failed to create bucket '$BUCKET_ONE_NAME'"

  get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || local get_result=$?
  [[ $get_result -eq 0 ]] || fail "Error getting bucket tags first time"
  get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || fail "Error getting bucket tags first time"

  check_bucket_tags_empty "$1" "$BUCKET_ONE_NAME" || local check_result=$?
  [[ $check_result -eq 0 ]] || fail "error checking if bucket tags are empty"
  check_bucket_tags_empty "$1" "$BUCKET_ONE_NAME" || fail "error checking if bucket tags are empty"

  put_bucket_tag "$1" "$BUCKET_ONE_NAME" "$key" "$value"
  get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || local get_result_two=$?
  [[ $get_result_two -eq 0 ]] || fail "Error getting bucket tags second time"
  get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || fail "Error getting bucket tags second time"

  local tag_set_key
  local tag_set_value
@@ -302,11 +291,9 @@ test_common_set_get_delete_bucket_tags() {
  fi
  delete_bucket_tags "$1" "$BUCKET_ONE_NAME"

  get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || local get_result=$?
  [[ $get_result -eq 0 ]] || fail "Error getting bucket tags third time"
  get_bucket_tagging "$1" "$BUCKET_ONE_NAME" || fail "Error getting bucket tags third time"

  check_bucket_tags_empty "$1" "$BUCKET_ONE_NAME" || local check_result=$?
  [[ $check_result -eq 0 ]] || fail "error checking if bucket tags are empty"
  check_bucket_tags_empty "$1" "$BUCKET_ONE_NAME" || fail "error checking if bucket tags are empty"
  delete_bucket_or_contents "$1" "$BUCKET_ONE_NAME"
}

@@ -474,6 +461,7 @@ test_common_put_bucket_acl() {
  fi

  acl_file="test-acl"
  create_test_files "$acl_file"

  cat <<EOF > "$test_file_folder"/"$acl_file"
{
@@ -575,6 +563,7 @@ EOF
  get_bucket_policy "$1" "$BUCKET_ONE_NAME" || local get_result=$?
  [[ $get_result -eq 0 ]] || fail "error getting bucket policy after setting"

  log 5 "$bucket_policy"
  returned_effect=$(echo "$bucket_policy" | jq -r '.Statement[0].Effect')
  [[ $effect == "$returned_effect" ]] || fail "effect mismatch ($effect, $returned_effect)"
  returned_principal=$(echo "$bucket_policy" | jq -r '.Statement[0].Principal')

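# For orientation, the jq paths above assume a policy document of roughly this
# shape (the actual statement used by the suite lives in the test fixture):
#   {"Statement": [{"Effect": "Allow", "Principal": "*", "Action": "s3:GetObject", "Resource": "arn:aws:s3:::bucket/*"}]}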
@@ -36,6 +36,15 @@ export RUN_MC=true
  test_common_set_get_delete_bucket_tags "mc"
}

# delete-object - put-object tests

# delete-objects - test setup/teardown

# delete-object-tagging
@test "test_delete_object_tagging" {
  test_common_delete_object_tagging "mc"
}

@test "test_put_object-with-data-mc" {
  test_common_put_object_with_data "mc"
}
@@ -94,9 +103,6 @@ export RUN_MC=true
  delete_bucket_or_contents "mc" "$BUCKET_ONE_NAME"
}

@test "test_delete_object_tagging" {
  test_common_delete_object_tagging "mc"
}

@test "test_get_bucket_location" {
  test_common_get_bucket_location "mc"

@@ -19,6 +19,10 @@ source ./tests/test_common.sh

# delete-bucket - test_create_delete_bucket

# delete-object - test_put_object

# delete-objects - tested with recursive bucket delete

@test "test_put_object" {
  test_common_put_object_no_data "s3"
}

@@ -17,20 +17,28 @@ export RUN_S3CMD=true
}

# copy-object
@test "test_copy_object_with_data" {
  test_common_put_object_with_data "s3cmd"
}

# copy-object
@test "test_copy_object_no_data" {
  test_common_put_object_no_data "s3cmd"
}
#@test "test_copy_object" {
#  test_common_copy_object "s3cmd"
#}

# create-bucket
@test "test_create_delete_bucket" {
  test_common_create_delete_bucket "s3cmd"
}

@test "test_create_bucket_invalid_name_s3cmd" {
  if [[ $RECREATE_BUCKETS != "true" ]]; then
    return
  fi

  create_bucket_invalid_name "s3cmd" || local create_result=$?
  [[ $create_result -eq 0 ]] || fail "Invalid name test failed"

  [[ "$bucket_create_error" == *"just the bucket name"* ]] || fail "unexpected error: $bucket_create_error"

  delete_bucket_or_contents "s3cmd" "$BUCKET_ONE_NAME"
}

# delete-bucket - test_create_delete_bucket

# delete-bucket-policy
@@ -38,6 +46,12 @@ export RUN_S3CMD=true
  test_common_get_put_delete_bucket_policy "s3cmd"
}

# delete-object - test_put_object

# delete-objects - tested with cleanup before or after tests

# get-bucket-acl - test_put_bucket_acl

#@test "test_put_bucket_acl" {
#  test_common_put_bucket_acl "s3cmd"
#}
@@ -59,18 +73,6 @@ export RUN_S3CMD=true
  test_common_list_objects_file_count "s3cmd"
}

@test "test_create_bucket_invalid_name_s3cmd" {
  if [[ $RECREATE_BUCKETS != "true" ]]; then
    return
  fi

  create_bucket_invalid_name "s3cmd" || local create_result=$?
  [[ $create_result -eq 0 ]] || fail "Invalid name test failed"

  [[ "$bucket_create_error" == *"just the bucket name"* ]] || fail "unexpected error: $bucket_create_error"

  delete_bucket_or_contents "s3cmd" "$BUCKET_ONE_NAME"
}

@test "test_get_bucket_info_s3cmd" {
  setup_bucket "s3cmd" "$BUCKET_ONE_NAME" || local setup_result=$?
@@ -80,6 +82,15 @@ export RUN_S3CMD=true
  delete_bucket_or_contents "s3cmd" "$BUCKET_ONE_NAME"
}

# put-object
@test "test_put_object_with_data" {
  test_common_put_object_with_data "s3cmd"
}

@test "test_put_object_no_data" {
  test_common_put_object_no_data "s3cmd"
}

@test "test_get_bucket_info_doesnt_exist_s3cmd" {
  setup_bucket "s3cmd" "$BUCKET_ONE_NAME" || local setup_result=$?
  [[ $setup_result -eq 0 ]] || fail "error setting up bucket"

@@ -1,6 +1,9 @@
#!/usr/bin/env bats

source ./tests/test_user_common.sh
source ./tests/util_users.sh

export RUN_USERS=true

@test "test_admin_user_aws" {
  test_admin_user "aws"
@@ -10,6 +13,12 @@ source ./tests/test_user_common.sh
  test_create_user_already_exists "aws"
}

@test "test_delete_user_no_access_key" {
  if delete_user ""; then
    fail "delete user with empty access key succeeded"
  fi
}

@test "test_user_user_aws" {
  test_user_user "aws"
}

@@ -2,6 +2,9 @@

source ./tests/test_user_common.sh

export RUN_S3CMD=true
export RUN_USERS=true

@test "test_admin_user_s3cmd" {
  test_admin_user "s3cmd"
}

138
tests/util.sh
@@ -14,6 +14,7 @@ source ./tests/commands/get_object_tagging.sh
source ./tests/commands/head_bucket.sh
source ./tests/commands/head_object.sh
source ./tests/commands/list_objects.sh
source ./tests/commands/upload_part_copy.sh

# recursively delete an AWS bucket
# param: bucket name
@@ -29,7 +30,7 @@ delete_bucket_recursive() {
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 rb s3://"$2" --force 2>&1) || exit_code="$?"
  elif [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then
    delete_bucket_recursive_s3api "$2" 2>&1 || exit_code="$?"
    delete_bucket_recursive_s3api "$2" || exit_code="$?"
  elif [[ $1 == "s3cmd" ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rb s3://"$2" --recursive 2>&1) || exit_code="$?"
  elif [[ $1 == "mc" ]]; then
@@ -55,16 +56,26 @@ delete_bucket_recursive_s3api() {
    log 2 "delete bucket recursive command for s3api requires bucket name"
    return 1
  fi
  list_objects 's3api' "$1" || local list_result=$?
  if [[ $list_result -ne 0 ]]; then
  if ! list_objects 's3api' "$1"; then
    log 2 "error listing objects"
    return 1
  fi
  # shellcheck disable=SC2154
  for object in "${object_array[@]}"; do
    delete_object 's3api' "$1" "$object" || local delete_object_result=$?
    if [[ $delete_object_result -ne 0 ]]; then
    if ! delete_object 's3api' "$1" "$object"; then
      log 2 "error deleting object $object"
      if [[ $delete_object_error == *"WORM"* ]]; then
        log 5 "WORM protection found"
        if ! put_object_legal_hold "$1" "$object" "OFF"; then
          log 2 "error removing object legal hold"
          return 1
        fi
        if ! delete_object 's3api' "$1" "$object"; then
          log 2 "error deleting object after legal hold removal"
          return 1
        fi
        continue
      fi
      return 1
    fi
  done
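# Cleanup flow for WORM-protected objects, as exercised above: a delete that
# fails with a WORM error is retried exactly once after clearing the hold,
# i.e. put_object_legal_hold "$bucket" "$key" "OFF" followed by a second
# delete_object call.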
@@ -114,8 +125,7 @@ bucket_exists() {
    return 2
  fi

  head_bucket "$1" "$2" || local check_result=$?
  if [[ $check_result -ne 0 ]]; then
  if ! head_bucket "$1" "$2"; then
    # shellcheck disable=SC2154
    bucket_info=$(echo "$bucket_info" | grep -v "InsecureRequestWarning")
    log 5 "$bucket_info"
@@ -197,6 +207,7 @@ setup_bucket() {
    return 1
  fi
  local create_result
  log 5 "util.setup_bucket: command type: $1, bucket name: $2"
  create_bucket "$1" "$2" || create_result=$?
  if [[ $create_result -ne 0 ]]; then
    log 2 "Error creating bucket"
@@ -219,35 +230,8 @@ object_exists() {
    echo "error checking if object exists"
    return 2
  fi
  # shellcheck disable=SC2086
  return $head_result

  return 0
  local exit_code=0
  local error=""
  if [[ $1 == 's3' ]]; then
    error=$(aws --no-verify-ssl s3 ls "s3://$2/$3" 2>&1) || exit_code="$?"
  elif [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
    error=$(aws --no-verify-ssl s3api head-object --bucket "$2" --prefix "$3" 2>&1) || exit_code="$?"
  elif [[ $1 == 's3cmd' ]]; then
    error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate ls s3://"$2/$3" 2>&1) || exit_code="$?"
  elif [[ $1 == 'mc' ]]; then
    error=$(mc --insecure ls "$MC_ALIAS/$2/$3" 2>&1) || exit_code=$?
  else
    echo "invalid command type $1"
    return 2
  fi
  if [ $exit_code -ne 0 ]; then
    if [[ "$error" == "" ]] || [[ $error == *"InsecureRequestWarning"* ]]; then
      return 1
    else
      echo "error checking if object exists: $error"
      return 2
    fi
  # s3cmd, mc return empty when object doesn't exist, rather than error
  elif [[ ( $1 == 's3cmd' ) || ( $1 == 'mc' ) ]] && [[ $error == "" ]]; then
    return 1
  fi
  return 0
}
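# Return-code contract preserved by the rewrite: 0 = object exists, 1 = object
# absent, 2 = usage or transport error. Callers branch on it, e.g.:
#   object_exists "aws" "$BUCKET_ONE_NAME" "$key" || rc=$?
#   [[ ${rc:-0} -eq 1 ]] && echo "object is gone"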

put_object_with_metadata() {
@@ -289,9 +273,9 @@ get_object_metadata() {
    echo "error copying object to bucket: $error"
    return 1
  fi
  log 5 "$metadata_struct"
  log 5 "raw metadata: $metadata_struct"
  metadata=$(echo "$metadata_struct" | jq '.Metadata')
  echo $metadata
  log 5 "metadata: $metadata"
  export metadata
  return 0
}
@@ -504,12 +488,12 @@ check_object_tags_empty() {
    echo "bucket tags empty check requires command type, bucket, and key"
    return 2
  fi
  get_object_tagging "$1" "$2" "$3" || get_result=$?
  if [[ $get_result -ne 0 ]]; then
  if ! get_object_tagging "$1" "$2" "$3"; then
    echo "failed to get tags"
    return 2
  fi
  check_tags_empty "$1" || local check_result=$?
  # shellcheck disable=SC2086
  return $check_result
}

@@ -518,12 +502,12 @@ check_bucket_tags_empty() {
    echo "bucket tags empty check requires command type, bucket"
    return 2
  fi
  get_bucket_tagging "$1" "$2" || get_result=$?
  if [[ $get_result -ne 0 ]]; then
  if ! get_bucket_tagging "$1" "$2"; then
    echo "failed to get tags"
    return 2
  fi
  check_tags_empty "$1" || local check_result=$?
  # shellcheck disable=SC2086
  return $check_result
}

@@ -738,7 +722,7 @@ multipart_upload_before_completion_custom() {
    return 1
  fi

  # shellcheck disable=SC2048
  # shellcheck disable=SC2086 disable=SC2048
  create_multipart_upload_custom "$1" "$2" ${*:5} || local create_result=$?
  if [[ $create_result -ne 0 ]]; then
    log 2 "error creating multipart upload"
@@ -769,7 +753,7 @@ multipart_upload_custom() {
    return 1
  fi

  # shellcheck disable=SC2048
  # shellcheck disable=SC2086 disable=SC2048
  multipart_upload_before_completion_custom "$1" "$2" "$3" "$4" ${*:5} || local result=$?
  if [[ $result -ne 0 ]]; then
    log 2 "error performing pre-completion multipart upload"
@@ -919,9 +903,6 @@ list_multipart_uploads() {
  export uploads
}

# perform a multi-part upload within bucket
# params: bucket, key, file, number of parts
# return 0 for success, 1 for failure
multipart_upload_from_bucket() {
  if [ $# -ne 4 ]; then
    echo "multipart upload from bucket command missing bucket, copy source, key, and/or part count"
@@ -964,29 +945,62 @@ multipart_upload_from_bucket() {
  parts+="]"

  error=$(aws --no-verify-ssl s3api complete-multipart-upload --bucket "$1" --key "$2-copy" --upload-id "$upload_id" --multipart-upload '{"Parts": '"$parts"'}') || local completed=$?
  if [[ $completed -ne 0 ]]; then
    echo "Error completing upload: $error"
    return 1
  fi
  return 0

  parts+="]"
  if [[ $completed -ne 0 ]]; then
    echo "Error completing upload: $error"
    return 1
  fi
  return 0
}

upload_part_copy() {
multipart_upload_from_bucket_range() {
  if [ $# -ne 5 ]; then
    echo "upload multipart part copy function must have bucket, key, upload ID, file name, part number"
    echo "multipart upload from bucket with range command requires bucket, copy source, key, part count, and range"
    return 1
  fi
  local etag_json
  echo "$1 $2 $3 $4 $5"
  etag_json=$(aws --no-verify-ssl s3api upload-part-copy --bucket "$1" --key "$2" --upload-id "$3" --part-number "$5" --copy-source "$1/$4-$(($5-1))") || local uploaded=$?
  if [[ $uploaded -ne 0 ]]; then
    echo "Error uploading part $5: $etag_json"

  split_file "$3" "$4" || local split_result=$?
  if [[ $split_result -ne 0 ]]; then
    echo "error splitting file"
    return 1
  fi
  etag=$(echo "$etag_json" | jq '.CopyPartResult.ETag')
  export etag

  for ((i=0;i<$4;i++)) {
    echo "key: $3"
    log 5 "file info: $(ls -l "$3"-"$i")"
    put_object "s3api" "$3-$i" "$1" "$2-$i" || local copy_result=$?
    if [[ $copy_result -ne 0 ]]; then
      echo "error copying object"
      return 1
    fi
  }

  create_multipart_upload "$1" "$2-copy" || local create_multipart_result=$?
  if [[ $create_multipart_result -ne 0 ]]; then
    echo "error running first multipart upload"
    return 1
  fi

  parts="["
  for ((i = 1; i <= $4; i++)); do
    upload_part_copy_with_range "$1" "$2-copy" "$upload_id" "$2" "$i" "$5" || local upload_part_copy_result=$?
    if [[ $upload_part_copy_result -ne 0 ]]; then
      # shellcheck disable=SC2154
      echo "error uploading part $i: $upload_part_copy_error"
      return 1
    fi
    parts+="{\"ETag\": $etag, \"PartNumber\": $i}"
    if [[ $i -ne $4 ]]; then
      parts+=","
    fi
  done
  parts+="]"

  error=$(aws --no-verify-ssl s3api complete-multipart-upload --bucket "$1" --key "$2-copy" --upload-id "$upload_id" --multipart-upload '{"Parts": '"$parts"'}') || local completed=$?
  if [[ $completed -ne 0 ]]; then
    echo "Error completing upload: $error"
    return 1
  fi
  return 0
}
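# Usage, as exercised by the range tests above: copy a source object into a
# "$key-copy" object in four parts of 5 MiB each.
#   range_max=$((5*1024*1024-1))
#   multipart_upload_from_bucket_range "$BUCKET_ONE_NAME" "$bucket_file" \
#     "$test_file_folder/$bucket_file" 4 "bytes=0-$range_max"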

create_presigned_url() {

@@ -29,6 +29,7 @@ abort_all_multipart_uploads() {

  log 5 "$lines"
  while read -r line; do
    # shellcheck disable=SC2086
    error=$(aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" $line 2>&1) || abort_result=$?
    if [[ $abort_result -ne 0 ]]; then
      echo "error aborting multipart upload: $error"

@@ -157,6 +157,7 @@ create_test_file_count() {
      return 1
    fi
  }
  # shellcheck disable=SC2153
  if [[ $LOG_LEVEL -ge 5 ]]; then
    ls_result=$(ls "$test_file_folder"/file_*)
    log 5 "$ls_result"

@@ -76,9 +76,13 @@ delete_user() {
    echo "delete user command requires user ID"
    return 1
  fi
  error=$($VERSITY_EXE admin --allow-insecure --access $AWS_ACCESS_KEY_ID --secret $AWS_SECRET_ACCESS_KEY --endpoint-url $AWS_ENDPOINT_URL delete-user --access "$1") || local delete_result=$?
  log 5 "$VERSITY_EXE admin --allow-insecure --access $AWS_ACCESS_KEY_ID --secret $AWS_SECRET_ACCESS_KEY --endpoint-url $AWS_ENDPOINT_URL delete-user --access $1"
  error=$($VERSITY_EXE admin --allow-insecure --access "$AWS_ACCESS_KEY_ID" --secret "$AWS_SECRET_ACCESS_KEY" --endpoint-url "$AWS_ENDPOINT_URL" delete-user --access "$1") || local delete_result=$?

  if [[ $delete_result -ne 0 ]]; then
    echo "error deleting user: $error"
    export error
    return 1
  fi
  return 0

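# The quoting added in this hunk matters: an unquoted $AWS_ENDPOINT_URL or key
# containing whitespace would be word-split into separate arguments. The rule
# applied throughout this changeset is to quote every expansion passed on a
# command line.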
209
tests/versity.sh
@@ -1,126 +1,45 @@
#!/bin/bash
#!/usr/bin/env bash

source ./tests/util_file.sh

check_exe_params_versity() {
  if [ -z "$LOCAL_FOLDER" ]; then
    echo "No local storage folder set"
    return 1
  elif [ -z "$VERSITY_EXE" ]; then
    echo "No versity executable location set"
    return 1
  elif [ -z "$BACKEND" ]; then
    echo "No backend parameter set (options: 'posix')"
    return 1
  fi
  if [ "$BACKEND" == 's3' ]; then
    if [ -z "$AWS_ACCESS_KEY_ID_TWO" ]; then
      echo "missing second AWS access key ID for s3 backend"
      return 1
    fi
    if [ -z "$AWS_SECRET_ACCESS_KEY_TWO" ]; then
      echo "missing second AWS secret access key for s3 backend"
      return 1
    fi
  fi
}

check_exe_params() {
  if [ -z "$AWS_ACCESS_KEY_ID" ]; then
    echo "No AWS access key set"
    return 1
  elif [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
    echo "No AWS secret access key set"
    return 1
  elif [ -z "$AWS_PROFILE" ]; then
    echo "No AWS profile set"
    return 1
  elif [ -z "$AWS_ENDPOINT_URL" ]; then
    echo "No AWS endpoint URL set"
    return 1
  elif [ -z "$MC_ALIAS" ]; then
    echo "No mc alias set"
    return 1
  elif [[ $RUN_VERSITYGW != "true" ]] && [[ $RUN_VERSITYGW != "false" ]]; then
    echo "RUN_VERSITYGW must be 'true' or 'false'"
    return 1
  elif [ -z "$USERS_FOLDER" ]; then
    echo "No users folder parameter set"
    return 1
  fi
  if [[ -r $GOCOVERDIR ]]; then
    export GOCOVERDIR=$GOCOVERDIR
  fi
  if [[ $RUN_VERSITYGW == "true" ]]; then
    local check_result
    check_exe_params_versity || check_result=$?
    if [[ $check_result -ne 0 ]]; then
      return 1
    fi
  fi
}
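# A minimal sketch of the environment these checks expect (all values
# illustrative, not prescriptive):
#   export AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... AWS_PROFILE=versity
#   export AWS_ENDPOINT_URL=https://127.0.0.1:7070 MC_ALIAS=versity
#   export RUN_VERSITYGW=true USERS_FOLDER=/tmp/iam LOCAL_FOLDER=/tmp/gw
#   export VERSITY_EXE=./versitygw BACKEND=posix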

start_versity() {
  if [ -z "$VERSITYGW_TEST_ENV" ]; then
    if [ -r tests/.env ]; then
      source tests/.env
    else
      echo "Warning: no .env file found in tests folder"
    fi
  else
    # shellcheck source=./.env.default
    source "$VERSITYGW_TEST_ENV"
  fi
  if [ "$GITHUB_ACTIONS" != "true" ] && [ -r "$SECRETS_FILE" ]; then
    # shellcheck source=/.secrets
    source "$SECRETS_FILE"
  else
    echo "Warning: no secrets file found"
  fi

  check_exe_params || check_result=$?
  if [[ $check_result -ne 0 ]]; then
    echo "error checking for parameters"
    return 1
  fi

  if [ "$RUN_VERSITYGW" == "true" ]; then
    run_versity_app || run_result=$?
    if [[ $run_result -ne 0 ]]; then
      echo "error starting versity apps"
      return 1
    fi
  fi

  export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_REGION AWS_PROFILE AWS_ENDPOINT_URL VERSITY_EXE
}
source ./tests/iam.sh

start_versity_process() {
  if [[ $# -ne 1 ]]; then
    echo "start versity process function requires number"
    log 2 "start versity process function requires number"
    return 1
  fi
  create_test_file_folder || create_result=$?
  if [[ $create_result -ne 0 ]]; then
    echo "error creating test log folder"
  if ! create_test_file_folder; then
    log 2 "error creating test log folder"
    return 1
  fi
  base_command+=(">" "$test_file_folder/versity_log_$1.txt" "2>&1")
  ("${base_command[@]}") &
  IFS=' ' read -r -a full_command <<< "${base_command[@]}"
  log 5 "versity command: ${full_command[*]}"
  if [ -n "$VERSITY_LOG_FILE" ]; then
    "${full_command[@]}" >> "$VERSITY_LOG_FILE" 2>&1 &
  else
    "${full_command[@]}" 2>&1 &
  fi
  # shellcheck disable=SC2181
  if [[ $? -ne 0 ]]; then
    echo "error running versitygw command: $(cat "$test_file_folder/versity_log_$1.txt")"
    sleep 1
    if [ -n "$VERSITY_LOG_FILE" ]; then
      log 2 "error running versitygw command: $(cat "$VERSITY_LOG_FILE")"
    fi
    return 1
  fi
  eval versitygw_pid_"$1"=$!
  process_info="Versity process $1, PID $!"
  echo "$process_info" >> "$VERSITY_LOG_FILE"
  log 4 "$process_info"
  local pid
  eval pid=\$versitygw_pid_"$1"
  sleep 1

  local proc_check
  check_result=$(kill -0 $pid 2>&1) || proc_check=$?
  if [[ $proc_check -ne 0 ]]; then
    echo "versitygw failed to start: $check_result"
    echo "log data: $(cat "$test_file_folder/versity_log_$1.txt")"
  if ! check_result=$(kill -0 "$pid" 2>&1); then
    log 2 "versitygw failed to start: $check_result"
    if [ -n "$VERSITY_LOG_FILE" ]; then
      log 2 "log data: $(cat "$VERSITY_LOG_FILE")"
    fi
    return 1
  fi
  export versitygw_pid_"$1"
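# Caveat on the rebuilt launch path above: "IFS=' ' read -r -a full_command"
# re-splits the already-array base_command on spaces, so any single argument
# that itself contains a space (a path in $LOCAL_FOLDER, say) would be broken
# in two. Running "${base_command[@]}" directly would avoid that; the split is
# presumably kept so the exact command line can be logged first.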
@@ -128,20 +47,26 @@ start_versity_process() {

run_versity_app_posix() {
  if [[ $# -ne 3 ]]; then
    echo "run versity app w/posix command requires access ID, secret key, process number"
    log 2 "run versity app w/posix command requires access ID, secret key, process number"
    return 1
  fi
  base_command=("$VERSITY_EXE" --access="$1" --secret="$2" --region="$AWS_REGION" --iam-dir="$USERS_FOLDER")
  base_command=("$VERSITY_EXE" --access="$1" --secret="$2" --region="$AWS_REGION")
  if [ -n "$RUN_USERS" ]; then
    # shellcheck disable=SC2153
    IFS=' ' read -r -a iam_array <<< "$IAM_PARAMS"
  fi
  base_command+=("${iam_array[@]}")
  if [ -n "$CERT" ] && [ -n "$KEY" ]; then
    base_command+=(--cert "$CERT" --key "$KEY")
  fi
  if [ -n "$PORT" ]; then
    base_command+=(--port ":$PORT")
  fi
  base_command+=(posix "$LOCAL_FOLDER")
  export base_command

  local versity_result
  start_versity_process "$3" || versity_result=$?
  if [[ $versity_result -ne 0 ]]; then
    echo "error starting versity process"
  if ! start_versity_process "$3"; then
    log 2 "error starting versity process"
    return 1
  fi
  return 0
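# IAM_PARAMS is split on spaces into extra gateway arguments. For the
# folder-backed IAM that the removed hard-coded flag provided, the equivalent
# setting would presumably be:
#   export IAM_PARAMS="--iam-dir=$USERS_FOLDER"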
@@ -149,20 +74,23 @@ run_versity_app_posix() {

run_versity_app_s3() {
  if [[ $# -ne 1 ]]; then
    echo "run versity app w/s3 command requires process number"
    log 2 "run versity app w/s3 command requires process number"
    return 1
  fi
  base_command=("$VERSITY_EXE" --port=":7071" --access="$AWS_ACCESS_KEY_ID" --secret="$AWS_SECRET_ACCESS_KEY")
  base_command=("$VERSITY_EXE" --access="$AWS_ACCESS_KEY_ID" --secret="$AWS_SECRET_ACCESS_KEY")
  if [ -n "$CERT" ] && [ -n "$KEY" ]; then
    base_command+=(--cert "$CERT" --key "$KEY")
  fi
  if [ -n "$PORT_TWO" ]; then
    base_command+=(--port ":$PORT_TWO")
  else
    base_command+=(--port ":7071")
  fi
  base_command+=(s3 --access="$AWS_ACCESS_KEY_ID_TWO" --secret="$AWS_SECRET_ACCESS_KEY_TWO" --region="$AWS_REGION" --endpoint=https://s3.amazonaws.com)
  export base_command

  local versity_result
  start_versity_process "$1" || versity_result=$?
  if [[ $versity_result -ne 0 ]]; then
    echo "error starting versity process"
  if ! start_versity_process "$1"; then
    log 2 "error starting versity process"
    return 1
  fi
  return 0
@@ -170,38 +98,45 @@ run_versity_app_s3() {

run_versity_app() {
  if [[ $BACKEND == 'posix' ]]; then
    run_versity_app_posix "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "1" || result_one=$?
    if [[ $result_one -ne 0 ]]; then
      echo "error starting versity app"
    if ! run_versity_app_posix "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "1"; then
      log 2 "error starting versity app"
      return 1
    fi
  elif [[ $BACKEND == 's3' ]]; then
    run_versity_app_posix "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "1" || result_one=$?
    if [[ $result_one -ne 0 ]]; then
      echo "error starting versity app"
    if ! run_versity_app_posix "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "1"; then
      log 2 "error starting versity app"
      return 1
    fi
    run_versity_app_s3 "2" || result_two=$?
    if [[ $result_two -ne 0 ]]; then
      echo "error starting second versity app"
    if ! run_versity_app_s3 "2"; then
      log 2 "error starting second versity app"
      return 1
    fi
  else
    echo "unrecognized backend type $BACKEND"
    log 2 "unrecognized backend type $BACKEND"
    return 1
  fi
  if [[ $IAM_TYPE == "s3" ]]; then
    if ! bucket_exists "s3api" "$USERS_BUCKET"; then
      if ! create_bucket "s3api" "$USERS_BUCKET"; then
        log 2 "error creating IAM bucket"
        return 1
      fi
    fi
  fi
}

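# When IAM_TYPE=s3, user records live in an S3 bucket rather than a local
# folder, so the suite needs a bucket name and creates it on first run, e.g.:
#   export IAM_TYPE=s3 USERS_BUCKET=<iam-bucket-name>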
stop_single_process() {
  if [[ $# -ne 1 ]]; then
    echo "stop single process function requires process ID"
    log 2 "stop single process function requires process ID"
    return 1
  fi
  if ps -p "$1" > /dev/null; then
  log 5 "stop process with ID: $1"
  # shellcheck disable=SC2086
  if ps_result=$(ps -p $1 2>&1) > /dev/null; then
    kill "$1"
    wait "$1" || true
  else
    echo "Process with PID $1 does not exist."
    log 3 "error stopping versity app: $ps_result"
  fi
}

@@ -209,18 +144,14 @@ stop_versity() {
  if [ "$RUN_VERSITYGW" == "false" ]; then
    return
  fi
  local result_one
  local result_two
  # shellcheck disable=SC2154
  stop_single_process "$versitygw_pid_1" || result_one=$?
  if [[ $result_one -ne 0 ]]; then
    echo "error stopping versity process"
  if ! stop_single_process "$versitygw_pid_1"; then
    log 2 "error stopping versity process"
  fi
  if [[ $BACKEND == 's3' ]]; then
    # shellcheck disable=SC2154
    stop_single_process "$versitygw_pid_2" || result_two=$?
    if [[ $result_two -ne 0 ]]; then
      echo "error stopping versity process two"
    if ! stop_single_process "$versitygw_pid_2"; then
      log 2 "error stopping versity process two"
    fi
  fi
}