mirror of
https://github.com/versity/versitygw.git
synced 2026-01-24 12:02:02 +00:00
Compare commits
110 Commits
v0.6
...
proxy-test
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
27a8aa66d9 | ||
|
|
dac3b39f7e | ||
|
|
f2c02c6362 | ||
|
|
911f7a7f0f | ||
|
|
32a5e12876 | ||
|
|
e269473523 | ||
|
|
4beb76faf1 | ||
|
|
3dd28857f3 | ||
|
|
c3a30dbf3b | ||
|
|
316f2dd068 | ||
|
|
4c51a13f55 | ||
|
|
d3f9186dda | ||
|
|
dcb2f6fce7 | ||
|
|
404eb7e630 | ||
|
|
4f8e4714ee | ||
|
|
feceb9784b | ||
|
|
920b4945cd | ||
|
|
1117879031 | ||
|
|
57c4c76142 | ||
|
|
3a60dcd88f | ||
|
|
f58646b58d | ||
|
|
641841f9d5 | ||
|
|
52674ab0c5 | ||
|
|
a3357ac7c6 | ||
|
|
b8140fe3ed | ||
|
|
0701631b03 | ||
|
|
d160243ee1 | ||
|
|
5e4b515906 | ||
|
|
ae0b270c2c | ||
|
|
3a18b4cc22 | ||
|
|
6e73cb8e4a | ||
|
|
23281774aa | ||
|
|
5ca44e7c2f | ||
|
|
1fb085a544 | ||
|
|
9d813def54 | ||
|
|
16a6aebf85 | ||
|
|
856d79d385 | ||
|
|
664e6e7814 | ||
|
|
6f1629b2bd | ||
|
|
39648c19d8 | ||
|
|
8f7a1bfc86 | ||
|
|
3056568742 | ||
|
|
94b207ba1c | ||
|
|
f0a8304a8b | ||
|
|
ae4e382e61 | ||
|
|
4661af11dd | ||
|
|
9cb357ecc5 | ||
|
|
dbcffb4984 | ||
|
|
4ecb9e36a6 | ||
|
|
e5e501b1d6 | ||
|
|
099ac39f22 | ||
|
|
4ba071dd47 | ||
|
|
5c48fcd443 | ||
|
|
311621259a | ||
|
|
a67a2e5c8f | ||
|
|
7eeaee8a54 | ||
|
|
4be5d64c8b | ||
|
|
d60b6a9b85 | ||
|
|
e0c09ad4d9 | ||
|
|
0aa2da7dd5 | ||
|
|
e392ac940a | ||
|
|
6104a750cd | ||
|
|
a77954a307 | ||
|
|
e46e4e941b | ||
|
|
c9653cff71 | ||
|
|
48798c9e39 | ||
|
|
42c4ad3b9e | ||
|
|
1874d3c329 | ||
|
|
9f0c9badba | ||
|
|
cb6b60324c | ||
|
|
c53707d4ae | ||
|
|
ee1ab5bdcc | ||
|
|
7a0c4423e4 | ||
|
|
8382911ab6 | ||
|
|
4e7615b4fd | ||
|
|
8951cce6d0 | ||
|
|
363c82971a | ||
|
|
cf1c44969b | ||
|
|
37b5429468 | ||
|
|
c9475adb04 | ||
|
|
b00819ff31 | ||
|
|
5ab38e3dab | ||
|
|
c04f6d7f00 | ||
|
|
6ac69b3198 | ||
|
|
c72686f7fa | ||
|
|
145c2dd4e3 | ||
|
|
f90562fea2 | ||
|
|
b0b22467cc | ||
|
|
b2247e20ee | ||
|
|
8017b0cff0 | ||
|
|
c1b105d928 | ||
|
|
57c7518864 | ||
|
|
5a94a70212 | ||
|
|
064230108f | ||
|
|
51680d445c | ||
|
|
732e92a72f | ||
|
|
36aea696c6 | ||
|
|
46c0762133 | ||
|
|
8d6b5c387f | ||
|
|
a241e6a7e6 | ||
|
|
c690b01a90 | ||
|
|
0d044c2303 | ||
|
|
42270fbe1c | ||
|
|
35fe6d8dee | ||
|
|
23b5eb30ed | ||
|
|
24309ae25a | ||
|
|
f74179d01c | ||
|
|
1959fac8a0 | ||
|
|
eb05f5a93e | ||
|
|
23c26d802c |
10
.github/dependabot.yml
vendored
Normal file
10
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
groups:
|
||||
dev-dependencies:
|
||||
patterns:
|
||||
- "*"
|
||||
@@ -45,6 +45,7 @@ changelog:
|
||||
exclude:
|
||||
- '^docs:'
|
||||
- '^test:'
|
||||
- '^Merge '
|
||||
|
||||
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
|
||||
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
|
||||
|
||||
37
auth/acl.go
37
auth/acl.go
@@ -17,7 +17,6 @@ package auth
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
@@ -105,7 +104,6 @@ func UpdateACL(input *s3.PutBucketAclInput, acl ACL, iam IAMService) ([]byte, er
|
||||
|
||||
if *input.GrantFullControl != "" {
|
||||
fullControlList = splitUnique(*input.GrantFullControl, ",")
|
||||
fmt.Println(fullControlList)
|
||||
for _, str := range fullControlList {
|
||||
grantees = append(grantees, Grantee{Access: str, Permission: "FULL_CONTROL"})
|
||||
}
|
||||
@@ -173,12 +171,16 @@ func CheckIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
|
||||
|
||||
for _, acc := range accs {
|
||||
_, err := iam.GetUserAccount(acc)
|
||||
if err != nil && err != ErrNoSuchUser {
|
||||
if err != nil {
|
||||
if err == ErrNoSuchUser {
|
||||
result = append(result, acc)
|
||||
continue
|
||||
}
|
||||
if err == ErrNotSupported {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
return nil, fmt.Errorf("check user account: %w", err)
|
||||
}
|
||||
if err == ErrNoSuchUser {
|
||||
result = append(result, acc)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
@@ -198,7 +200,7 @@ func splitUnique(s, divider string) []string {
|
||||
return result
|
||||
}
|
||||
|
||||
func VerifyACL(acl ACL, bucket, access string, permission types.Permission, isRoot bool) error {
|
||||
func VerifyACL(acl ACL, access string, permission types.Permission, isRoot bool) error {
|
||||
if isRoot {
|
||||
return nil
|
||||
}
|
||||
@@ -237,29 +239,14 @@ func VerifyACL(acl ACL, bucket, access string, permission types.Permission, isRo
|
||||
return s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
}
|
||||
|
||||
func IsAdmin(access string, isRoot bool) error {
|
||||
var data IAMConfig
|
||||
|
||||
func IsAdmin(acct Account, isRoot bool) error {
|
||||
if isRoot {
|
||||
return nil
|
||||
}
|
||||
|
||||
file, err := os.ReadFile("users.json")
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to read config file: %w", err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(file, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
acc, ok := data.AccessAccounts[access]
|
||||
if !ok {
|
||||
return fmt.Errorf("user does not exist")
|
||||
}
|
||||
|
||||
if acc.Role == "admin" {
|
||||
if acct.Role == "admin" {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s3err.GetAPIError(s3err.ErrAccessDenied)
|
||||
}
|
||||
|
||||
57
auth/iam.go
57
auth/iam.go
@@ -16,23 +16,72 @@ package auth
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Account is a gateway IAM account
|
||||
type Account struct {
|
||||
Access string `json:"access"`
|
||||
Secret string `json:"secret"`
|
||||
Role string `json:"role"`
|
||||
Access string `json:"access"`
|
||||
Secret string `json:"secret"`
|
||||
Role string `json:"role"`
|
||||
UserID int `json:"userID"`
|
||||
GroupID int `json:"groupID"`
|
||||
ProjectID int `json:"projectID"`
|
||||
}
|
||||
|
||||
// IAMService is the interface for all IAM service implementations
|
||||
//
|
||||
//go:generate moq -out ../s3api/controllers/iam_moq_test.go -pkg controllers . IAMService
|
||||
type IAMService interface {
|
||||
CreateAccount(access string, account Account) error
|
||||
CreateAccount(account Account) error
|
||||
GetUserAccount(access string) (Account, error)
|
||||
DeleteUserAccount(access string) error
|
||||
ListUserAccounts() ([]Account, error)
|
||||
Shutdown() error
|
||||
}
|
||||
|
||||
var ErrNoSuchUser = errors.New("user not found")
|
||||
|
||||
type Opts struct {
|
||||
Dir string
|
||||
LDAPServerURL string
|
||||
LDAPBindDN string
|
||||
LDAPPassword string
|
||||
LDAPQueryBase string
|
||||
LDAPObjClasses string
|
||||
LDAPAccessAtr string
|
||||
LDAPSecretAtr string
|
||||
LDAPRoleAtr string
|
||||
CacheDisable bool
|
||||
CacheTTL int
|
||||
CachePrune int
|
||||
}
|
||||
|
||||
func New(o *Opts) (IAMService, error) {
|
||||
var svc IAMService
|
||||
var err error
|
||||
|
||||
switch {
|
||||
case o.Dir != "":
|
||||
svc, err = NewInternal(o.Dir)
|
||||
case o.LDAPServerURL != "":
|
||||
svc, err = NewLDAPService(o.LDAPServerURL, o.LDAPBindDN, o.LDAPPassword,
|
||||
o.LDAPQueryBase, o.LDAPAccessAtr, o.LDAPSecretAtr, o.LDAPRoleAtr,
|
||||
o.LDAPObjClasses)
|
||||
default:
|
||||
// if no iam options selected, default to the single user mode
|
||||
return IAMServiceSingle{}, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if o.CacheDisable {
|
||||
return svc, nil
|
||||
}
|
||||
|
||||
return NewCache(svc,
|
||||
time.Duration(o.CacheTTL)*time.Second,
|
||||
time.Duration(o.CachePrune)*time.Second), nil
|
||||
}
|
||||
|
||||
179
auth/iam_cache.go
Normal file
179
auth/iam_cache.go
Normal file
@@ -0,0 +1,179 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// IAMCache is an in memory cache of the IAM accounts
|
||||
// with expiration. This helps to alleviate the load on
|
||||
// the real IAM service if the gateway is handling
|
||||
// many requests. This forwards account updates to the
|
||||
// underlying service, and returns cached results while
|
||||
// the in memory account is not expired.
|
||||
type IAMCache struct {
|
||||
service IAMService
|
||||
iamcache *icache
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
var _ IAMService = &IAMCache{}
|
||||
|
||||
type item struct {
|
||||
value Account
|
||||
exp time.Time
|
||||
}
|
||||
|
||||
type icache struct {
|
||||
sync.RWMutex
|
||||
expire time.Duration
|
||||
items map[string]item
|
||||
}
|
||||
|
||||
func (i *icache) set(k string, v Account) {
|
||||
cpy := v
|
||||
i.Lock()
|
||||
i.items[k] = item{
|
||||
exp: time.Now().Add(i.expire),
|
||||
value: cpy,
|
||||
}
|
||||
i.Unlock()
|
||||
}
|
||||
|
||||
func (i *icache) get(k string) (Account, bool) {
|
||||
i.RLock()
|
||||
v, ok := i.items[k]
|
||||
i.RUnlock()
|
||||
if !ok || !v.exp.After(time.Now()) {
|
||||
return Account{}, false
|
||||
}
|
||||
return v.value, true
|
||||
}
|
||||
|
||||
func (i *icache) Delete(k string) {
|
||||
i.Lock()
|
||||
delete(i.items, k)
|
||||
i.Unlock()
|
||||
}
|
||||
|
||||
func (i *icache) gcCache(ctx context.Context, interval time.Duration) {
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
break
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
i.Lock()
|
||||
// prune expired entries
|
||||
for k, v := range i.items {
|
||||
if now.After(v.exp) {
|
||||
delete(i.items, k)
|
||||
}
|
||||
}
|
||||
i.Unlock()
|
||||
|
||||
// sleep for the clean interval or context cancelation,
|
||||
// whichever comes first
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-time.After(interval):
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewCache initializes an IAM cache for the provided service. The expireTime
|
||||
// is the duration a cache entry can be valid, and the cleanupInterval is
|
||||
// how often to scan cache and cleanup expired entries.
|
||||
func NewCache(service IAMService, expireTime, cleanupInterval time.Duration) *IAMCache {
|
||||
i := &IAMCache{
|
||||
service: service,
|
||||
iamcache: &icache{
|
||||
items: make(map[string]item),
|
||||
expire: expireTime,
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go i.iamcache.gcCache(ctx, cleanupInterval)
|
||||
i.cancel = cancel
|
||||
|
||||
return i
|
||||
}
|
||||
|
||||
// CreateAccount send create to IAM service and creates an account cache entry
|
||||
func (c *IAMCache) CreateAccount(account Account) error {
|
||||
err := c.service.CreateAccount(account)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// we need a copy of account to be able to store beyond the
|
||||
// lifetime of the request, otherwise Fiber will reuse and corrupt
|
||||
// these entries
|
||||
acct := Account{
|
||||
Access: strings.Clone(account.Access),
|
||||
Secret: strings.Clone(account.Secret),
|
||||
Role: strings.Clone(account.Role),
|
||||
}
|
||||
|
||||
c.iamcache.set(acct.Access, acct)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetUserAccount retrieves the cache account if it is in the cache and not
|
||||
// expired. Otherwise retrieves from underlying IAM service and caches
|
||||
// result for the expire duration.
|
||||
func (c *IAMCache) GetUserAccount(access string) (Account, error) {
|
||||
acct, found := c.iamcache.get(access)
|
||||
if found {
|
||||
return acct, nil
|
||||
}
|
||||
|
||||
a, err := c.service.GetUserAccount(access)
|
||||
if err != nil {
|
||||
return Account{}, err
|
||||
}
|
||||
|
||||
c.iamcache.set(access, a)
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// DeleteUserAccount deletes account from IAM service and cache
|
||||
func (c *IAMCache) DeleteUserAccount(access string) error {
|
||||
err := c.service.DeleteUserAccount(access)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.iamcache.Delete(access)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListUserAccounts is a passthrough to the underlying service and
|
||||
// does not make use of the cache
|
||||
func (c *IAMCache) ListUserAccounts() ([]Account, error) {
|
||||
return c.service.ListUserAccounts()
|
||||
}
|
||||
|
||||
// Shutdown graceful termination of service
|
||||
func (c *IAMCache) Shutdown() error {
|
||||
c.cancel()
|
||||
return nil
|
||||
}
|
||||
@@ -16,47 +16,44 @@ package auth
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"sync"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
iamFile = "users.json"
|
||||
iamBackupFile = "users.json.backup"
|
||||
)
|
||||
|
||||
// IAMServiceInternal manages the internal IAM service
|
||||
type IAMServiceInternal struct {
|
||||
storer Storer
|
||||
|
||||
mu sync.RWMutex
|
||||
accts IAMConfig
|
||||
serial uint32
|
||||
dir string
|
||||
}
|
||||
|
||||
// UpdateAcctFunc accepts the current data and returns the new data to be stored
|
||||
type UpdateAcctFunc func([]byte) ([]byte, error)
|
||||
|
||||
// Storer is the interface to manage the peristent IAM data for the internal
|
||||
// IAM service
|
||||
type Storer interface {
|
||||
InitIAM() error
|
||||
GetIAM() ([]byte, error)
|
||||
StoreIAM(UpdateAcctFunc) error
|
||||
}
|
||||
|
||||
// IAMConfig stores all internal IAM accounts
|
||||
type IAMConfig struct {
|
||||
// iAMConfig stores all internal IAM accounts
|
||||
type iAMConfig struct {
|
||||
AccessAccounts map[string]Account `json:"accessAccounts"`
|
||||
}
|
||||
|
||||
var _ IAMService = &IAMServiceInternal{}
|
||||
|
||||
// NewInternal creates a new instance for the Internal IAM service
|
||||
func NewInternal(s Storer) (*IAMServiceInternal, error) {
|
||||
func NewInternal(dir string) (*IAMServiceInternal, error) {
|
||||
i := &IAMServiceInternal{
|
||||
storer: s,
|
||||
dir: dir,
|
||||
}
|
||||
|
||||
err := i.updateCache()
|
||||
err := i.initIAM()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("refresh iam cache: %w", err)
|
||||
return nil, fmt.Errorf("init iam: %w", err)
|
||||
}
|
||||
|
||||
return i, nil
|
||||
@@ -64,32 +61,23 @@ func NewInternal(s Storer) (*IAMServiceInternal, error) {
|
||||
|
||||
// CreateAccount creates a new IAM account. Returns an error if the account
|
||||
// already exists.
|
||||
func (s *IAMServiceInternal) CreateAccount(access string, account Account) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return s.storer.StoreIAM(func(data []byte) ([]byte, error) {
|
||||
var conf IAMConfig
|
||||
|
||||
if len(data) > 0 {
|
||||
if err := json.Unmarshal(data, &conf); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse iam: %w", err)
|
||||
}
|
||||
} else {
|
||||
conf = IAMConfig{AccessAccounts: map[string]Account{}}
|
||||
func (s *IAMServiceInternal) CreateAccount(account Account) error {
|
||||
return s.storeIAM(func(data []byte) ([]byte, error) {
|
||||
conf, err := parseIAM(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get iam data: %w", err)
|
||||
}
|
||||
|
||||
_, ok := conf.AccessAccounts[access]
|
||||
_, ok := conf.AccessAccounts[account.Access]
|
||||
if ok {
|
||||
return nil, fmt.Errorf("account already exists")
|
||||
}
|
||||
conf.AccessAccounts[access] = account
|
||||
conf.AccessAccounts[account.Access] = account
|
||||
|
||||
b, err := json.Marshal(conf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to serialize iam: %w", err)
|
||||
}
|
||||
s.accts = conf
|
||||
|
||||
return b, nil
|
||||
})
|
||||
@@ -98,25 +86,12 @@ func (s *IAMServiceInternal) CreateAccount(access string, account Account) error
|
||||
// GetUserAccount retrieves account info for the requested user. Returns
|
||||
// ErrNoSuchUser if the account does not exist.
|
||||
func (s *IAMServiceInternal) GetUserAccount(access string) (Account, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
data, err := s.storer.GetIAM()
|
||||
conf, err := s.getIAM()
|
||||
if err != nil {
|
||||
return Account{}, fmt.Errorf("get iam data: %w", err)
|
||||
}
|
||||
|
||||
serial := crc32.ChecksumIEEE(data)
|
||||
if serial != s.serial {
|
||||
s.mu.RUnlock()
|
||||
err := s.updateCache()
|
||||
s.mu.RLock()
|
||||
if err != nil {
|
||||
return Account{}, fmt.Errorf("refresh iam cache: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
acct, ok := s.accts.AccessAccounts[access]
|
||||
acct, ok := conf.AccessAccounts[access]
|
||||
if !ok {
|
||||
return Account{}, ErrNoSuchUser
|
||||
}
|
||||
@@ -124,47 +99,13 @@ func (s *IAMServiceInternal) GetUserAccount(access string) (Account, error) {
|
||||
return acct, nil
|
||||
}
|
||||
|
||||
// updateCache must be called with no locks held
|
||||
func (s *IAMServiceInternal) updateCache() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
data, err := s.storer.GetIAM()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get iam data: %w", err)
|
||||
}
|
||||
|
||||
serial := crc32.ChecksumIEEE(data)
|
||||
|
||||
if len(data) > 0 {
|
||||
if err := json.Unmarshal(data, &s.accts); err != nil {
|
||||
return fmt.Errorf("failed to parse the config file: %w", err)
|
||||
}
|
||||
} else {
|
||||
s.accts.AccessAccounts = make(map[string]Account)
|
||||
}
|
||||
|
||||
s.serial = serial
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteUserAccount deletes the specified user account. Does not check if
|
||||
// account exists.
|
||||
func (s *IAMServiceInternal) DeleteUserAccount(access string) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return s.storer.StoreIAM(func(data []byte) ([]byte, error) {
|
||||
if len(data) == 0 {
|
||||
// empty config, do nothing
|
||||
return data, nil
|
||||
}
|
||||
|
||||
var conf IAMConfig
|
||||
|
||||
if err := json.Unmarshal(data, &conf); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse iam: %w", err)
|
||||
return s.storeIAM(func(data []byte) ([]byte, error) {
|
||||
conf, err := parseIAM(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get iam data: %w", err)
|
||||
}
|
||||
|
||||
delete(conf.AccessAccounts, access)
|
||||
@@ -174,39 +115,221 @@ func (s *IAMServiceInternal) DeleteUserAccount(access string) error {
|
||||
return nil, fmt.Errorf("failed to serialize iam: %w", err)
|
||||
}
|
||||
|
||||
s.accts = conf
|
||||
|
||||
return b, nil
|
||||
})
|
||||
}
|
||||
|
||||
// ListUserAccounts lists all the user accounts stored.
|
||||
func (s *IAMServiceInternal) ListUserAccounts() (accs []Account, err error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
data, err := s.storer.GetIAM()
|
||||
func (s *IAMServiceInternal) ListUserAccounts() ([]Account, error) {
|
||||
conf, err := s.getIAM()
|
||||
if err != nil {
|
||||
return []Account{}, fmt.Errorf("get iam data: %w", err)
|
||||
}
|
||||
|
||||
serial := crc32.ChecksumIEEE(data)
|
||||
if serial != s.serial {
|
||||
s.mu.RUnlock()
|
||||
err := s.updateCache()
|
||||
s.mu.RLock()
|
||||
if err != nil {
|
||||
return []Account{}, fmt.Errorf("refresh iam cache: %w", err)
|
||||
}
|
||||
keys := make([]string, 0, len(conf.AccessAccounts))
|
||||
for k := range conf.AccessAccounts {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for access, usr := range s.accts.AccessAccounts {
|
||||
var accs []Account
|
||||
for _, k := range keys {
|
||||
accs = append(accs, Account{
|
||||
Access: access,
|
||||
Secret: usr.Secret,
|
||||
Role: usr.Role,
|
||||
Access: k,
|
||||
Secret: conf.AccessAccounts[k].Secret,
|
||||
Role: conf.AccessAccounts[k].Role,
|
||||
UserID: conf.AccessAccounts[k].UserID,
|
||||
GroupID: conf.AccessAccounts[k].GroupID,
|
||||
ProjectID: conf.AccessAccounts[k].ProjectID,
|
||||
})
|
||||
}
|
||||
|
||||
return accs, nil
|
||||
}
|
||||
|
||||
// Shutdown graceful termination of service
|
||||
func (s *IAMServiceInternal) Shutdown() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
iamMode = 0600
|
||||
)
|
||||
|
||||
func (s *IAMServiceInternal) initIAM() error {
|
||||
fname := filepath.Join(s.dir, iamFile)
|
||||
|
||||
_, err := os.ReadFile(fname)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
b, err := json.Marshal(iAMConfig{AccessAccounts: map[string]Account{}})
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal default iam: %w", err)
|
||||
}
|
||||
err = os.WriteFile(fname, b, iamMode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("write default iam: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *IAMServiceInternal) getIAM() (iAMConfig, error) {
|
||||
b, err := s.readIAMData()
|
||||
if err != nil {
|
||||
return iAMConfig{}, err
|
||||
}
|
||||
|
||||
return parseIAM(b)
|
||||
}
|
||||
|
||||
func parseIAM(b []byte) (iAMConfig, error) {
|
||||
var conf iAMConfig
|
||||
if err := json.Unmarshal(b, &conf); err != nil {
|
||||
return iAMConfig{}, fmt.Errorf("failed to parse the config file: %w", err)
|
||||
}
|
||||
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
const (
|
||||
backoff = 100 * time.Millisecond
|
||||
maxretry = 300
|
||||
)
|
||||
|
||||
func (s *IAMServiceInternal) readIAMData() ([]byte, error) {
|
||||
// We are going to be racing with other running gateways without any
|
||||
// coordination. So we might find the file does not exist at times.
|
||||
// For this case we need to retry for a while assuming the other gateway
|
||||
// will eventually write the file. If it doesn't after the max retries,
|
||||
// then we will return the error.
|
||||
|
||||
retries := 0
|
||||
|
||||
for {
|
||||
b, err := os.ReadFile(filepath.Join(s.dir, iamFile))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// racing with someone else updating
|
||||
// keep retrying after backoff
|
||||
retries++
|
||||
if retries < maxretry {
|
||||
time.Sleep(backoff)
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("read iam file: %w", err)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IAMServiceInternal) storeIAM(update UpdateAcctFunc) error {
|
||||
// We are going to be racing with other running gateways without any
|
||||
// coordination. So the strategy here is to read the current file data.
|
||||
// If the file doesn't exist, then we assume someone else is currently
|
||||
// updating the file. So we just need to keep retrying. We also need
|
||||
// to make sure the data is consistent within a single update. So racing
|
||||
// writes to a file would possibly leave this in some invalid state.
|
||||
// We can get atomic updates with rename. If we read the data, update
|
||||
// the data, write to a temp file, then rename the tempfile back to the
|
||||
// data file. This should always result in a complete data image.
|
||||
|
||||
// There is at least one unsolved failure mode here.
|
||||
// If a gateway removes the data file and then crashes, all other
|
||||
// gateways will retry forever thinking that the original will eventually
|
||||
// write the file.
|
||||
|
||||
retries := 0
|
||||
fname := filepath.Join(s.dir, iamFile)
|
||||
|
||||
for {
|
||||
b, err := os.ReadFile(fname)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// racing with someone else updating
|
||||
// keep retrying after backoff
|
||||
retries++
|
||||
if retries < maxretry {
|
||||
time.Sleep(backoff)
|
||||
continue
|
||||
}
|
||||
|
||||
// we have been unsuccessful trying to read the iam file
|
||||
// so this must be the case where something happened and
|
||||
// the file did not get updated successfully, and probably
|
||||
// isn't going to be. The recovery procedure would be to
|
||||
// copy the backup file into place of the original.
|
||||
return fmt.Errorf("no iam file, needs backup recovery")
|
||||
}
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("read iam file: %w", err)
|
||||
}
|
||||
|
||||
// reset retries on successful read
|
||||
retries = 0
|
||||
|
||||
err = os.Remove(iamFile)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// racing with someone else updating
|
||||
// keep retrying after backoff
|
||||
time.Sleep(backoff)
|
||||
continue
|
||||
}
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("remove old iam file: %w", err)
|
||||
}
|
||||
|
||||
// save copy of data
|
||||
datacopy := make([]byte, len(b))
|
||||
copy(datacopy, b)
|
||||
|
||||
// make a backup copy in case we crash before update
|
||||
// this is after remove, so there is a small window something
|
||||
// can go wrong, but the remove should barrier other gateways
|
||||
// from trying to write backup at the same time. Only one
|
||||
// gateway will successfully remove the file.
|
||||
os.WriteFile(filepath.Join(s.dir, iamBackupFile), b, iamMode)
|
||||
|
||||
b, err = update(b)
|
||||
if err != nil {
|
||||
// update failed, try to write old data back out
|
||||
os.WriteFile(fname, datacopy, iamMode)
|
||||
return fmt.Errorf("update iam data: %w", err)
|
||||
}
|
||||
|
||||
err = s.writeTempFile(b)
|
||||
if err != nil {
|
||||
// update failed, try to write old data back out
|
||||
os.WriteFile(fname, datacopy, iamMode)
|
||||
return err
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *IAMServiceInternal) writeTempFile(b []byte) error {
|
||||
fname := filepath.Join(s.dir, iamFile)
|
||||
|
||||
f, err := os.CreateTemp(s.dir, iamFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create temp file: %w", err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
_, err = f.Write(b)
|
||||
if err != nil {
|
||||
return fmt.Errorf("write temp file: %w", err)
|
||||
}
|
||||
|
||||
err = os.Rename(f.Name(), fname)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rename temp file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
133
auth/iam_ldap.go
Normal file
133
auth/iam_ldap.go
Normal file
@@ -0,0 +1,133 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/go-ldap/ldap/v3"
|
||||
)
|
||||
|
||||
type LdapIAMService struct {
|
||||
conn *ldap.Conn
|
||||
queryBase string
|
||||
objClasses []string
|
||||
accessAtr string
|
||||
secretAtr string
|
||||
roleAtr string
|
||||
}
|
||||
|
||||
var _ IAMService = &LdapIAMService{}
|
||||
|
||||
func NewLDAPService(url, bindDN, pass, queryBase, accAtr, secAtr, roleAtr, objClasses string) (IAMService, error) {
|
||||
if url == "" || bindDN == "" || pass == "" || queryBase == "" || accAtr == "" || secAtr == "" || roleAtr == "" || objClasses == "" {
|
||||
return nil, fmt.Errorf("required parameters list not fully provided")
|
||||
}
|
||||
conn, err := ldap.Dial("tcp", url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to LDAP server: %w", err)
|
||||
}
|
||||
|
||||
err = conn.Bind(bindDN, pass)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to bind to LDAP server %w", err)
|
||||
}
|
||||
return &LdapIAMService{
|
||||
conn: conn,
|
||||
queryBase: queryBase,
|
||||
objClasses: strings.Split(objClasses, ","),
|
||||
accessAtr: accAtr,
|
||||
secretAtr: secAtr,
|
||||
roleAtr: roleAtr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ld *LdapIAMService) CreateAccount(account Account) error {
|
||||
userEntry := ldap.NewAddRequest(fmt.Sprintf("%v=%v, %v", ld.accessAtr, account.Access, ld.queryBase), nil)
|
||||
userEntry.Attribute("objectClass", ld.objClasses)
|
||||
userEntry.Attribute(ld.accessAtr, []string{account.Access})
|
||||
userEntry.Attribute(ld.secretAtr, []string{account.Secret})
|
||||
userEntry.Attribute(ld.roleAtr, []string{account.Role})
|
||||
|
||||
err := ld.conn.Add(userEntry)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error adding an entry: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
|
||||
searchRequest := ldap.NewSearchRequest(
|
||||
ld.queryBase,
|
||||
ldap.ScopeWholeSubtree,
|
||||
ldap.NeverDerefAliases,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
fmt.Sprintf("(%v=%v)", ld.accessAtr, access),
|
||||
[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr},
|
||||
nil,
|
||||
)
|
||||
|
||||
result, err := ld.conn.Search(searchRequest)
|
||||
if err != nil {
|
||||
return Account{}, err
|
||||
}
|
||||
|
||||
entry := result.Entries[0]
|
||||
return Account{
|
||||
Access: entry.GetAttributeValue(ld.accessAtr),
|
||||
Secret: entry.GetAttributeValue(ld.secretAtr),
|
||||
Role: entry.GetAttributeValue(ld.roleAtr),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ld *LdapIAMService) DeleteUserAccount(access string) error {
|
||||
delReq := ldap.NewDelRequest(fmt.Sprintf("%v=%v, %v", ld.accessAtr, access, ld.queryBase), nil)
|
||||
|
||||
err := ld.conn.Del(delReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
|
||||
searchFilter := ""
|
||||
for _, el := range ld.objClasses {
|
||||
searchFilter += fmt.Sprintf("(objectClass=%v)", el)
|
||||
}
|
||||
searchRequest := ldap.NewSearchRequest(
|
||||
ld.queryBase,
|
||||
ldap.ScopeWholeSubtree,
|
||||
ldap.NeverDerefAliases,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
fmt.Sprintf("(&%v)", searchFilter),
|
||||
[]string{ld.accessAtr, ld.secretAtr, ld.roleAtr},
|
||||
nil,
|
||||
)
|
||||
|
||||
resp, err := ld.conn.Search(searchRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := []Account{}
|
||||
for _, el := range resp.Entries {
|
||||
result = append(result, Account{
|
||||
Access: el.GetAttributeValue(ld.accessAtr),
|
||||
Secret: el.GetAttributeValue(ld.secretAtr),
|
||||
Role: el.GetAttributeValue(ld.roleAtr),
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Shutdown graceful termination of service
|
||||
func (ld *LdapIAMService) Shutdown() error {
|
||||
return ld.conn.Close()
|
||||
}
|
||||
51
auth/iam_single.go
Normal file
51
auth/iam_single.go
Normal file
@@ -0,0 +1,51 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// IAMServiceSingle manages the single tenant (root-only) IAM service
|
||||
type IAMServiceSingle struct{}
|
||||
|
||||
var _ IAMService = &IAMServiceSingle{}
|
||||
|
||||
var ErrNotSupported = errors.New("method is not supported")
|
||||
|
||||
// CreateAccount not valid in single tenant mode
|
||||
func (IAMServiceSingle) CreateAccount(account Account) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
// GetUserAccount no accounts in single tenant mode
|
||||
func (IAMServiceSingle) GetUserAccount(access string) (Account, error) {
|
||||
return Account{}, ErrNotSupported
|
||||
}
|
||||
|
||||
// DeleteUserAccount no accounts in single tenant mode
|
||||
func (IAMServiceSingle) DeleteUserAccount(access string) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
// ListUserAccounts no accounts in single tenant mode
|
||||
func (IAMServiceSingle) ListUserAccounts() ([]Account, error) {
|
||||
return []Account{}, nil
|
||||
}
|
||||
|
||||
// Shutdown graceful termination of service
|
||||
func (IAMServiceSingle) Shutdown() error {
|
||||
return nil
|
||||
}
|
||||
@@ -64,12 +64,13 @@ type Backend interface {
|
||||
SelectObjectContent(context.Context, *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error)
|
||||
|
||||
// object tags operations
|
||||
GetTags(_ context.Context, bucket, object string) (map[string]string, error)
|
||||
SetTags(_ context.Context, bucket, object string, tags map[string]string) error
|
||||
RemoveTags(_ context.Context, bucket, object string) error
|
||||
GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error)
|
||||
PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error
|
||||
DeleteObjectTagging(_ context.Context, bucket, object string) error
|
||||
|
||||
// non AWS actions
|
||||
ChangeBucketOwner(_ context.Context, bucket, newOwner string) error
|
||||
ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error)
|
||||
}
|
||||
|
||||
type BackendUnsupported struct{}
|
||||
@@ -165,16 +166,19 @@ func (BackendUnsupported) SelectObjectContent(context.Context, *s3.SelectObjectC
|
||||
return s3response.SelectObjectContentResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
|
||||
func (BackendUnsupported) GetTags(_ context.Context, bucket, object string) (map[string]string, error) {
|
||||
func (BackendUnsupported) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
func (BackendUnsupported) SetTags(_ context.Context, bucket, object string, tags map[string]string) error {
|
||||
func (BackendUnsupported) PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
func (BackendUnsupported) RemoveTags(_ context.Context, bucket, object string) error {
|
||||
func (BackendUnsupported) DeleteObjectTagging(_ context.Context, bucket, object string) error {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
|
||||
func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket, newOwner string) error {
|
||||
return s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
func (BackendUnsupported) ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error) {
|
||||
return []s3response.Bucket{}, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
}
|
||||
|
||||
@@ -61,9 +61,9 @@ var (
|
||||
|
||||
// ParseRange parses input range header and returns startoffset, length, and
|
||||
// error. If no endoffset specified, then length is set to -1.
|
||||
func ParseRange(file fs.FileInfo, acceptRange string) (int64, int64, error) {
|
||||
func ParseRange(fi fs.FileInfo, acceptRange string) (int64, int64, error) {
|
||||
if acceptRange == "" {
|
||||
return 0, file.Size(), nil
|
||||
return 0, fi.Size(), nil
|
||||
}
|
||||
|
||||
rangeKv := strings.Split(acceptRange, "=")
|
||||
|
||||
@@ -29,9 +29,7 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
@@ -48,19 +46,10 @@ type Posix struct {
|
||||
|
||||
rootfd *os.File
|
||||
rootdir string
|
||||
|
||||
mu sync.RWMutex
|
||||
iamcache []byte
|
||||
iamvalid bool
|
||||
iamexpire time.Time
|
||||
}
|
||||
|
||||
var _ backend.Backend = &Posix{}
|
||||
|
||||
var (
|
||||
cacheDuration = 5 * time.Minute
|
||||
)
|
||||
|
||||
const (
|
||||
metaTmpDir = ".sgwtmp"
|
||||
metaTmpMultipartDir = metaTmpDir + "/multipart"
|
||||
@@ -70,8 +59,6 @@ const (
|
||||
contentTypeHdr = "content-type"
|
||||
contentEncHdr = "content-encoding"
|
||||
emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e"
|
||||
iamFile = "users.json"
|
||||
iamBackupFile = "users.json.backup"
|
||||
aclkey = "user.acl"
|
||||
etagkey = "user.etag"
|
||||
)
|
||||
@@ -236,6 +223,12 @@ func (p *Posix) CreateMultipartUpload(_ context.Context, mpu *s3.CreateMultipart
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
if strings.HasSuffix(*mpu.Key, "/") {
|
||||
// directory objects can't be uploaded with mutlipart uploads
|
||||
// because posix directories can't contain data
|
||||
return nil, s3err.GetAPIError(s3err.ErrDirectoryObjectContainsData)
|
||||
}
|
||||
|
||||
// generate random uuid for upload id
|
||||
uploadID := uuid.New().String()
|
||||
// hash object name for multipart container
|
||||
@@ -413,7 +406,6 @@ func loadUserMetaData(path string, m map[string]string) (contentType, contentEnc
|
||||
if err != nil || len(ents) == 0 {
|
||||
return
|
||||
}
|
||||
fmt.Println(ents)
|
||||
for _, e := range ents {
|
||||
if !isValidMeta(e) {
|
||||
continue
|
||||
@@ -426,7 +418,6 @@ func loadUserMetaData(path string, m map[string]string) (contentType, contentEnc
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
fmt.Println(b)
|
||||
m[strings.TrimPrefix(e, fmt.Sprintf("user.%v.", metaHdr))] = string(b)
|
||||
}
|
||||
|
||||
@@ -451,6 +442,20 @@ func loadUserMetaData(path string, m map[string]string) (contentType, contentEnc
|
||||
return
|
||||
}
|
||||
|
||||
func compareUserMetadata(meta1, meta2 map[string]string) bool {
|
||||
if len(meta1) != len(meta2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for key, val := range meta1 {
|
||||
if meta2[key] != val {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func isValidMeta(val string) bool {
|
||||
if strings.HasPrefix(val, "user."+metaHdr) {
|
||||
return true
|
||||
@@ -884,7 +889,7 @@ func (p *Posix) UploadPartCopy(_ context.Context, upi *s3.UploadPartCopyInput) (
|
||||
}
|
||||
|
||||
if startOffset+length > fi.Size()+1 {
|
||||
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrInvalidRange)
|
||||
}
|
||||
|
||||
f, err := openTmpFile(filepath.Join(*upi.Bucket, objdir),
|
||||
@@ -950,6 +955,9 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e
|
||||
if len(p) != 2 {
|
||||
return "", s3err.GetAPIError(s3err.ErrInvalidTag)
|
||||
}
|
||||
if len(p[0]) > 128 || len(p[1]) > 256 {
|
||||
return "", s3err.GetAPIError(s3err.ErrInvalidTag)
|
||||
}
|
||||
tags[p[0]] = p[1]
|
||||
}
|
||||
}
|
||||
@@ -958,6 +966,13 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e
|
||||
|
||||
if strings.HasSuffix(*po.Key, "/") {
|
||||
// object is directory
|
||||
if po.ContentLength != 0 {
|
||||
// posix directories can't contain data, send error
|
||||
// if reuests has a data payload associated with a
|
||||
// directory object
|
||||
return "", s3err.GetAPIError(s3err.ErrDirectoryObjectContainsData)
|
||||
}
|
||||
|
||||
err = mkdirAll(name, os.FileMode(0755), *po.Bucket, *po.Key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -974,6 +989,11 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e
|
||||
}
|
||||
|
||||
// object is file
|
||||
d, err := os.Stat(name)
|
||||
if err == nil && d.IsDir() {
|
||||
return "", s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)
|
||||
}
|
||||
|
||||
f, err := openTmpFile(filepath.Join(*po.Bucket, metaTmpDir),
|
||||
*po.Bucket, *po.Key, po.ContentLength)
|
||||
if err != nil {
|
||||
@@ -1005,7 +1025,7 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e
|
||||
}
|
||||
|
||||
if tagsStr != "" {
|
||||
err := p.SetTags(ctx, *po.Bucket, *po.Key, tags)
|
||||
err := p.PutObjectTagging(ctx, *po.Bucket, *po.Key, tags)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -1112,10 +1132,6 @@ func (p *Posix) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput)
|
||||
|
||||
func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
acceptRange := *input.Range
|
||||
var contentRange string
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -1124,6 +1140,7 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
object := *input.Key
|
||||
objPath := filepath.Join(bucket, object)
|
||||
fi, err := os.Stat(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
@@ -1133,21 +1150,59 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
|
||||
return nil, fmt.Errorf("stat object: %w", err)
|
||||
}
|
||||
|
||||
acceptRange := *input.Range
|
||||
startOffset, length, err := backend.ParseRange(fi, acceptRange)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if length == -1 {
|
||||
length = fi.Size() - startOffset + 1
|
||||
objSize := fi.Size()
|
||||
if fi.IsDir() {
|
||||
// directory objects are always 0 len
|
||||
objSize = 0
|
||||
length = 0
|
||||
}
|
||||
|
||||
if startOffset+length > fi.Size()+1 {
|
||||
if length == -1 {
|
||||
length = objSize - startOffset + 1
|
||||
}
|
||||
|
||||
if startOffset+length > objSize+1 {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidRange)
|
||||
}
|
||||
|
||||
var contentRange string
|
||||
if acceptRange != "" {
|
||||
contentRange = fmt.Sprintf("bytes %v-%v/%v", startOffset, startOffset+length-1, fi.Size())
|
||||
contentRange = fmt.Sprintf("bytes %v-%v/%v", startOffset, startOffset+length-1, objSize)
|
||||
}
|
||||
|
||||
if fi.IsDir() {
|
||||
userMetaData := make(map[string]string)
|
||||
|
||||
contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
|
||||
|
||||
b, err := xattr.Get(objPath, etagkey)
|
||||
etag := string(b)
|
||||
if err != nil {
|
||||
etag = ""
|
||||
}
|
||||
|
||||
tags, err := p.getXattrTags(bucket, object)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get object tags: %w", err)
|
||||
}
|
||||
|
||||
return &s3.GetObjectOutput{
|
||||
AcceptRanges: &acceptRange,
|
||||
ContentLength: length,
|
||||
ContentEncoding: &contentEncoding,
|
||||
ContentType: &contentType,
|
||||
ETag: &etag,
|
||||
LastModified: backend.GetTimePtr(fi.ModTime()),
|
||||
Metadata: userMetaData,
|
||||
TagCount: int32(len(tags)),
|
||||
ContentRange: &contentRange,
|
||||
}, nil
|
||||
}
|
||||
|
||||
f, err := os.Open(objPath)
|
||||
@@ -1168,7 +1223,6 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io
|
||||
userMetaData := make(map[string]string)
|
||||
|
||||
contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
|
||||
fmt.Println(userMetaData)
|
||||
|
||||
b, err := xattr.Get(objPath, etagkey)
|
||||
etag := string(b)
|
||||
@@ -1241,6 +1295,7 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
|
||||
}
|
||||
dstBucket := *input.Bucket
|
||||
dstObject := *input.Key
|
||||
owner := *input.ExpectedBucketOwner
|
||||
|
||||
_, err := os.Stat(srcBucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
@@ -1258,6 +1313,22 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
dstBucketACLBytes, err := xattr.Get(dstBucket, aclkey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get dst bucket acl tag: %w", err)
|
||||
}
|
||||
|
||||
var dstBucketACL auth.ACL
|
||||
err = json.Unmarshal(dstBucketACLBytes, &dstBucketACL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse dst bucket acl: %w", err)
|
||||
}
|
||||
|
||||
err = auth.VerifyACL(dstBucketACL, owner, types.PermissionWrite, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
objPath := filepath.Join(srcBucket, srcObject)
|
||||
f, err := os.Open(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
@@ -1273,12 +1344,29 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
|
||||
return nil, fmt.Errorf("stat object: %w", err)
|
||||
}
|
||||
|
||||
etag, err := p.PutObject(ctx, &s3.PutObjectInput{Bucket: &dstBucket, Key: &dstObject, Body: f, ContentLength: fInfo.Size()})
|
||||
meta := make(map[string]string)
|
||||
loadUserMetaData(objPath, meta)
|
||||
|
||||
dstObjdPath := filepath.Join(dstBucket, dstObject)
|
||||
if dstObjdPath == objPath {
|
||||
if compareUserMetadata(meta, input.Metadata) {
|
||||
return &s3.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
|
||||
} else {
|
||||
for key := range meta {
|
||||
xattr.Remove(dstObjdPath, key)
|
||||
}
|
||||
for k, v := range input.Metadata {
|
||||
xattr.Set(dstObjdPath, fmt.Sprintf("user.%v.%v", metaHdr, k), []byte(v))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
etag, err := p.PutObject(ctx, &s3.PutObjectInput{Bucket: &dstBucket, Key: &dstObject, Body: f, ContentLength: fInfo.Size(), Metadata: meta})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fi, err := os.Stat(filepath.Join(dstBucket, dstObject))
|
||||
fi, err := os.Stat(dstObjdPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat dst object: %w", err)
|
||||
}
|
||||
@@ -1458,7 +1546,7 @@ func (p *Posix) GetBucketAcl(_ context.Context, input *s3.GetBucketAclInput) ([]
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (p *Posix) GetTags(_ context.Context, bucket, object string) (map[string]string, error) {
|
||||
func (p *Posix) GetObjectTagging(_ context.Context, bucket, object string) (map[string]string, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -1491,7 +1579,7 @@ func (p *Posix) getXattrTags(bucket, object string) (map[string]string, error) {
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
func (p *Posix) SetTags(_ context.Context, bucket, object string, tags map[string]string) error {
|
||||
func (p *Posix) PutObjectTagging(_ context.Context, bucket, object string, tags map[string]string) error {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -1527,8 +1615,8 @@ func (p *Posix) SetTags(_ context.Context, bucket, object string, tags map[strin
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) RemoveTags(ctx context.Context, bucket, object string) error {
|
||||
return p.SetTags(ctx, bucket, object, nil)
|
||||
func (p *Posix) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
|
||||
return p.PutObjectTagging(ctx, bucket, object, nil)
|
||||
}
|
||||
|
||||
func (p *Posix) ChangeBucketOwner(ctx context.Context, bucket, newOwner string) error {
|
||||
@@ -1566,196 +1654,44 @@ func (p *Posix) ChangeBucketOwner(ctx context.Context, bucket, newOwner string)
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
iamMode = 0600
|
||||
)
|
||||
|
||||
func (p *Posix) InitIAM() error {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
|
||||
_, err := os.ReadFile(iamFile)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
b, err := json.Marshal(auth.IAMConfig{AccessAccounts: map[string]auth.Account{}})
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal default iam: %w", err)
|
||||
}
|
||||
err = os.WriteFile(iamFile, b, iamMode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("write default iam: %w", err)
|
||||
}
|
||||
func (p *Posix) ListBucketsAndOwners(ctx context.Context) (buckets []s3response.Bucket, err error) {
|
||||
entries, err := os.ReadDir(".")
|
||||
if err != nil {
|
||||
return buckets, fmt.Errorf("readdir buckets: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) GetIAM() ([]byte, error) {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
|
||||
if !p.iamvalid || !p.iamexpire.After(time.Now()) {
|
||||
p.mu.RUnlock()
|
||||
err := p.refreshIAM()
|
||||
p.mu.RLock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return p.iamcache, nil
|
||||
}
|
||||
|
||||
const (
|
||||
backoff = 100 * time.Millisecond
|
||||
maxretry = 300
|
||||
)
|
||||
|
||||
func (p *Posix) refreshIAM() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
// We are going to be racing with other running gateways without any
|
||||
// coordination. So we might find the file does not exist at times.
|
||||
// For this case we need to retry for a while assuming the other gateway
|
||||
// will eventually write the file. If it doesn't after the max retries,
|
||||
// then we will return the error.
|
||||
|
||||
retries := 0
|
||||
|
||||
for {
|
||||
b, err := os.ReadFile(iamFile)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// racing with someone else updating
|
||||
// keep retrying after backoff
|
||||
retries++
|
||||
if retries < maxretry {
|
||||
time.Sleep(backoff)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("read iam file: %w", err)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.iamcache = b
|
||||
p.iamvalid = true
|
||||
p.iamexpire = time.Now().Add(cacheDuration)
|
||||
break
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) StoreIAM(update auth.UpdateAcctFunc) error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
// We are going to be racing with other running gateways without any
|
||||
// coordination. So the strategy here is to read the current file data.
|
||||
// If the file doesn't exist, then we assume someone else is currently
|
||||
// updating the file. So we just need to keep retrying. We also need
|
||||
// to make sure the data is consistent within a single update. So racing
|
||||
// writes to a file would possibly leave this in some invalid state.
|
||||
// We can get atomic updates with rename. If we read the data, update
|
||||
// the data, write to a temp file, then rename the tempfile back to the
|
||||
// data file. This should always result in a complete data image.
|
||||
|
||||
// There is at least one unsolved failure mode here.
|
||||
// If a gateway removes the data file and then crashes, all other
|
||||
// gateways will retry forever thinking that the original will eventually
|
||||
// write the file.
|
||||
|
||||
retries := 0
|
||||
|
||||
for {
|
||||
b, err := os.ReadFile(iamFile)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// racing with someone else updating
|
||||
// keep retrying after backoff
|
||||
retries++
|
||||
if retries < maxretry {
|
||||
time.Sleep(backoff)
|
||||
continue
|
||||
}
|
||||
|
||||
// we have been unsuccessful trying to read the iam file
|
||||
// so this must be the case where something happened and
|
||||
// the file did not get updated successfully, and probably
|
||||
// isn't going to be. The recovery procedure would be to
|
||||
// copy the backup file into place of the original.
|
||||
return fmt.Errorf("no iam file, needs backup recovery")
|
||||
}
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("read iam file: %w", err)
|
||||
}
|
||||
|
||||
// reset retries on successful read
|
||||
retries = 0
|
||||
|
||||
err = os.Remove(iamFile)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// racing with someone else updating
|
||||
// keep retrying after backoff
|
||||
time.Sleep(backoff)
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("remove old iam file: %w", err)
|
||||
}
|
||||
|
||||
// save copy of data
|
||||
datacopy := make([]byte, len(b))
|
||||
copy(datacopy, b)
|
||||
|
||||
// make a backup copy in case we crash before update
|
||||
// this is after remove, so there is a small window something
|
||||
// can go wrong, but the remove should barrier other gateways
|
||||
// from trying to write backup at the same time. Only one
|
||||
// gateway will successfully remove the file.
|
||||
os.WriteFile(iamBackupFile, b, iamMode)
|
||||
|
||||
b, err = update(b)
|
||||
fi, err := entry.Info()
|
||||
if err != nil {
|
||||
// update failed, try to write old data back out
|
||||
os.WriteFile(iamFile, datacopy, iamMode)
|
||||
return fmt.Errorf("update iam data: %w", err)
|
||||
continue
|
||||
}
|
||||
|
||||
err = writeTempFile(b)
|
||||
aclTag, err := xattr.Get(entry.Name(), aclkey)
|
||||
if err != nil {
|
||||
// update failed, try to write old data back out
|
||||
os.WriteFile(iamFile, datacopy, iamMode)
|
||||
return err
|
||||
return buckets, fmt.Errorf("get acl tag: %w", err)
|
||||
}
|
||||
|
||||
p.iamcache = b
|
||||
p.iamvalid = true
|
||||
p.iamexpire = time.Now().Add(cacheDuration)
|
||||
break
|
||||
var acl auth.ACL
|
||||
err = json.Unmarshal(aclTag, &acl)
|
||||
if err != nil {
|
||||
return buckets, fmt.Errorf("parse acl tag: %w", err)
|
||||
}
|
||||
|
||||
buckets = append(buckets, s3response.Bucket{
|
||||
Name: fi.Name(),
|
||||
Owner: acl.Owner,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
sort.SliceStable(buckets, func(i, j int) bool {
|
||||
return buckets[i].Name < buckets[j].Name
|
||||
})
|
||||
|
||||
func writeTempFile(b []byte) error {
|
||||
f, err := os.CreateTemp(".", iamFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create temp file: %w", err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
_, err = f.Write(b)
|
||||
if err != nil {
|
||||
return fmt.Errorf("write temp file: %w", err)
|
||||
}
|
||||
|
||||
err = os.Rename(f.Name(), iamFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rename temp file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
return buckets, nil
|
||||
}
|
||||
|
||||
func isNoAttr(err error) bool {
|
||||
|
||||
86
backend/s3proxy/client.go
Normal file
86
backend/s3proxy/client.go
Normal file
@@ -0,0 +1,86 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package s3proxy

import (
	"context"
	"crypto/tls"
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/middleware"
	"github.com/versity/versitygw/auth"
)

func (s *S3be) getClientFromCtx(ctx context.Context) (*s3.Client, error) {
	acct, ok := ctx.Value("account").(auth.Account)
	if !ok {
		return nil, fmt.Errorf("invalid account in context")
	}

	cfg, err := s.getConfig(ctx, acct.Access, acct.Secret)
	if err != nil {
		return nil, err
	}

	return s3.NewFromConfig(cfg), nil
}

func (s *S3be) getConfig(ctx context.Context, access, secret string) (aws.Config, error) {
	creds := credentials.NewStaticCredentialsProvider(access, secret, "")

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: s.sslSkipVerify},
	}
	client := &http.Client{Transport: tr}

	opts := []func(*config.LoadOptions) error{
		config.WithRegion(s.awsRegion),
		config.WithCredentialsProvider(creds),
		config.WithHTTPClient(client),
	}

	if s.endpoint != "" {
		opts = append(opts,
			config.WithEndpointResolverWithOptions(s))
	}

	if s.disableChecksum {
		opts = append(opts,
			config.WithAPIOptions([]func(*middleware.Stack) error{v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware}))
	}

	if s.debug {
		opts = append(opts,
			config.WithClientLogMode(aws.LogSigning|aws.LogRetries|aws.LogRequest|aws.LogResponse|aws.LogRequestEventMessage|aws.LogResponseEventMessage))
	}

	return config.LoadDefaultConfig(ctx, opts...)
}

// ResolveEndpoint is used for on prem or non-aws endpoints
func (s *S3be) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
	return aws.Endpoint{
		PartitionID:       "aws",
		URL:               s.endpoint,
		SigningRegion:     s.awsRegion,
		HostnameImmutable: true,
	}, nil
}
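Not part of the commit, but useful context: the lookup in getClientFromCtx assumes the gateway has already stored the authenticated auth.Account in the request context under the "account" key before calling into the backend. A minimal standalone sketch of that convention (the credentials and names here are made up):

package main

import (
	"context"
	"fmt"

	"github.com/versity/versitygw/auth"
)

func main() {
	// Hypothetical credentials for illustration only.
	acct := auth.Account{Access: "AKIDEXAMPLE", Secret: "examplesecret", Role: "user"}

	// What the gateway side is assumed to do before invoking the backend.
	ctx := context.WithValue(context.Background(), "account", acct)

	// What getClientFromCtx does on the backend side.
	got, ok := ctx.Value("account").(auth.Account)
	if !ok {
		fmt.Println("invalid account in context")
		return
	}
	fmt.Println("building client for access key", got.Access)
}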
500
backend/s3proxy/s3.go
Normal file
@@ -0,0 +1,500 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3proxy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3response"
|
||||
)
|
||||
|
||||
type S3be struct {
|
||||
backend.BackendUnsupported
|
||||
|
||||
endpoint string
|
||||
awsRegion string
|
||||
disableChecksum bool
|
||||
sslSkipVerify bool
|
||||
debug bool
|
||||
}
|
||||
|
||||
func New(endpoint, region string, disableChecksum, sslSkipVerify, debug bool) *S3be {
|
||||
return &S3be{
|
||||
endpoint: endpoint,
|
||||
awsRegion: region,
|
||||
disableChecksum: disableChecksum,
|
||||
sslSkipVerify: sslSkipVerify,
|
||||
debug: debug,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S3be) ListBuckets(ctx context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return s3response.ListAllMyBucketsResult{}, err
|
||||
}
|
||||
|
||||
output, err := client.ListBuckets(ctx, &s3.ListBucketsInput{})
|
||||
if err != nil {
|
||||
return s3response.ListAllMyBucketsResult{}, err
|
||||
}
|
||||
|
||||
var buckets []s3response.ListAllMyBucketsEntry
|
||||
for _, b := range output.Buckets {
|
||||
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
|
||||
Name: *b.Name,
|
||||
CreationDate: *b.CreationDate,
|
||||
})
|
||||
}
|
||||
|
||||
return s3response.ListAllMyBucketsResult{
|
||||
Owner: s3response.CanonicalUser{
|
||||
ID: *output.Owner.ID,
|
||||
DisplayName: *output.Owner.DisplayName,
|
||||
},
|
||||
Buckets: s3response.ListAllMyBucketsList{
|
||||
Bucket: buckets,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *S3be) HeadBucket(ctx context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.HeadBucket(ctx, input)
|
||||
}
|
||||
|
||||
func (s *S3be) CreateBucket(ctx context.Context, input *s3.CreateBucketInput) error {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = client.CreateBucket(ctx, input)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *S3be) DeleteBucket(ctx context.Context, input *s3.DeleteBucketInput) error {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = client.DeleteBucket(ctx, input)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *S3be) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.CreateMultipartUpload(ctx, input)
|
||||
}
|
||||
|
||||
func (s *S3be) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.CompleteMultipartUpload(ctx, input)
|
||||
}
|
||||
|
||||
func (s *S3be) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = client.AbortMultipartUpload(ctx, input)
|
||||
return err
|
||||
}
|
||||
|
||||
const (
|
||||
iso8601Format = "20060102T150405Z"
|
||||
)
|
||||
|
||||
func (s *S3be) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return s3response.ListMultipartUploadsResult{}, err
|
||||
}
|
||||
|
||||
output, err := client.ListMultipartUploads(ctx, input)
|
||||
if err != nil {
|
||||
return s3response.ListMultipartUploadsResult{}, err
|
||||
}
|
||||
|
||||
var uploads []s3response.Upload
|
||||
for _, u := range output.Uploads {
|
||||
uploads = append(uploads, s3response.Upload{
|
||||
Key: *u.Key,
|
||||
UploadID: *u.UploadId,
|
||||
Initiator: s3response.Initiator{
|
||||
ID: *u.Initiator.ID,
|
||||
DisplayName: *u.Initiator.DisplayName,
|
||||
},
|
||||
Owner: s3response.Owner{
|
||||
ID: *u.Owner.ID,
|
||||
DisplayName: *u.Owner.DisplayName,
|
||||
},
|
||||
StorageClass: string(u.StorageClass),
|
||||
Initiated: u.Initiated.Format(iso8601Format),
|
||||
})
|
||||
}
|
||||
|
||||
var cps []s3response.CommonPrefix
|
||||
for _, c := range output.CommonPrefixes {
|
||||
cps = append(cps, s3response.CommonPrefix{
|
||||
Prefix: *c.Prefix,
|
||||
})
|
||||
}
|
||||
|
||||
return s3response.ListMultipartUploadsResult{
|
||||
Bucket: *output.Bucket,
|
||||
KeyMarker: *output.KeyMarker,
|
||||
UploadIDMarker: *output.UploadIdMarker,
|
||||
NextKeyMarker: *output.NextKeyMarker,
|
||||
NextUploadIDMarker: *output.NextUploadIdMarker,
|
||||
Delimiter: *output.Delimiter,
|
||||
Prefix: *output.Prefix,
|
||||
EncodingType: string(output.EncodingType),
|
||||
MaxUploads: int(output.MaxUploads),
|
||||
IsTruncated: output.IsTruncated,
|
||||
Uploads: uploads,
|
||||
CommonPrefixes: cps,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *S3be) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return s3response.ListPartsResult{}, err
|
||||
}
|
||||
|
||||
output, err := client.ListParts(ctx, input)
|
||||
if err != nil {
|
||||
return s3response.ListPartsResult{}, err
|
||||
}
|
||||
|
||||
var parts []s3response.Part
|
||||
for _, p := range output.Parts {
|
||||
parts = append(parts, s3response.Part{
|
||||
PartNumber: int(p.PartNumber),
|
||||
LastModified: p.LastModified.Format(iso8601Format),
|
||||
ETag: *p.ETag,
|
||||
Size: p.Size,
|
||||
})
|
||||
}
|
||||
pnm, err := strconv.Atoi(*output.PartNumberMarker)
|
||||
if err != nil {
|
||||
return s3response.ListPartsResult{},
|
||||
fmt.Errorf("parse part number marker: %w", err)
|
||||
}
|
||||
|
||||
npmn, err := strconv.Atoi(*output.NextPartNumberMarker)
|
||||
if err != nil {
|
||||
return s3response.ListPartsResult{},
|
||||
fmt.Errorf("parse next part number marker: %w", err)
|
||||
}
|
||||
|
||||
return s3response.ListPartsResult{
|
||||
Bucket: *output.Bucket,
|
||||
Key: *output.Key,
|
||||
UploadID: *output.UploadId,
|
||||
Initiator: s3response.Initiator{
|
||||
ID: *output.Initiator.ID,
|
||||
DisplayName: *output.Initiator.DisplayName,
|
||||
},
|
||||
Owner: s3response.Owner{
|
||||
ID: *output.Owner.ID,
|
||||
DisplayName: *output.Owner.DisplayName,
|
||||
},
|
||||
StorageClass: string(output.StorageClass),
|
||||
PartNumberMarker: pnm,
|
||||
NextPartNumberMarker: npmn,
|
||||
MaxParts: int(output.MaxParts),
|
||||
IsTruncated: output.IsTruncated,
|
||||
Parts: parts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *S3be) UploadPart(ctx context.Context, input *s3.UploadPartInput) (etag string, err error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
output, err := client.UploadPart(ctx, input)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return *output.ETag, nil
|
||||
}
|
||||
|
||||
func (s *S3be) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, err
|
||||
}
|
||||
|
||||
output, err := client.UploadPartCopy(ctx, input)
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, err
|
||||
}
|
||||
|
||||
return s3response.CopyObjectResult{
|
||||
LastModified: *output.CopyPartResult.LastModified,
|
||||
ETag: *output.CopyPartResult.ETag,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *S3be) PutObject(ctx context.Context, input *s3.PutObjectInput) (string, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
output, err := client.PutObject(ctx, input)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return *output.ETag, nil
|
||||
}
|
||||
|
||||
func (s *S3be) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.HeadObject(ctx, input)
|
||||
}
|
||||
|
||||
func (s *S3be) GetObject(ctx context.Context, input *s3.GetObjectInput, w io.Writer) (*s3.GetObjectOutput, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
output, err := client.GetObject(ctx, input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer output.Body.Close()
|
||||
|
||||
_, err = io.Copy(w, output.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func (s *S3be) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.GetObjectAttributes(ctx, input)
|
||||
}
|
||||
|
||||
func (s *S3be) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.CopyObject(ctx, input)
|
||||
}
|
||||
|
||||
func (s *S3be) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.ListObjects(ctx, input)
|
||||
}
|
||||
|
||||
func (s *S3be) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.ListObjectsV2(ctx, input)
|
||||
}
|
||||
|
||||
func (s *S3be) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) error {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = client.DeleteObject(ctx, input)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *S3be) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return s3response.DeleteObjectsResult{}, err
|
||||
}
|
||||
|
||||
output, err := client.DeleteObjects(ctx, input)
|
||||
if err != nil {
|
||||
return s3response.DeleteObjectsResult{}, err
|
||||
}
|
||||
|
||||
return s3response.DeleteObjectsResult{
|
||||
Deleted: output.Deleted,
|
||||
Error: output.Errors,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *S3be) GetBucketAcl(ctx context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
output, err := client.GetBucketAcl(ctx, input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var acl auth.ACL
|
||||
|
||||
acl.Owner = *output.Owner.ID
|
||||
for _, el := range output.Grants {
|
||||
acl.Grantees = append(acl.Grantees, auth.Grantee{
|
||||
Permission: el.Permission,
|
||||
Access: *el.Grantee.ID,
|
||||
})
|
||||
}
|
||||
|
||||
return json.Marshal(acl)
|
||||
}
|
||||
|
||||
func (s S3be) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
acl, err := auth.ParseACL(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
input := &s3.PutBucketAclInput{
|
||||
Bucket: &bucket,
|
||||
ACL: acl.ACL,
|
||||
AccessControlPolicy: &types.AccessControlPolicy{
|
||||
Owner: &types.Owner{
|
||||
ID: &acl.Owner,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, el := range acl.Grantees {
|
||||
input.AccessControlPolicy.Grants = append(input.AccessControlPolicy.Grants, types.Grant{
|
||||
Permission: el.Permission,
|
||||
Grantee: &types.Grantee{
|
||||
ID: &el.Access,
|
||||
Type: types.TypeCanonicalUser,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
_, err = client.PutBucketAcl(ctx, input)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *S3be) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tagging := &types.Tagging{
|
||||
TagSet: []types.Tag{},
|
||||
}
|
||||
for key, val := range tags {
|
||||
tagging.TagSet = append(tagging.TagSet, types.Tag{
|
||||
Key: &key,
|
||||
Value: &val,
|
||||
})
|
||||
}
|
||||
|
||||
_, err = client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
|
||||
Bucket: &bucket,
|
||||
Key: &object,
|
||||
Tagging: tagging,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *S3be) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
output, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
|
||||
Bucket: &bucket,
|
||||
Key: &object,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tags := make(map[string]string)
|
||||
for _, el := range output.TagSet {
|
||||
tags[*el.Key] = *el.Value
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
func (s *S3be) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
|
||||
client, err := s.getClientFromCtx(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
|
||||
Bucket: &bucket,
|
||||
Key: &object,
|
||||
})
|
||||
return err
|
||||
}
|
||||
@@ -30,7 +30,6 @@ import (
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/pkg/xattr"
|
||||
"github.com/versity/scoutfs-go"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/backend/posix"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
@@ -188,7 +187,7 @@ func (s *ScoutFS) CompleteMultipartUpload(_ context.Context, input *s3.CompleteM
|
||||
// scoutfs move data is a metadata only operation that moves the data
|
||||
// extent references from the source, appending to the destination.
|
||||
// this needs to be 4k aligned.
|
||||
err = scoutfs.MoveData(pf, f.f)
|
||||
err = moveData(pf, f.f)
|
||||
pf.Close()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move blocks part %v: %v", p.PartNumber, err)
|
||||
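The comment above notes that the scoutfs data move is a metadata-only extent operation and must be 4k aligned. As an aside (not from this commit), a trivial check of that precondition might look like the following; the constant and helper names are hypothetical and not part of scoutfs-go:

package main

import "fmt"

// blockSize is the 4 KiB alignment the move-data comment above refers to.
const blockSize = 4096

// isBlockAligned reports whether a part size could be appended with a
// metadata-only extent move (illustrative helper, not from the commit).
func isBlockAligned(size int64) bool {
	return size%blockSize == 0
}

func main() {
	fmt.Println(isBlockAligned(8192))  // true
	fmt.Println(isBlockAligned(10000)) // false
}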
@@ -392,7 +391,7 @@ func (s *ScoutFS) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.
|
||||
|
||||
// Check if there are any offline extents associated with this file.
|
||||
// If so, we will set storage class to glacier.
|
||||
st, err := scoutfs.StatMore(objPath)
|
||||
st, err := statMore(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
@@ -466,7 +465,7 @@ func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer
|
||||
if s.glaciermode {
|
||||
// Check if there are any offline extents associated with this file.
|
||||
// If so, we will return the InvalidObjectState error.
|
||||
st, err := scoutfs.StatMore(objPath)
|
||||
st, err := statMore(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
@@ -666,7 +665,7 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
|
||||
if s.glaciermode {
|
||||
// Check if there are any offline extents associated with this file.
|
||||
// If so, we will return the InvalidObjectState error.
|
||||
st, err := scoutfs.StatMore(objPath)
|
||||
st, err := statMore(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return types.Object{}, backend.ErrSkipObj
|
||||
}
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
//go:build linux && amd64
|
||||
|
||||
package scoutfs
|
||||
|
||||
import (
|
||||
@@ -26,6 +28,7 @@ import (
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/versity/scoutfs-go"
|
||||
"github.com/versity/versitygw/backend/posix"
|
||||
)
|
||||
|
||||
@@ -182,3 +185,25 @@ func (tmp *tmpfile) Write(b []byte) (int, error) {
|
||||
func (tmp *tmpfile) cleanup() {
|
||||
tmp.f.Close()
|
||||
}
|
||||
|
||||
func moveData(from *os.File, to *os.File) error {
|
||||
return scoutfs.MoveData(from, to)
|
||||
}
|
||||
|
||||
func statMore(path string) (stat, error) {
|
||||
st, err := scoutfs.StatMore(path)
|
||||
if err != nil {
|
||||
return stat{}, err
|
||||
}
|
||||
var s stat
|
||||
|
||||
s.Meta_seq = st.Meta_seq
|
||||
s.Data_seq = st.Data_seq
|
||||
s.Data_version = st.Data_version
|
||||
s.Online_blocks = st.Online_blocks
|
||||
s.Offline_blocks = st.Offline_blocks
|
||||
s.Crtime_sec = st.Crtime_sec
|
||||
s.Crtime_nsec = st.Crtime_nsec
|
||||
|
||||
return s, nil
|
||||
}
|
||||
@@ -12,6 +12,8 @@
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
//go:build !(linux && amd64)
|
||||
|
||||
package scoutfs
|
||||
|
||||
import (
|
||||
@@ -46,3 +48,11 @@ func (tmp *tmpfile) Write(b []byte) (int, error) {
|
||||
|
||||
func (tmp *tmpfile) cleanup() {
|
||||
}
|
||||
|
||||
func moveData(from *os.File, to *os.File) error {
|
||||
return errNotSupported
|
||||
}
|
||||
|
||||
func statMore(path string) (stat, error) {
|
||||
return stat{}, errNotSupported
|
||||
}
|
||||
@@ -14,35 +14,12 @@
|
||||
|
||||
package scoutfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
func New(rootdir string, opts ...Option) (*ScoutFS, error) {
|
||||
return nil, fmt.Errorf("scoutfs only available on linux")
|
||||
}
|
||||
|
||||
type tmpfile struct {
|
||||
f *os.File
|
||||
}
|
||||
|
||||
var (
|
||||
errNotSupported = errors.New("not supported")
|
||||
)
|
||||
|
||||
func openTmpFile(dir, bucket, obj string, size int64) (*tmpfile, error) {
|
||||
return nil, errNotSupported
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) link() error {
|
||||
return errNotSupported
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) Write(b []byte) (int, error) {
|
||||
return 0, errNotSupported
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) cleanup() {
|
||||
type stat struct {
|
||||
Meta_seq uint64
|
||||
Data_seq uint64
|
||||
Data_version uint64
|
||||
Online_blocks uint64
|
||||
Offline_blocks uint64
|
||||
Crtime_sec uint64
|
||||
Crtime_nsec uint32
|
||||
}
|
||||
@@ -64,8 +64,10 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj
|
||||
}
|
||||
|
||||
if pastMax {
|
||||
newMarker = path
|
||||
truncated = true
|
||||
if len(objects) != 0 {
|
||||
newMarker = *objects[len(objects)-1].Key
|
||||
truncated = true
|
||||
}
|
||||
return fs.SkipAll
|
||||
}
|
||||
|
||||
@@ -75,8 +77,8 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj
|
||||
// match this prefix. Make sure to append the / at the end of
|
||||
// directories since this is implied as a directory path name.
|
||||
// If path is a prefix of prefix, then path could still be
|
||||
// building to match. So only skip if path isnt a prefix of prefix
|
||||
// and prefix isnt a prefix of path.
|
||||
// building to match. So only skip if path isn't a prefix of prefix
|
||||
// and prefix isn't a prefix of path.
|
||||
if prefix != "" &&
|
||||
!strings.HasPrefix(path+string(os.PathSeparator), prefix) &&
|
||||
!strings.HasPrefix(prefix, path+string(os.PathSeparator)) {
|
||||
@@ -104,10 +106,13 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj
|
||||
}
|
||||
|
||||
if !pastMarker {
|
||||
if path != marker {
|
||||
if path == marker {
|
||||
pastMarker = true
|
||||
return nil
|
||||
}
|
||||
if path < marker {
|
||||
return nil
|
||||
}
|
||||
pastMarker = true
|
||||
}
|
||||
|
||||
// If object doesn't have prefix, don't include in results.
|
||||
|
||||
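Aside, not from backend/walk.go: the reworked marker handling above skips keys that sort at or before the marker and resumes immediately after it, matching S3 list pagination. A small self-contained sketch of the same comparison, with illustrative names:

package main

import "fmt"

// afterMarker returns the keys that come strictly after marker in sort order,
// mirroring the pastMarker logic in the diff above.
func afterMarker(keys []string, marker string) []string {
	var out []string
	pastMarker := marker == ""
	for _, k := range keys {
		if !pastMarker {
			if k == marker {
				pastMarker = true
				continue // resume just after the marker itself
			}
			if k < marker {
				continue // still at or before the marker in sort order
			}
			pastMarker = true
		}
		out = append(out, k)
	}
	return out
}

func main() {
	keys := []string{"a/1", "a/2", "b/1", "c/1"}
	fmt.Println(afterMarker(keys, "a/2")) // [b/1 c/1]
}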
@@ -15,6 +15,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
@@ -29,6 +30,7 @@ import (
|
||||
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/s3response"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -66,6 +68,21 @@ func adminCommand() *cli.Command {
|
||||
Required: true,
|
||||
Aliases: []string{"r"},
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "user-id",
|
||||
Usage: "userID for the new user",
|
||||
Aliases: []string{"ui"},
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "group-id",
|
||||
Usage: "groupID for the new user",
|
||||
Aliases: []string{"gi"},
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "project-id",
|
||||
Usage: "projectID for the new user",
|
||||
Aliases: []string{"pi"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -105,6 +122,11 @@ func adminCommand() *cli.Command {
|
||||
},
|
||||
Action: changeBucketOwner,
|
||||
},
|
||||
{
|
||||
Name: "list-buckets",
|
||||
Usage: "Lists all the gateway buckets and owners.",
|
||||
Action: listBuckets,
|
||||
},
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
// TODO: create a configuration file for this
|
||||
@@ -138,6 +160,7 @@ func adminCommand() *cli.Command {
|
||||
|
||||
func createUser(ctx *cli.Context) error {
|
||||
access, secret, role := ctx.String("access"), ctx.String("secret"), ctx.String("role")
|
||||
userID, groupID, projectID := ctx.Int("user-id"), ctx.Int("group-id"), ctx.Int("project-id")
|
||||
if access == "" || secret == "" {
|
||||
return fmt.Errorf("invalid input parameters for the new user")
|
||||
}
|
||||
@@ -145,14 +168,28 @@ func createUser(ctx *cli.Context) error {
|
||||
return fmt.Errorf("invalid input parameter for role")
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/create-user?access=%v&secret=%v&role=%v", adminEndpoint, access, secret, role), nil)
|
||||
acc := auth.Account{
|
||||
Access: access,
|
||||
Secret: secret,
|
||||
Role: role,
|
||||
UserID: userID,
|
||||
GroupID: groupID,
|
||||
ProjectID: projectID,
|
||||
}
|
||||
|
||||
accJson, err := json.Marshal(acc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse user data: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/create-user", adminEndpoint), bytes.NewBuffer(accJson))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
|
||||
hashedPayload := sha256.Sum256([]byte{})
|
||||
hashedPayload := sha256.Sum256(accJson)
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
@@ -256,6 +293,7 @@ func listUsers(ctx *cli.Context) error {
|
||||
if err := json.Unmarshal(body, &accs); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(accs)
|
||||
|
||||
printAcctTable(accs)
|
||||
|
||||
@@ -274,10 +312,10 @@ const (
|
||||
func printAcctTable(accs []auth.Account) {
|
||||
w := new(tabwriter.Writer)
|
||||
w.Init(os.Stdout, minwidth, tabwidth, padding, padchar, flags)
|
||||
fmt.Fprintln(w, "Account\tRole")
|
||||
fmt.Fprintln(w, "-------\t----")
|
||||
fmt.Fprintln(w, "Account\tRole\tUserID\tGroupID\tProjectID")
|
||||
fmt.Fprintln(w, "-------\t----\t------\t-------\t---------")
|
||||
for _, acc := range accs {
|
||||
fmt.Fprintf(w, "%v\t%v\n", acc.Access, acc.Role)
|
||||
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\n", acc.Access, acc.Role, acc.UserID, acc.GroupID, acc.ProjectID)
|
||||
}
|
||||
fmt.Fprintln(w)
|
||||
w.Flush()
|
||||
@@ -319,3 +357,60 @@ func changeBucketOwner(ctx *cli.Context) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func printBuckets(buckets []s3response.Bucket) {
|
||||
w := new(tabwriter.Writer)
|
||||
w.Init(os.Stdout, minwidth, tabwidth, padding, padchar, flags)
|
||||
fmt.Fprintln(w, "Bucket\tOwner")
|
||||
fmt.Fprintln(w, "-------\t----")
|
||||
for _, acc := range buckets {
|
||||
fmt.Fprintf(w, "%v\t%v\n", acc.Name, acc.Owner)
|
||||
}
|
||||
fmt.Fprintln(w)
|
||||
w.Flush()
|
||||
}
|
||||
|
||||
func listBuckets(ctx *cli.Context) error {
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/list-buckets", adminEndpoint), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
|
||||
hashedPayload := sha256.Sum256([]byte{})
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
|
||||
if signErr != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
|
||||
client := http.Client{}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return fmt.Errorf(string(body))
|
||||
}
|
||||
|
||||
var buckets []s3response.Bucket
|
||||
if err := json.Unmarshal(body, &buckets); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printBuckets(buckets)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/urfave/cli/v2"
|
||||
@@ -32,16 +33,24 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
port string
|
||||
rootUserAccess string
|
||||
rootUserSecret string
|
||||
region string
|
||||
certFile, keyFile string
|
||||
kafkaURL, kafkaTopic, kafkaKey string
|
||||
natsURL, natsTopic string
|
||||
logWebhookURL string
|
||||
accessLog string
|
||||
debug bool
|
||||
port, admPort string
|
||||
rootUserAccess string
|
||||
rootUserSecret string
|
||||
region string
|
||||
admCertFile, admKeyFile string
|
||||
certFile, keyFile string
|
||||
kafkaURL, kafkaTopic, kafkaKey string
|
||||
natsURL, natsTopic string
|
||||
logWebhookURL string
|
||||
accessLog string
|
||||
debug bool
|
||||
iamDir string
|
||||
ldapURL, ldapBindDN, ldapPassword string
|
||||
ldapQueryBase, ldapObjClasses string
|
||||
ldapAccessAtr, ldapSecAtr, ldapRoleAtr string
|
||||
iamCacheDisable bool
|
||||
iamCacheTTL int
|
||||
iamCachePrune int
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -61,6 +70,7 @@ func main() {
|
||||
app.Commands = []*cli.Command{
|
||||
posixCommand(),
|
||||
scoutfsCommand(),
|
||||
s3Command(),
|
||||
adminCommand(),
|
||||
testCommand(),
|
||||
}
|
||||
@@ -142,6 +152,22 @@ func initFlags() []cli.Flag {
|
||||
Usage: "TLS key file",
|
||||
Destination: &keyFile,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "admin-port",
|
||||
Usage: "gateway admin server listen address <ip>:<port> or :<port>",
|
||||
Destination: &admPort,
|
||||
Aliases: []string{"ap"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "admin-cert",
|
||||
Usage: "TLS cert file for admin server",
|
||||
Destination: &admCertFile,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "admin-cert-key",
|
||||
Usage: "TLS key file for admin server",
|
||||
Destination: &admKeyFile,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "enable debug output",
|
||||
@@ -189,14 +215,83 @@ func initFlags() []cli.Flag {
|
||||
Destination: &natsTopic,
|
||||
Aliases: []string{"ent"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-dir",
|
||||
Usage: "if defined, run internal iam service within this directory",
|
||||
Destination: &iamDir,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-ldap-url",
|
||||
Usage: "ldap server url to store iam data",
|
||||
Destination: &ldapURL,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-ldap-bind-dn",
|
||||
Usage: "ldap server binding dn, example: 'cn=admin,dc=example,dc=com'",
|
||||
Destination: &ldapBindDN,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-ldap-bind-pass",
|
||||
Usage: "ldap server user password",
|
||||
Destination: &ldapPassword,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-ldap-query-base",
|
||||
Usage: "ldap server destination query, example: 'ou=iam,dc=example,dc=com'",
|
||||
Destination: &ldapQueryBase,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-ldap-object-classes",
|
||||
Usage: "ldap server object classes used to store the data. provide it as comma separated string, example: 'top,person'",
|
||||
Destination: &ldapObjClasses,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-ldap-access-atr",
|
||||
Usage: "ldap server user access key id attribute name",
|
||||
Destination: &ldapAccessAtr,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-ldap-secret-atr",
|
||||
Usage: "ldap server user secret access key attribute name",
|
||||
Destination: &ldapSecAtr,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "iam-ldap-role-atr",
|
||||
Usage: "ldap server user role attribute name",
|
||||
Destination: &ldapRoleAtr,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "iam-cache-disable",
|
||||
Usage: "disable local iam cache",
|
||||
Destination: &iamCacheDisable,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "iam-cache-ttl",
|
||||
Usage: "local iam cache entry ttl (seconds)",
|
||||
Value: 120,
|
||||
Destination: &iamCacheTTL,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "iam-cache-prune",
|
||||
Usage: "local iam cache cleanup interval (seconds)",
|
||||
Value: 3600,
|
||||
Destination: &iamCachePrune,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func runGateway(ctx *cli.Context, be backend.Backend, s auth.Storer) error {
|
||||
func runGateway(ctx *cli.Context, be backend.Backend) error {
|
||||
// int32 max for 32 bit arch
|
||||
blimit := int64(2*1024*1024*1024 - 1)
|
||||
if strconv.IntSize > 32 {
|
||||
// 5GB max for 64 bit arch
|
||||
blimit = int64(5 * 1024 * 1024 * 1024)
|
||||
}
|
||||
|
||||
app := fiber.New(fiber.Config{
|
||||
AppName: "versitygw",
|
||||
ServerHeader: "VERSITYGW",
|
||||
BodyLimit: 5 * 1024 * 1024 * 1024,
|
||||
BodyLimit: int(blimit),
|
||||
})
|
||||
|
||||
var opts []s3api.Option
|
||||
@@ -215,19 +310,51 @@ func runGateway(ctx *cli.Context, be backend.Backend, s auth.Storer) error {
|
||||
}
|
||||
opts = append(opts, s3api.WithTLS(cert))
|
||||
}
|
||||
|
||||
if debug {
|
||||
opts = append(opts, s3api.WithDebug())
|
||||
}
|
||||
|
||||
err := s.InitIAM()
|
||||
if err != nil {
|
||||
return fmt.Errorf("init iam: %w", err)
|
||||
if admPort == "" {
|
||||
opts = append(opts, s3api.WithAdminServer())
|
||||
}
|
||||
|
||||
iam, err := auth.NewInternal(s)
|
||||
admApp := fiber.New(fiber.Config{
|
||||
AppName: "versitygw",
|
||||
ServerHeader: "VERSITYGW",
|
||||
})
|
||||
|
||||
var admOpts []s3api.AdminOpt
|
||||
|
||||
if admCertFile != "" || admKeyFile != "" {
|
||||
if admCertFile == "" {
|
||||
return fmt.Errorf("TLS key specified without cert file")
|
||||
}
|
||||
if admKeyFile == "" {
|
||||
return fmt.Errorf("TLS cert specified without key file")
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(admCertFile, admKeyFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tls: load certs: %v", err)
|
||||
}
|
||||
admOpts = append(admOpts, s3api.WithAdminSrvTLS(cert))
|
||||
}
|
||||
|
||||
iam, err := auth.New(&auth.Opts{
|
||||
Dir: iamDir,
|
||||
LDAPServerURL: ldapURL,
|
||||
LDAPBindDN: ldapBindDN,
|
||||
LDAPPassword: ldapPassword,
|
||||
LDAPQueryBase: ldapQueryBase,
|
||||
LDAPObjClasses: ldapObjClasses,
|
||||
LDAPAccessAtr: ldapAccessAtr,
|
||||
LDAPSecretAtr: ldapSecAtr,
|
||||
LDAPRoleAtr: ldapRoleAtr,
|
||||
CacheDisable: iamCacheDisable,
|
||||
CacheTTL: iamCacheTTL,
|
||||
CachePrune: iamCachePrune,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("setup internal iam service: %w", err)
|
||||
return fmt.Errorf("setup iam: %w", err)
|
||||
}
|
||||
|
||||
logger, err := s3log.InitLogger(&s3log.LogConfig{
|
||||
@@ -257,8 +384,13 @@ func runGateway(ctx *cli.Context, be backend.Backend, s auth.Storer) error {
|
||||
return fmt.Errorf("init gateway: %v", err)
|
||||
}
|
||||
|
||||
c := make(chan error, 1)
|
||||
admSrv := s3api.NewAdminServer(admApp, be, middlewares.RootUserConfig{Access: rootUserAccess, Secret: rootUserSecret}, admPort, region, iam, admOpts...)
|
||||
|
||||
c := make(chan error, 2)
|
||||
go func() { c <- srv.Serve() }()
|
||||
if admPort != "" {
|
||||
go func() { c <- admSrv.Serve() }()
|
||||
}
|
||||
|
||||
// for/select blocks until shutdown
|
||||
Loop:
|
||||
@@ -279,13 +411,21 @@ Loop:
|
||||
}
|
||||
}
|
||||
}
|
||||
saveErr := err
|
||||
|
||||
be.Shutdown()
|
||||
|
||||
err = iam.Shutdown()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "shutdown iam: %v\n", err)
|
||||
}
|
||||
|
||||
if logger != nil {
|
||||
lerr := logger.Shutdown()
|
||||
if lerr != nil {
|
||||
fmt.Fprintf(os.Stderr, "shutdown logger: %v\n", lerr)
|
||||
err := logger.Shutdown()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "shutdown logger: %v\n", err)
|
||||
}
|
||||
}
|
||||
return err
|
||||
|
||||
return saveErr
|
||||
}
|
||||
|
||||
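Aside, not part of the commit: the BodyLimit arithmetic near the top of runGateway above can be checked in isolation. A minimal sketch using only the values from the diff:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// int32 max keeps the limit addressable on 32-bit builds.
	blimit := int64(2*1024*1024*1024 - 1)
	if strconv.IntSize > 32 {
		// 5GB cap on 64-bit builds, matching the comment in the diff.
		blimit = int64(5 * 1024 * 1024 * 1024)
	}
	fmt.Println(blimit)
}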
@@ -49,5 +49,5 @@ func runPosix(ctx *cli.Context) error {
|
||||
return fmt.Errorf("init posix: %v", err)
|
||||
}
|
||||
|
||||
return runGateway(ctx, be, be)
|
||||
return runGateway(ctx, be)
|
||||
}
|
||||
|
||||
76
cmd/versitygw/s3.go
Normal file
@@ -0,0 +1,76 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package main

import (
	"github.com/urfave/cli/v2"
	"github.com/versity/versitygw/backend/s3proxy"
)

var (
	s3proxyEndpoint        string
	s3proxyRegion          string
	s3proxyDisableChecksum bool
	s3proxySslSkipVerify   bool
	s3proxyDebug           bool
)

func s3Command() *cli.Command {
	return &cli.Command{
		Name:  "s3",
		Usage: "s3 storage backend",
		Description: `This runs the gateway like an s3 proxy redirecting requests
to an s3 storage backend service.`,
		Action: runS3,
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name:        "endpoint",
				Usage:       "s3 service endpoint, default AWS if not specified",
				Value:       "",
				Destination: &s3proxyEndpoint,
			},
			&cli.StringFlag{
				Name:        "region",
				Usage:       "s3 service region, default 'us-east-1' if not specified",
				Value:       "us-east-1",
				Destination: &s3proxyRegion,
			},
			&cli.BoolFlag{
				Name:        "disable-checksum",
				Usage:       "disable gateway to server object checksums",
				Value:       false,
				Destination: &s3proxyDisableChecksum,
			},
			&cli.BoolFlag{
				Name:        "ssl-skip-verify",
				Usage:       "skip ssl cert verification for s3 service",
				Value:       false,
				Destination: &s3proxySslSkipVerify,
			},
			&cli.BoolFlag{
				Name:        "debug",
				Usage:       "output extra debug tracing",
				Value:       false,
				Destination: &s3proxyDebug,
			},
		},
	}
}

func runS3(ctx *cli.Context) error {
	be := s3proxy.New(s3proxyEndpoint, s3proxyRegion,
		s3proxyDisableChecksum, s3proxySslSkipVerify, s3proxyDebug)
	return runGateway(ctx, be)
}
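Aside, not part of the commit: the proxy backend can also be wired up directly with the New constructor introduced in backend/s3proxy/s3.go above. A minimal sketch with a placeholder endpoint:

package main

import (
	"log"

	"github.com/versity/versitygw/backend/s3proxy"
)

func main() {
	// Placeholder endpoint; leaving it empty would target AWS, per the flag help above.
	be := s3proxy.New("https://s3.example.com", "us-east-1", false, false, false)
	log.Printf("s3proxy backend ready: %T", be)
}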
@@ -69,5 +69,5 @@ func runScoutfs(ctx *cli.Context) error {
|
||||
return fmt.Errorf("init scoutfs: %v", err)
|
||||
}
|
||||
|
||||
return runGateway(ctx, be, be)
|
||||
return runGateway(ctx, be)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,9 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/versity/versitygw/integration"
|
||||
@@ -13,10 +16,12 @@ var (
|
||||
endpoint string
|
||||
prefix string
|
||||
dstBucket string
|
||||
proxyURL string
|
||||
partSize int64
|
||||
objSize int64
|
||||
concurrency int
|
||||
files int
|
||||
totalReqs int
|
||||
upload bool
|
||||
download bool
|
||||
pathStyle bool
|
||||
@@ -73,6 +78,11 @@ func initTestCommands() []*cli.Command {
|
||||
Description: `Runs all the available tests to test the full flow of the gateway.`,
|
||||
Action: getAction(integration.TestFullFlow),
|
||||
},
|
||||
{
|
||||
Name: "posix",
|
||||
Usage: "Tests posix specific features",
|
||||
Action: getAction(integration.TestPosix),
|
||||
},
|
||||
{
|
||||
Name: "bench",
|
||||
Usage: "Runs download/upload performance test on the gateway",
|
||||
@@ -112,6 +122,7 @@ func initTestCommands() []*cli.Command {
|
||||
Name: "bucket",
|
||||
Usage: "Destination bucket name to read/write data",
|
||||
Destination: &dstBucket,
|
||||
Required: true,
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: "partSize",
|
||||
@@ -137,6 +148,11 @@ func initTestCommands() []*cli.Command {
|
||||
Value: false,
|
||||
Destination: &checksumDisable,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "proxy-url",
|
||||
Usage: "S3 proxy server url to compare",
|
||||
Destination: &proxyURL,
|
||||
},
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
if upload && download {
|
||||
@@ -146,10 +162,6 @@ func initTestCommands() []*cli.Command {
|
||||
return fmt.Errorf("must specify one of upload or download")
|
||||
}
|
||||
|
||||
if dstBucket == "" {
|
||||
return fmt.Errorf("must specify bucket")
|
||||
}
|
||||
|
||||
opts := []integration.Option{
|
||||
integration.WithAccess(awsID),
|
||||
integration.WithSecret(awsSecret),
|
||||
@@ -170,7 +182,123 @@ func initTestCommands() []*cli.Command {
|
||||
|
||||
s3conf := integration.NewS3Conf(opts...)
|
||||
|
||||
return integration.TestPerformance(s3conf, upload, download, files, objSize, dstBucket, prefix)
|
||||
if upload {
|
||||
if proxyURL == "" {
|
||||
integration.TestUpload(s3conf, files, objSize, dstBucket, prefix)
|
||||
return nil
|
||||
} else {
|
||||
size, elapsed, err := integration.TestUpload(s3conf, files, objSize, dstBucket, prefix)
|
||||
opts = append(opts, integration.WithEndpoint(proxyURL))
|
||||
proxyS3Conf := integration.NewS3Conf(opts...)
|
||||
proxySize, proxyElapsed, proxyErr := integration.TestUpload(proxyS3Conf, files, objSize, dstBucket, prefix)
|
||||
if err != nil || proxyErr != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
printProxyResultsTable([][4]string{
|
||||
{" # ", "Total Size", "Time Taken", "Speed(MB/S)"},
|
||||
{"---------", "----------", "----------", "-----------"},
|
||||
{"S3 Server", fmt.Sprint(size), fmt.Sprintf("%v", elapsed), fmt.Sprint(int(math.Ceil(float64(size)/elapsed.Seconds()) / 1048576))},
|
||||
{"S3 Proxy", fmt.Sprint(proxySize), fmt.Sprintf("%v", proxyElapsed), fmt.Sprint(int(math.Ceil(float64(proxySize)/proxyElapsed.Seconds()) / 1048576))},
|
||||
})
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
if proxyURL == "" {
|
||||
integration.TestDownload(s3conf, files, objSize, dstBucket, prefix)
|
||||
return nil
|
||||
} else {
|
||||
size, elapsed, err := integration.TestDownload(s3conf, files, objSize, dstBucket, prefix)
|
||||
opts = append(opts, integration.WithEndpoint(proxyURL))
|
||||
proxyS3Conf := integration.NewS3Conf(opts...)
|
||||
proxySize, proxyElapsed, proxyErr := integration.TestDownload(proxyS3Conf, files, objSize, dstBucket, prefix)
|
||||
if err != nil || proxyErr != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
printProxyResultsTable([][4]string{
|
||||
{" # ", "Total Size", "Time Taken", "Speed(MB/S)"},
|
||||
{"---------", "----------", "----------", "-----------"},
|
||||
{"S3 server", fmt.Sprint(size), fmt.Sprintf("%v", elapsed), fmt.Sprint(int(math.Ceil(float64(size)/elapsed.Seconds()) / 1048576))},
|
||||
{"S3 proxy", fmt.Sprint(proxySize), fmt.Sprintf("%v", proxyElapsed), fmt.Sprint(int(math.Ceil(float64(proxySize)/proxyElapsed.Seconds()) / 1048576))},
|
||||
})
|
||||
return nil
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "throughput",
|
||||
Usage: "Runs throughput performance test on the gateway",
|
||||
Description: `Calls HeadBucket action the number of times and concurrency level specified with flags by measuring gateway throughput.`,
|
||||
Flags: []cli.Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "reqs",
|
||||
Usage: "Total number of requests to send.",
|
||||
Value: 1000,
|
||||
Destination: &totalReqs,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "bucket",
|
||||
Usage: "Destination bucket name to make the requests",
|
||||
Destination: &dstBucket,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "concurrency",
|
||||
Usage: "threads per request",
|
||||
Value: 1,
|
||||
Destination: &concurrency,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "checksumDis",
|
||||
Usage: "Disable server checksum",
|
||||
Value: false,
|
||||
Destination: &checksumDisable,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "proxy-url",
|
||||
Usage: "S3 proxy server url to compare",
|
||||
Destination: &proxyURL,
|
||||
},
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
opts := []integration.Option{
|
||||
integration.WithAccess(awsID),
|
||||
integration.WithSecret(awsSecret),
|
||||
integration.WithRegion(region),
|
||||
integration.WithEndpoint(endpoint),
|
||||
integration.WithConcurrency(concurrency),
|
||||
}
|
||||
if debug {
|
||||
opts = append(opts, integration.WithDebug())
|
||||
}
|
||||
if checksumDisable {
|
||||
opts = append(opts, integration.WithDisableChecksum())
|
||||
}
|
||||
|
||||
s3conf := integration.NewS3Conf(opts...)
|
||||
|
||||
if proxyURL == "" {
|
||||
_, _, err := integration.TestReqPerSec(s3conf, totalReqs, dstBucket)
|
||||
return err
|
||||
} else {
|
||||
elapsed, rps, err := integration.TestReqPerSec(s3conf, totalReqs, dstBucket)
|
||||
opts = append(opts, integration.WithEndpoint(proxyURL))
|
||||
s3proxy := integration.NewS3Conf(opts...)
|
||||
proxyElapsed, proxyRPS, proxyErr := integration.TestReqPerSec(s3proxy, totalReqs, dstBucket)
|
||||
if err != nil || proxyErr != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
printProxyResultsTable([][4]string{
|
||||
{" # ", "Total Requests", "Time Taken", "Requests Per Second(Req/Sec)"},
|
||||
{"---------", "--------------", "----------", "----------------------------"},
|
||||
{"S3 Server", fmt.Sprint(totalReqs), fmt.Sprintf("%v", elapsed), fmt.Sprint(rps)},
|
||||
{"S3 Proxy", fmt.Sprint(totalReqs), fmt.Sprintf("%v", proxyElapsed), fmt.Sprint(proxyRPS)},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -201,3 +329,13 @@ func getAction(tf testFunc) func(*cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func printProxyResultsTable(stats [][4]string) {
|
||||
w := new(tabwriter.Writer)
|
||||
w.Init(os.Stdout, minwidth, tabwidth, padding, padchar, flags)
|
||||
for _, elem := range stats {
|
||||
fmt.Fprintf(w, "%v\t%v\t%v\t%v\n", elem[0], elem[1], elem[2], elem[3])
|
||||
}
|
||||
fmt.Fprintln(w)
|
||||
w.Flush()
|
||||
}
|
||||
|
||||
62
go.mod
@@ -3,52 +3,52 @@ module github.com/versity/versitygw
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go-v2 v1.20.0
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.1
|
||||
github.com/aws/smithy-go v1.14.0
|
||||
github.com/gofiber/fiber/v2 v2.48.0
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/nats-io/nats.go v1.28.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.22.2
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1
|
||||
github.com/aws/smithy-go v1.16.0
|
||||
github.com/go-ldap/ldap/v3 v3.4.6
|
||||
github.com/gofiber/fiber/v2 v2.50.0
|
||||
github.com/google/uuid v1.4.0
|
||||
github.com/nats-io/nats.go v1.31.0
|
||||
github.com/pkg/xattr v0.4.9
|
||||
github.com/segmentio/kafka-go v0.4.42
|
||||
github.com/segmentio/kafka-go v0.4.44
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
github.com/valyala/fasthttp v1.48.0
|
||||
github.com/valyala/fasthttp v1.50.0
|
||||
github.com/versity/scoutfs-go v0.0.0-20230606232754-0474b14343b9
|
||||
golang.org/x/sys v0.10.0
|
||||
golang.org/x/sys v0.14.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.38 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.21.1 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 // indirect
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/nats-io/nats-server/v2 v2.9.20 // indirect
|
||||
github.com/nats-io/nkeys v0.4.4 // indirect
|
||||
github.com/nats-io/nkeys v0.4.6 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.18 // indirect
|
||||
github.com/stretchr/testify v1.8.1 // indirect
|
||||
golang.org/x/crypto v0.11.0 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
golang.org/x/crypto v0.14.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/andybalholm/brotli v1.0.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.32
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.31
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.76
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.37 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.24.0
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.15.2
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/klauspost/compress v1.16.7 // indirect
|
||||
github.com/klauspost/compress v1.17.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
|
||||
156
go.sum
@@ -1,65 +1,69 @@
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
|
||||
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
|
||||
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/aws/aws-sdk-go-v2 v1.20.0 h1:INUDpYLt4oiPOJl0XwZDK2OVAVf0Rzo+MGVTv9f+gy8=
|
||||
github.com/aws/aws-sdk-go-v2 v1.20.0/go.mod h1:uWOr0m0jDsiWw8nnXiqZ+YG6LdvAlGYDLLf2NmHZoy4=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.11 h1:/MS8AzqYNAhhRNalOmxUvYs8VEbNGifTnzhPFdcRQkQ=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.11/go.mod h1:va22++AdXht4ccO3kH2SHkHHYvZ2G9Utz+CXKmm2CaU=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.32 h1:tqEOvkbTxwEV7hToRcJ1xZRjcATqwDVsWbAscgRKyNI=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.32/go.mod h1:U3ZF0fQRRA4gnbn9GGvOWLoT2EzzZfAWeKwnVrm1rDc=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.31 h1:vJyON3lG7R8VOErpJJBclBADiWTwzcwdkQpTKx8D2sk=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.31/go.mod h1:T4sESjBtY2lNxLgkIASmeP57b5j7hTQqCbqG0tWnxC4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.7 h1:X3H6+SU21x+76LRglk21dFRgMTJMa5QcpW+SqUf5BBg=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.7/go.mod h1:3we0V09SwcJBzNlnyovrR2wWJhWmVdqAsmVs4uronv8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.76 h1:DJ1kHj0GI9BbX+XhF0kHxlzOVjcncmDUXmCvXdbfdAE=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.76/go.mod h1:/AZCdswMSgwpB2yMSFfY5H4pVeBLnCuPehdmO/r3xSM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.37 h1:zr/gxAZkMcvP71ZhQOcvdm8ReLjFgIXnIn0fw5AM7mo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.37/go.mod h1:Pdn4j43v49Kk6+82spO3Tu5gSeQXRsxo56ePPQAvFiA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.31 h1:0HCMIkAkVY9KMgueD8tf4bRTUanzEYvhw7KkPXIMpO0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.31/go.mod h1:fTJDMe8LOFYtqiFFFeHA+SVMAwqLhoq0kcInYoLa9Js=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.38 h1:+i1DOFrW3YZ3apE45tCal9+aDKK6kNEbW6Ib7e1nFxE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.38/go.mod h1:1/jLp0OgOaWIetycOmycW+vYTYgTZFPttJQRgsI1PoU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.0 h1:U5yySdwt2HPo/pnQec04DImLzWORbeWML1fJiLkKruI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.0/go.mod h1:EhC/83j8/hL/UB1WmExo3gkElaja/KlmZM/gl1rTfjM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.12 h1:uAiiHnWihGP2rVp64fHwzLDrswGjEjsPszwRYMiYQPU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.12/go.mod h1:fUTHpOXqRQpXvEpDPSa3zxCc2fnpW6YnBoba+eQr+Bg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.32 h1:kvN1jPHr9UffqqG3bSgZ8tx4+1zKVHz/Ktw/BwW6hX8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.32/go.mod h1:QmMEM7es84EUkbYWcpnkx8i5EW2uERPfrTFeOch128Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.31 h1:auGDJ0aLZahF5SPvkJ6WcUuX7iQ7kyl2MamV7Tm8QBk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.31/go.mod h1:3+lloe3sZuBQw1aBc5MyndvodzQlyqCZ7x1QPDHaWP4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.0 h1:Wgjft9X4W5pMeuqgPCHIQtbZ87wsgom7S5F8obreg+c=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.0/go.mod h1:FWNzS4+zcWAP05IF7TDYTY1ysZAzIvogxWaDT9p8fsA=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.1 h1:mTgFVlfQT8gikc5+/HwD8UL9jnUro5MGv8n/VEYF12I=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.1/go.mod h1:6SOWLiobcZZshbmECRTADIRYliPL0etqFSigauQEeT0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.13.1 h1:DSNpSbfEgFXRV+IfEcKE5kTbqxm+MeF5WgyeRlsLnHY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.13.1/go.mod h1:TC9BubuFMVScIU+TLKamO6VZiYTkYoEHqlSQwAe2omw=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.1 h1:hd0SKLMdOL/Sl6Z0np1PX9LeH2gqNtBe0MhTedA8MGI=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.1/go.mod h1:XO/VcyoQ8nKyKfFW/3DMsRQXsfh/052tHTWmg3xBXRg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.21.1 h1:pAOJj+80tC8sPVgSDHzMYD6KLWsaLQ1kZw31PTeORbs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.21.1/go.mod h1:G8SbvL0rFk4WOJroU8tKBczhsbhj2p/YY7qeJezJ3CI=
|
||||
github.com/aws/smithy-go v1.14.0 h1:+X90sB94fizKjDmwb4vyl2cTTPXTE5E2G/1mjByb0io=
|
||||
github.com/aws/smithy-go v1.14.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.22.2 h1:lV0U8fnhAnPz8YcdmZVV60+tr6CakHzqA6P8T46ExJI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.22.2/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 h1:hHgLiIrTRtddC0AKcJr5s7i/hLgcpTt+q/FKxf1Zayk=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0/go.mod h1:w4I/v3NOWgD+qvs1NPEwhd++1h3XPHFaVxasfY6HlYQ=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.24.0 h1:4LEk29JO3w+y9dEo/5Tq5QTP7uIEw+KQrKiHOs4xlu4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.24.0/go.mod h1:11nNDAuK86kOUHeuEQo8f3CkcV5xuUxvPwFjTZE/PnQ=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.15.2 h1:rKH7khRMxPdD0u3dHecd0Q7NOVw3EUe7AqdkUOkiOGI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.15.2/go.mod h1:tXM8wmaeAhfC7nZoCxb0FzM/aRaB1m1WQ7x0qlBLq80=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 h1:G5KawTAkyHH6WyKQCdHiW4h3PmAXNJpOgwKg3H7sDRE=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3/go.mod h1:hugKmSFnZB+HgNI1sYGT14BUPZkO6alC/e0AWu+0IAQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6 h1:IpQbitxCZeC64C1ALz9QZu6AHHWundnU2evQ9xbp5k8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6/go.mod h1:27jIVQK+al9s0yTo3pkMdahRinbscqSC6zNGfNWXPZc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 h1:AaQsr5vvGR7rmeSWBtTCcw16tT9r51mWijuCQhzLnq8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2/go.mod h1:o1IiRn7CWocIFTXJjGKJDOwxv1ibL53NpcvcqGWyRBA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 h1:UZx8SXZ0YtzRiALzYAWcjb9Y9hZUR7MBKaBQ5ouOjPs=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2/go.mod h1:ipuRpcSaklmxR6C39G187TpBAO132gUfleTGccUPs8c=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 h1:usgqiJtamuGIBj+OvYmMq89+Z1hIKkMJToz1WpoeNUY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.2 h1:pyVrNAf7Hwz0u39dLKN5t+n0+K/3rMYKuiOoIum3AsU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.2/go.mod h1:mydrfOb9uiOYCxuCPR8YHQNQyGQwUQ7gPMZGBKbH8NY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0 h1:CJxo7ZBbaIzmXfV3hjcx36n9V87gJsIUPJflwqEHl3Q=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0/go.mod h1:yjVfjuY4nD1EW9i387Kau+I6V5cBA5YnC/mWNopjZrI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.2 h1:f2LhPofnjcdOQKRtumKjMvIHkfSQ8aH/rwKUDEQ/SB4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.2/go.mod h1:q+xX0H4OfuWDuBy7y/LDi4v8IBOWuF+vtp8Z6ex+lw4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 h1:h7j73yuAVVjic8pqswh+L/7r2IHP43QwRyOu6zcCDDE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2/go.mod h1:H07AHdK5LSy8F7EJUQhoxyiCNkePoHj2D8P2yGTWafo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.2 h1:gbIaOzpXixUpoPK+js/bCBK1QBDXM22SigsnzGZio0U=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.2/go.mod h1:p+S7RNbdGN8qgHDSg2SCQJ9FeMAmvcETQiVpeGhYnNM=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1 h1:o6MCcX1rJW8Y3g+hvg2xpjF6JR6DftuYhfl3Nc1WV9Q=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1/go.mod h1:UDtxEWbREX6y4KREapT+jjtjoH0TiVSS6f5nfaY1UaM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 h1:km+ZNjtLtpXYf42RdaDZnNHm9s7SYAuDGTafy6nd89A=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.17.1/go.mod h1:aHBr3pvBSD5MbzOvQtYutyPLLRPbl/y9x86XyJJnUXQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 h1:iRFNqZH4a67IqPvK8xxtyQYnyrlsvwmpHOe9r55ggBA=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1/go.mod h1:pTy5WM+6sNv2tB24JNKFtn6EvciQ5k40ZJ0pq/Iaxj0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 h1:txgVXIXWPXyqdiVn92BV6a/rgtpX31HYdsOYj0sVQQQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.25.1/go.mod h1:VAiJiNaoP1L89STFlEMgmHX1bKixY+FaP+TpRFrmyZ4=
|
||||
github.com/aws/smithy-go v1.16.0 h1:gJZEH/Fqh+RsvlJ1Zt4tVAtV6bKkp3cC+R6FCZMNzik=
|
||||
github.com/aws/smithy-go v1.16.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/gofiber/fiber/v2 v2.48.0 h1:cRVMCb9aUJDsyHxGFLwz/sGzDggdailZZyptU9F9cU0=
|
||||
github.com/gofiber/fiber/v2 v2.48.0/go.mod h1:xqJgfqrc23FJuqGOW6DVgi3HyZEm2Mn9pRqUb2kHSX8=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A=
|
||||
github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc=
|
||||
github.com/gofiber/fiber/v2 v2.50.0 h1:ia0JaB+uw3GpNSCR5nvC5dsaxXjRU5OEu36aytx+zGw=
|
||||
github.com/gofiber/fiber/v2 v2.50.0/go.mod h1:21eytvay9Is7S6z+OgPi7c7n4++tnClWmhpimVHMimw=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
||||
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
|
||||
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
|
||||
github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
@@ -67,14 +71,10 @@ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APP
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
|
||||
github.com/nats-io/jwt/v2 v2.4.1 h1:Y35W1dgbbz2SQUYDPCaclXcuqleVmpbRa7646Jf2EX4=
|
||||
github.com/nats-io/nats-server/v2 v2.9.20 h1:bt1dW6xsL1hWWwv7Hovm+EJt5L6iplyqlgEFkoEUk0k=
|
||||
github.com/nats-io/nats-server/v2 v2.9.20/go.mod h1:aTb/xtLCGKhfTFLxP591CMWfkdgBmcUUSkiSOe5A3gw=
|
||||
github.com/nats-io/nats.go v1.28.0 h1:Th4G6zdsz2d0OqXdfzKLClo6bOfoI/b1kInhRtFIy5c=
|
||||
github.com/nats-io/nats.go v1.28.0/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc=
|
||||
github.com/nats-io/nkeys v0.4.4 h1:xvBJ8d69TznjcQl9t6//Q5xXuVhyYiSos6RPtvQNTwA=
|
||||
github.com/nats-io/nkeys v0.4.4/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
|
||||
github.com/nats-io/nats.go v1.31.0 h1:/WFBHEc/dOKBF6qf1TZhrdEfTmOZ5JzdJ+Y3m6Y/p7E=
|
||||
github.com/nats-io/nats.go v1.31.0/go.mod h1:di3Bm5MLsoB4Bx61CBTsxuarI36WbhAwOm8QrW39+i8=
|
||||
github.com/nats-io/nkeys v0.4.6 h1:IzVe95ru2CT6ta874rt9saQRkWfe2nFj1NtvYSLqMzY=
|
||||
github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADymtkpts=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
@@ -89,8 +89,8 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/segmentio/kafka-go v0.4.42 h1:qffhBZCz4WcWyNuHEclHjIMLs2slp6mZO8px+5W5tfU=
|
||||
github.com/segmentio/kafka-go v0.4.42/go.mod h1:d0g15xPMqoUookug0OU75DhGZxXwCFxSLeJ4uphwJzg=
|
||||
github.com/segmentio/kafka-go v0.4.44 h1:Vjjksniy0WSTZ7CuVJrz1k04UoZeTc77UV6Yyk6tLY4=
|
||||
github.com/segmentio/kafka-go v0.4.44/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
@@ -102,8 +102,8 @@ github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.48.0 h1:oJWvHb9BIZToTQS3MuQ2R3bJZiNSa2KiNdeI8A+79Tc=
|
||||
github.com/valyala/fasthttp v1.48.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA=
|
||||
github.com/valyala/fasthttp v1.50.0 h1:H7fweIlBm0rXLs2q0XbalvJ6r0CUPFWK3/bB4N13e9M=
|
||||
github.com/valyala/fasthttp v1.50.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA=
|
||||
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/versity/scoutfs-go v0.0.0-20230606232754-0474b14343b9 h1:ZfmQR01Kk6/kQh6+zlqfBYszVY02fzf9xYrchOY4NFM=
|
||||
@@ -119,16 +119,21 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -138,27 +143,30 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
||||
integration/bench.go (new file, 142 lines)
@@ -0,0 +1,142 @@
package integration

import (
	"context"
	"fmt"
	"io"
	"math"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/s3"
)

type prefResult struct {
	elapsed time.Duration
	size    int64
	err     error
}

func TestUpload(s *S3Conf, files int, objSize int64, bucket, prefix string) (size int64, elapsed time.Duration, err error) {
	var sg sync.WaitGroup
	results := make([]prefResult, files)
	start := time.Now()
	if objSize == 0 {
		return 0, time.Since(start), fmt.Errorf("must specify object size for upload")
	}

	if objSize > (int64(10000) * s.PartSize) {
		return 0, time.Since(start), fmt.Errorf("object size can not exceed 10000 * chunksize")
	}

	runF("performance test: upload objects")

	for i := 0; i < files; i++ {
		sg.Add(1)
		go func(i int) {
			var r io.Reader = NewDataReader(int(objSize), int(s.PartSize))

			start := time.Now()
			err := s.UploadData(r, bucket, fmt.Sprintf("%v%v", prefix, i))
			results[i].elapsed = time.Since(start)
			results[i].err = err
			results[i].size = objSize
			sg.Done()
		}(i)
	}
	sg.Wait()
	elapsed = time.Since(start)

	var tot int64
	for i, res := range results {
		if res.err != nil {
			failF("%v: %v\n", i, res.err)
			return 0, time.Since(start), res.err
		}
		tot += res.size
		fmt.Printf("%v: %v in %v (%v MB/s)\n",
			i, res.size, res.elapsed,
			int(math.Ceil(float64(res.size)/res.elapsed.Seconds())/1048576))
	}

	fmt.Println()
	passF("run upload: %v in %v (%v MB/s)\n",
		tot, elapsed, int(math.Ceil(float64(tot)/elapsed.Seconds())/1048576))

	return tot, time.Since(start), nil
}

// TestDownload downloads the benchmark objects concurrently and reports per-object and aggregate throughput.
func TestDownload(s *S3Conf, files int, objSize int64, bucket, prefix string) (size int64, elapsed time.Duration, err error) {
	var sg sync.WaitGroup
	results := make([]prefResult, files)
	start := time.Now()

	runF("performance test: download objects")

	for i := 0; i < files; i++ {
		sg.Add(1)
		go func(i int) {
			nw := NewNullWriter()
			start := time.Now()
			n, err := s.DownloadData(nw, bucket, fmt.Sprintf("%v%v", prefix, i))
			results[i].elapsed = time.Since(start)
			results[i].err = err
			results[i].size = n
			sg.Done()
		}(i)
	}
	sg.Wait()
	elapsed = time.Since(start)

	var tot int64
	for i, res := range results {
		if res.err != nil {
			failF("%v: %v\n", i, res.err)
			// return the failing download's error, not the (nil) named return value
			return 0, elapsed, res.err
		}
		tot += res.size
		fmt.Printf("%v: %v in %v (%v MB/s)\n",
			i, res.size, res.elapsed,
			int(math.Ceil(float64(res.size)/res.elapsed.Seconds())/1048576))
	}

	fmt.Println()
	passF("run download: %v in %v (%v MB/s)\n",
		tot, elapsed, int(math.Ceil(float64(tot)/elapsed.Seconds())/1048576))

	return tot, elapsed, nil
}

// TestReqPerSec issues HeadBucket requests from s.Concurrency goroutines and reports the aggregate request rate.
func TestReqPerSec(s *S3Conf, totalReqs int, bucket string) (time.Duration, int, error) {
	client := s3.NewFromConfig(s.Config())
	var wg sync.WaitGroup
	var mu sync.Mutex
	var resErr error

	// Record the start time
	startTime := time.Now()
	runF("performance test: measuring requests per second")

	for i := 0; i < s.Concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < totalReqs/s.Concurrency; i++ {
				_, err := client.HeadBucket(context.Background(), &s3.HeadBucketInput{Bucket: &bucket})
				if err != nil {
					// keep only the first error; the mutex guards against concurrent writes
					mu.Lock()
					if resErr == nil {
						resErr = err
					}
					mu.Unlock()
				}
			}
		}()
	}

	wg.Wait()
	if resErr != nil {
		failF("performance test failed with error: %v", resErr)
		return time.Since(startTime), 0, resErr
	}
	elapsedTime := time.Since(startTime)
	rps := int(float64(totalReqs) / elapsedTime.Seconds())

	passF("Success\nTotal Requests: %d,\nConcurrency Level: %d,\nTime Taken: %s,\nRequests Per Second: %d req/sec", totalReqs, s.Concurrency, elapsedTime, rps)
	return elapsedTime, rps, nil
}
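A minimal sketch of how these helpers might be driven together. Only TestUpload, TestDownload, TestReqPerSec, and the S3Conf type come from the file above; the wrapper name, the "perf-" prefix, and the request count are illustrative assumptions, not versitygw code.

// benchAll is a hypothetical convenience wrapper around the benchmark helpers above.
func benchAll(s *S3Conf, files int, objSize int64, bucket string) error {
	// upload the test objects first so the download pass has data to read
	if _, _, err := TestUpload(s, files, objSize, bucket, "perf-"); err != nil {
		return err
	}
	if _, _, err := TestDownload(s, files, objSize, bucket, "perf-"); err != nil {
		return err
	}
	// request-rate pass: 1000 requests is an arbitrary illustrative value
	_, _, err := TestReqPerSec(s, 1000, bucket)
	return err
}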
@@ -25,6 +25,7 @@ func TestAuthentication(s *S3Conf) {
|
||||
func TestCreateBucket(s *S3Conf) {
|
||||
CreateBucket_invalid_bucket_name(s)
|
||||
CreateBucket_existing_bucket(s)
|
||||
CreateBucket_as_user(s)
|
||||
CreateDeleteBucket_success(s)
|
||||
}
|
||||
|
||||
@@ -42,13 +43,13 @@ func TestListBuckets(s *S3Conf) {
|
||||
func TestDeleteBucket(s *S3Conf) {
|
||||
DeleteBucket_non_existing_bucket(s)
|
||||
DeleteBucket_non_empty_bucket(s)
|
||||
DeleteBucket_success_status_code(s)
|
||||
}
|
||||
|
||||
func TestPutObject(s *S3Conf) {
|
||||
PutObject_non_existing_bucket(s)
|
||||
PutObject_special_chars(s)
|
||||
PutObject_existing_dir_obj(s)
|
||||
PutObject_obj_parent_is_file(s)
|
||||
PutObject_invalid_long_tags(s)
|
||||
PutObject_success(s)
|
||||
}
|
||||
|
||||
@@ -71,11 +72,15 @@ func TestListObjects(s *S3Conf) {
|
||||
ListObject_truncated(s)
|
||||
ListObjects_invalid_max_keys(s)
|
||||
ListObjects_max_keys_0(s)
|
||||
ListObjects_delimiter(s)
|
||||
ListObjects_max_keys_none(s)
|
||||
ListObjects_marker_not_from_obj_list(s)
|
||||
}
|
||||
|
||||
func TestDeleteObject(s *S3Conf) {
|
||||
DeleteObject_non_existing_object(s)
|
||||
DeleteObject_success(s)
|
||||
DeleteObject_success_status_code(s)
|
||||
}
|
||||
|
||||
func TestDeleteObjects(s *S3Conf) {
|
||||
@@ -86,11 +91,15 @@ func TestDeleteObjects(s *S3Conf) {
|
||||
|
||||
func TestCopyObject(s *S3Conf) {
|
||||
CopyObject_non_existing_dst_bucket(s)
|
||||
CopyObject_not_owned_source_bucket(s)
|
||||
CopyObject_copy_to_itself(s)
|
||||
CopyObject_to_itself_with_new_metadata(s)
|
||||
CopyObject_success(s)
|
||||
}
|
||||
|
||||
func TestPutObjectTagging(s *S3Conf) {
|
||||
PutObjectTagging_non_existing_object(s)
|
||||
PutObjectTagging_long_tags(s)
|
||||
PutObjectTagging_success(s)
|
||||
}
|
||||
|
||||
@@ -101,6 +110,7 @@ func TestGetObjectTagging(s *S3Conf) {
|
||||
|
||||
func TestDeleteObjectTagging(s *S3Conf) {
|
||||
DeleteObjectTagging_non_existing_object(s)
|
||||
DeleteObjectTagging_success_status(s)
|
||||
DeleteObjectTagging_success(s)
|
||||
}
|
||||
|
||||
@@ -127,6 +137,7 @@ func TestUploadPartCopy(s *S3Conf) {
|
||||
UploadPartCopy_non_existing_source_object_key(s)
|
||||
UploadPartCopy_success(s)
|
||||
UploadPartCopy_by_range_invalid_range(s)
|
||||
UploadPartCopy_greater_range_than_obj_size(s)
|
||||
UploadPartCopy_by_range_success(s)
|
||||
}
|
||||
|
||||
@@ -151,6 +162,7 @@ func TestAbortMultipartUpload(s *S3Conf) {
|
||||
AbortMultipartUpload_incorrect_uploadId(s)
|
||||
AbortMultipartUpload_incorrect_object_key(s)
|
||||
AbortMultipartUpload_success(s)
|
||||
AbortMultipartUpload_success_status_code(s)
|
||||
}
|
||||
|
||||
func TestCompleteMultipartUpload(s *S3Conf) {
|
||||
@@ -203,3 +215,10 @@ func TestFullFlow(s *S3Conf) {
|
||||
TestPutBucketAcl(s)
|
||||
TestGetBucketAcl(s)
|
||||
}
|
||||
|
||||
func TestPosix(s *S3Conf) {
|
||||
PutObject_overwrite_dir_obj(s)
|
||||
PutObject_overwrite_file_obj(s)
|
||||
PutObject_dir_obj_with_data(s)
|
||||
CreateMultipartUpload_dir_obj(s)
|
||||
}
|
||||
@@ -7,11 +7,9 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
@@ -656,6 +654,32 @@ func CreateBucket_invalid_bucket_name(s *S3Conf) {
|
||||
passF(testName)
|
||||
}
|
||||
|
||||
func CreateBucket_as_user(s *S3Conf) {
|
||||
testName := "CreateBucket_as_user"
|
||||
runF(testName)
|
||||
usr := user{
|
||||
access: "grt1",
|
||||
secret: "grt1secret",
|
||||
role: "user",
|
||||
}
|
||||
cfg := *s
|
||||
cfg.awsID = usr.access
|
||||
cfg.awsSecret = usr.secret
|
||||
err := createUsers(s, []user{usr})
|
||||
if err != nil {
|
||||
failF("%v: %v", testName, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
err = setup(&cfg, getBucketName())
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrAccessDenied)); err != nil {
|
||||
failF("%v: %v", testName, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
passF(testName)
|
||||
}
|
||||
|
||||
func CreateBucket_existing_bucket(s *S3Conf) {
|
||||
testName := "CreateBucket_existing_bucket"
|
||||
runF(testName)
|
||||
@@ -952,6 +976,41 @@ func DeleteBucket_non_empty_bucket(s *S3Conf) {
|
||||
})
|
||||
}
|
||||
|
||||
func DeleteBucket_success_status_code(s *S3Conf) {
|
||||
testName := "DeleteBucket_success_status_code"
|
||||
runF(testName)
|
||||
bucket := getBucketName()
|
||||
|
||||
err := setup(s, bucket)
|
||||
if err != nil {
|
||||
failF("%v: %v", testName, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
req, err := createSignedReq(http.MethodDelete, s.endpoint, bucket, s.awsID, s.awsSecret, "s3", s.awsRegion, nil, time.Now())
|
||||
if err != nil {
|
||||
failF("%v: %v", testName, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
failF("%v: %v", testName, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
failF("%v: expected response status to be %v, instead got %v", testName, http.StatusNoContent, resp.StatusCode)
|
||||
return
|
||||
}
|
||||
|
||||
passF(testName)
|
||||
}
|
||||
|
||||
func PutObject_non_existing_bucket(s *S3Conf) {
|
||||
testName := "PutObject_non_existing_bucket"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
@@ -966,7 +1025,7 @@ func PutObject_non_existing_bucket(s *S3Conf) {
|
||||
func PutObject_special_chars(s *S3Conf) {
|
||||
testName := "PutObject_special_chars"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
err := putObjects(s3client, []string{"foo%%", "bar^", "baz**"}, bucket)
|
||||
err := putObjects(s3client, []string{"my%key", "my^key", "my*key", "my.key", "my-key", "my_key", "my!key", "my'key", "my(key", "my)key", "my\\key", "my{}key", "my[]key", "my`key", "my+key", "my%25key", "my@key"}, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -974,24 +1033,38 @@ func PutObject_special_chars(s *S3Conf) {
|
||||
})
|
||||
}
|
||||
|
||||
func PutObject_existing_dir_obj(s *S3Conf) {
|
||||
testName := "PutObject_existing_dir_obj"
|
||||
func PutObject_invalid_long_tags(s *S3Conf) {
|
||||
testName := "PutObject_invalid_long_tags"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
err := putObjects(s3client, []string{"foo/bar", "foo"}, bucket)
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
key := "my-obj"
|
||||
tagging := fmt.Sprintf("%v=val", genRandString(200))
|
||||
|
||||
func PutObject_obj_parent_is_file(s *S3Conf) {
|
||||
testName := "PutObject_obj_parent_is_file"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
err := putObjects(s3client, []string{"foo", "foo/bar/"}, bucket)
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrObjectParentIsFile)); err != nil {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
_, err := s3client.PutObject(ctx, &s3.PutObjectInput{
|
||||
Bucket: &bucket,
|
||||
Key: &key,
|
||||
Tagging: &tagging,
|
||||
})
|
||||
cancel()
|
||||
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidTag)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tagging = fmt.Sprintf("key=%v", genRandString(300))
|
||||
|
||||
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
|
||||
_, err = s3client.PutObject(ctx, &s3.PutObjectInput{
|
||||
Bucket: &bucket,
|
||||
Key: &key,
|
||||
Tagging: &tagging,
|
||||
})
|
||||
cancel()
|
||||
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidTag)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
@@ -1321,7 +1394,7 @@ func ListObject_truncated(s *S3Conf) {
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
out, err := s3client.ListObjects(ctx, &s3.ListObjectsInput{
|
||||
out1, err := s3client.ListObjects(ctx, &s3.ListObjectsInput{
|
||||
Bucket: &bucket,
|
||||
MaxKeys: maxKeys,
|
||||
})
|
||||
@@ -1330,35 +1403,41 @@ func ListObject_truncated(s *S3Conf) {
|
||||
return err
|
||||
}
|
||||
|
||||
if !out.IsTruncated {
|
||||
return fmt.Errorf("expected output to be truncated")
|
||||
if !out1.IsTruncated {
|
||||
return fmt.Errorf("expected out1put to be truncated")
|
||||
}
|
||||
|
||||
if out.MaxKeys != maxKeys {
|
||||
return fmt.Errorf("expected max-keys to be %v, instead got %v", maxKeys, out.MaxKeys)
|
||||
if out1.MaxKeys != maxKeys {
|
||||
return fmt.Errorf("expected max-keys to be %v, instead got %v", maxKeys, out1.MaxKeys)
|
||||
}
|
||||
|
||||
if !compareObjects([]string{"bar", "baz"}, out.Contents) {
|
||||
if *out1.NextMarker != "baz" {
|
||||
return fmt.Errorf("expected nex-marker to be baz, instead got %v", *out1.NextMarker)
|
||||
}
|
||||
|
||||
if !compareObjects([]string{"bar", "baz"}, out1.Contents) {
|
||||
return fmt.Errorf("unexpected output for list objects with max-keys")
|
||||
}
|
||||
|
||||
//TODO: Add next marker checker after bug-fixing
|
||||
|
||||
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
|
||||
out, err = s3client.ListObjects(ctx, &s3.ListObjectsInput{
|
||||
out2, err := s3client.ListObjects(ctx, &s3.ListObjectsInput{
|
||||
Bucket: &bucket,
|
||||
Marker: out.NextMarker,
|
||||
Marker: out1.NextMarker,
|
||||
})
|
||||
cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if out.IsTruncated {
|
||||
if out2.IsTruncated {
|
||||
return fmt.Errorf("expected output not to be truncated")
|
||||
}
|
||||
|
||||
if !compareObjects([]string{"foo"}, out.Contents) {
|
||||
if *out2.Marker != *out1.NextMarker {
|
||||
return fmt.Errorf("expected marker to be %v, instead got %v", *out1.NextMarker, *out2.Marker)
|
||||
}
|
||||
|
||||
if !compareObjects([]string{"foo"}, out2.Contents) {
|
||||
return fmt.Errorf("unexpected output for list objects with max-keys")
|
||||
}
|
||||
return nil
|
||||
@@ -1408,7 +1487,93 @@ func ListObjects_max_keys_0(s *S3Conf) {
|
||||
})
|
||||
}
|
||||
|
||||
//TODO: Add a test case for delimiter after bug-fixing, as delimiter doesn't work as intended
|
||||
func ListObjects_delimiter(s *S3Conf) {
|
||||
testName := "ListObjects_delimiter"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
err := putObjects(s3client, []string{"foo/bar/baz", "foo/bar/xyzzy", "quux/thud", "asdf"}, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
out, err := s3client.ListObjects(ctx, &s3.ListObjectsInput{
|
||||
Bucket: &bucket,
|
||||
Delimiter: getPtr("/"),
|
||||
})
|
||||
cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if *out.Delimiter != "/" {
|
||||
return fmt.Errorf("expected delimiter to be /, instead got %v", *out.Delimiter)
|
||||
}
|
||||
if len(out.Contents) != 1 || *out.Contents[0].Key != "asdf" {
|
||||
return fmt.Errorf("expected result [\"asdf\"], instead got %v", out.Contents)
|
||||
}
|
||||
|
||||
if !comparePrefixes([]string{"foo/", "quux/"}, out.CommonPrefixes) {
|
||||
return fmt.Errorf("expected common prefixes to be %v, instead got %v", []string{"foo/", "quux/"}, out.CommonPrefixes)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
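For context, a rough client-side model (an assumption about S3 delimiter semantics, not gateway code) of the grouping the ListObjects_delimiter test above expects with Delimiter "/": keys containing the delimiter collapse into CommonPrefixes, the rest stay in Contents, giving the asdf / foo/ / quux/ split asserted above.

// groupByDelimiter is illustrative only: it mimics how ListObjects groups keys by delimiter.
func groupByDelimiter(keys []string, delim string) (contents []string, prefixes []string) {
	seen := map[string]bool{}
	for _, k := range keys {
		if i := strings.Index(k, delim); i >= 0 {
			// everything up to and including the first delimiter becomes a common prefix
			p := k[:i+len(delim)]
			if !seen[p] {
				seen[p] = true
				prefixes = append(prefixes, p)
			}
		} else {
			// keys without the delimiter are returned as ordinary contents
			contents = append(contents, k)
		}
	}
	return contents, prefixes
}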
|
||||
func ListObjects_max_keys_none(s *S3Conf) {
|
||||
testName := "ListObjects_max_keys_none"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
err := putObjects(s3client, []string{"foo", "bar", "baz"}, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
out, err := s3client.ListObjects(ctx, &s3.ListObjectsInput{
|
||||
Bucket: &bucket,
|
||||
})
|
||||
cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if out.MaxKeys != 1000 {
|
||||
return fmt.Errorf("expected max-keys to be 1000, instead got %v", out.MaxKeys)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func ListObjects_marker_not_from_obj_list(s *S3Conf) {
|
||||
testName := "ListObjects_marker_not_from_obj_list"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
err := putObjects(s3client, []string{"foo", "bar", "baz", "qux", "hello", "xyz"}, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
out, err := s3client.ListObjects(ctx, &s3.ListObjectsInput{
|
||||
Bucket: &bucket,
|
||||
Marker: getPtr("ceil"),
|
||||
})
|
||||
cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, el := range out.Contents {
|
||||
fmt.Println(*el.Key)
|
||||
}
|
||||
|
||||
if !compareObjects([]string{"foo", "qux", "hello", "xyz"}, out.Contents) {
|
||||
return fmt.Errorf("expected output to be %v, instead got %v", []string{"foo", "qux", "hello", "xyz"}, out.Contents)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func DeleteObject_non_existing_object(s *S3Conf) {
|
||||
testName := "DeleteObject_non_existing_object"
|
||||
@@ -1458,6 +1623,37 @@ func DeleteObject_success(s *S3Conf) {
|
||||
})
|
||||
}
|
||||
|
||||
func DeleteObject_success_status_code(s *S3Conf) {
|
||||
testName := "DeleteObject_success_status_code"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
err := putObjects(s3client, []string{obj}, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := createSignedReq(http.MethodDelete, s.endpoint, fmt.Sprintf("%v/%v", bucket, obj), s.awsID, s.awsSecret, "s3", s.awsRegion, nil, time.Now())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
return fmt.Errorf("expected response status to be %v, instead got %v", http.StatusNoContent, resp.StatusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func DeleteObjects_empty_input(s *S3Conf) {
|
||||
testName := "DeleteObjects_empty_input"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
@@ -1613,6 +1809,103 @@ func CopyObject_non_existing_dst_bucket(s *S3Conf) {
|
||||
})
|
||||
}
|
||||
|
||||
func CopyObject_not_owned_source_bucket(s *S3Conf) {
|
||||
testName := "CopyObject_not_owned_source_bucket"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
srcObj := "my-obj"
|
||||
err := putObjects(s3client, []string{srcObj}, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
usr := user{
|
||||
access: "admin1",
|
||||
secret: "admin1secret",
|
||||
role: "admin",
|
||||
}
|
||||
|
||||
cfg := *s
|
||||
cfg.awsID = usr.access
|
||||
cfg.awsSecret = usr.secret
|
||||
|
||||
err = createUsers(s, []user{usr})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dstBucket := getBucketName()
|
||||
err = setup(&cfg, dstBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
_, err = s3client.CopyObject(ctx, &s3.CopyObjectInput{
|
||||
Bucket: &dstBucket,
|
||||
Key: getPtr("obj-1"),
|
||||
CopySource: getPtr(fmt.Sprintf("%v/%v", bucket, srcObj)),
|
||||
})
|
||||
cancel()
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrAccessDenied)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = teardown(s, dstBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func CopyObject_copy_to_itself(s *S3Conf) {
|
||||
testName := "CopyObject_copy_to_itself"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
err := putObjects(s3client, []string{obj}, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
_, err = s3client.CopyObject(ctx, &s3.CopyObjectInput{
|
||||
Bucket: &bucket,
|
||||
Key: &obj,
|
||||
CopySource: getPtr(fmt.Sprintf("%v/%v", bucket, obj)),
|
||||
})
|
||||
cancel()
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidCopyDest)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func CopyObject_to_itself_with_new_metadata(s *S3Conf) {
|
||||
testName := "CopyObject_to_itself_with_new_metadata"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
err := putObjects(s3client, []string{obj}, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
_, err = s3client.CopyObject(ctx, &s3.CopyObjectInput{
|
||||
Bucket: &bucket,
|
||||
Key: &obj,
|
||||
CopySource: getPtr(fmt.Sprintf("%v/%v", bucket, obj)),
|
||||
Metadata: map[string]string{
|
||||
"Hello": "World",
|
||||
},
|
||||
})
|
||||
cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func CopyObject_success(s *S3Conf) {
|
||||
testName := "CopyObject_success"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
@@ -1690,6 +1983,42 @@ func PutObjectTagging_non_existing_object(s *S3Conf) {
|
||||
})
|
||||
}
|
||||
|
||||
func PutObjectTagging_long_tags(s *S3Conf) {
|
||||
testName := "PutObjectTagging_long_tags"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
tagging := types.Tagging{TagSet: []types.Tag{{Key: getPtr(genRandString(129)), Value: getPtr("val")}}}
|
||||
err := putObjects(s3client, []string{obj}, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
_, err = s3client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
|
||||
Bucket: &bucket,
|
||||
Key: &obj,
|
||||
Tagging: &tagging})
|
||||
cancel()
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidTag)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tagging = types.Tagging{TagSet: []types.Tag{{Key: getPtr("key"), Value: getPtr(genRandString(257))}}}
|
||||
|
||||
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
|
||||
_, err = s3client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
|
||||
Bucket: &bucket,
|
||||
Key: &obj,
|
||||
Tagging: &tagging})
|
||||
cancel()
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidTag)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func PutObjectTagging_success(s *S3Conf) {
|
||||
testName := "PutObjectTagging_success"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
@@ -1784,6 +2113,57 @@ func DeleteObjectTagging_non_existing_object(s *S3Conf) {
|
||||
})
|
||||
}
|
||||
|
||||
func DeleteObjectTagging_success_status(s *S3Conf) {
|
||||
testName := "DeleteObjectTagging_success_status"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
err := putObjects(s3client, []string{obj}, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tagging := types.Tagging{
|
||||
TagSet: []types.Tag{
|
||||
{
|
||||
Key: getPtr("Hello"),
|
||||
Value: getPtr("World"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
_, err = s3client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
|
||||
Bucket: &bucket,
|
||||
Key: &obj,
|
||||
Tagging: &tagging,
|
||||
})
|
||||
cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := createSignedReq(http.MethodDelete, s.endpoint, fmt.Sprintf("%v/%v?tagging", bucket, obj), s.awsID, s.awsSecret, "s3", s.awsRegion, nil, time.Now())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
return fmt.Errorf("expected response status to be %v, instead got %v", http.StatusNoContent, resp.StatusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func DeleteObjectTagging_success(s *S3Conf) {
|
||||
testName := "DeleteObjectTagging_success"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
@@ -1836,7 +2216,7 @@ func CreateMultipartUpload_non_existing_bucket(s *S3Conf) {
|
||||
testName := "CreateMultipartUpload_non_existing_bucket"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
bucketName := getBucketName()
|
||||
_, err := CreateMp(s3client, bucketName, "my-obj")
|
||||
_, err := createMp(s3client, bucketName, "my-obj")
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrNoSuchBucket)); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1849,7 +2229,7 @@ func CreateMultipartUpload_success(s *S3Conf) {
|
||||
testName := "CreateMultipartUpload_success"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1927,7 +2307,7 @@ func UploadPart_non_existing_key(s *S3Conf) {
|
||||
testName := "UploadPart_non_existing_key"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1951,7 +2331,7 @@ func UploadPart_success(s *S3Conf) {
|
||||
testName := "UploadPart_success"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2006,7 +2386,7 @@ func UploadPartCopy_incorrect_uploadId(s *S3Conf) {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = CreateMp(s3client, bucket, obj)
|
||||
_, err = createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2046,7 +2426,7 @@ func UploadPartCopy_incorrect_object_key(s *S3Conf) {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2098,7 +2478,7 @@ func UploadPartCopy_invalid_copy_source(s *S3Conf) {
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2125,7 +2505,7 @@ func UploadPartCopy_non_existing_source_bucket(s *S3Conf) {
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2157,7 +2537,7 @@ func UploadPartCopy_non_existing_source_object_key(s *S3Conf) {
|
||||
return nil
|
||||
}
|
||||
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2201,7 +2581,7 @@ func UploadPartCopy_success(s *S3Conf) {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2269,7 +2649,7 @@ func UploadPartCopy_by_range_invalid_range(s *S3Conf) {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2297,6 +2677,51 @@ func UploadPartCopy_by_range_invalid_range(s *S3Conf) {
|
||||
})
|
||||
}
|
||||
|
||||
func UploadPartCopy_greater_range_than_obj_size(s *S3Conf) {
|
||||
testName := "UploadPartCopy_greater_range_than_obj_size"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj, srcBucket, srcObj := "my-obj", getBucketName(), "src-obj"
|
||||
err := setup(s, srcBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcObjSize := 5 * 1024 * 1024
|
||||
_, _, err = putObjectWithData(int64(srcObjSize), &s3.PutObjectInput{
|
||||
Bucket: &srcBucket,
|
||||
Key: &srcObj,
|
||||
}, s3client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
_, err = s3client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
|
||||
Bucket: &bucket,
|
||||
CopySource: getPtr(srcBucket + "/" + srcObj),
|
||||
UploadId: out.UploadId,
|
||||
Key: &obj,
|
||||
CopySourceRange: getPtr(fmt.Sprintf("bytes=0-%v", srcObjSize+50)), // The specified range is greater than the actual object size
|
||||
PartNumber: 1,
|
||||
})
|
||||
cancel()
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrInvalidRange)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = teardown(s, srcBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func UploadPartCopy_by_range_success(s *S3Conf) {
|
||||
testName := "UploadPartCopy_by_range_success"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
@@ -2314,7 +2739,7 @@ func UploadPartCopy_by_range_success(s *S3Conf) {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2388,7 +2813,7 @@ func ListParts_incorrect_object_key(s *S3Conf) {
|
||||
testName := "ListParts_incorrect_object_key"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2412,7 +2837,7 @@ func ListParts_success(s *S3Conf) {
|
||||
testName := "ListParts_success"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2499,7 +2924,7 @@ func ListMultipartUploads_max_uploads(s *S3Conf) {
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
uploads := []types.MultipartUpload{}
|
||||
for i := 1; i < 6; i++ {
|
||||
out, err := CreateMp(s3client, bucket, fmt.Sprintf("obj%v", i))
|
||||
out, err := createMp(s3client, bucket, fmt.Sprintf("obj%v", i))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2551,7 +2976,7 @@ func ListMultipartUploads_incorrect_next_key_marker(s *S3Conf) {
|
||||
testName := "ListMultipartUploads_incorrect_next_key_marker"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
for i := 1; i < 6; i++ {
|
||||
_, err := CreateMp(s3client, bucket, fmt.Sprintf("obj%v", i))
|
||||
_, err := createMp(s3client, bucket, fmt.Sprintf("obj%v", i))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2579,7 +3004,7 @@ func ListMultipartUploads_ignore_upload_id_marker(s *S3Conf) {
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
uploads := []types.MultipartUpload{}
|
||||
for i := 1; i < 6; i++ {
|
||||
out, err := CreateMp(s3client, bucket, fmt.Sprintf("obj%v", i))
|
||||
out, err := createMp(s3client, bucket, fmt.Sprintf("obj%v", i))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2606,12 +3031,12 @@ func ListMultipartUploads_success(s *S3Conf) {
|
||||
testName := "ListMultipartUploads_max_uploads"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj1, obj2 := "my-obj-1", "my-obj-2"
|
||||
out1, err := CreateMp(s3client, bucket, obj1)
|
||||
out1, err := createMp(s3client, bucket, obj1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out2, err := CreateMp(s3client, bucket, obj2)
|
||||
out2, err := createMp(s3client, bucket, obj2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2687,7 +3112,7 @@ func AbortMultipartUpload_incorrect_object_key(s *S3Conf) {
|
||||
testName := "AbortMultipartUpload_incorrect_object_key"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2711,7 +3136,7 @@ func AbortMultipartUpload_success(s *S3Conf) {
|
||||
testName := "AbortMultipartUpload_success"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2744,6 +3169,37 @@ func AbortMultipartUpload_success(s *S3Conf) {
|
||||
})
|
||||
}
|
||||
|
||||
func AbortMultipartUpload_success_status_code(s *S3Conf) {
|
||||
testName := "AbortMultipartUpload_success_status_code"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := createSignedReq(http.MethodDelete, s.endpoint, fmt.Sprintf("%v/%v?uploadId=%v", bucket, obj, *out.UploadId), s.awsID, s.awsSecret, "s3", s.awsRegion, nil, time.Now())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
return fmt.Errorf("expected response status to be %v, instead got %v", http.StatusNoContent, resp.StatusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func CompletedMultipartUpload_non_existing_bucket(s *S3Conf) {
|
||||
testName := "CompletedMultipartUpload_non_existing_bucket"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
@@ -2766,7 +3222,7 @@ func CompleteMultipartUpload_invalid_part_number(s *S3Conf) {
|
||||
testName := "CompleteMultipartUpload_invalid_part_number"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2809,7 +3265,7 @@ func CompleteMultipartUpload_invalid_ETag(s *S3Conf) {
|
||||
testName := "CompleteMultipartUpload_invalid_ETag"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2852,7 +3308,7 @@ func CompleteMultipartUpload_success(s *S3Conf) {
|
||||
testName := "CompleteMultipartUpload_success"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
obj := "my-obj"
|
||||
out, err := CreateMp(s3client, bucket, obj)
|
||||
out, err := createMp(s3client, bucket, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -3319,73 +3775,50 @@ func GetBucketAcl_success(s *S3Conf) {
|
||||
})
|
||||
}
|
||||
|
||||
type prefResult struct {
|
||||
elapsed time.Duration
|
||||
size int64
|
||||
err error
|
||||
// Posix related tests
|
||||
func PutObject_overwrite_dir_obj(s *S3Conf) {
|
||||
testName := "PutObject_overwrite_dir_obj"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
err := putObjects(s3client, []string{"foo/", "foo"}, bucket)
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func TestPerformance(s *S3Conf, upload, download bool, files int, objectSize int64, bucket, prefix string) error {
|
||||
var sg sync.WaitGroup
|
||||
results := make([]prefResult, files)
|
||||
start := time.Now()
|
||||
if upload {
|
||||
if objectSize == 0 {
|
||||
return fmt.Errorf("must specify object size for upload")
|
||||
func PutObject_overwrite_file_obj(s *S3Conf) {
|
||||
testName := "PutObject_overwrite_file_obj"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
err := putObjects(s3client, []string{"foo", "foo/"}, bucket)
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrObjectParentIsFile)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if objectSize > (int64(10000) * s.PartSize) {
|
||||
return fmt.Errorf("object size can not exceed 10000 * chunksize")
|
||||
}
|
||||
|
||||
runF("performance test: upload/download objects")
|
||||
|
||||
for i := 0; i < files; i++ {
|
||||
sg.Add(1)
|
||||
go func(i int) {
|
||||
var r io.Reader = NewDataReader(int(objectSize), int(s.PartSize))
|
||||
|
||||
start := time.Now()
|
||||
err := s.UploadData(r, bucket, fmt.Sprintf("%v%v", prefix, i))
|
||||
results[i].elapsed = time.Since(start)
|
||||
results[i].err = err
|
||||
results[i].size = objectSize
|
||||
sg.Done()
|
||||
}(i)
|
||||
}
|
||||
}
|
||||
if download {
|
||||
for i := 0; i < files; i++ {
|
||||
sg.Add(1)
|
||||
go func(i int) {
|
||||
nw := NewNullWriter()
|
||||
start := time.Now()
|
||||
n, err := s.DownloadData(nw, bucket, fmt.Sprintf("%v%v", prefix, i))
|
||||
results[i].elapsed = time.Since(start)
|
||||
results[i].err = err
|
||||
results[i].size = n
|
||||
sg.Done()
|
||||
}(i)
|
||||
}
|
||||
}
|
||||
sg.Wait()
|
||||
elapsed := time.Since(start)
|
||||
|
||||
var tot int64
|
||||
for i, res := range results {
|
||||
if res.err != nil {
|
||||
failF("%v: %v\n", i, res.err)
|
||||
break
|
||||
}
|
||||
tot += res.size
|
||||
fmt.Printf("%v: %v in %v (%v MB/s)\n",
|
||||
i, res.size, res.elapsed,
|
||||
int(math.Ceil(float64(res.size)/res.elapsed.Seconds())/1048576))
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
passF("run perf: %v in %v (%v MB/s)\n",
|
||||
tot, elapsed, int(math.Ceil(float64(tot)/elapsed.Seconds())/1048576))
|
||||
|
||||
return nil
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func PutObject_dir_obj_with_data(s *S3Conf) {
|
||||
testName := "PutObject_dir_obj_with_data"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
_, _, err := putObjectWithData(int64(20), &s3.PutObjectInput{
|
||||
Bucket: &bucket,
|
||||
Key: getPtr("obj/"),
|
||||
}, s3client)
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrDirectoryObjectContainsData)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func CreateMultipartUpload_dir_obj(s *S3Conf) {
|
||||
testName := "CreateMultipartUpload_dir_obj"
|
||||
actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
|
||||
_, err := createMp(s3client, bucket, "obj/")
|
||||
if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrDirectoryObjectContainsData)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
rnd "math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
@@ -133,22 +134,9 @@ type authConfig struct {
|
||||
|
||||
func authHandler(s *S3Conf, cfg *authConfig, handler func(req *http.Request) error) {
|
||||
runF(cfg.testName)
|
||||
req, err := http.NewRequest(cfg.method, fmt.Sprintf("%v/%v", s.endpoint, cfg.path), bytes.NewReader(cfg.body))
|
||||
req, err := createSignedReq(cfg.method, s.endpoint, cfg.path, s.awsID, s.awsSecret, cfg.service, s.awsRegion, cfg.body, cfg.date)
|
||||
if err != nil {
|
||||
failF("%v: failed to send the request: %v", cfg.testName, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
|
||||
hashedPayload := sha256.Sum256([]byte{})
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: s.awsID, SecretAccessKey: s.awsSecret}, req, hexPayload, cfg.service, s.awsRegion, cfg.date)
|
||||
if signErr != nil {
|
||||
failF("%v: failed to sign the request: %v", cfg.testName, err.Error())
|
||||
failF("%v: %v", cfg.testName, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -160,6 +148,27 @@ func authHandler(s *S3Conf, cfg *authConfig, handler func(req *http.Request) err
|
||||
passF(cfg.testName)
|
||||
}
|
||||
|
||||
func createSignedReq(method, endpoint, path, access, secret, service, region string, body []byte, date time.Time) (*http.Request, error) {
|
||||
req, err := http.NewRequest(method, fmt.Sprintf("%v/%v", endpoint, path), bytes.NewReader(body))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
|
||||
hashedPayload := sha256.Sum256(body)
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: access, SecretAccessKey: secret}, req, hexPayload, service, region, date)
|
||||
if signErr != nil {
|
||||
return nil, fmt.Errorf("failed to sign the request: %w", signErr)
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func checkAuthErr(resp *http.Response, apiErr s3err.APIError) error {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
@@ -241,7 +250,7 @@ func putObjectWithData(lgth int64, input *s3.PutObjectInput, client *s3.Client)
|
||||
return
|
||||
}
|
||||
|
||||
func CreateMp(s3client *s3.Client, bucket, key string) (*s3.CreateMultipartUploadOutput, error) {
|
||||
func createMp(s3client *s3.Client, bucket, key string) (*s3.CreateMultipartUploadOutput, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
|
||||
out, err := s3client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
|
||||
Bucket: &bucket,
|
||||
@@ -401,6 +410,26 @@ func compareObjects(list1 []string, list2 []types.Object) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func comparePrefixes(list1 []string, list2 []types.CommonPrefix) bool {
|
||||
if len(list1) != len(list2) {
|
||||
return false
|
||||
}
|
||||
|
||||
elementMap := make(map[string]bool)
|
||||
|
||||
for _, elem := range list1 {
|
||||
elementMap[elem] = true
|
||||
}
|
||||
|
||||
for _, elem := range list2 {
|
||||
if _, found := elementMap[*elem.Prefix]; !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func compareDelObjects(list1 []string, list2 []types.DeletedObject) bool {
|
||||
if len(list1) != len(list2) {
|
||||
return false
|
||||
@@ -506,3 +535,15 @@ func changeBucketsOwner(s *S3Conf, buckets []string, owner string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
|
||||
|
||||
func genRandString(length int) string {
|
||||
source := rnd.NewSource(time.Now().UnixNano())
|
||||
random := rnd.New(source)
|
||||
result := make([]byte, length)
|
||||
for i := range result {
|
||||
result[i] = charset[random.Intn(len(charset))]
|
||||
}
|
||||
return string(result)
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ rm -rf /tmp/covdata
|
||||
mkdir /tmp/covdata
|
||||
|
||||
# run server in background
|
||||
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass posix /tmp/gw &
|
||||
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass --iam-dir /tmp/gw posix /tmp/gw &
|
||||
GW_PID=$!
|
||||
|
||||
# wait a second for server to start up
|
||||
|
||||
43
s3api/admin-router.go
Normal file
43
s3api/admin-router.go
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3api/controllers"
|
||||
)
|
||||
|
||||
type S3AdminRouter struct{}
|
||||
|
||||
func (ar *S3AdminRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService) {
|
||||
controller := controllers.NewAdminController(iam, be)
|
||||
|
||||
// CreateUser admin api
|
||||
app.Patch("/create-user", controller.CreateUser)
|
||||
|
||||
// DeleteUsers admin api
|
||||
app.Patch("/delete-user", controller.DeleteUser)
|
||||
|
||||
// ListUsers admin api
|
||||
app.Patch("/list-users", controller.ListUsers)
|
||||
|
||||
// ChangeBucketOwner admin api
|
||||
app.Patch("/change-bucket-owner", controller.ChangeBucketOwner)
|
||||
|
||||
// ListBucketsAndOwners admin api
|
||||
app.Patch("/list-buckets", controller.ListBuckets)
|
||||
}
|
||||
72
s3api/admin-server.go
Normal file
72
s3api/admin-server.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/gofiber/fiber/v2/middleware/logger"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3api/middlewares"
|
||||
)
|
||||
|
||||
type S3AdminServer struct {
|
||||
app *fiber.App
|
||||
backend backend.Backend
|
||||
router *S3AdminRouter
|
||||
port string
|
||||
cert *tls.Certificate
|
||||
}
|
||||
|
||||
func NewAdminServer(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, opts ...AdminOpt) *S3AdminServer {
|
||||
server := &S3AdminServer{
|
||||
app: app,
|
||||
backend: be,
|
||||
router: new(S3AdminRouter),
|
||||
port: port,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(server)
|
||||
}
|
||||
|
||||
// Logging middlewares
|
||||
app.Use(logger.New())
|
||||
app.Use(middlewares.DecodeURL(nil))
|
||||
|
||||
// Authentication middlewares
|
||||
app.Use(middlewares.VerifyV4Signature(root, iam, nil, region, false))
|
||||
app.Use(middlewares.VerifyMD5Body(nil))
|
||||
app.Use(middlewares.AclParser(be, nil))
|
||||
|
||||
server.router.Init(app, be, iam)
|
||||
|
||||
return server
|
||||
}
|
||||
|
||||
type AdminOpt func(s *S3AdminServer)
|
||||
|
||||
func WithAdminSrvTLS(cert tls.Certificate) AdminOpt {
|
||||
return func(s *S3AdminServer) { s.cert = &cert }
|
||||
}
|
||||
|
||||
func (sa *S3AdminServer) Serve() (err error) {
|
||||
if sa.cert != nil {
|
||||
return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)
|
||||
}
|
||||
return sa.app.Listen(sa.port)
|
||||
}
|
||||
@@ -15,6 +15,7 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
@@ -32,19 +33,21 @@ func NewAdminController(iam auth.IAMService, be backend.Backend) AdminController
|
||||
}
|
||||
|
||||
func (c AdminController) CreateUser(ctx *fiber.Ctx) error {
|
||||
access, secret, role := ctx.Query("access"), ctx.Query("secret"), ctx.Query("role")
|
||||
requesterRole := ctx.Locals("role").(string)
|
||||
|
||||
if requesterRole != "admin" {
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
if acct.Role != "admin" {
|
||||
return fmt.Errorf("access denied: only admin users have access to this resource")
|
||||
}
|
||||
if role != "user" && role != "admin" {
|
||||
var usr auth.Account
|
||||
err := json.Unmarshal(ctx.Body(), &usr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse request body: %w", err)
|
||||
}
|
||||
|
||||
if usr.Role != "user" && usr.Role != "admin" {
|
||||
return fmt.Errorf("invalid parameters: user role have to be one of the following: 'user', 'admin'")
|
||||
}
|
||||
|
||||
user := auth.Account{Secret: secret, Role: role}
|
||||
|
||||
err := c.iam.CreateAccount(access, user)
|
||||
err = c.iam.CreateAccount(usr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create a user: %w", err)
|
||||
}
|
||||
@@ -54,8 +57,8 @@ func (c AdminController) CreateUser(ctx *fiber.Ctx) error {
|
||||
|
||||
func (c AdminController) DeleteUser(ctx *fiber.Ctx) error {
|
||||
access := ctx.Query("access")
|
||||
requesterRole := ctx.Locals("role").(string)
|
||||
if requesterRole != "admin" {
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
if acct.Role != "admin" {
|
||||
return fmt.Errorf("access denied: only admin users have access to this resource")
|
||||
}
|
||||
|
||||
@@ -68,8 +71,8 @@ func (c AdminController) DeleteUser(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
func (c AdminController) ListUsers(ctx *fiber.Ctx) error {
|
||||
role := ctx.Locals("role").(string)
|
||||
if role != "admin" {
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
if acct.Role != "admin" {
|
||||
return fmt.Errorf("access denied: only admin users have access to this resource")
|
||||
}
|
||||
accs, err := c.iam.ListUserAccounts()
|
||||
@@ -81,8 +84,8 @@ func (c AdminController) ListUsers(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
|
||||
role := ctx.Locals("role").(string)
|
||||
if role != "admin" {
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
if acct.Role != "admin" {
|
||||
return fmt.Errorf("access denied: only admin users have access to this resource")
|
||||
}
|
||||
owner := ctx.Query("owner")
|
||||
@@ -103,3 +106,17 @@ func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
|
||||
|
||||
return ctx.Status(201).SendString("Bucket owner has been updated successfully")
|
||||
}
|
||||
|
||||
func (c AdminController) ListBuckets(ctx *fiber.Ctx) error {
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
if acct.Role != "admin" {
|
||||
return fmt.Errorf("access denied: only admin users have access to this resource")
|
||||
}
|
||||
|
||||
buckets, err := c.be.ListBucketsAndOwners(ctx.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctx.JSON(buckets)
|
||||
}
|
||||
|
||||
@@ -15,7 +15,9 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -23,6 +25,7 @@ import (
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/s3response"
|
||||
)
|
||||
|
||||
func TestAdminController_CreateUser(t *testing.T) {
|
||||
@@ -32,7 +35,7 @@ func TestAdminController_CreateUser(t *testing.T) {
|
||||
|
||||
adminController := AdminController{
|
||||
iam: &IAMServiceMock{
|
||||
CreateAccountFunc: func(access string, account auth.Account) error {
|
||||
CreateAccountFunc: func(account auth.Account) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
@@ -41,7 +44,7 @@ func TestAdminController_CreateUser(t *testing.T) {
|
||||
app := fiber.New()
|
||||
|
||||
app.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
@@ -50,10 +53,22 @@ func TestAdminController_CreateUser(t *testing.T) {
|
||||
appErr := fiber.New()
|
||||
|
||||
appErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "user")
|
||||
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
usr := auth.Account{
|
||||
Access: "access",
|
||||
Secret: "secret",
|
||||
Role: "invalid role",
|
||||
}
|
||||
|
||||
user, _ := json.Marshal(&usr)
|
||||
|
||||
usr.Role = "admin"
|
||||
|
||||
succUsr, _ := json.Marshal(&usr)
|
||||
|
||||
appErr.Patch("/create-user", adminController.CreateUser)
|
||||
|
||||
tests := []struct {
|
||||
@@ -67,7 +82,7 @@ func TestAdminController_CreateUser(t *testing.T) {
|
||||
name: "Admin-create-user-success",
|
||||
app: app,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/create-user?access=test&secret=test&role=user", nil),
|
||||
req: httptest.NewRequest(http.MethodPatch, "/create-user", bytes.NewBuffer(succUsr)),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 200,
|
||||
@@ -76,7 +91,7 @@ func TestAdminController_CreateUser(t *testing.T) {
|
||||
name: "Admin-create-user-invalid-user-role",
|
||||
app: app,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/create-user?access=test&secret=test&role=invalid", nil),
|
||||
req: httptest.NewRequest(http.MethodPatch, "/create-user", bytes.NewBuffer(user)),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
@@ -85,7 +100,7 @@ func TestAdminController_CreateUser(t *testing.T) {
|
||||
name: "Admin-create-user-invalid-requester-role",
|
||||
app: appErr,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/create-user?access=test&secret=test&role=admin", nil),
|
||||
req: httptest.NewRequest(http.MethodPatch, "/create-user", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
@@ -120,7 +135,7 @@ func TestAdminController_DeleteUser(t *testing.T) {
|
||||
app := fiber.New()
|
||||
|
||||
app.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
@@ -129,7 +144,7 @@ func TestAdminController_DeleteUser(t *testing.T) {
|
||||
appErr := fiber.New()
|
||||
|
||||
appErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "user")
|
||||
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
@@ -198,7 +213,7 @@ func TestAdminController_ListUsers(t *testing.T) {
|
||||
appErr := fiber.New()
|
||||
|
||||
appErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
@@ -207,7 +222,7 @@ func TestAdminController_ListUsers(t *testing.T) {
|
||||
appRoleErr := fiber.New()
|
||||
|
||||
appRoleErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "user")
|
||||
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
@@ -216,7 +231,7 @@ func TestAdminController_ListUsers(t *testing.T) {
|
||||
appSucc := fiber.New()
|
||||
|
||||
appSucc.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
@@ -306,7 +321,7 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
|
||||
app := fiber.New()
|
||||
|
||||
app.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
@@ -315,7 +330,7 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
|
||||
appRoleErr := fiber.New()
|
||||
|
||||
appRoleErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "user")
|
||||
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
@@ -324,7 +339,7 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
|
||||
appIamErr := fiber.New()
|
||||
|
||||
appIamErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
@@ -333,7 +348,7 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
|
||||
appIamNoSuchUser := fiber.New()
|
||||
|
||||
appIamNoSuchUser.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
@@ -395,3 +410,72 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdminController_ListBuckets(t *testing.T) {
|
||||
type args struct {
|
||||
req *http.Request
|
||||
}
|
||||
adminController := AdminController{
|
||||
be: &BackendMock{
|
||||
ListBucketsAndOwnersFunc: func(contextMoqParam context.Context) ([]s3response.Bucket, error) {
|
||||
return []s3response.Bucket{}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
|
||||
app.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("account", auth.Account{Access: "admin1", Secret: "secret", Role: "admin"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
app.Patch("/list-buckets", adminController.ListBuckets)
|
||||
|
||||
appRoleErr := fiber.New()
|
||||
|
||||
appRoleErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("account", auth.Account{Access: "user1", Secret: "secret", Role: "user"})
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
appRoleErr.Patch("/list-buckets", adminController.ListBuckets)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
app *fiber.App
|
||||
args args
|
||||
wantErr bool
|
||||
statusCode int
|
||||
}{
|
||||
{
|
||||
name: "List-buckets-incorrect-role",
|
||||
app: appRoleErr,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/list-buckets", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
{
|
||||
name: "List-buckets-success",
|
||||
app: app,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/list-buckets", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 200,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
resp, err := tt.app.Test(tt.args.req)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("AdminController.ListBuckets() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
if resp.StatusCode != tt.statusCode {
|
||||
t.Errorf("AdminController.ListBuckets() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,6 +46,9 @@ var _ backend.Backend = &BackendMock{}
|
||||
// DeleteObjectFunc: func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error {
|
||||
// panic("mock out the DeleteObject method")
|
||||
// },
|
||||
// DeleteObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string) error {
|
||||
// panic("mock out the DeleteObjectTagging method")
|
||||
// },
|
||||
// DeleteObjectsFunc: func(contextMoqParam context.Context, deleteObjectsInput *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error) {
|
||||
// panic("mock out the DeleteObjects method")
|
||||
// },
|
||||
@@ -61,8 +64,8 @@ var _ backend.Backend = &BackendMock{}
|
||||
// GetObjectAttributesFunc: func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
|
||||
// panic("mock out the GetObjectAttributes method")
|
||||
// },
|
||||
// GetTagsFunc: func(contextMoqParam context.Context, bucket string, object string) (map[string]string, error) {
|
||||
// panic("mock out the GetTags method")
|
||||
// GetObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string) (map[string]string, error) {
|
||||
// panic("mock out the GetObjectTagging method")
|
||||
// },
|
||||
// HeadBucketFunc: func(contextMoqParam context.Context, headBucketInput *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
|
||||
// panic("mock out the HeadBucket method")
|
||||
@@ -70,9 +73,12 @@ var _ backend.Backend = &BackendMock{}
|
||||
// HeadObjectFunc: func(contextMoqParam context.Context, headObjectInput *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
|
||||
// panic("mock out the HeadObject method")
|
||||
// },
|
||||
// ListBucketsFunc: func(contextMoqParam context.Context, owner string, isRoot bool) (s3response.ListAllMyBucketsResult, error) {
|
||||
// ListBucketsFunc: func(contextMoqParam context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
|
||||
// panic("mock out the ListBuckets method")
|
||||
// },
|
||||
// ListBucketsAndOwnersFunc: func(contextMoqParam context.Context) ([]s3response.Bucket, error) {
|
||||
// panic("mock out the ListBucketsAndOwners method")
|
||||
// },
|
||||
// ListMultipartUploadsFunc: func(contextMoqParam context.Context, listMultipartUploadsInput *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
|
||||
// panic("mock out the ListMultipartUploads method")
|
||||
// },
|
||||
@@ -94,8 +100,8 @@ var _ backend.Backend = &BackendMock{}
|
||||
// PutObjectAclFunc: func(contextMoqParam context.Context, putObjectAclInput *s3.PutObjectAclInput) error {
|
||||
// panic("mock out the PutObjectAcl method")
|
||||
// },
|
||||
// RemoveTagsFunc: func(contextMoqParam context.Context, bucket string, object string) error {
|
||||
// panic("mock out the RemoveTags method")
|
||||
// PutObjectTaggingFunc: func(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error {
|
||||
// panic("mock out the PutObjectTagging method")
|
||||
// },
|
||||
// RestoreObjectFunc: func(contextMoqParam context.Context, restoreObjectInput *s3.RestoreObjectInput) error {
|
||||
// panic("mock out the RestoreObject method")
|
||||
@@ -103,9 +109,6 @@ var _ backend.Backend = &BackendMock{}
|
||||
// SelectObjectContentFunc: func(contextMoqParam context.Context, selectObjectContentInput *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error) {
|
||||
// panic("mock out the SelectObjectContent method")
|
||||
// },
|
||||
// SetTagsFunc: func(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error {
|
||||
// panic("mock out the SetTags method")
|
||||
// },
|
||||
// ShutdownFunc: func() {
|
||||
// panic("mock out the Shutdown method")
|
||||
// },
|
||||
@@ -149,6 +152,9 @@ type BackendMock struct {
|
||||
// DeleteObjectFunc mocks the DeleteObject method.
|
||||
DeleteObjectFunc func(contextMoqParam context.Context, deleteObjectInput *s3.DeleteObjectInput) error
|
||||
|
||||
// DeleteObjectTaggingFunc mocks the DeleteObjectTagging method.
|
||||
DeleteObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string) error
|
||||
|
||||
// DeleteObjectsFunc mocks the DeleteObjects method.
|
||||
DeleteObjectsFunc func(contextMoqParam context.Context, deleteObjectsInput *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error)
|
||||
|
||||
@@ -164,8 +170,8 @@ type BackendMock struct {
|
||||
// GetObjectAttributesFunc mocks the GetObjectAttributes method.
|
||||
GetObjectAttributesFunc func(contextMoqParam context.Context, getObjectAttributesInput *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error)
|
||||
|
||||
// GetTagsFunc mocks the GetTags method.
|
||||
GetTagsFunc func(contextMoqParam context.Context, bucket string, object string) (map[string]string, error)
|
||||
// GetObjectTaggingFunc mocks the GetObjectTagging method.
|
||||
GetObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string) (map[string]string, error)
|
||||
|
||||
// HeadBucketFunc mocks the HeadBucket method.
|
||||
HeadBucketFunc func(contextMoqParam context.Context, headBucketInput *s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
|
||||
@@ -174,7 +180,10 @@ type BackendMock struct {
|
||||
HeadObjectFunc func(contextMoqParam context.Context, headObjectInput *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
|
||||
|
||||
// ListBucketsFunc mocks the ListBuckets method.
|
||||
ListBucketsFunc func(contextMoqParam context.Context, owner string, isRoot bool) (s3response.ListAllMyBucketsResult, error)
|
||||
ListBucketsFunc func(contextMoqParam context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error)
|
||||
|
||||
// ListBucketsAndOwnersFunc mocks the ListBucketsAndOwners method.
|
||||
ListBucketsAndOwnersFunc func(contextMoqParam context.Context) ([]s3response.Bucket, error)
|
||||
|
||||
// ListMultipartUploadsFunc mocks the ListMultipartUploads method.
|
||||
ListMultipartUploadsFunc func(contextMoqParam context.Context, listMultipartUploadsInput *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)
|
||||
@@ -197,8 +206,8 @@ type BackendMock struct {
|
||||
// PutObjectAclFunc mocks the PutObjectAcl method.
|
||||
PutObjectAclFunc func(contextMoqParam context.Context, putObjectAclInput *s3.PutObjectAclInput) error
|
||||
|
||||
// RemoveTagsFunc mocks the RemoveTags method.
|
||||
RemoveTagsFunc func(contextMoqParam context.Context, bucket string, object string) error
|
||||
// PutObjectTaggingFunc mocks the PutObjectTagging method.
|
||||
PutObjectTaggingFunc func(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error
|
||||
|
||||
// RestoreObjectFunc mocks the RestoreObject method.
|
||||
RestoreObjectFunc func(contextMoqParam context.Context, restoreObjectInput *s3.RestoreObjectInput) error
|
||||
@@ -206,9 +215,6 @@ type BackendMock struct {
|
||||
// SelectObjectContentFunc mocks the SelectObjectContent method.
|
||||
SelectObjectContentFunc func(contextMoqParam context.Context, selectObjectContentInput *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error)
|
||||
|
||||
// SetTagsFunc mocks the SetTags method.
|
||||
SetTagsFunc func(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error
|
||||
|
||||
// ShutdownFunc mocks the Shutdown method.
|
||||
ShutdownFunc func()
|
||||
|
||||
@@ -281,6 +287,15 @@ type BackendMock struct {
|
||||
// DeleteObjectInput is the deleteObjectInput argument value.
|
||||
DeleteObjectInput *s3.DeleteObjectInput
|
||||
}
|
||||
// DeleteObjectTagging holds details about calls to the DeleteObjectTagging method.
|
||||
DeleteObjectTagging []struct {
|
||||
// ContextMoqParam is the contextMoqParam argument value.
|
||||
ContextMoqParam context.Context
|
||||
// Bucket is the bucket argument value.
|
||||
Bucket string
|
||||
// Object is the object argument value.
|
||||
Object string
|
||||
}
|
||||
// DeleteObjects holds details about calls to the DeleteObjects method.
|
||||
DeleteObjects []struct {
|
||||
// ContextMoqParam is the contextMoqParam argument value.
|
||||
@@ -318,8 +333,8 @@ type BackendMock struct {
|
||||
// GetObjectAttributesInput is the getObjectAttributesInput argument value.
|
||||
GetObjectAttributesInput *s3.GetObjectAttributesInput
|
||||
}
|
||||
// GetTags holds details about calls to the GetTags method.
|
||||
GetTags []struct {
|
||||
// GetObjectTagging holds details about calls to the GetObjectTagging method.
|
||||
GetObjectTagging []struct {
|
||||
// ContextMoqParam is the contextMoqParam argument value.
|
||||
ContextMoqParam context.Context
|
||||
// Bucket is the bucket argument value.
|
||||
@@ -347,8 +362,13 @@ type BackendMock struct {
|
||||
ContextMoqParam context.Context
|
||||
// Owner is the owner argument value.
|
||||
Owner string
|
||||
// IsRoot is the isRoot argument value.
|
||||
IsRoot bool
|
||||
// IsAdmin is the isAdmin argument value.
|
||||
IsAdmin bool
|
||||
}
|
||||
// ListBucketsAndOwners holds details about calls to the ListBucketsAndOwners method.
|
||||
ListBucketsAndOwners []struct {
|
||||
// ContextMoqParam is the contextMoqParam argument value.
|
||||
ContextMoqParam context.Context
|
||||
}
|
||||
// ListMultipartUploads holds details about calls to the ListMultipartUploads method.
|
||||
ListMultipartUploads []struct {
|
||||
@@ -401,14 +421,16 @@ type BackendMock struct {
|
||||
// PutObjectAclInput is the putObjectAclInput argument value.
|
||||
PutObjectAclInput *s3.PutObjectAclInput
|
||||
}
|
||||
// RemoveTags holds details about calls to the RemoveTags method.
|
||||
RemoveTags []struct {
|
||||
// PutObjectTagging holds details about calls to the PutObjectTagging method.
|
||||
PutObjectTagging []struct {
|
||||
// ContextMoqParam is the contextMoqParam argument value.
|
||||
ContextMoqParam context.Context
|
||||
// Bucket is the bucket argument value.
|
||||
Bucket string
|
||||
// Object is the object argument value.
|
||||
Object string
|
||||
// Tags is the tags argument value.
|
||||
Tags map[string]string
|
||||
}
|
||||
// RestoreObject holds details about calls to the RestoreObject method.
|
||||
RestoreObject []struct {
|
||||
@@ -424,17 +446,6 @@ type BackendMock struct {
|
||||
// SelectObjectContentInput is the selectObjectContentInput argument value.
|
||||
SelectObjectContentInput *s3.SelectObjectContentInput
|
||||
}
|
||||
// SetTags holds details about calls to the SetTags method.
|
||||
SetTags []struct {
|
||||
// ContextMoqParam is the contextMoqParam argument value.
|
||||
ContextMoqParam context.Context
|
||||
// Bucket is the bucket argument value.
|
||||
Bucket string
|
||||
// Object is the object argument value.
|
||||
Object string
|
||||
// Tags is the tags argument value.
|
||||
Tags map[string]string
|
||||
}
|
||||
// Shutdown holds details about calls to the Shutdown method.
|
||||
Shutdown []struct {
|
||||
}
|
||||
@@ -464,15 +475,17 @@ type BackendMock struct {
|
||||
lockCreateMultipartUpload sync.RWMutex
|
||||
lockDeleteBucket sync.RWMutex
|
||||
lockDeleteObject sync.RWMutex
|
||||
lockDeleteObjectTagging sync.RWMutex
|
||||
lockDeleteObjects sync.RWMutex
|
||||
lockGetBucketAcl sync.RWMutex
|
||||
lockGetObject sync.RWMutex
|
||||
lockGetObjectAcl sync.RWMutex
|
||||
lockGetObjectAttributes sync.RWMutex
|
||||
lockGetTags sync.RWMutex
|
||||
lockGetObjectTagging sync.RWMutex
|
||||
lockHeadBucket sync.RWMutex
|
||||
lockHeadObject sync.RWMutex
|
||||
lockListBuckets sync.RWMutex
|
||||
lockListBucketsAndOwners sync.RWMutex
|
||||
lockListMultipartUploads sync.RWMutex
|
||||
lockListObjects sync.RWMutex
|
||||
lockListObjectsV2 sync.RWMutex
|
||||
@@ -480,10 +493,9 @@ type BackendMock struct {
|
||||
lockPutBucketAcl sync.RWMutex
|
||||
lockPutObject sync.RWMutex
|
||||
lockPutObjectAcl sync.RWMutex
|
||||
lockRemoveTags sync.RWMutex
|
||||
lockPutObjectTagging sync.RWMutex
|
||||
lockRestoreObject sync.RWMutex
|
||||
lockSelectObjectContent sync.RWMutex
|
||||
lockSetTags sync.RWMutex
|
||||
lockShutdown sync.RWMutex
|
||||
lockString sync.RWMutex
|
||||
lockUploadPart sync.RWMutex
|
||||
@@ -782,6 +794,46 @@ func (mock *BackendMock) DeleteObjectCalls() []struct {
|
||||
return calls
|
||||
}
|
||||
|
||||
// DeleteObjectTagging calls DeleteObjectTaggingFunc.
|
||||
func (mock *BackendMock) DeleteObjectTagging(contextMoqParam context.Context, bucket string, object string) error {
|
||||
if mock.DeleteObjectTaggingFunc == nil {
|
||||
panic("BackendMock.DeleteObjectTaggingFunc: method is nil but Backend.DeleteObjectTagging was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
}{
|
||||
ContextMoqParam: contextMoqParam,
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
mock.lockDeleteObjectTagging.Lock()
|
||||
mock.calls.DeleteObjectTagging = append(mock.calls.DeleteObjectTagging, callInfo)
|
||||
mock.lockDeleteObjectTagging.Unlock()
|
||||
return mock.DeleteObjectTaggingFunc(contextMoqParam, bucket, object)
|
||||
}
|
||||
|
||||
// DeleteObjectTaggingCalls gets all the calls that were made to DeleteObjectTagging.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedBackend.DeleteObjectTaggingCalls())
|
||||
func (mock *BackendMock) DeleteObjectTaggingCalls() []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
} {
|
||||
var calls []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
}
|
||||
mock.lockDeleteObjectTagging.RLock()
|
||||
calls = mock.calls.DeleteObjectTagging
|
||||
mock.lockDeleteObjectTagging.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
// DeleteObjects calls DeleteObjectsFunc.
|
||||
func (mock *BackendMock) DeleteObjects(contextMoqParam context.Context, deleteObjectsInput *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error) {
|
||||
if mock.DeleteObjectsFunc == nil {
|
||||
@@ -966,10 +1018,10 @@ func (mock *BackendMock) GetObjectAttributesCalls() []struct {
|
||||
return calls
|
||||
}
|
||||
|
||||
// GetTags calls GetTagsFunc.
|
||||
func (mock *BackendMock) GetTags(contextMoqParam context.Context, bucket string, object string) (map[string]string, error) {
|
||||
if mock.GetTagsFunc == nil {
|
||||
panic("BackendMock.GetTagsFunc: method is nil but Backend.GetTags was just called")
|
||||
// GetObjectTagging calls GetObjectTaggingFunc.
|
||||
func (mock *BackendMock) GetObjectTagging(contextMoqParam context.Context, bucket string, object string) (map[string]string, error) {
|
||||
if mock.GetObjectTaggingFunc == nil {
|
||||
panic("BackendMock.GetObjectTaggingFunc: method is nil but Backend.GetObjectTagging was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
ContextMoqParam context.Context
|
||||
@@ -980,17 +1032,17 @@ func (mock *BackendMock) GetTags(contextMoqParam context.Context, bucket string,
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
mock.lockGetTags.Lock()
|
||||
mock.calls.GetTags = append(mock.calls.GetTags, callInfo)
|
||||
mock.lockGetTags.Unlock()
|
||||
return mock.GetTagsFunc(contextMoqParam, bucket, object)
|
||||
mock.lockGetObjectTagging.Lock()
|
||||
mock.calls.GetObjectTagging = append(mock.calls.GetObjectTagging, callInfo)
|
||||
mock.lockGetObjectTagging.Unlock()
|
||||
return mock.GetObjectTaggingFunc(contextMoqParam, bucket, object)
|
||||
}
|
||||
|
||||
// GetTagsCalls gets all the calls that were made to GetTags.
|
||||
// GetObjectTaggingCalls gets all the calls that were made to GetObjectTagging.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedBackend.GetTagsCalls())
|
||||
func (mock *BackendMock) GetTagsCalls() []struct {
|
||||
// len(mockedBackend.GetObjectTaggingCalls())
|
||||
func (mock *BackendMock) GetObjectTaggingCalls() []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
@@ -1000,9 +1052,9 @@ func (mock *BackendMock) GetTagsCalls() []struct {
|
||||
Bucket string
|
||||
Object string
|
||||
}
|
||||
mock.lockGetTags.RLock()
|
||||
calls = mock.calls.GetTags
|
||||
mock.lockGetTags.RUnlock()
|
||||
mock.lockGetObjectTagging.RLock()
|
||||
calls = mock.calls.GetObjectTagging
|
||||
mock.lockGetObjectTagging.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
@@ -1079,23 +1131,23 @@ func (mock *BackendMock) HeadObjectCalls() []struct {
|
||||
}
|
||||
|
||||
// ListBuckets calls ListBucketsFunc.
|
||||
func (mock *BackendMock) ListBuckets(contextMoqParam context.Context, owner string, isRoot bool) (s3response.ListAllMyBucketsResult, error) {
|
||||
func (mock *BackendMock) ListBuckets(contextMoqParam context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
|
||||
if mock.ListBucketsFunc == nil {
|
||||
panic("BackendMock.ListBucketsFunc: method is nil but Backend.ListBuckets was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
ContextMoqParam context.Context
|
||||
Owner string
|
||||
IsRoot bool
|
||||
IsAdmin bool
|
||||
}{
|
||||
ContextMoqParam: contextMoqParam,
|
||||
Owner: owner,
|
||||
IsRoot: isRoot,
|
||||
IsAdmin: isAdmin,
|
||||
}
|
||||
mock.lockListBuckets.Lock()
|
||||
mock.calls.ListBuckets = append(mock.calls.ListBuckets, callInfo)
|
||||
mock.lockListBuckets.Unlock()
|
||||
return mock.ListBucketsFunc(contextMoqParam, owner, isRoot)
|
||||
return mock.ListBucketsFunc(contextMoqParam, owner, isAdmin)
|
||||
}
|
||||
|
||||
// ListBucketsCalls gets all the calls that were made to ListBuckets.
|
||||
@@ -1105,12 +1157,12 @@ func (mock *BackendMock) ListBuckets(contextMoqParam context.Context, owner stri
|
||||
func (mock *BackendMock) ListBucketsCalls() []struct {
|
||||
ContextMoqParam context.Context
|
||||
Owner string
|
||||
IsRoot bool
|
||||
IsAdmin bool
|
||||
} {
|
||||
var calls []struct {
|
||||
ContextMoqParam context.Context
|
||||
Owner string
|
||||
IsRoot bool
|
||||
IsAdmin bool
|
||||
}
|
||||
mock.lockListBuckets.RLock()
|
||||
calls = mock.calls.ListBuckets
|
||||
@@ -1118,6 +1170,38 @@ func (mock *BackendMock) ListBucketsCalls() []struct {
|
||||
return calls
|
||||
}
|
||||
|
||||
// ListBucketsAndOwners calls ListBucketsAndOwnersFunc.
|
||||
func (mock *BackendMock) ListBucketsAndOwners(contextMoqParam context.Context) ([]s3response.Bucket, error) {
|
||||
if mock.ListBucketsAndOwnersFunc == nil {
|
||||
panic("BackendMock.ListBucketsAndOwnersFunc: method is nil but Backend.ListBucketsAndOwners was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
ContextMoqParam context.Context
|
||||
}{
|
||||
ContextMoqParam: contextMoqParam,
|
||||
}
|
||||
mock.lockListBucketsAndOwners.Lock()
|
||||
mock.calls.ListBucketsAndOwners = append(mock.calls.ListBucketsAndOwners, callInfo)
|
||||
mock.lockListBucketsAndOwners.Unlock()
|
||||
return mock.ListBucketsAndOwnersFunc(contextMoqParam)
|
||||
}
|
||||
|
||||
// ListBucketsAndOwnersCalls gets all the calls that were made to ListBucketsAndOwners.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedBackend.ListBucketsAndOwnersCalls())
|
||||
func (mock *BackendMock) ListBucketsAndOwnersCalls() []struct {
|
||||
ContextMoqParam context.Context
|
||||
} {
|
||||
var calls []struct {
|
||||
ContextMoqParam context.Context
|
||||
}
|
||||
mock.lockListBucketsAndOwners.RLock()
|
||||
calls = mock.calls.ListBucketsAndOwners
|
||||
mock.lockListBucketsAndOwners.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
// ListMultipartUploads calls ListMultipartUploadsFunc.
|
||||
func (mock *BackendMock) ListMultipartUploads(contextMoqParam context.Context, listMultipartUploadsInput *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
|
||||
if mock.ListMultipartUploadsFunc == nil {
|
||||
@@ -1374,43 +1458,47 @@ func (mock *BackendMock) PutObjectAclCalls() []struct {
|
||||
return calls
|
||||
}
|
||||
|
||||
// RemoveTags calls RemoveTagsFunc.
|
||||
func (mock *BackendMock) RemoveTags(contextMoqParam context.Context, bucket string, object string) error {
|
||||
if mock.RemoveTagsFunc == nil {
|
||||
panic("BackendMock.RemoveTagsFunc: method is nil but Backend.RemoveTags was just called")
|
||||
// PutObjectTagging calls PutObjectTaggingFunc.
|
||||
func (mock *BackendMock) PutObjectTagging(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error {
|
||||
if mock.PutObjectTaggingFunc == nil {
|
||||
panic("BackendMock.PutObjectTaggingFunc: method is nil but Backend.PutObjectTagging was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
Tags map[string]string
|
||||
}{
|
||||
ContextMoqParam: contextMoqParam,
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
Tags: tags,
|
||||
}
|
||||
mock.lockRemoveTags.Lock()
|
||||
mock.calls.RemoveTags = append(mock.calls.RemoveTags, callInfo)
|
||||
mock.lockRemoveTags.Unlock()
|
||||
return mock.RemoveTagsFunc(contextMoqParam, bucket, object)
|
||||
mock.lockPutObjectTagging.Lock()
|
||||
mock.calls.PutObjectTagging = append(mock.calls.PutObjectTagging, callInfo)
|
||||
mock.lockPutObjectTagging.Unlock()
|
||||
return mock.PutObjectTaggingFunc(contextMoqParam, bucket, object, tags)
|
||||
}
|
||||
|
||||
// RemoveTagsCalls gets all the calls that were made to RemoveTags.
|
||||
// PutObjectTaggingCalls gets all the calls that were made to PutObjectTagging.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedBackend.RemoveTagsCalls())
|
||||
func (mock *BackendMock) RemoveTagsCalls() []struct {
|
||||
// len(mockedBackend.PutObjectTaggingCalls())
|
||||
func (mock *BackendMock) PutObjectTaggingCalls() []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
Tags map[string]string
|
||||
} {
|
||||
var calls []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
Tags map[string]string
|
||||
}
|
||||
mock.lockRemoveTags.RLock()
|
||||
calls = mock.calls.RemoveTags
|
||||
mock.lockRemoveTags.RUnlock()
|
||||
mock.lockPutObjectTagging.RLock()
|
||||
calls = mock.calls.PutObjectTagging
|
||||
mock.lockPutObjectTagging.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
@@ -1486,50 +1574,6 @@ func (mock *BackendMock) SelectObjectContentCalls() []struct {
|
||||
return calls
|
||||
}
|
||||
|
||||
// SetTags calls SetTagsFunc.
|
||||
func (mock *BackendMock) SetTags(contextMoqParam context.Context, bucket string, object string, tags map[string]string) error {
|
||||
if mock.SetTagsFunc == nil {
|
||||
panic("BackendMock.SetTagsFunc: method is nil but Backend.SetTags was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
Tags map[string]string
|
||||
}{
|
||||
ContextMoqParam: contextMoqParam,
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
Tags: tags,
|
||||
}
|
||||
mock.lockSetTags.Lock()
|
||||
mock.calls.SetTags = append(mock.calls.SetTags, callInfo)
|
||||
mock.lockSetTags.Unlock()
|
||||
return mock.SetTagsFunc(contextMoqParam, bucket, object, tags)
|
||||
}
|
||||
|
||||
// SetTagsCalls gets all the calls that were made to SetTags.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedBackend.SetTagsCalls())
|
||||
func (mock *BackendMock) SetTagsCalls() []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
Tags map[string]string
|
||||
} {
|
||||
var calls []struct {
|
||||
ContextMoqParam context.Context
|
||||
Bucket string
|
||||
Object string
|
||||
Tags map[string]string
|
||||
}
|
||||
mock.lockSetTags.RLock()
|
||||
calls = mock.calls.SetTags
|
||||
mock.lockSetTags.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
// Shutdown calls ShutdownFunc.
|
||||
func (mock *BackendMock) Shutdown() {
|
||||
if mock.ShutdownFunc == nil {
|
||||
|
||||
@@ -53,8 +53,8 @@ func New(be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs
|
||||
}
|
||||
|
||||
func (c S3ApiController) ListBuckets(ctx *fiber.Ctx) error {
|
||||
access, role := ctx.Locals("access").(string), ctx.Locals("role").(string)
|
||||
res, err := c.be.ListBuckets(ctx.Context(), access, role == "admin")
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
res, err := c.be.ListBuckets(ctx.Context(), acct.Access, acct.Role == "admin")
|
||||
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "ListBucket"})
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
|
||||
maxParts := ctx.QueryInt("max-parts", 0)
|
||||
partNumberMarker := ctx.Query("part-number-marker")
|
||||
acceptRange := ctx.Get("Range")
|
||||
access := ctx.Locals("access").(string)
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
isRoot := ctx.Locals("isRoot").(bool)
|
||||
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
|
||||
if keyEnd != "" {
|
||||
@@ -74,11 +74,11 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("tagging") {
|
||||
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
|
||||
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ", isRoot); err != nil {
|
||||
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "GetObjectTagging", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
|
||||
tags, err := c.be.GetTags(ctx.Context(), bucket, key)
|
||||
tags, err := c.be.GetObjectTagging(ctx.Context(), bucket, key)
|
||||
if err != nil {
|
||||
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "GetObjectTagging", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
@@ -102,7 +102,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
|
||||
}
|
||||
}
|
||||
|
||||
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
|
||||
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ", isRoot); err != nil {
|
||||
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "ListParts", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
|
||||
@@ -117,7 +117,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("acl") {
|
||||
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ_ACP", isRoot); err != nil {
|
||||
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ_ACP", isRoot); err != nil {
|
||||
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "GetObjectAcl", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
res, err := c.be.GetObjectAcl(ctx.Context(), &s3.GetObjectAclInput{
|
||||
@@ -128,7 +128,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
if attrs := ctx.Get("X-Amz-Object-Attributes"); attrs != "" {
|
||||
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
|
||||
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ", isRoot); err != nil {
|
||||
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "GetObjectAttributes", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
var oattrs []types.ObjectAttributes
|
||||
@@ -143,7 +143,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
|
||||
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "GetObjectAttributes", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
|
||||
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ_ACP", isRoot); err != nil {
|
||||
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ_ACP", isRoot); err != nil {
|
||||
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "GetObject", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
|
||||
@@ -224,12 +224,12 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
|
||||
keyMarker := ctx.Query("key-marker")
|
||||
maxUploadsStr := ctx.Query("max-uploads")
|
||||
uploadIdMarker := ctx.Query("upload-id-marker")
|
||||
access := ctx.Locals("access").(string)
|
||||
acct := ctx.Locals("account").(auth.Account)
|
||||
isRoot := ctx.Locals("isRoot").(bool)
|
||||
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("acl") {
|
||||
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ_ACP", isRoot); err != nil {
|
||||
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ_ACP", isRoot); err != nil {
|
||||
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "GetBucketAcl", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
|
||||
@@ -243,7 +243,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("uploads") {
|
||||
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
|
||||
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ", isRoot); err != nil {
|
||||
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "ListMultipartUploads", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
maxUploads, err := utils.ParseUint(maxUploadsStr)
|
||||
@@ -266,7 +266,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
if ctx.QueryInt("list-type") == 2 {
|
||||
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
|
||||
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ", isRoot); err != nil {
|
||||
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "ListObjectsV2", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
maxkeys, err := utils.ParseUint(maxkeysStr)
|
||||
@@ -287,7 +287,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
|
||||
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "ListObjectsV2", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
|
||||
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
|
||||
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ", isRoot); err != nil {
|
||||
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "ListObjects", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
|
||||
@@ -311,7 +311,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
bucket, acl, grantFullControl, grantRead, grantReadACP, granWrite, grantWriteACP, access, isRoot :=
|
||||
bucket, acl, grantFullControl, grantRead, grantReadACP, granWrite, grantWriteACP, acct, isRoot :=
|
||||
ctx.Params("bucket"),
|
||||
ctx.Get("X-Amz-Acl"),
|
||||
ctx.Get("X-Amz-Grant-Full-Control"),
|
||||
@@ -319,7 +319,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
ctx.Get("X-Amz-Grant-Read-Acp"),
|
||||
ctx.Get("X-Amz-Grant-Write"),
|
||||
ctx.Get("X-Amz-Grant-Write-Acp"),
|
||||
ctx.Locals("access").(string),
|
||||
ctx.Locals("account").(auth.Account),
|
||||
ctx.Locals("isRoot").(bool)
|
||||
|
||||
grants := grantFullControl + grantRead + grantReadACP + granWrite + grantWriteACP
|
||||
@@ -329,7 +329,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
var accessControlPolicy auth.AccessControlPolicy
|
||||
|
||||
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
|
||||
if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE_ACP", isRoot); err != nil {
|
||||
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE_ACP", isRoot); err != nil {
|
||||
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "PutBucketAcl", BucketOwner: parsedAcl.Owner})
|
||||
}
|
||||
|
||||
@@ -391,9 +391,9 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
|
||||
err := c.be.CreateBucket(ctx.Context(), &s3.CreateBucketInput{
|
||||
Bucket: &bucket,
|
||||
ObjectOwnership: types.ObjectOwnership(access),
|
||||
ObjectOwnership: types.ObjectOwnership(acct.Access),
|
||||
})
|
||||
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "CreateBucket", BucketOwner: access})
|
||||
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "CreateBucket", BucketOwner: acct.Access})
|
||||
}
|
||||
|
||||
func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
|
||||
@@ -401,7 +401,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
keyStart := ctx.Params("key")
keyEnd := ctx.Params("*1")
uploadId := ctx.Query("uploadId")
access := ctx.Locals("access").(string)
acct := ctx.Locals("account").(auth.Account)
isRoot := ctx.Locals("isRoot").(bool)
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)
tagging := ctx.Get("x-amz-tagging")
@@ -446,14 +446,17 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
tags := make(map[string]string, len(objTagging.TagSet.Tags))

for _, tag := range objTagging.TagSet.Tags {
if len(tag.Key) > 128 || len(tag.Value) > 256 {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidTag), &MetaOpts{Logger: c.logger, Action: "PutObjectTagging", BucketOwner: parsedAcl.Owner})
}
tags[tag.Key] = tag.Value
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "PutObjectTagging", BucketOwner: parsedAcl.Owner})
}

err = c.be.SetTags(ctx.Context(), bucket, keyStart, tags)
err = c.be.PutObjectTagging(ctx.Context(), bucket, keyStart, tags)
return SendResponse(ctx, err, &MetaOpts{
Logger: c.logger,
EvSender: c.evSender,
@@ -487,7 +490,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPart), &MetaOpts{Logger: c.logger, Action: "UploadPart", BucketOwner: parsedAcl.Owner})
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "UploadPart", BucketOwner: parsedAcl.Owner})
}

@@ -571,7 +574,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}

if copySource != "" {
if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "CopyObject", BucketOwner: parsedAcl.Owner})
}

@@ -590,6 +593,9 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidCopySource), &MetaOpts{Logger: c.logger, Action: "CopyObject", BucketOwner: parsedAcl.Owner})
}
}

metadata := utils.GetUserMetaData(&ctx.Request().Header)

res, err := c.be.CopyObject(ctx.Context(), &s3.CopyObjectInput{
Bucket: &bucket,
Key: &keyStart,
@@ -598,6 +604,8 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
CopySourceIfNoneMatch: &copySrcIfNoneMatch,
CopySourceIfModifiedSince: &mtime,
CopySourceIfUnmodifiedSince: &umtime,
ExpectedBucketOwner: &acct.Access,
Metadata: metadata,
})
if err == nil {
return SendXMLResponse(ctx, res, err, &MetaOpts{
@@ -620,7 +628,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {

metadata := utils.GetUserMetaData(&ctx.Request().Header)

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "PutObject", BucketOwner: parsedAcl.Owner})
}

@@ -651,27 +659,27 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}

func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
bucket, access, isRoot, parsedAcl := ctx.Params("bucket"), ctx.Locals("access").(string), ctx.Locals("isRoot").(bool), ctx.Locals("parsedAcl").(auth.ACL)
bucket, acct, isRoot, parsedAcl := ctx.Params("bucket"), ctx.Locals("account").(auth.Account), ctx.Locals("isRoot").(bool), ctx.Locals("parsedAcl").(auth.ACL)

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBucket", BucketOwner: parsedAcl.Owner})
}

err := c.be.DeleteBucket(ctx.Context(), &s3.DeleteBucketInput{
Bucket: &bucket,
})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBucket", BucketOwner: parsedAcl.Owner})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBucket", BucketOwner: parsedAcl.Owner, Status: 204})
}

func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
bucket, access, isRoot, parsedAcl := ctx.Params("bucket"), ctx.Locals("access").(string), ctx.Locals("isRoot").(bool), ctx.Locals("parsedAcl").(auth.ACL)
bucket, acct, isRoot, parsedAcl := ctx.Params("bucket"), ctx.Locals("account").(auth.Account), ctx.Locals("isRoot").(bool), ctx.Locals("parsedAcl").(auth.ACL)
var dObj s3response.DeleteObjects

if err := xml.Unmarshal(ctx.Body(), &dObj); err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), &MetaOpts{Logger: c.logger, Action: "DeleteObjects", BucketOwner: parsedAcl.Owner})
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteObjects", BucketOwner: parsedAcl.Owner})
}

@@ -689,7 +697,7 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
key := ctx.Params("key")
keyEnd := ctx.Params("*1")
uploadId := ctx.Query("uploadId")
access := ctx.Locals("access").(string)
acct := ctx.Locals("account").(auth.Account)
isRoot := ctx.Locals("isRoot").(bool)
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)

@@ -698,15 +706,16 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
}

if ctx.Request().URI().QueryArgs().Has("tagging") {
if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "RemoveObjectTagging", BucketOwner: parsedAcl.Owner})
}

err := c.be.RemoveTags(ctx.Context(), bucket, key)
err := c.be.DeleteObjectTagging(ctx.Context(), bucket, key)
return SendResponse(ctx, err, &MetaOpts{
Status: http.StatusNoContent,
Logger: c.logger,
EvSender: c.evSender,
Action: "RemoveObjectTagging",
Action: "DeleteObjectTagging",
BucketOwner: parsedAcl.Owner,
EventName: s3event.EventObjectTaggingDelete,
})
@@ -715,7 +724,7 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
if uploadId != "" {
expectedBucketOwner, requestPayer := ctx.Get("X-Amz-Expected-Bucket-Owner"), ctx.Get("X-Amz-Request-Payer")

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "AbortMultipartUpload", BucketOwner: parsedAcl.Owner})
}

@@ -726,10 +735,10 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
ExpectedBucketOwner: &expectedBucketOwner,
RequestPayer: types.RequestPayer(requestPayer),
})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "AbortMultipartUpload", BucketOwner: parsedAcl.Owner})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "AbortMultipartUpload", BucketOwner: parsedAcl.Owner, Status: 204})
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteObject", BucketOwner: parsedAcl.Owner})
}

@@ -743,13 +752,14 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
Action: "DeleteObject",
BucketOwner: parsedAcl.Owner,
EventName: s3event.EventObjectDelete,
Status: 204,
})
}

func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) error {
bucket, access, isRoot, parsedAcl := ctx.Params("bucket"), ctx.Locals("access").(string), ctx.Locals("isRoot").(bool), ctx.Locals("parsedAcl").(auth.ACL)
bucket, acct, isRoot, parsedAcl := ctx.Params("bucket"), ctx.Locals("account").(auth.Account), ctx.Locals("isRoot").(bool), ctx.Locals("parsedAcl").(auth.ACL)

if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "HeadBucket", BucketOwner: parsedAcl.Owner})
}

@@ -765,14 +775,14 @@ const (
)

func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
bucket, access, isRoot, parsedAcl := ctx.Params("bucket"), ctx.Locals("access").(string), ctx.Locals("isRoot").(bool), ctx.Locals("parsedAcl").(auth.ACL)
bucket, acct, isRoot, parsedAcl := ctx.Params("bucket"), ctx.Locals("account").(auth.Account), ctx.Locals("isRoot").(bool), ctx.Locals("parsedAcl").(auth.ACL)
key := ctx.Params("key")
keyEnd := ctx.Params("*1")
if keyEnd != "" {
key = strings.Join([]string{key, keyEnd}, "/")
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "HeadObject", BucketOwner: parsedAcl.Owner})
}

@@ -831,7 +841,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
key := ctx.Params("key")
keyEnd := ctx.Params("*1")
uploadId := ctx.Query("uploadId")
access := ctx.Locals("access").(string)
acct := ctx.Locals("account").(auth.Account)
isRoot := ctx.Locals("isRoot").(bool)
parsedAcl := ctx.Locals("parsedAcl").(auth.ACL)

@@ -839,6 +849,11 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
key = strings.Join([]string{key, keyEnd}, "/")
}

path := ctx.Path()
if path[len(path)-1:] == "/" && key[len(key)-1:] != "/" {
key = key + "/"
}

var restoreRequest s3.RestoreObjectInput
if ctx.Request().URI().QueryArgs().Has("restore") {
err := xml.Unmarshal(ctx.Body(), &restoreRequest)
@@ -846,7 +861,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "RestoreObject", BucketOwner: parsedAcl.Owner})
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "RestoreObject", BucketOwner: parsedAcl.Owner})
}

@@ -874,7 +889,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
})
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "READ", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "SelectObjectContent", BucketOwner: parsedAcl.Owner})
}

@@ -904,7 +919,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
})
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "CompleteMultipartUpload", BucketOwner: parsedAcl.Owner})
}

@@ -935,7 +950,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
}
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
if err := auth.VerifyACL(parsedAcl, acct.Access, "WRITE", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "CreateMultipartUpload", BucketOwner: parsedAcl.Owner})
}

@@ -953,6 +968,7 @@ type MetaOpts struct {
EventName s3event.EventType
ObjectETag *string
VersionId *string
Status int
}

func SendResponse(ctx *fiber.Ctx, err error, l *MetaOpts) error {
@@ -987,9 +1003,12 @@ func SendResponse(ctx *fiber.Ctx, err error, l *MetaOpts) error {

utils.LogCtxDetails(ctx, []byte{})

if l.Status == 0 {
l.Status = http.StatusOK
}
// https://github.com/gofiber/fiber/issues/2080
// ctx.SendStatus() sets incorrect content length on HEAD request
ctx.Status(http.StatusOK)
ctx.Status(l.Status)
return nil
}

@@ -1023,6 +1042,7 @@ func SendXMLResponse(ctx *fiber.Ctx, resp any, err error, l *MetaOpts) error {
}

if len(b) > 0 {
ctx.Response().Header.Set("Content-Length", fmt.Sprint(len(b)))
ctx.Response().Header.SetContentType(fiber.MIMEApplicationXML)
}
}

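Note (illustration, not part of the diff): with the new MetaOpts.Status field, a handler can request a non-200 success code and let SendResponse apply it, instead of hard-coding ctx.Status. A minimal fragment reusing only names visible in the diff above:

// inside a handler; err comes from a backend call
return SendResponse(ctx, err, &MetaOpts{
    Logger:      c.logger,
    Action:      "DeleteBucket",
    BucketOwner: parsedAcl.Owner,
    Status:      204, // SendResponse falls back to http.StatusOK when Status is left as 0
})
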
@@ -29,6 +29,7 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
@@ -97,8 +98,7 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
}

app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("role", "admin")
ctx.Locals("account", auth.Account{Access: "valid access", Role: "admin:"})
ctx.Locals("isDebug", false)
return ctx.Next()
})
@@ -115,8 +115,7 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
}

appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("role", "admin")
ctx.Locals("account", auth.Account{Access: "valid access", Role: "admin:"})
ctx.Locals("isDebug", false)
return ctx.Next()
})
@@ -200,13 +199,13 @@ func TestS3ApiController_GetActions(t *testing.T) {
StorageClass: "storage class",
}, nil
},
GetTagsFunc: func(_ context.Context, bucket, object string) (map[string]string, error) {
GetObjectTaggingFunc: func(_ context.Context, bucket, object string) (map[string]string, error) {
return map[string]string{"hello": "world"}, nil
},
},
}
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -346,7 +345,7 @@ func TestS3ApiController_ListActions(t *testing.T) {
}

app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -368,7 +367,7 @@ func TestS3ApiController_ListActions(t *testing.T) {
}
appError := fiber.New()
appError.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -506,7 +505,7 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
}
// Mock ctx.Locals
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{Owner: "valid access"})
@@ -670,7 +669,7 @@ func TestS3ApiController_PutActions(t *testing.T) {
UploadPartFunc: func(context.Context, *s3.UploadPartInput) (string, error) {
return "hello", nil
},
SetTagsFunc: func(_ context.Context, bucket, object string, tags map[string]string) error {
PutObjectTaggingFunc: func(_ context.Context, bucket, object string, tags map[string]string) error {
return nil
},
UploadPartCopyFunc: func(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
@@ -679,7 +678,7 @@ func TestS3ApiController_PutActions(t *testing.T) {
},
}
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -878,7 +877,7 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
}

app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -901,7 +900,7 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
req: httptest.NewRequest(http.MethodDelete, "/my-bucket", nil),
},
wantErr: false,
statusCode: 200,
statusCode: 204,
},
}
for _, tt := range tests {
@@ -935,7 +934,7 @@ func TestS3ApiController_DeleteObjects(t *testing.T) {
}

app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -1005,14 +1004,14 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
AbortMultipartUploadFunc: func(context.Context, *s3.AbortMultipartUploadInput) error {
return nil
},
RemoveTagsFunc: func(_ context.Context, bucket, object string) error {
DeleteObjectTaggingFunc: func(_ context.Context, bucket, object string) error {
return nil
},
},
}

app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -1033,7 +1032,7 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
}}

appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -1055,7 +1054,7 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
req: httptest.NewRequest(http.MethodDelete, "/my-bucket/my-key?uploadId=324234", nil),
},
wantErr: false,
statusCode: 200,
statusCode: 204,
},
{
name: "Remove-object-tagging-success",
@@ -1064,7 +1063,7 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
req: httptest.NewRequest(http.MethodDelete, "/my-bucket/my-key/key2?tagging", nil),
},
wantErr: false,
statusCode: 200,
statusCode: 204,
},
{
name: "Delete-object-success",
@@ -1073,7 +1072,7 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
req: httptest.NewRequest(http.MethodDelete, "/my-bucket/my-key", nil),
},
wantErr: false,
statusCode: 200,
statusCode: 204,
},
{
name: "Delete-object-error",
@@ -1116,7 +1115,7 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
}

app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -1139,7 +1138,7 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
}

appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -1218,7 +1217,7 @@ func TestS3ApiController_HeadObject(t *testing.T) {
}

app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -1241,7 +1240,7 @@ func TestS3ApiController_HeadObject(t *testing.T) {
}

appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -1321,7 +1320,7 @@ func TestS3ApiController_CreateActions(t *testing.T) {
`

app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("access", "valid access")
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
ctx.Locals("isDebug", false)
ctx.Locals("parsedAcl", auth.ACL{})
@@ -1419,16 +1418,9 @@ func Test_XMLresponse(t *testing.T) {
resp any
err error
}

app := fiber.New()

var ctx fiber.Ctx
// Mocking the fiber ctx
app.Get("/:bucket/:key", func(c *fiber.Ctx) error {
ctx = *c
return nil
})

app.Test(httptest.NewRequest(http.MethodGet, "/my-bucket/my-key", nil))
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})

tests := []struct {
name string
@@ -1439,7 +1431,7 @@ func Test_XMLresponse(t *testing.T) {
{
name: "Internal-server-error",
args: args{
ctx: &ctx,
ctx: ctx,
resp: nil,
err: s3err.GetAPIError(s3err.ErrInternalError),
},
@@ -1449,7 +1441,7 @@ func Test_XMLresponse(t *testing.T) {
{
name: "Error-not-implemented",
args: args{
ctx: &ctx,
ctx: ctx,
resp: nil,
err: s3err.GetAPIError(s3err.ErrNotImplemented),
},
@@ -1459,7 +1451,7 @@ func Test_XMLresponse(t *testing.T) {
{
name: "Invalid-request-body",
args: args{
ctx: &ctx,
ctx: ctx,
resp: make(chan int),
err: nil,
},
@@ -1469,7 +1461,7 @@ func Test_XMLresponse(t *testing.T) {
{
name: "Successful-response",
args: args{
ctx: &ctx,
ctx: ctx,
resp: "Valid response",
err: nil,
},
@@ -1499,16 +1491,11 @@ func Test_response(t *testing.T) {
ctx *fiber.Ctx
resp any
err error
opts *MetaOpts
}
app := fiber.New()
var ctx fiber.Ctx
// Mocking the fiber ctx
app.Get("/:bucket/:key", func(c *fiber.Ctx) error {
ctx = *c
return nil
})

app.Test(httptest.NewRequest(http.MethodGet, "/my-bucket/my-key", nil))
app := fiber.New()
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})

tests := []struct {
name string
@@ -1519,9 +1506,10 @@ func Test_response(t *testing.T) {
{
name: "Internal-server-error",
args: args{
ctx: &ctx,
ctx: ctx,
resp: nil,
err: s3err.GetAPIError(s3err.ErrInternalError),
opts: &MetaOpts{},
},
wantErr: false,
statusCode: 500,
@@ -1529,9 +1517,10 @@ func Test_response(t *testing.T) {
{
name: "Internal-server-error-not-api",
args: args{
ctx: &ctx,
ctx: ctx,
resp: nil,
err: fmt.Errorf("custom error"),
opts: &MetaOpts{},
},
wantErr: false,
statusCode: 500,
@@ -1539,9 +1528,10 @@ func Test_response(t *testing.T) {
{
name: "Error-not-implemented",
args: args{
ctx: &ctx,
ctx: ctx,
resp: nil,
err: s3err.GetAPIError(s3err.ErrNotImplemented),
opts: &MetaOpts{},
},
wantErr: false,
statusCode: 501,
@@ -1549,17 +1539,31 @@ func Test_response(t *testing.T) {
{
name: "Successful-response",
args: args{
ctx: &ctx,
ctx: ctx,
resp: "Valid response",
err: nil,
opts: &MetaOpts{},
},
wantErr: false,
statusCode: 200,
},
{
name: "Successful-response-status-204",
args: args{
ctx: ctx,
resp: "Valid response",
err: nil,
opts: &MetaOpts{
Status: 204,
},
},
wantErr: false,
statusCode: 204,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := SendResponse(tt.args.ctx, tt.args.err, &MetaOpts{}); (err != nil) != tt.wantErr {
if err := SendResponse(tt.args.ctx, tt.args.err, tt.args.opts); (err != nil) != tt.wantErr {
t.Errorf("response() %v error = %v, wantErr %v", tt.name, err, tt.wantErr)
}

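Note (illustration, not part of the diff): the tests above stop routing a fake request and instead acquire a fiber context directly. A minimal sketch of that pattern, assuming it runs inside the same test package:

app := fiber.New()
ctx := app.AcquireCtx(&fasthttp.RequestCtx{}) // bare context, no routing needed
defer app.ReleaseCtx(ctx)

if err := SendResponse(ctx, nil, &MetaOpts{Status: 204}); err != nil {
    // fail the test here
}
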
@@ -18,7 +18,7 @@ var _ auth.IAMService = &IAMServiceMock{}
//
// // make and configure a mocked auth.IAMService
// mockedIAMService := &IAMServiceMock{
// CreateAccountFunc: func(access string, account auth.Account) error {
// CreateAccountFunc: func(account auth.Account) error {
// panic("mock out the CreateAccount method")
// },
// DeleteUserAccountFunc: func(access string) error {
@@ -30,6 +30,9 @@ var _ auth.IAMService = &IAMServiceMock{}
// ListUserAccountsFunc: func() ([]auth.Account, error) {
// panic("mock out the ListUserAccounts method")
// },
// ShutdownFunc: func() error {
// panic("mock out the Shutdown method")
// },
// }
//
// // use mockedIAMService in code that requires auth.IAMService
@@ -38,7 +41,7 @@ var _ auth.IAMService = &IAMServiceMock{}
// }
type IAMServiceMock struct {
// CreateAccountFunc mocks the CreateAccount method.
CreateAccountFunc func(access string, account auth.Account) error
CreateAccountFunc func(account auth.Account) error

// DeleteUserAccountFunc mocks the DeleteUserAccount method.
DeleteUserAccountFunc func(access string) error
@@ -49,12 +52,13 @@ type IAMServiceMock struct {
// ListUserAccountsFunc mocks the ListUserAccounts method.
ListUserAccountsFunc func() ([]auth.Account, error)

// ShutdownFunc mocks the Shutdown method.
ShutdownFunc func() error

// calls tracks calls to the methods.
calls struct {
// CreateAccount holds details about calls to the CreateAccount method.
CreateAccount []struct {
// Access is the access argument value.
Access string
// Account is the account argument value.
Account auth.Account
}
@@ -71,29 +75,31 @@ type IAMServiceMock struct {
// ListUserAccounts holds details about calls to the ListUserAccounts method.
ListUserAccounts []struct {
}
// Shutdown holds details about calls to the Shutdown method.
Shutdown []struct {
}
}
lockCreateAccount sync.RWMutex
lockDeleteUserAccount sync.RWMutex
lockGetUserAccount sync.RWMutex
lockListUserAccounts sync.RWMutex
lockShutdown sync.RWMutex
}

// CreateAccount calls CreateAccountFunc.
func (mock *IAMServiceMock) CreateAccount(access string, account auth.Account) error {
func (mock *IAMServiceMock) CreateAccount(account auth.Account) error {
if mock.CreateAccountFunc == nil {
panic("IAMServiceMock.CreateAccountFunc: method is nil but IAMService.CreateAccount was just called")
}
callInfo := struct {
Access string
Account auth.Account
}{
Access: access,
Account: account,
}
mock.lockCreateAccount.Lock()
mock.calls.CreateAccount = append(mock.calls.CreateAccount, callInfo)
mock.lockCreateAccount.Unlock()
return mock.CreateAccountFunc(access, account)
return mock.CreateAccountFunc(account)
}

// CreateAccountCalls gets all the calls that were made to CreateAccount.
@@ -101,11 +107,9 @@ func (mock *IAMServiceMock) CreateAccount(access string, account auth.Account) e
//
// len(mockedIAMService.CreateAccountCalls())
func (mock *IAMServiceMock) CreateAccountCalls() []struct {
Access string
Account auth.Account
} {
var calls []struct {
Access string
Account auth.Account
}
mock.lockCreateAccount.RLock()
@@ -204,3 +208,30 @@ func (mock *IAMServiceMock) ListUserAccountsCalls() []struct {
mock.lockListUserAccounts.RUnlock()
return calls
}

// Shutdown calls ShutdownFunc.
func (mock *IAMServiceMock) Shutdown() error {
if mock.ShutdownFunc == nil {
panic("IAMServiceMock.ShutdownFunc: method is nil but IAMService.Shutdown was just called")
}
callInfo := struct {
}{}
mock.lockShutdown.Lock()
mock.calls.Shutdown = append(mock.calls.Shutdown, callInfo)
mock.lockShutdown.Unlock()
return mock.ShutdownFunc()
}

// ShutdownCalls gets all the calls that were made to Shutdown.
// Check the length with:
//
// len(mockedIAMService.ShutdownCalls())
func (mock *IAMServiceMock) ShutdownCalls() []struct {
} {
var calls []struct {
}
mock.lockShutdown.RLock()
calls = mock.calls.Shutdown
mock.lockShutdown.RUnlock()
return calls
}

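Note (illustration, not part of the diff): a minimal sketch of configuring the regenerated mock with the new single-argument CreateAccount signature; the field values are illustrative only.

mockIAM := &IAMServiceMock{
    CreateAccountFunc: func(account auth.Account) error {
        // assert on account.Access / account.Role here
        return nil
    },
    ShutdownFunc: func() error { return nil },
}
_ = mockIAM.CreateAccount(auth.Account{Access: "user1", Secret: "secret", Role: "user"})
// len(mockIAM.CreateAccountCalls()) == 1
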
@@ -28,7 +28,7 @@ import (

func AclParser(be backend.Backend, logger s3log.AuditLogger) fiber.Handler {
return func(ctx *fiber.Ctx) error {
isRoot, access := ctx.Locals("isRoot").(bool), ctx.Locals("access").(string)
isRoot, acct := ctx.Locals("isRoot").(bool), ctx.Locals("account").(auth.Account)
path := ctx.Path()
pathParts := strings.Split(path, "/")
bucket := pathParts[1]
@@ -39,7 +39,7 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger) fiber.Handler {
return ctx.Next()
}
if len(pathParts) == 2 && pathParts[1] != "" && ctx.Method() == http.MethodPut && !ctx.Request().URI().QueryArgs().Has("acl") {
if err := auth.IsAdmin(access, isRoot); err != nil {
if err := auth.IsAdmin(acct, isRoot); err != nil {
return controllers.SendXMLResponse(ctx, nil, err, &controllers.MetaOpts{Logger: logger, Action: "CreateBucket"})
}
return ctx.Next()

@@ -18,6 +18,7 @@ import (
"crypto/sha256"
"encoding/hex"
"fmt"
"math"
"net/http"
"os"
"strings"
@@ -94,15 +95,13 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
}, &controllers.MetaOpts{Logger: logger})
}

// Validate the dates difference
err := validateDate(creds[1])
if err != nil {
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
}

ctx.Locals("access", creds[0])
ctx.Locals("isRoot", creds[0] == root.Access)

_, err := time.Parse(YYYYMMDD, creds[1])
if err != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch), &controllers.MetaOpts{Logger: logger})
}

signHdrKv := strings.Split(authParts[1], "=")
if len(signHdrKv) != 2 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed), &controllers.MetaOpts{Logger: logger})
@@ -116,7 +115,7 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
if err != nil {
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
}
ctx.Locals("role", account.Role)
ctx.Locals("account", account)

// Check X-Amz-Date header
date := ctx.Get("X-Amz-Date")
@@ -134,6 +133,12 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch), &controllers.MetaOpts{Logger: logger})
}

// Validate the dates difference
err = validateDate(tdate)
if err != nil {
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
}

hashPayloadHeader := ctx.Get("X-Amz-Content-Sha256")
ok := isSpecialPayload(hashPayloadHeader)

@@ -193,6 +198,7 @@ type accounts struct {
func (a accounts) getAccount(access string) (auth.Account, error) {
if access == a.root.Access {
return auth.Account{
Access: a.root.Access,
Secret: a.root.Secret,
Role: "admin",
}, nil
@@ -214,25 +220,24 @@ func isSpecialPayload(str string) bool {
return specialValues[str]
}

func validateDate(date string) error {
credDate, err := time.Parse(YYYYMMDD, date)
if err != nil {
return s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
}
func validateDate(date time.Time) error {
now := time.Now().UTC()
diff := date.Unix() - now.Unix()

today := time.Now()
if credDate.Year() > today.Year() || (credDate.Year() == today.Year() && credDate.YearDay() > today.YearDay()) {
return s3err.APIError{
Code: "SignatureDoesNotMatch",
Description: fmt.Sprintf("Signature not yet current: %s is still later than %s", credDate.Format(YYYYMMDD), today.Format(YYYYMMDD)),
HTTPStatusCode: http.StatusForbidden,
}
}
if credDate.Year() < today.Year() || (credDate.Year() == today.Year() && credDate.YearDay() < today.YearDay()) {
return s3err.APIError{
Code: "SignatureDoesNotMatch",
Description: fmt.Sprintf("Signature expired: %s is now earlier than %s", credDate.Format(YYYYMMDD), today.Format(YYYYMMDD)),
HTTPStatusCode: http.StatusForbidden,
// Checks the dates difference to be less than a minute
if math.Abs(float64(diff)) > 60 {
if diff > 0 {
return s3err.APIError{
Code: "SignatureDoesNotMatch",
Description: fmt.Sprintf("Signature not yet current: %s is still later than %s", date.Format(iso8601Format), now.Format(iso8601Format)),
HTTPStatusCode: http.StatusForbidden,
}
} else {
return s3err.APIError{
Code: "SignatureDoesNotMatch",
Description: fmt.Sprintf("Signature expired: %s is now earlier than %s", date.Format(iso8601Format), now.Format(iso8601Format)),
HTTPStatusCode: http.StatusForbidden,
}
}
}

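Note (illustration, not part of the diff): the rewritten validateDate takes the parsed X-Amz-Date as a time.Time, compares it against the gateway clock, and tolerates at most 60 seconds of skew in either direction; anything outside that window is rejected as SignatureDoesNotMatch with HTTP 403. An illustrative call, assuming it runs in the same package:

// two minutes in the past: rejected as "Signature expired"
if err := validateDate(time.Now().UTC().Add(-2 * time.Minute)); err != nil {
    // err is an s3err.APIError with HTTPStatusCode 403
}
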
@@ -31,11 +31,6 @@ func DecodeURL(logger s3log.AuditLogger) fiber.Handler {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidURI), &controllers.MetaOpts{Logger: logger})
}
ctx.Path(decoded.Path)
decodedURL, err := url.QueryUnescape(reqURL)
if err != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidURI), &controllers.MetaOpts{Logger: logger})
}
ctx.Request().SetRequestURI(decodedURL)
return ctx.Next()
}
}

@@ -23,28 +23,36 @@ import (
"github.com/versity/versitygw/s3log"
)

type S3ApiRouter struct{}
type S3ApiRouter struct {
WithAdmSrv bool
}

func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender) {
s3ApiController := controllers.New(be, iam, logger, evs)
adminController := controllers.NewAdminController(iam, be)

// CreateUser admin api
app.Patch("/create-user", adminController.CreateUser)
if sa.WithAdmSrv {
adminController := controllers.NewAdminController(iam, be)

// DeleteUsers admin api
app.Patch("/delete-user", adminController.DeleteUser)
// CreateUser admin api
app.Patch("/create-user", adminController.CreateUser)

// ListUsers admin api
app.Patch("/list-users", adminController.ListUsers)
// DeleteUsers admin api
app.Patch("/delete-user", adminController.DeleteUser)

// ChangeBucketOwner
app.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)
// ListUsers admin api
app.Patch("/list-users", adminController.ListUsers)

// ChangeBucketOwner admin api
app.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)

// ListBucketsAndOwners admin api
app.Patch("/list-buckets", adminController.ListBuckets)
}

// ListBuckets action
app.Get("/", s3ApiController.ListBuckets)

// PutBucket action
// CreateBucket action
// PutBucketAcl action
app.Put("/:bucket", s3ApiController.PutBucketActions)

@@ -53,6 +61,7 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ

// HeadBucket
app.Head("/:bucket", s3ApiController.HeadBucket)

// GetBucketAcl action
// ListMultipartUploads action
// ListObjects action
@@ -61,22 +70,34 @@ func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMServ

// HeadObject action
app.Head("/:bucket/:key/*", s3ApiController.HeadObject)

// GetObjectAcl action
// GetObject action
// ListObjectParts action
// GetObjectTagging action
// ListParts action
// GetObjectAttributes action
app.Get("/:bucket/:key/*", s3ApiController.GetActions)

// DeleteObject action
// AbortMultipartUpload action
// DeleteObjectTagging action
app.Delete("/:bucket/:key/*", s3ApiController.DeleteActions)

// DeleteObjects action
app.Post("/:bucket", s3ApiController.DeleteObjects)

// CompleteMultipartUpload action
// CreateMultipartUpload
// RestoreObject action
// SelectObjectContent action
app.Post("/:bucket/:key/*", s3ApiController.CreateActions)

// CopyObject action
// PutObject action
// UploadPart action
// UploadPartCopy action
// PutObjectTagging action
// PutObjectAcl action
app.Put("/:bucket/:key/*", s3ApiController.PutActions)
}

@@ -70,6 +70,11 @@ func WithTLS(cert tls.Certificate) Option {
return func(s *S3ApiServer) { s.cert = &cert }
}

// WithAdminServer runs admin endpoints with the gateway in the same network
func WithAdminServer() Option {
return func(s *S3ApiServer) { s.router.WithAdmSrv = true }
}

// WithDebug sets debug output
func WithDebug() Option {
return func(s *S3ApiServer) { s.debug = true }

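Note (illustration, not part of the diff): WithAdminServer follows the same functional-option pattern as WithTLS and WithDebug, and the admin routes are registered only when it is passed to the S3ApiServer constructor (the constructor itself is not shown in this excerpt). Illustrative fragment:

opts := []Option{
    WithDebug(),
    WithAdminServer(), // enables /create-user, /delete-user, /list-users, /change-bucket-owner, /list-buckets
}
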
@@ -35,13 +35,16 @@ var (

func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
metadata = make(map[string]string)
headers.VisitAll(func(key, value []byte) {
if strings.HasPrefix(string(key), "X-Amz-Meta-") {
trimmedKey := strings.TrimPrefix(string(key), "X-Amz-Meta-")
headers.DisableNormalizing()
headers.VisitAllInOrder(func(key, value []byte) {
hKey := string(key)
if strings.HasPrefix(strings.ToLower(hKey), "x-amz-meta-") {
trimmedKey := hKey[11:]
headerValue := string(value)
metadata[trimmedKey] = headerValue
}
})
headers.EnableNormalizing()

return
}
@@ -49,7 +52,7 @@ func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]strin
func CreateHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string) (*http.Request, error) {
req := ctx.Request()

httpReq, err := http.NewRequest(string(req.Header.Method()), req.URI().String(), bytes.NewReader(req.Body()))
httpReq, err := http.NewRequest(string(req.Header.Method()), string(ctx.Context().RequestURI()), bytes.NewReader(req.Body()))
if err != nil {
return nil, errors.New("error in creating an http request")
}
@@ -75,18 +78,20 @@ func CreateHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string) (*http.Reques
}

func SetMetaHeaders(ctx *fiber.Ctx, meta map[string]string) {
ctx.Response().Header.DisableNormalizing()
for key, val := range meta {
ctx.Set(fmt.Sprintf("X-Amz-Meta-%s", key), val)
ctx.Response().Header.Set(fmt.Sprintf("X-Amz-Meta-%s", key), val)
}
ctx.Response().Header.EnableNormalizing()
}

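Note (illustration, not part of the diff): with normalization disabled and a case-insensitive prefix check, lower-case metadata headers are now picked up as well. A small fragment, assuming it runs in the same package as GetUserMetaData:

var hdr fasthttp.RequestHeader
hdr.DisableNormalizing()
hdr.Add("x-amz-meta-color", "blue")

meta := GetUserMetaData(&hdr)
// meta["color"] == "blue"; the key keeps whatever case follows the 11-byte prefix
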
func ParseUint(str string) (int32, error) {
if str == "" {
return -1, nil
return 1000, nil
}
num, err := strconv.ParseUint(str, 10, 16)
if err != nil {
return -1, s3err.GetAPIError(s3err.ErrInvalidMaxKeys)
return 1000, s3err.GetAPIError(s3err.ErrInvalidMaxKeys)
}
return int32(num), nil
}

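Note (illustration, not part of the diff): ParseUint now falls back to 1000, the S3 default page size for max-keys style parameters, instead of -1, both for empty input and on parse errors. Illustrative calls:

n, err := ParseUint("")    // n == 1000, err == nil
fmt.Println(n, err)
n, err = ParseUint("bla")  // n == 1000, err is ErrInvalidMaxKeys
fmt.Println(n, err)
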
@@ -79,13 +79,6 @@ func TestGetUserMetaData(t *testing.T) {
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})
req := ctx.Request()

// Case 2
ctx2 := app.AcquireCtx(&fasthttp.RequestCtx{})
req2 := ctx2.Request()

req2.Header.Add("X-Amz-Meta-Name", "Nick")
req2.Header.Add("X-Amz-Meta-Age", "27")

tests := []struct {
name string
args args
@@ -98,16 +91,6 @@ func TestGetUserMetaData(t *testing.T) {
},
wantMetadata: map[string]string{},
},
{
name: "Success-non-empty-response",
args: args{
headers: &req2.Header,
},
wantMetadata: map[string]string{
"Age": "27",
"Name": "Nick",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -237,7 +220,7 @@ func TestParseUint(t *testing.T) {
args: args{
str: "",
},
want: -1,
want: 1000,
wantErr: false,
},
{
@@ -245,7 +228,7 @@ func TestParseUint(t *testing.T) {
args: args{
str: "bla",
},
want: -1,
want: 1000,
wantErr: true,
},
{
@@ -253,9 +236,17 @@ func TestParseUint(t *testing.T) {
args: args{
str: "-5",
},
want: -1,
want: 1000,
wantErr: true,
},
{
name: "Parse-uint-success",
args: args{
str: "23",
},
want: 23,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

@@ -115,6 +115,7 @@ const (
// Non-AWS errors
ErrExistingObjectIsDirectory
ErrObjectParentIsFile
ErrDirectoryObjectContainsData
)

var errorCodeResponse = map[ErrorCode]APIError{
@@ -408,6 +409,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Object parent already exists as a file.",
HTTPStatusCode: http.StatusConflict,
},
ErrDirectoryObjectContainsData: {
Code: "DirectoryObjectContainsData",
Description: "Directory object contains data payload.",
HTTPStatusCode: http.StatusBadRequest,
},
}

// GetAPIError provides API Error for input API error code.

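Note (illustration, not part of the diff): the new code is surfaced like the other non-AWS error codes. An illustrative backend-side check (isDirObject and payload are hypothetical names):

if isDirObject && len(payload) > 0 {
    // maps to HTTP 400 "DirectoryObjectContainsData"
    return s3err.GetAPIError(s3err.ErrDirectoryObjectContainsData)
}
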
@@ -131,6 +131,11 @@ type SelectObjectContentResult struct {
Records *types.RecordsEvent
Stats *types.StatsEvent
Progress *types.ProgressEvent
Cont *string
End *string
Cont *types.ContinuationEvent
End *types.EndEvent
}

type Bucket struct {
Name string `json:"name"`
Owner string `json:"owner"`
}