Mirror of https://github.com/versity/versitygw.git (synced 2026-01-29 14:32:02 +00:00)

Compare commits: ben/read_o ... v0.20

25 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 5853c3240b |  |
|  | 8bd068c22c |  |
|  | f08ccacd0f |  |
|  | 46aab041cc |  |
|  | a7a8ea9e61 |  |
|  | 07b01a738a |  |
|  | 6f35a5fbaf |  |
|  | 05530e02c9 |  |
|  | b2f028939e |  |
|  | 7ccd1dd619 |  |
|  | b10d08a8df |  |
|  | c81403fe90 |  |
|  | 5f422fefd8 |  |
|  | 0a74509d00 |  |
|  | 65abac9823 |  |
|  | 5ec2de544c |  |
|  | 53a50df742 |  |
|  | 936ba1f84b |  |
|  | ffe1fc4ad3 |  |
|  | 020b2db975 |  |
|  | 17b1dbe025 |  |
|  | 5937af22c6 |  |
|  | 5c2e7cce05 |  |
|  | 6b9ee3a587 |  |
|  | e9a036d100 |  |
2 .github/workflows/system.yml vendored
@@ -51,7 +51,7 @@ jobs:
export WORKSPACE=$GITHUB_WORKSPACE
openssl genpkey -algorithm RSA -out versitygw.pem -pkeyopt rsa_keygen_bits:2048
openssl req -new -x509 -key versitygw.pem -out cert.pem -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"
mkdir cover
mkdir cover iam
VERSITYGW_TEST_ENV=./tests/.env.default ./tests/run_all.sh

#- name: Build and run, s3 backend
@@ -15,7 +15,8 @@ Download [latest release](https://github.com/versity/versitygw/releases)
| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ |

### News
* New performance analysis article [https://github.com/versity/versitygw/wiki/Performance](https://github.com/versity/versitygw/wiki/Performance)
* New performance (scale up) analysis article [https://github.com/versity/versitygw/wiki/Performance](https://github.com/versity/versitygw/wiki/Performance)
* New performance (scale out) Part 2 analysis article [https://github.com/versity/versitygw/wiki/Performance-Part-2](https://github.com/versity/versitygw/wiki/Performance-Part-2)

### Mailing List
Keep up to date with latest gateway announcements by signing up to the [versitygw mailing list](https://www.versity.com/products/versitygw#signup).
@@ -22,7 +22,7 @@ func NewLDAPService(url, bindDN, pass, queryBase, accAtr, secAtr, roleAtr, objCl
if url == "" || bindDN == "" || pass == "" || queryBase == "" || accAtr == "" || secAtr == "" || roleAtr == "" || objClasses == "" {
return nil, fmt.Errorf("required parameters list not fully provided")
}
conn, err := ldap.Dial("tcp", url)
conn, err := ldap.DialURL(url)
if err != nil {
return nil, fmt.Errorf("failed to connect to LDAP server: %w", err)
}
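Note on the hunk above: ldap.DialURL, unlike ldap.Dial, takes a full URL whose scheme selects plain LDAP or LDAPS, so the endpoint must now be configured as a URL. A minimal sketch of the difference; the host below is a placeholder, not taken from this repository's configuration:

package main

import (
    "log"

    "github.com/go-ldap/ldap/v3"
)

func main() {
    // Old style: transport chosen by the dial function, address is host:port.
    // conn, err := ldap.Dial("tcp", "ldap.example.com:389")

    // New style: the URL scheme (ldap:// or ldaps://) picks the transport.
    conn, err := ldap.DialURL("ldaps://ldap.example.com:636")
    if err != nil {
        log.Fatalf("failed to connect to LDAP server: %v", err)
    }
    defer conn.Close()
}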
@@ -32,7 +32,7 @@ func (IAMServiceSingle) CreateAccount(account Account) error {

// GetUserAccount no accounts in single tenant mode
func (IAMServiceSingle) GetUserAccount(access string) (Account, error) {
return Account{}, ErrNotSupported
return Account{}, ErrNoSuchUser
}

// DeleteUserAccount no accounts in single tenant mode
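Returning ErrNoSuchUser instead of ErrNotSupported lets callers treat a miss in single-tenant mode like a lookup miss in any other IAM backend. A self-contained sketch of the idea; the Account type and sentinel below are local stand-ins for the gateway's auth package, not its real definitions:

package main

import (
    "errors"
    "fmt"
)

// Stand-ins so the sketch compiles on its own.
type Account struct{ Access string }

var ErrNoSuchUser = errors.New("user does not exist")

func getUserAccount(access string) (Account, error) {
    // single-tenant mode: there are no per-user accounts
    return Account{}, ErrNoSuchUser
}

func main() {
    _, err := getUserAccount("AKIDEXAMPLE")
    if errors.Is(err, ErrNoSuchUser) {
        fmt.Println("unknown access key: fall back to the root account check")
    }
}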
26 backend/meta/meta.go (new file)
@@ -0,0 +1,26 @@
package meta

// MetadataStorer defines the interface for managing metadata.
// When object == "", the operation is on the bucket.
type MetadataStorer interface {
    // RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
    // Returns the value of the attribute, or an error if the attribute does not exist.
    RetrieveAttribute(bucket, object, attribute string) ([]byte, error)

    // StoreAttribute stores the value of a specific attribute for an object or a bucket.
    // If attribute already exists, new attribute should replace existing.
    // Returns an error if the operation fails.
    StoreAttribute(bucket, object, attribute string, value []byte) error

    // DeleteAttribute removes the value of a specific attribute for an object or a bucket.
    // Returns an error if the operation fails.
    DeleteAttribute(bucket, object, attribute string) error

    // ListAttributes lists all attributes for an object or a bucket.
    // Returns list of attribute names, or an error if the operation fails.
    ListAttributes(bucket, object string) ([]string, error)

    // DeleteAttributes removes all attributes for an object or a bucket.
    // Returns an error if the operation fails.
    DeleteAttributes(bucket, object string) error
}
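To make the contract above concrete, here is a toy, purely illustrative in-memory MetadataStorer. It is not part of the gateway (the real implementation is the xattr-backed one in the next file); it only shows the shape of the interface: attribute values keyed per bucket/object, with StoreAttribute replacing any existing value.

// Sketch only: a throwaway in-memory MetadataStorer.
package metaexample

import (
    "errors"
    "path"
)

var errNoSuchKey = errors.New("no such key")

type MemMeta struct {
    // attribute values keyed by "bucket/object" and then attribute name
    attrs map[string]map[string][]byte
}

func NewMemMeta() *MemMeta {
    return &MemMeta{attrs: make(map[string]map[string][]byte)}
}

func key(bucket, object string) string { return path.Join(bucket, object) }

func (m *MemMeta) RetrieveAttribute(bucket, object, attribute string) ([]byte, error) {
    if v, ok := m.attrs[key(bucket, object)][attribute]; ok {
        return v, nil
    }
    return nil, errNoSuchKey
}

func (m *MemMeta) StoreAttribute(bucket, object, attribute string, value []byte) error {
    k := key(bucket, object)
    if m.attrs[k] == nil {
        m.attrs[k] = make(map[string][]byte)
    }
    m.attrs[k][attribute] = value // replaces any existing value
    return nil
}

func (m *MemMeta) DeleteAttribute(bucket, object, attribute string) error {
    delete(m.attrs[key(bucket, object)], attribute)
    return nil
}

func (m *MemMeta) ListAttributes(bucket, object string) ([]string, error) {
    var names []string
    for name := range m.attrs[key(bucket, object)] {
        names = append(names, name)
    }
    return names, nil
}

func (m *MemMeta) DeleteAttributes(bucket, object string) error {
    delete(m.attrs, key(bucket, object))
    return nil
}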
76 backend/meta/xattr.go (new file)
@@ -0,0 +1,76 @@
package meta

import (
    "errors"
    "path/filepath"
    "strings"
    "syscall"

    "github.com/pkg/xattr"
)

const (
    xattrPrefix = "user."
)

var (
    // ErrNoSuchKey is returned when the key does not exist.
    ErrNoSuchKey = errors.New("no such key")
)

type XattrMeta struct{}

// RetrieveAttribute retrieves the value of a specific attribute for an object in a bucket.
func (x XattrMeta) RetrieveAttribute(bucket, object, attribute string) ([]byte, error) {
    b, err := xattr.Get(filepath.Join(bucket, object), xattrPrefix+attribute)
    if errors.Is(err, errNoData) {
        return nil, ErrNoSuchKey
    }
    return b, err
}

// StoreAttribute stores the value of a specific attribute for an object in a bucket.
func (x XattrMeta) StoreAttribute(bucket, object, attribute string, value []byte) error {
    return xattr.Set(filepath.Join(bucket, object), xattrPrefix+attribute, value)
}

// DeleteAttribute removes the value of a specific attribute for an object in a bucket.
func (x XattrMeta) DeleteAttribute(bucket, object, attribute string) error {
    err := xattr.Remove(filepath.Join(bucket, object), xattrPrefix+attribute)
    if errors.Is(err, errNoData) {
        return ErrNoSuchKey
    }
    return err
}

// DeleteAttributes is not implemented for xattr since xattrs
// are automatically removed when the file is deleted.
func (x XattrMeta) DeleteAttributes(bucket, object string) error {
    return nil
}

// ListAttributes lists all attributes for an object in a bucket.
func (x XattrMeta) ListAttributes(bucket, object string) ([]string, error) {
    attrs, err := xattr.List(filepath.Join(bucket, object))
    if err != nil {
        return nil, err
    }
    attributes := make([]string, 0, len(attrs))
    for _, attr := range attrs {
        if !isUserAttr(attr) {
            continue
        }
        attributes = append(attributes, strings.TrimPrefix(attr, xattrPrefix))
    }
    return attributes, nil
}

func isUserAttr(attr string) bool {
    return strings.HasPrefix(attr, xattrPrefix)
}

// Test is a helper function to test if xattrs are supported.
func (x XattrMeta) Test(path string) bool {
    _, err := xattr.Get(path, "user.test")
    return !errors.Is(err, syscall.ENOTSUP)
}
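A short usage sketch of XattrMeta against a scratch directory, assuming the underlying filesystem supports user extended attributes (the Test helper above exists precisely because tmpfs and some container filesystems do not). Values and paths are illustrative:

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/versity/versitygw/backend/meta"
)

func main() {
    dir, err := os.MkdirTemp(".", "xattr-demo-*")
    if err != nil {
        log.Fatal(err)
    }
    defer os.RemoveAll(dir)

    m := meta.XattrMeta{}
    if !m.Test(dir) {
        log.Fatal("filesystem does not support user xattrs")
    }

    // With object == "", the attribute is stored on the bucket (here: the dir).
    // The "user." namespace prefix is added by the implementation.
    if err := m.StoreAttribute(dir, "", "etag", []byte("example-etag")); err != nil {
        log.Fatal(err)
    }
    val, err := m.RetrieveAttribute(dir, "", "etag")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("etag attribute: %s\n", val)
}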
@@ -15,7 +15,7 @@
//go:build !freebsd && !openbsd && !netbsd
// +build !freebsd,!openbsd,!netbsd

package posix
package meta

import "syscall"
@@ -15,7 +15,7 @@
//go:build freebsd || openbsd || netbsd
// +build freebsd openbsd netbsd

package posix
package meta

import "syscall"
@@ -34,9 +34,9 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/google/uuid"
"github.com/pkg/xattr"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)

@@ -44,6 +44,9 @@ import (
type Posix struct {
backend.BackendUnsupported

// bucket/object metadata storage facility
meta meta.MetadataStorer

rootfd *os.File
rootdir string

@@ -63,15 +66,15 @@ var _ backend.Backend = &Posix{}
const (
metaTmpDir = ".sgwtmp"
metaTmpMultipartDir = metaTmpDir + "/multipart"
onameAttr = "user.objname"
onameAttr = "objname"
tagHdr = "X-Amz-Tagging"
metaHdr = "X-Amz-Meta"
contentTypeHdr = "content-type"
contentEncHdr = "content-encoding"
emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e"
aclkey = "user.acl"
etagkey = "user.etag"
policykey = "user.policy"
aclkey = "acl"
etagkey = "etag"
policykey = "policy"
)

type PosixOpts struct {

@@ -79,7 +82,7 @@ type PosixOpts struct {
ChownGID bool
}

func New(rootdir string, opts PosixOpts) (*Posix, error) {
func New(rootdir string, meta meta.MetadataStorer, opts PosixOpts) (*Posix, error) {
err := os.Chdir(rootdir)
if err != nil {
return nil, fmt.Errorf("chdir %v: %w", rootdir, err)

@@ -90,13 +93,8 @@ func New(rootdir string, opts PosixOpts) (*Posix, error) {
return nil, fmt.Errorf("open %v: %w", rootdir, err)
}

_, err = xattr.FGet(f, "user.test")
if errors.Is(err, syscall.ENOTSUP) {
f.Close()
return nil, fmt.Errorf("xattr not supported on %v", rootdir)
}

return &Posix{
meta: meta,
rootfd: f,
rootdir: rootdir,
euid: os.Geteuid(),
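With the constructor change above, the caller now injects the metadata store (and is responsible for checking xattr support) instead of posix probing it with xattr.FGet. A minimal construction sketch under the new signature; the root directory path is illustrative:

package main

import (
    "log"

    "github.com/versity/versitygw/backend/meta"
    "github.com/versity/versitygw/backend/posix"
)

func main() {
    // Choose (and verify) the metadata store before handing it to posix.
    ms := meta.XattrMeta{}
    if !ms.Test("/srv/gateway") {
        log.Fatal("posix backend requires extended attributes support")
    }

    be, err := posix.New("/srv/gateway", ms, posix.PosixOpts{})
    if err != nil {
        log.Fatalf("init posix: %v", err)
    }
    _ = be
}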
@@ -143,7 +141,7 @@ func (p *Posix) ListBuckets(_ context.Context, owner string, isAdmin bool) (s3re
continue
}

aclTag, err := xattr.Get(entry.Name(), aclkey)
aclTag, err := p.meta.RetrieveAttribute(entry.Name(), "", aclkey)
if err != nil {
return s3response.ListAllMyBucketsResult{}, fmt.Errorf("get acl tag: %w", err)
}

@@ -224,7 +222,7 @@ func (p *Posix) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, a
}
}

if err := xattr.Set(bucket, aclkey, acl); err != nil {
if err := p.meta.StoreAttribute(bucket, "", aclkey, acl); err != nil {
return fmt.Errorf("set acl: %w", err)
}

@@ -261,6 +259,11 @@ func (p *Posix) DeleteBucket(_ context.Context, input *s3.DeleteBucketInput) err
return fmt.Errorf("remove bucket: %w", err)
}

err = p.meta.DeleteAttributes(*input.Bucket, "")
if err != nil {
return fmt.Errorf("remove bucket attributes: %w", err)
}

return nil
}

@@ -295,30 +298,37 @@ func (p *Posix) CreateMultipartUpload(_ context.Context, mpu *s3.CreateMultipart
objNameSum := sha256.Sum256([]byte(*mpu.Key))
// multiple uploads for same object name allowed,
// they will all go into the same hashed name directory
objdir := filepath.Join(bucket, metaTmpMultipartDir,
fmt.Sprintf("%x", objNameSum))
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", objNameSum))
tmppath := filepath.Join(bucket, objdir)
// the unique upload id is a directory for all of the parts
// associated with this specific multipart upload
err = os.MkdirAll(filepath.Join(objdir, uploadID), 0755)
err = os.MkdirAll(filepath.Join(tmppath, uploadID), 0755)
if err != nil {
return nil, fmt.Errorf("create upload temp dir: %w", err)
}

// set an xattr with the original object name so that we can
// set an attribute with the original object name so that we can
// map the hashed name back to the original object name
err = xattr.Set(objdir, onameAttr, []byte(object))
err = p.meta.StoreAttribute(bucket, objdir, onameAttr, []byte(object))
if err != nil {
// if we fail, cleanup the container directories
// but ignore errors because there might still be
// other uploads for the same object name outstanding
os.RemoveAll(filepath.Join(objdir, uploadID))
os.Remove(objdir)
os.RemoveAll(filepath.Join(tmppath, uploadID))
os.Remove(tmppath)
return nil, fmt.Errorf("set name attr for upload: %w", err)
}

// set user attrs
for k, v := range mpu.Metadata {
xattr.Set(filepath.Join(objdir, uploadID), "user."+k, []byte(v))
err := p.meta.StoreAttribute(bucket, filepath.Join(objdir, uploadID),
k, []byte(v))
if err != nil {
// cleanup object if returning error
os.RemoveAll(filepath.Join(tmppath, uploadID))
os.Remove(tmppath)
return nil, fmt.Errorf("set user attr %q: %w", k, err)
}
}

return &s3.CreateMultipartUploadOutput{

@@ -384,15 +394,16 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
return nil, err
}

objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))

// check all parts ok
last := len(parts) - 1
partsize := int64(0)
var totalsize int64
for i, p := range parts {
partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *p.PartNumber))
fi, err := os.Lstat(partPath)
for i, part := range parts {
partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
fullPartPath := filepath.Join(bucket, partObjPath)
fi, err := os.Lstat(fullPartPath)
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}

@@ -406,7 +417,7 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}

b, err := xattr.Get(partPath, etagkey)
b, err := p.meta.RetrieveAttribute(bucket, partObjPath, etagkey)
etag := string(b)
if err != nil {
etag = ""

@@ -426,10 +437,12 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
}
defer f.cleanup()

for _, p := range parts {
pf, err := os.Open(filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *p.PartNumber)))
for _, part := range parts {
partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
fullPartPath := filepath.Join(bucket, partObjPath)
pf, err := os.Open(fullPartPath)
if err != nil {
return nil, fmt.Errorf("open part %v: %v", p.PartNumber, err)
return nil, fmt.Errorf("open part %v: %v", *part.PartNumber, err)
}
_, err = io.Copy(f, pf)
pf.Close()

@@ -437,13 +450,13 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
if errors.Is(err, syscall.EDQUOT) {
return nil, s3err.GetAPIError(s3err.ErrQuotaExceeded)
}
return nil, fmt.Errorf("copy part %v: %v", p.PartNumber, err)
return nil, fmt.Errorf("copy part %v: %v", part.PartNumber, err)
}
}

userMetaData := make(map[string]string)
upiddir := filepath.Join(objdir, uploadID)
loadUserMetaData(upiddir, userMetaData)
p.loadUserMetaData(bucket, objdir, userMetaData)

objname := filepath.Join(bucket, object)
dir := filepath.Dir(objname)

@@ -460,7 +473,7 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
}

for k, v := range userMetaData {
err = xattr.Set(objname, "user."+k, []byte(v))
err = p.meta.StoreAttribute(bucket, object, k, []byte(v))
if err != nil {
// cleanup object if returning error
os.Remove(objname)

@@ -471,7 +484,7 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
// Calculate s3 compatible md5sum for complete multipart.
s3MD5 := backend.GetMultipartMD5(parts)

err = xattr.Set(objname, etagkey, []byte(s3MD5))
err = p.meta.StoreAttribute(bucket, object, etagkey, []byte(s3MD5))
if err != nil {
// cleanup object if returning error
os.Remove(objname)

@@ -481,8 +494,8 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
// cleanup tmp dirs
os.RemoveAll(upiddir)
// use Remove for objdir in case there are still other uploads
// for same object name outstanding
os.Remove(objdir)
// for same object name outstanding, this will fail if there are
os.Remove(filepath.Join(bucket, objdir))

return &s3.CompleteMultipartUploadOutput{
Bucket: &bucket,
@@ -505,45 +518,42 @@ func (p *Posix) checkUploadIDExists(bucket, object, uploadID string) ([32]byte,
return sum, nil
}

func loadUserMetaData(path string, m map[string]string) (contentType, contentEncoding string) {
ents, err := xattr.List(path)
// fll out the user metadata map with the metadata for the object
// and return the content type and encoding
func (p *Posix) loadUserMetaData(bucket, object string, m map[string]string) (string, string) {
ents, err := p.meta.ListAttributes(bucket, object)
if err != nil || len(ents) == 0 {
return
return "", ""
}
for _, e := range ents {
if !isValidMeta(e) {
continue
}
b, err := xattr.Get(path, e)
if err == errNoData {
m[strings.TrimPrefix(e, fmt.Sprintf("user.%v.", metaHdr))] = ""
continue
}
b, err := p.meta.RetrieveAttribute(bucket, object, e)
if err != nil {
continue
}
m[strings.TrimPrefix(e, fmt.Sprintf("user.%v.", metaHdr))] = string(b)
if b == nil {
m[strings.TrimPrefix(e, fmt.Sprintf("%v.", metaHdr))] = ""
continue
}
m[strings.TrimPrefix(e, fmt.Sprintf("%v.", metaHdr))] = string(b)
}

b, err := xattr.Get(path, "user."+contentTypeHdr)
var contentType, contentEncoding string
b, _ := p.meta.RetrieveAttribute(bucket, object, contentTypeHdr)
contentType = string(b)
if err != nil {
contentType = ""
}
if contentType != "" {
m[contentTypeHdr] = contentType
}

b, err = xattr.Get(path, "user."+contentEncHdr)
b, _ = p.meta.RetrieveAttribute(bucket, object, contentEncHdr)
contentEncoding = string(b)
if err != nil {
contentEncoding = ""
}
if contentEncoding != "" {
m[contentEncHdr] = contentEncoding
}

return
return contentType, contentEncoding
}

func compareUserMetadata(meta1, meta2 map[string]string) bool {

@@ -561,10 +571,10 @@ func compareUserMetadata(meta1, meta2 map[string]string) bool {
}

func isValidMeta(val string) bool {
if strings.HasPrefix(val, "user."+metaHdr) {
if strings.HasPrefix(val, metaHdr) {
return true
}
if strings.EqualFold(val, "user.Expires") {
if strings.EqualFold(val, "Expires") {
return true
}
return false
@@ -656,7 +666,7 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl
continue
}

b, err := xattr.Get(filepath.Join(bucket, metaTmpMultipartDir, obj.Name()), onameAttr)
b, err := p.meta.RetrieveAttribute(bucket, filepath.Join(metaTmpMultipartDir, obj.Name()), onameAttr)
if err != nil {
continue
}

@@ -802,9 +812,10 @@ func (p *Posix) ListParts(_ context.Context, input *s3.ListPartsInput) (s3respon
return lpr, err
}

objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
tmpdir := filepath.Join(bucket, objdir)

ents, err := os.ReadDir(filepath.Join(objdir, uploadID))
ents, err := os.ReadDir(filepath.Join(tmpdir, uploadID))
if errors.Is(err, fs.ErrNotExist) {
return lpr, s3err.GetAPIError(s3err.ErrNoSuchUpload)
}

@@ -820,13 +831,13 @@ func (p *Posix) ListParts(_ context.Context, input *s3.ListPartsInput) (s3respon
}

partPath := filepath.Join(objdir, uploadID, e.Name())
b, err := xattr.Get(partPath, etagkey)
b, err := p.meta.RetrieveAttribute(bucket, partPath, etagkey)
etag := string(b)
if err != nil {
etag = ""
}

fi, err := os.Lstat(partPath)
fi, err := os.Lstat(filepath.Join(bucket, partPath))
if err != nil {
continue
}

@@ -855,7 +866,7 @@ func (p *Posix) ListParts(_ context.Context, input *s3.ListPartsInput) (s3respon

userMetaData := make(map[string]string)
upiddir := filepath.Join(objdir, uploadID)
loadUserMetaData(upiddir, userMetaData)
p.loadUserMetaData(bucket, upiddir, userMetaData)

return s3response.ListPartsResult{
Bucket: bucket,

@@ -941,7 +952,10 @@ func (p *Posix) UploadPart(ctx context.Context, input *s3.UploadPartInput) (stri

dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum)
xattr.Set(filepath.Join(bucket, partPath), etagkey, []byte(etag))
err = p.meta.StoreAttribute(bucket, partPath, etagkey, []byte(etag))
if err != nil {
return "", fmt.Errorf("set etag attr: %w", err)
}

return etag, nil
}

@@ -1056,7 +1070,10 @@ func (p *Posix) UploadPartCopy(ctx context.Context, upi *s3.UploadPartCopyInput)

dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum)
xattr.Set(filepath.Join(*upi.Bucket, partPath), etagkey, []byte(etag))
err = p.meta.StoreAttribute(*upi.Bucket, partPath, etagkey, []byte(etag))
if err != nil {
return s3response.CopyObjectResult{}, fmt.Errorf("set etag attr: %w", err)
}

fi, err = os.Stat(filepath.Join(*upi.Bucket, partPath))
if err != nil {

@@ -1132,11 +1149,18 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e
}

for k, v := range po.Metadata {
xattr.Set(name, fmt.Sprintf("user.%v.%v", metaHdr, k), []byte(v))
err := p.meta.StoreAttribute(*po.Bucket, *po.Key,
fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
if err != nil {
return "", fmt.Errorf("set user attr %q: %w", k, err)
}
}

// set etag attribute to signify this dir was specifically put
xattr.Set(name, etagkey, []byte(emptyMD5))
err = p.meta.StoreAttribute(*po.Bucket, *po.Key, etagkey, []byte(emptyMD5))
if err != nil {
return "", fmt.Errorf("set etag attr: %w", err)
}

return emptyMD5, nil
}

@@ -1180,7 +1204,11 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e
}

for k, v := range po.Metadata {
xattr.Set(name, fmt.Sprintf("user.%v.%v", metaHdr, k), []byte(v))
err := p.meta.StoreAttribute(*po.Bucket, *po.Key,
fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
if err != nil {
return "", fmt.Errorf("set user attr %q: %w", k, err)
}
}

if tagsStr != "" {

@@ -1192,7 +1220,10 @@ func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, e

dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum[:])
xattr.Set(name, etagkey, []byte(etag))
err = p.meta.StoreAttribute(*po.Bucket, *po.Key, etagkey, []byte(etag))
if err != nil {
return "", fmt.Errorf("set etag attr: %w", err)
}

return etag, nil
}
@@ -1224,26 +1255,30 @@ func (p *Posix) DeleteObject(_ context.Context, input *s3.DeleteObjectInput) err
return fmt.Errorf("delete object: %w", err)
}

err = p.meta.DeleteAttributes(bucket, object)
if err != nil {
return fmt.Errorf("delete object attributes: %w", err)
}

return p.removeParents(bucket, object)
}

func (p *Posix) removeParents(bucket, object string) error {
// this will remove all parent directories that were not
// specifically uploaded with a put object. we detect
// this with a special xattr to indicate these. stop
// this with a special attribute to indicate these. stop
// at either the bucket or the first parent we encounter
// with the xattr, whichever comes first.
objPath := filepath.Join(bucket, object)

// with the attribute, whichever comes first.
objPath := object
for {
parent := filepath.Dir(objPath)

if filepath.Base(parent) == bucket {
if parent == string(filepath.Separator) {
// stop removing parents if we hit the bucket directory.
break
}

_, err := xattr.Get(parent, etagkey)
_, err := p.meta.RetrieveAttribute(bucket, parent, etagkey)
if err == nil {
// a directory with a valid etag means this was specifically
// uploaded with a put object, so stop here and leave this

@@ -1251,7 +1286,7 @@ func (p *Posix) removeParents(bucket, object string) error {
break
}

err = os.Remove(parent)
err = os.Remove(filepath.Join(bucket, parent))
if err != nil {
break
}

@@ -1349,21 +1384,22 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io

var contentRange string
if acceptRange != "" {
contentRange = fmt.Sprintf("bytes %v-%v/%v", startOffset, startOffset+length-1, objSize)
contentRange = fmt.Sprintf("bytes %v-%v/%v",
startOffset, startOffset+length-1, objSize)
}

if fi.IsDir() {
userMetaData := make(map[string]string)

contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
contentType, contentEncoding := p.loadUserMetaData(bucket, object, userMetaData)

b, err := xattr.Get(objPath, etagkey)
b, err := p.meta.RetrieveAttribute(bucket, object, etagkey)
etag := string(b)
if err != nil {
etag = ""
}

tags, err := p.getXattrTags(bucket, object)
tags, err := p.getAttrTags(bucket, object)
if err != nil {
return nil, fmt.Errorf("get object tags: %w", err)
}

@@ -1400,15 +1436,15 @@ func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io

userMetaData := make(map[string]string)

contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
contentType, contentEncoding := p.loadUserMetaData(bucket, object, userMetaData)

b, err := xattr.Get(objPath, etagkey)
b, err := p.meta.RetrieveAttribute(bucket, object, etagkey)
etag := string(b)
if err != nil {
etag = ""
}

tags, err := p.getXattrTags(bucket, object)
tags, err := p.getAttrTags(bucket, object)
if err != nil {
return nil, fmt.Errorf("get object tags: %w", err)
}

@@ -1456,9 +1492,9 @@ func (p *Posix) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.He
}

userMetaData := make(map[string]string)
contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
contentType, contentEncoding := p.loadUserMetaData(bucket, object, userMetaData)

b, err := xattr.Get(objPath, etagkey)
b, err := p.meta.RetrieveAttribute(bucket, object, etagkey)
etag := string(b)
if err != nil {
etag = ""

@@ -1528,7 +1564,7 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
}

meta := make(map[string]string)
loadUserMetaData(objPath, meta)
p.loadUserMetaData(srcBucket, srcObject, meta)

dstObjdPath := filepath.Join(dstBucket, dstObject)
if dstObjdPath == objPath {

@@ -1536,10 +1572,17 @@ func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.
return &s3.CopyObjectOutput{}, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
} else {
for key := range meta {
xattr.Remove(dstObjdPath, key)
err := p.meta.DeleteAttribute(dstBucket, dstObject, key)
if err != nil {
return nil, fmt.Errorf("delete user metadata: %w", err)
}
}
for k, v := range input.Metadata {
xattr.Set(dstObjdPath, fmt.Sprintf("user.%v.%v", metaHdr, k), []byte(v))
err := p.meta.StoreAttribute(dstBucket, dstObject,
fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
if err != nil {
return nil, fmt.Errorf("set user attr %q: %w", k, err)
}
}
}
}
@@ -1603,7 +1646,7 @@ func (p *Posix) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.

fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
fileToObj(bucket), []string{metaTmpDir})
p.fileToObj(bucket), []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
}

@@ -1621,13 +1664,13 @@ func (p *Posix) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.
}, nil
}

func fileToObj(bucket string) backend.GetObjFunc {
func (p *Posix) fileToObj(bucket string) backend.GetObjFunc {
return func(path string, d fs.DirEntry) (types.Object, error) {
if d.IsDir() {
// directory object only happens if directory empty
// check to see if this is a directory object by checking etag
etagBytes, err := xattr.Get(filepath.Join(bucket, path), etagkey)
if isNoAttr(err) || errors.Is(err, fs.ErrNotExist) {
etagBytes, err := p.meta.RetrieveAttribute(bucket, path, etagkey)
if errors.Is(err, meta.ErrNoSuchKey) || errors.Is(err, fs.ErrNotExist) {
return types.Object{}, backend.ErrSkipObj
}
if err != nil {

@@ -1653,14 +1696,14 @@ func fileToObj(bucket string) backend.GetObjFunc {
}

// file object, get object info and fill out object data
etagBytes, err := xattr.Get(filepath.Join(bucket, path), etagkey)
etagBytes, err := p.meta.RetrieveAttribute(bucket, path, etagkey)
if errors.Is(err, fs.ErrNotExist) {
return types.Object{}, backend.ErrSkipObj
}
if err != nil && !isNoAttr(err) {
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return types.Object{}, fmt.Errorf("get etag: %w", err)
}
// note: isNoAttr(err) will return etagBytes = []byte{}
// note: meta.ErrNoSuchKey will return etagBytes = []byte{}
// so this will just set etag to "" if its not already set

etag := string(etagBytes)

@@ -1724,7 +1767,7 @@ func (p *Posix) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (

fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
fileToObj(bucket), []string{metaTmpDir})
p.fileToObj(bucket), []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
}

@@ -1754,7 +1797,7 @@ func (p *Posix) PutBucketAcl(_ context.Context, bucket string, data []byte) erro
return fmt.Errorf("stat bucket: %w", err)
}

if err := xattr.Set(bucket, aclkey, data); err != nil {
if err := p.meta.StoreAttribute(bucket, "", aclkey, data); err != nil {
return fmt.Errorf("set acl: %w", err)
}

@@ -1773,8 +1816,8 @@ func (p *Posix) GetBucketAcl(_ context.Context, input *s3.GetBucketAclInput) ([]
return nil, fmt.Errorf("stat bucket: %w", err)
}

b, err := xattr.Get(*input.Bucket, aclkey)
if isNoAttr(err) {
b, err := p.meta.RetrieveAttribute(*input.Bucket, "", aclkey)
if errors.Is(err, meta.ErrNoSuchKey) {
return []byte{}, nil
}
if err != nil {

@@ -1793,8 +1836,8 @@ func (p *Posix) PutBucketTagging(_ context.Context, bucket string, tags map[stri
}

if tags == nil {
err = xattr.Remove(bucket, "user."+tagHdr)
if err != nil && !isNoAttr(err) {
err = p.meta.DeleteAttribute(bucket, "", tagHdr)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return fmt.Errorf("remove tags: %w", err)
}

@@ -1806,7 +1849,7 @@ func (p *Posix) PutBucketTagging(_ context.Context, bucket string, tags map[stri
return fmt.Errorf("marshal tags: %w", err)
}

err = xattr.Set(bucket, "user."+tagHdr, b)
err = p.meta.StoreAttribute(bucket, "", tagHdr, b)
if err != nil {
return fmt.Errorf("set tags: %w", err)
}

@@ -1823,7 +1866,7 @@ func (p *Posix) GetBucketTagging(_ context.Context, bucket string) (map[string]s
return nil, fmt.Errorf("stat bucket: %w", err)
}

tags, err := p.getXattrTags(bucket, "")
tags, err := p.getAttrTags(bucket, "")
if err != nil {
return nil, err
}
@@ -1844,16 +1887,16 @@ func (p *Posix) GetObjectTagging(_ context.Context, bucket, object string) (map[
return nil, fmt.Errorf("stat bucket: %w", err)
}

return p.getXattrTags(bucket, object)
return p.getAttrTags(bucket, object)
}

func (p *Posix) getXattrTags(bucket, object string) (map[string]string, error) {
func (p *Posix) getAttrTags(bucket, object string) (map[string]string, error) {
tags := make(map[string]string)
b, err := xattr.Get(filepath.Join(bucket, object), "user."+tagHdr)
b, err := p.meta.RetrieveAttribute(bucket, object, tagHdr)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if isNoAttr(err) {
if errors.Is(err, meta.ErrNoSuchKey) {
return tags, nil
}
if err != nil {

@@ -1878,10 +1921,13 @@ func (p *Posix) PutObjectTagging(_ context.Context, bucket, object string, tags
}

if tags == nil {
err = xattr.Remove(filepath.Join(bucket, object), "user."+tagHdr)
err = p.meta.DeleteAttribute(bucket, object, tagHdr)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if errors.Is(err, meta.ErrNoSuchKey) {
return nil
}
if err != nil {
return fmt.Errorf("remove tags: %w", err)
}

@@ -1893,7 +1939,7 @@ func (p *Posix) PutObjectTagging(_ context.Context, bucket, object string, tags
return fmt.Errorf("marshal tags: %w", err)
}

err = xattr.Set(filepath.Join(bucket, object), "user."+tagHdr, b)
err = p.meta.StoreAttribute(bucket, object, tagHdr, b)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchKey)
}

@@ -1918,8 +1964,9 @@ func (p *Posix) PutBucketPolicy(ctx context.Context, bucket string, policy []byt
}

if policy == nil {
if err := xattr.Remove(bucket, policykey); err != nil {
if isNoAttr(err) {
err := p.meta.DeleteAttribute(bucket, "", policykey)
if err != nil {
if errors.Is(err, meta.ErrNoSuchKey) {
return nil
}

@@ -1929,7 +1976,8 @@ func (p *Posix) PutBucketPolicy(ctx context.Context, bucket string, policy []byt
return nil
}

if err := xattr.Set(bucket, policykey, policy); err != nil {
err = p.meta.StoreAttribute(bucket, "", policykey, policy)
if err != nil {
return fmt.Errorf("set policy: %w", err)
}

@@ -1945,10 +1993,13 @@ func (p *Posix) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, err
return nil, fmt.Errorf("stat bucket: %w", err)
}

policy, err := xattr.Get(bucket, policykey)
if isNoAttr(err) {
policy, err := p.meta.RetrieveAttribute(bucket, "", policykey)
if errors.Is(err, meta.ErrNoSuchKey) {
return []byte{}, nil
}
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return nil, fmt.Errorf("get bucket policy: %w", err)
}

@@ -1969,7 +2020,7 @@ func (p *Posix) ChangeBucketOwner(ctx context.Context, bucket, newOwner string)
return fmt.Errorf("stat bucket: %w", err)
}

aclTag, err := xattr.Get(bucket, aclkey)
aclTag, err := p.meta.RetrieveAttribute(bucket, "", aclkey)
if err != nil {
return fmt.Errorf("get acl: %w", err)
}

@@ -1987,7 +2038,7 @@ func (p *Posix) ChangeBucketOwner(ctx context.Context, bucket, newOwner string)
return fmt.Errorf("marshal acl: %w", err)
}

err = xattr.Set(bucket, aclkey, newAcl)
err = p.meta.StoreAttribute(bucket, "", aclkey, newAcl)
if err != nil {
return fmt.Errorf("set acl: %w", err)
}

@@ -2011,7 +2062,7 @@ func (p *Posix) ListBucketsAndOwners(ctx context.Context) (buckets []s3response.
continue
}

aclTag, err := xattr.Get(entry.Name(), aclkey)
aclTag, err := p.meta.RetrieveAttribute(entry.Name(), "", aclkey)
if err != nil {
return buckets, fmt.Errorf("get acl tag: %w", err)
}

@@ -2035,20 +2086,6 @@ func (p *Posix) ListBucketsAndOwners(ctx context.Context) (buckets []s3response.
return buckets, nil
}

func isNoAttr(err error) bool {
if err == nil {
return false
}
xerr, ok := err.(*xattr.Error)
if ok && xerr.Err == xattr.ENOATTR {
return true
}
if err == errNoData {
return true
}
return false
}

func getString(str *string) string {
if str == nil {
return ""
@@ -30,11 +30,12 @@ import (
"github.com/versity/scoutfs-go"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
)

func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
p, err := posix.New(rootdir, posix.PosixOpts{
p, err := posix.New(rootdir, meta.XattrMeta{}, posix.PosixOpts{
ChownUID: opts.ChownUID,
ChownGID: opts.ChownGID,
})
@@ -17,6 +17,7 @@ package main
import (
"bytes"
"crypto/sha256"
"crypto/tls"
"encoding/hex"
"encoding/json"
"fmt"
@@ -37,6 +38,7 @@ var (
adminAccess string
adminSecret string
adminEndpoint string
allowInsecure bool
)

func adminCommand() *cli.Command {
@@ -154,10 +156,24 @@ func adminCommand() *cli.Command {
Required: true,
Destination: &adminEndpoint,
},
&cli.BoolFlag{
Name: "allow-insecure",
Usage: "disable tls certificate verification for the admin endpoint",
EnvVars: []string{"ADMIN_ALLOW_INSECURE"},
Aliases: []string{"ai"},
Destination: &allowInsecure,
},
},
}
}

func initHTTPClient() *http.Client {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: allowInsecure},
}
return &http.Client{Transport: tr}
}

func createUser(ctx *cli.Context) error {
access, secret, role := ctx.String("access"), ctx.String("secret"), ctx.String("role")
userID, groupID, projectID := ctx.Int("user-id"), ctx.Int("group-id"), ctx.Int("projectID")
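The new --allow-insecure flag only toggles InsecureSkipVerify on the shared admin HTTP client, which is what lets the admin commands talk to a gateway running with a self-signed certificate such as the one generated in the CI workflow above. A small standalone sketch of the same idea; the endpoint URL is a placeholder:

package main

import (
    "crypto/tls"
    "fmt"
    "net/http"
)

func newAdminClient(allowInsecure bool) *http.Client {
    // Mirrors initHTTPClient above: verification is skipped only when the
    // operator explicitly opts in.
    return &http.Client{
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{InsecureSkipVerify: allowInsecure},
        },
    }
}

func main() {
    c := newAdminClient(true)
    resp, err := c.Get("https://localhost:7070/") // placeholder endpoint
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    resp.Body.Close()
    fmt.Println("status:", resp.Status)
}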
@@ -199,18 +215,22 @@ func createUser(ctx *cli.Context) error {
return fmt.Errorf("failed to sign the request: %w", err)
}

client := http.Client{}
client := initHTTPClient()

resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
defer resp.Body.Close()

if resp.StatusCode >= 400 {
return fmt.Errorf("%s", body)
}

fmt.Printf("%s\n", body)

@@ -240,18 +260,22 @@ func deleteUser(ctx *cli.Context) error {
return fmt.Errorf("failed to sign the request: %w", err)
}

client := http.Client{}
client := initHTTPClient()

resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
defer resp.Body.Close()

if resp.StatusCode >= 400 {
return fmt.Errorf("%s", body)
}

fmt.Printf("%s\n", body)

@@ -276,18 +300,18 @@ func listUsers(ctx *cli.Context) error {
return fmt.Errorf("failed to sign the request: %w", err)
}

client := http.Client{}
client := initHTTPClient()

resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
defer resp.Body.Close()

if resp.StatusCode >= 400 {
return fmt.Errorf("%s", body)

@@ -343,18 +367,22 @@ func changeBucketOwner(ctx *cli.Context) error {
return fmt.Errorf("failed to sign the request: %w", err)
}

client := http.Client{}
client := initHTTPClient()

resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
defer resp.Body.Close()

if resp.StatusCode >= 400 {
return fmt.Errorf("%s", body)
}

fmt.Println(string(body))

@@ -391,18 +419,18 @@ func listBuckets(ctx *cli.Context) error {
return fmt.Errorf("failed to sign the request: %w", err)
}

client := http.Client{}
client := initHTTPClient()

resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
defer resp.Body.Close()

if resp.StatusCode >= 400 {
return fmt.Errorf("%s", body)
@@ -8,6 +8,7 @@ import (
"sync"
"testing"

"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
"github.com/versity/versitygw/tests/integration"
)

@@ -56,7 +57,7 @@ func initPosix(ctx context.Context) {
log.Fatalf("make temp directory: %v", err)
}

be, err := posix.New(tempdir, posix.PosixOpts{})
be, err := posix.New(tempdir, meta.XattrMeta{}, posix.PosixOpts{})
if err != nil {
log.Fatalf("init posix: %v", err)
}
@@ -18,6 +18,7 @@ import (
"fmt"

"github.com/urfave/cli/v2"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
)

@@ -62,7 +63,13 @@ func runPosix(ctx *cli.Context) error {
return fmt.Errorf("no directory provided for operation")
}

be, err := posix.New(ctx.Args().Get(0), posix.PosixOpts{
gwroot := (ctx.Args().Get(0))
ok := meta.XattrMeta{}.Test(gwroot)
if !ok {
return fmt.Errorf("posix backend requires extended attributes support")
}

be, err := posix.New(gwroot, meta.XattrMeta{}, posix.PosixOpts{
ChownUID: chownuid,
ChownGID: chowngid,
})
8 go.mod
@@ -4,12 +4,12 @@ go 1.21

require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
github.com/aws/aws-sdk-go-v2 v1.26.1
github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1
github.com/aws/smithy-go v1.20.2
github.com/go-ldap/ldap/v3 v3.4.6
github.com/go-ldap/ldap/v3 v3.4.7
github.com/gofiber/fiber/v2 v2.52.4
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0

@@ -58,7 +58,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
60 go.sum
@@ -1,19 +1,19 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=

@@ -63,24 +63,40 @@ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA=
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A=
github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc=
github.com/go-ldap/ldap/v3 v3.4.7 h1:3Hbd7mIB1qjd3Ra59fI3JYea/t5kykFu2CVHBca9koE=
github.com/go-ldap/ldap/v3 v3.4.7/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk=
github.com/gofiber/fiber/v2 v2.52.4 h1:P+T+4iK7VaqUsq2PALYEfBBo6bJZ4q3FP8cZ84EggTM=
github.com/gofiber/fiber/v2 v2.52.4/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=

@@ -114,10 +130,13 @@ github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUan
github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho=
github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=

@@ -139,18 +158,24 @@ github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQut
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@@ -167,16 +192,18 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=

@@ -192,6 +219,7 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
@@ -49,7 +49,7 @@ func (c AdminController) CreateUser(ctx *fiber.Ctx) error {

err = c.iam.CreateAccount(usr)
if err != nil {
return fmt.Errorf("failed to create a user: %w", err)
return fmt.Errorf("failed to create user: %w", err)
}

return ctx.SendString("The user has been created successfully")

@@ -193,11 +193,11 @@ func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
cr.chunkHash.Write(p[:chunkSize])
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
return n + int(chunkSize), err
} else {
cr.chunkDataLeft = chunkSize - int64(n)
cr.chunkHash.Write(p[:n])
}

cr.chunkDataLeft = chunkSize - int64(n)
cr.chunkHash.Write(p[:n])

return n, nil
}

@@ -231,6 +231,7 @@ const (
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
stashLen := len(cr.stash)
if cr.stash != nil {
tmp := make([]byte, maxHeaderSize)
copy(tmp, cr.stash)
@@ -265,5 +266,5 @@ func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int,
signature := string(header[sigIndex:(sigIndex + sigEndIndex)])
dataStartOffset := sigIndex + sigEndIndex + len(chunkHdrDelim)

return chunkSize, signature, dataStartOffset, nil
return chunkSize, signature, dataStartOffset - stashLen, nil
}

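The hunks above adjust the gateway's aws-chunked reader, which strips headers of the form <hex-size>;chunk-signature=<hex-sig>\r\n from streaming SigV4 uploads before hashing the payload. For reference only, here is a minimal, self-contained Go sketch of that header layout; it is not the gateway's parser (which also stashes partial headers and verifies each chunk signature), and the sample signature value below is made up.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseChunkHeader splits one aws-chunked header line into the chunk size
// (hex-encoded) and the chunk signature. Sketch only; no stash handling.
func parseChunkHeader(line string) (int64, string, error) {
	sizePart, sigPart, ok := strings.Cut(line, ";chunk-signature=")
	if !ok {
		return 0, "", fmt.Errorf("missing chunk-signature delimiter")
	}
	size, err := strconv.ParseInt(sizePart, 16, 64)
	if err != nil {
		return 0, "", fmt.Errorf("bad chunk size %q: %w", sizePart, err)
	}
	return size, strings.TrimSuffix(sigPart, "\r\n"), nil
}

func main() {
	// 0x400 bytes of data follow this header; signature is a placeholder.
	size, sig, err := parseChunkHeader("400;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648\r\n")
	fmt.Println(size, sig, err) // 1024 ad80c7... <nil>
}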
@@ -13,4 +13,5 @@ S3CMD_CONFIG=./tests/s3cfg.local.default
SECRETS_FILE=./tests/.secrets
MC_ALIAS=versity
LOG_LEVEL=2
GOCOVERDIR=$PWD/cover
GOCOVERDIR=$PWD/cover
USERS_FOLDER=$PWD/iam
@@ -6130,9 +6130,9 @@ func IAM_user_access_denied(s *S3Conf) error {
}

out, err := execCommand("admin", "-a", usr.access, "-s", usr.secret, "-er", s.endpoint, "delete-user", "-a", "random_access")
if err != nil {
failF("%v: %v", testName, err)
return fmt.Errorf("%v: %w", testName, err)
if err == nil {
failF("%v: expected cmd error", testName)
return fmt.Errorf("%v: expected cmd error", testName)
}
if !strings.Contains(string(out), adminAccessDeniedMsg) {
failF("%v: expected response error message to be %v, instead got %s", testName, adminAccessDeniedMsg, out)
@@ -6161,9 +6161,9 @@ func IAM_userplus_access_denied(s *S3Conf) error {
}

out, err := execCommand("admin", "-a", usr.access, "-s", usr.secret, "-er", s.endpoint, "delete-user", "-a", "random_access")
if err != nil {
failF("%v: %v", testName, err)
return fmt.Errorf("%v: %w", testName, err)
if err == nil {
failF("%v: expected cmd error", testName)
return fmt.Errorf("%v: expected cmd error", testName)
}
if !strings.Contains(string(out), adminAccessDeniedMsg) {
failF("%v: expected response error message to be %v, instead got %s", testName, adminAccessDeniedMsg, out)

@@ -30,7 +30,7 @@ import (
var (
bcktCount = 0
succUsrCrt = "The user has been created successfully"
failUsrCrt = "failed to create a user: update iam data: account already exists"
failUsrCrt = "failed to create user: update iam data: account already exists"
adminAccessDeniedMsg = "access denied: only admin users have access to this resource"
succDeleteUserMsg = "The user has been deleted successfully"
)
@@ -546,7 +546,7 @@ func createUsers(s *S3Conf, users []user) error {
return err
}
if !strings.Contains(string(out), succUsrCrt) && !strings.Contains(string(out), failUsrCrt) {
return fmt.Errorf("failed to create a user account")
return fmt.Errorf("failed to create user account")
}
}
return nil

20
tests/run.sh
@@ -20,7 +20,7 @@ handle_param() {
-s|--static)
export RECREATE_BUCKETS=false
;;
aws|aws-posix|s3cmd|mc)
aws|aws-posix|s3cmd|mc|user)
set_command_type "$1"
;;
*) # Handle unrecognized options or positional arguments
@@ -39,7 +39,14 @@ set_command_type() {
export command_type
}

export RECREATE_BUCKETS=true
if [[ -z $RECREATE_BUCKETS ]]; then
export RECREATE_BUCKETS=true
elif [[ $RECREATE_BUCKETS != true ]] && [[ $RECREATE_BUCKETS != false ]]; then
echo "Invalid RECREATE_BUCKETS value: $RECREATE_BUCKETS"
exit 1
else
export RECREATE_BUCKETS=$RECREATE_BUCKETS
fi
while [[ "$#" -gt 0 ]]; do
handle_param "$1"
shift # past argument or value
@@ -60,16 +67,25 @@ fi
case $command_type in
aws)
"$HOME"/bin/bats ./tests/test_aws.sh || exit_code=$?
if [[ $exit_code -eq 0 ]]; then
"$HOME"/bin/bats ./tests/test_user_aws.sh || exit_code=$?
fi
;;
aws-posix)
"$HOME"/bin/bats ./tests/test_aws_posix.sh || exit_code=$?
;;
s3cmd)
"$HOME"/bin/bats ./tests/test_s3cmd.sh || exit_code=$?
if [[ $exit_code -eq 0 ]]; then
"$HOME"/bin/bats ./tests/test_user_s3cmd.sh || exit_code=$?
fi
;;
mc)
"$HOME"/bin/bats ./tests/test_mc.sh || exit_code=$?
;;
user)
"$HOME"/bin/bats ./tests/test_user_aws.sh || exit_code=$?
;;
esac

exit $exit_code

@@ -4,6 +4,11 @@ if [[ -z "$VERSITYGW_TEST_ENV" ]]; then
echo "Error: VERSITYGW_TEST_ENV parameter must be set"
exit 1
fi

# shellcheck source=./.env.default
source "$VERSITYGW_TEST_ENV"
export RECREATE_BUCKETS

if ! ./tests/run.sh aws; then
exit 1
fi
@@ -16,16 +21,7 @@ fi
if ! ./tests/run.sh mc; then
exit 1
fi
if ! ./tests/run.sh -s aws; then
exit 1
fi
if ! ./tests/run.sh -s aws-posix; then
exit 1
fi
if ! ./tests/run.sh -s s3cmd; then
exit 1
fi
if ! ./tests/run.sh -s mc; then
if ! ./tests/run.sh user; then
exit 1
fi
exit 0

@@ -2,6 +2,8 @@

source ./tests/setup.sh
source ./tests/util.sh
source ./tests/util_aws.sh
source ./tests/util_bucket_create.sh
source ./tests/util_file.sh
source ./tests/test_common.sh

@@ -81,7 +83,6 @@ source ./tests/test_common.sh

# test ability to delete multiple objects from bucket
@test "test_delete_objects" {

local object_one="test-file-one"
local object_two="test-file-two"

@@ -119,7 +120,6 @@ source ./tests/test_common.sh

# test v1 s3api list objects command
@test "test-s3api-list-objects-v1" {

local object_one="test-file-one"
local object_two="test-file-two"
local object_two_data="test data\n"
@@ -150,7 +150,6 @@ source ./tests/test_common.sh

# test v2 s3api list objects command
@test "test-s3api-list-objects-v2" {

local object_one="test-file-one"
local object_two="test-file-two"
local object_two_data="test data\n"
@@ -186,7 +185,6 @@ source ./tests/test_common.sh

# test multi-part upload
@test "test-multi-part-upload" {

local bucket_file="bucket-file"
bucket_file_data="test file\n"

@@ -209,7 +207,6 @@ source ./tests/test_common.sh

# test multi-part upload abort
@test "test-multi-part-upload-abort" {

local bucket_file="bucket-file"
bucket_file_data="test file\n"

@@ -231,7 +228,6 @@ source ./tests/test_common.sh

# test multi-part upload list parts command
@test "test-multipart-upload-list-parts" {

local bucket_file="bucket-file"
local bucket_file_data="test file\n"

@@ -279,10 +275,14 @@ source ./tests/test_common.sh

# test listing of active uploads
@test "test-multipart-upload-list-uploads" {

local bucket_file_one="bucket-file-one"
local bucket_file_two="bucket-file-two"

if [[ $RECREATE_BUCKETS == false ]]; then
abort_all_multipart_uploads "$BUCKET_ONE_NAME" || local abort_result=$?
[[ $abort_result -eq 0 ]] || fail "error aborting all uploads"
fi

create_test_files "$bucket_file_one" "$bucket_file_two" || local created=$?
[[ $created -eq 0 ]] || fail "Error creating test files"
setup_bucket "aws" "$BUCKET_ONE_NAME" || local result=$?
@@ -293,6 +293,7 @@ source ./tests/test_common.sh

local key_one
local key_two
log 5 "$uploads"
key_one=$(echo "$uploads" | jq '.Uploads[0].Key')
key_two=$(echo "$uploads" | jq '.Uploads[1].Key')
key_one=${key_one//\"/}

@@ -2,6 +2,7 @@

source ./tests/setup.sh
source ./tests/util.sh
source ./tests/util_bucket_create.sh
source ./tests/util_file.sh
source ./tests/util_posix.sh

@@ -34,6 +34,7 @@ test_common_put_object_with_data() {
create_test_files "$object_name" || local create_result=$?
[[ $create_result -eq 0 ]] || fail "Error creating test file"
echo "test data" > "$test_file_folder"/"$object_name"
test_common_put_object "$1" "$object_name"
}

test_common_put_object_no_data() {
@@ -163,7 +164,7 @@ test_common_set_get_bucket_tags() {

if [[ $1 == 'aws' ]]; then
if [[ $tags != "" ]]; then
tag_set=$(echo "$tags" | sed '1d' | jq '.TagSet')
tag_set=$(echo "$tags" | jq '.TagSet')
[[ $tag_set == "[]" ]] || fail "Error: tags not empty: $tags"
fi
else
@@ -177,8 +178,9 @@ test_common_set_get_bucket_tags() {
local tag_set_key
local tag_set_value
if [[ $1 == 'aws' ]]; then
tag_set_key=$(echo "$tags" | sed '1d' | jq '.TagSet[0].Key')
tag_set_value=$(echo "$tags" | sed '1d' | jq '.TagSet[0].Value')
log 5 "Post-export tags: $tags"
tag_set_key=$(echo "$tags" | jq '.TagSet[0].Key')
tag_set_value=$(echo "$tags" | jq '.TagSet[0].Value')
[[ $tag_set_key == '"'$key'"' ]] || fail "Key mismatch"
[[ $tag_set_value == '"'$value'"' ]] || fail "Value mismatch"
else
@@ -211,7 +213,7 @@ test_common_set_get_object_tags() {
get_object_tags "$1" "$BUCKET_ONE_NAME" $bucket_file || local get_result=$?
[[ $get_result -eq 0 ]] || fail "Error getting object tags"
if [[ $1 == 'aws' ]]; then
tag_set=$(echo "$tags" | sed '1d' | jq '.TagSet')
tag_set=$(echo "$tags" | jq '.TagSet')
[[ $tag_set == "[]" ]] || fail "Error: tags not empty"
elif [[ ! $tags == *"No tags found"* ]]; then
fail "no tags found (tags: $tags)"
@@ -221,8 +223,8 @@ test_common_set_get_object_tags() {
get_object_tags "$1" "$BUCKET_ONE_NAME" $bucket_file || local get_result_two=$?
[[ $get_result_two -eq 0 ]] || fail "Error getting object tags"
if [[ $1 == 'aws' ]]; then
tag_set_key=$(echo "$tags" | sed '1d' | jq '.TagSet[0].Key')
tag_set_value=$(echo "$tags" | sed '1d' | jq '.TagSet[0].Value')
tag_set_key=$(echo "$tags" | jq '.TagSet[0].Key')
tag_set_value=$(echo "$tags" | jq '.TagSet[0].Value')
[[ $tag_set_key == '"'$key'"' ]] || fail "Key mismatch"
[[ $tag_set_value == '"'$value'"' ]] || fail "Value mismatch"
else

@@ -2,6 +2,7 @@

source ./tests/test_common.sh
source ./tests/setup.sh
source ./tests/util_bucket_create.sh

export RUN_MC=true

@@ -3,6 +3,7 @@
source ./tests/setup.sh
source ./tests/test_common.sh
source ./tests/util.sh
source ./tests/util_bucket_create.sh

export RUN_S3CMD=true

19
tests/test_user_aws.sh
Executable file
@@ -0,0 +1,19 @@
#!/usr/bin/env bats

source ./tests/test_user_common.sh

@test "test_admin_user_aws" {
test_admin_user "aws"
}

@test "test_create_user_already_exists_aws" {
test_create_user_already_exists "aws"
}

@test "test_user_user_aws" {
test_user_user "aws"
}

@test "test_userplus_operation_aws" {
test_userplus_operation "aws"
}
178
tests/test_user_common.sh
Executable file
@@ -0,0 +1,178 @@
#!/usr/bin/env bats

source ./tests/setup.sh
source ./tests/util_users.sh
source ./tests/util.sh
source ./tests/util_bucket_create.sh

test_admin_user() {
if [[ $# -ne 1 ]]; then
fail "test admin user command requires command type"
fi

admin_username="ABCDEF"
user_username="GHIJKL"
admin_password="123456"
user_password="789012"

user_exists "$admin_username" || local admin_exists_result=$?
if [[ $admin_exists_result -eq 0 ]]; then
delete_user "$admin_username" || local delete_admin_result=$?
[[ $delete_admin_result -eq 0 ]] || fail "failed to delete admin user"
fi
create_user "$admin_username" "$admin_password" "admin" || create_admin_result=$?
[[ $create_admin_result -eq 0 ]] || fail "failed to create admin user"

user_exists "$user_username" || local user_exists_result=$?
if [[ $user_exists_result -eq 0 ]]; then
delete_user "$user_username" || local delete_user_result=$?
[[ $delete_user_result -eq 0 ]] || fail "failed to delete user user"
fi
create_user_with_user "$admin_username" "$admin_password" "$user_username" "$user_password" "user"

setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"
delete_bucket "aws" "versity-gwtest-admin-bucket" || local delete_result=$?
[[ $delete_result -eq 0 ]] || fail "error deleting bucket if it exists"
create_bucket_with_user "aws" "versity-gwtest-admin-bucket" "$admin_username" "$admin_password" || create_result_two=$?
[[ $create_result_two -eq 0 ]] || fail "error creating bucket with user"

bucket_one_found=false
bucket_two_found=false
list_buckets_with_user "aws" "$admin_username" "$admin_password"
for bucket in "${bucket_array[@]}"; do
if [ "$bucket" == "$BUCKET_ONE_NAME" ]; then
bucket_one_found=true
elif [ "$bucket" == "versity-gwtest-admin-bucket" ]; then
bucket_two_found=true
fi
if [ $bucket_one_found == true ] && [ $bucket_two_found == true ]; then
break
fi
done
if [ $bucket_one_found == false ] || [ $bucket_two_found == false ]; then
fail "not all expected buckets listed"
fi
change_bucket_owner "$admin_username" "$admin_password" "versity-gwtest-admin-bucket" "$user_username" || local change_result=$?
[[ $change_result -eq 0 ]] || fail "error changing bucket owner"

delete_bucket "aws" "versity-gwtest-admin-bucket"
delete_user "$user_username"
delete_user "$admin_username"
}

test_create_user_already_exists() {
if [[ $# -ne 1 ]]; then
fail "test admin user command requires command type"
fi

username="ABCDEG"
password="123456"

user_exists "$username" || local exists_result=$?
if [[ $exists_result -eq 0 ]]; then
delete_user "$username" || local delete_result=$?
[[ $delete_result -eq 0 ]] || fail "failed to delete user '$username'"
fi

create_user "$username" "123456" "admin" || local create_result=$?
[[ $create_result -eq 0 ]] || fail "error creating user"
create_user "$username" "123456" "admin" || local create_result=$?
[[ $create_result -eq 1 ]] || fail "'user already exists' error not returned"

delete_bucket "aws" "versity-gwtest-admin-bucket"
delete_user "$username"
}

test_user_user() {
if [[ $# -ne 1 ]]; then
fail "test admin user command requires command type"
fi

username="ABCDEG"
password="123456"

user_exists "$username" || local exists_result=$?
if [[ $exists_result -eq 0 ]]; then
delete_user "$username" || local delete_result=$?
[[ $delete_result -eq 0 ]] || fail "failed to delete user '$username'"
fi
delete_bucket "aws" "versity-gwtest-user-bucket"

create_user "$username" "123456" "user" || local create_result=$?
[[ $create_result -eq 0 ]] || fail "error creating user"
setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"

create_bucket_with_user "aws" "versity-gwtest-user-bucket" "$username" "$password" || create_result_two=$?
[[ $create_result_two -eq 1 ]] || fail "creating bucket with 'user' account failed to return error"
[[ $error == *"Access Denied"* ]] || fail "error message '$error' doesn't contain 'Access Denied'"

create_bucket "aws" "versity-gwtest-user-bucket" || create_result_three=$?
[[ $create_result_three -eq 0 ]] || fail "creating bucket account returned error"

change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "versity-gwtest-user-bucket" "$username" || local change_result=$?
[[ $change_result -eq 0 ]] || fail "error changing bucket owner"
change_bucket_owner "$username" "$password" "versity-gwtest-user-bucket" "admin" || local change_result_two=$?
[[ $change_result_two -eq 1 ]] || fail "user shouldn't be able to change bucket owner"

list_buckets_with_user "aws" "$username" "$password"
bucket_found=false
for bucket in "${bucket_array[@]}"; do
if [ "$bucket" == "$BUCKET_ONE_NAME" ]; then
fail "$BUCKET_ONE_NAME shouldn't show up in 'user' bucket list"
elif [ "$bucket" == "versity-gwtest-user-bucket" ]; then
bucket_found=true
fi
done
if [ $bucket_found == false ]; then
fail "user-owned bucket not found in user list"
fi

delete_bucket "aws" "versity-gwtest-user-bucket"
delete_user "$username"
}

test_userplus_operation() {
if [[ $# -ne 1 ]]; then
fail "test admin user command requires command type"
fi

username="ABCDEG"
password="123456"

user_exists "$username" || local exists_result=$?
if [[ $exists_result -eq 0 ]]; then
delete_user "$username" || local delete_result=$?
[[ $delete_result -eq 0 ]] || fail "failed to delete user '$username'"
fi
delete_bucket "aws" "versity-gwtest-userplus-bucket"

create_user "$username" "123456" "userplus" || local create_result=$?
[[ $create_result -eq 0 ]] || fail "error creating user"
setup_bucket "aws" "$BUCKET_ONE_NAME" || local setup_result=$?
[[ $setup_result -eq 0 ]] || fail "error setting up bucket"

create_bucket_with_user "aws" "versity-gwtest-userplus-bucket" "$username" "$password" || create_result_two=$?
[[ $create_result_two -eq 0 ]] || fail "error creating bucket"

list_buckets_with_user "aws" "$username" "$password"
bucket_found=false
for bucket in "${bucket_array[@]}"; do
if [ "$bucket" == "$BUCKET_ONE_NAME" ]; then
fail "$BUCKET_ONE_NAME shouldn't show up in 'userplus' bucket list"
elif [ "$bucket" == "versity-gwtest-userplus-bucket" ]; then
bucket_found=true
fi
done
if [ $bucket_found == false ]; then
fail "userplus-owned bucket not found in user list"
fi

change_bucket_owner "$username" "$password" "versity-gwtest-userplus-bucket" "admin" || local change_result_two=$?
[[ $change_result_two -eq 1 ]] || fail "userplus shouldn't be able to change bucket owner"

delete_bucket "aws" "versity-gwtest-admin-bucket"
delete_user "$username" || delete_result=$?
[[ $delete_result -eq 0 ]] || fail "error deleting user"
}
19
tests/test_user_s3cmd.sh
Executable file
@@ -0,0 +1,19 @@
#!/usr/bin/env bats

source ./tests/test_user_common.sh

@test "test_admin_user_s3cmd" {
test_admin_user "s3cmd"
}

@test "test_create_user_already_exists_s3cmd" {
test_create_user_already_exists "s3cmd"
}

@test "test_user_user_s3cmd" {
test_user_user "s3cmd"
}

@test "test_userplus_operation_s3cmd" {
test_userplus_operation "s3cmd"
}
@@ -1,59 +1,8 @@
#!/usr/bin/env bats
#!/usr/bin/env bash

source ./tests/util_mc.sh
source ./tests/logger.sh

# create an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
create_bucket() {
if [ $# -ne 2 ]; then
echo "create bucket missing command type, bucket name"
return 1
fi

local exit_code=0
local error
if [[ $1 == "aws" ]]; then
error=$(aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "s3cmd" ]]; then
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "mc" ]]; then
error=$(mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
else
echo "invalid command type $1"
return 1
fi
if [ $exit_code -ne 0 ]; then
echo "error creating bucket: $error"
return 1
fi
return 0
}

create_bucket_invalid_name() {
if [ $# -ne 1 ]; then
echo "create bucket w/invalid name missing command type"
return 1
fi
local exit_code=0
if [[ $1 == "aws" ]]; then
bucket_create_error=$(aws --no-verify-ssl s3 mb "s3://" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
bucket_create_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb "s3://" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
bucket_create_error=$(mc --insecure mb "$MC_ALIAS" 2>&1) || exit_code=$?
else
echo "invalid command type $1"
return 1
fi
if [ $exit_code -eq 0 ]; then
echo "error: bucket should have not been created but was"
return 1
fi
export bucket_create_error
}

# delete an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
@@ -298,6 +247,7 @@ put_object() {
echo "invalid command type $1"
return 1
fi
log 5 "put object exit code: $exit_code"
if [ $exit_code -ne 0 ]; then
echo "error copying object to bucket: $error"
return 1
@@ -420,6 +370,35 @@ list_buckets() {
export bucket_array
}

list_buckets_with_user() {
if [[ $# -ne 3 ]]; then
echo "List buckets command missing format, user id, key"
return 1
fi

local exit_code=0
local output
if [[ $1 == "aws" ]]; then
output=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" aws --no-verify-ssl s3 ls s3:// 2>&1) || exit_code=$?
else
echo "invalid format: $1"
return 1
fi

if [ $exit_code -ne 0 ]; then
echo "error listing buckets: $output"
return 1
fi

bucket_array=()
while IFS= read -r line; do
bucket_name=$(echo "$line" | awk '{print $NF}')
bucket_array+=("${bucket_name%/}")
done <<< "$output"

export bucket_array
}

# list objects on versitygw, in bucket or folder
# param: path of bucket or folder
# export object_array (object names) on success, return 1 for failure
@@ -574,6 +553,8 @@ get_bucket_tags() {
echo "invalid command type $1"
return 1
fi
log 5 "Tags: $tags"
tags=$(echo "$tags" | grep -v "InsecureRequestWarning")
if [[ $result -ne 0 ]]; then
if [[ $tags =~ "No tags found" ]] || [[ $tags =~ "The TagSet does not exist" ]]; then
export tags=
@@ -648,6 +629,8 @@ get_object_tags() {
echo "error getting object tags: $tags"
return 1
fi
log 5 "$tags"
tags=$(echo "$tags" | grep -v "InsecureRequestWarning")
export tags
}

40
tests/util_aws.sh
Normal file
@@ -0,0 +1,40 @@
#!/usr/bin/env bash

abort_all_multipart_uploads() {
if [[ $# -ne 1 ]]; then
echo "abort all multipart uploads command missing bucket name"
return 1
fi

upload_list=$(aws --no-verify-ssl s3api list-multipart-uploads --bucket "$1" 2>&1) || list_result=$?
if [[ $list_result -ne 0 ]]; then
echo "error listing multipart uploads: $upload_list"
return 1
fi
log 5 "$upload_list"
while IFS= read -r line; do
if [[ $line != *"InsecureRequestWarning"* ]]; then
modified_upload_list+=("$line")
fi
done <<< "$upload_list"

log 5 "Modified upload list: ${modified_upload_list[*]}"
has_uploads=$(echo "${modified_upload_list[*]}" | jq 'has("Uploads")')
if [[ $has_uploads != false ]]; then
lines=$(echo "${modified_upload_list[*]}" | jq -r '.Uploads[] | "--key \(.Key) --upload-id \(.UploadId)"') || lines_result=$?
if [[ $lines_result -ne 0 ]]; then
echo "error getting lines for multipart upload delete: $lines"
return 1
fi

log 5 "$lines"
while read -r line; do
error=$(aws --no-verify-ssl s3api abort-multipart-upload --bucket "$1" $line 2>&1) || abort_result=$?
if [[ $abort_result -ne 0 ]]; then
echo "error aborting multipart upload: $error"
return 1
fi
done <<< "$lines"
fi
return 0
}
79
tests/util_bucket_create.sh
Normal file
@@ -0,0 +1,79 @@
#!/usr/bin/env bash

source ./tests/util_mc.sh
source ./tests/logger.sh

# create an AWS bucket
# param: bucket name
# return 0 for success, 1 for failure
create_bucket() {
if [ $# -ne 2 ]; then
echo "create bucket missing command type, bucket name"
return 1
fi

local exit_code=0
local error
if [[ $1 == "aws" ]]; then
error=$(aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "s3cmd" ]]; then
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "mc" ]]; then
error=$(mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
else
echo "invalid command type $1"
return 1
fi
if [ $exit_code -ne 0 ]; then
echo "error creating bucket: $error"
return 1
fi
return 0
}

create_bucket_with_user() {
if [ $# -ne 4 ]; then
echo "create bucket missing command type, bucket name, access, secret"
return 1
fi
local exit_code=0
if [[ $1 == "aws" ]]; then
error=$(AWS_ACCESS_KEY_ID="$3" AWS_SECRET_ACCESS_KEY="$4" aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "s3cmd" ]]; then
error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == "mc" ]]; then
error=$(mc --insecure mb "$MC_ALIAS"/"$2" 2>&1) || exit_code=$?
else
echo "invalid command type $1"
return 1
fi
if [ $exit_code -ne 0 ]; then
echo "error creating bucket: $error"
export error
return 1
fi
return 0
}

create_bucket_invalid_name() {
if [ $# -ne 1 ]; then
echo "create bucket w/invalid name missing command type"
return 1
fi
local exit_code=0
if [[ $1 == "aws" ]]; then
bucket_create_error=$(aws --no-verify-ssl s3 mb "s3://" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
bucket_create_error=$(s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate mb "s3://" 2>&1) || exit_code=$?
elif [[ $1 == 'mc' ]]; then
bucket_create_error=$(mc --insecure mb "$MC_ALIAS" 2>&1) || exit_code=$?
else
echo "invalid command type $1"
return 1
fi
if [ $exit_code -eq 0 ]; then
echo "error: bucket should have not been created but was"
return 1
fi
export bucket_create_error
}
85
tests/util_users.sh
Normal file
@@ -0,0 +1,85 @@
#!/usr/bin/env bash

create_user() {
if [[ $# -ne 3 ]]; then
echo "create user command requires user ID, key, and role"
return 1
fi
create_user_with_user "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$1" "$2" "$3" || create_result=$?
if [[ $create_result -ne 0 ]]; then
echo "error creating user: $error"
return 1
fi
return 0
}

create_user_with_user() {
if [[ $# -ne 5 ]]; then
echo "create user with user command requires creator ID, key, and new user ID, key, and role"
return 1
fi
error=$($VERSITY_EXE admin --allow-insecure --access "$1" --secret "$2" --endpoint-url "$AWS_ENDPOINT_URL" create-user --access "$3" --secret "$4" --role "$5") || local create_result=$?
if [[ $create_result -ne 0 ]]; then
echo "error creating user: $error"
return 1
fi
return 0
}

list_users() {
users=$($VERSITY_EXE admin --allow-insecure --access "$AWS_ACCESS_KEY_ID" --secret "$AWS_SECRET_ACCESS_KEY" --endpoint-url "$AWS_ENDPOINT_URL" list-users) || local list_result=$?
if [[ $list_result -ne 0 ]]; then
echo "error listing users: $users"
return 1
fi
parsed_users=()
while IFS= read -r line; do
parsed_users+=("$line")
done < <(awk 'NR>2 {print $1}' <<< "$users")
export parsed_users
return 0
}

user_exists() {
if [[ $# -ne 1 ]]; then
echo "user exists command requires username"
return 2
fi
list_users || local list_result=$?
if [[ $list_result -ne 0 ]]; then
echo "error listing user"
return 2
fi
for element in "${parsed_users[@]}"; do
if [[ $element == "$1" ]]; then
return 0
fi
done
return 1
}

delete_user() {
if [[ $# -ne 1 ]]; then
echo "delete user command requires user ID"
return 1
fi
error=$($VERSITY_EXE admin --allow-insecure --access $AWS_ACCESS_KEY_ID --secret $AWS_SECRET_ACCESS_KEY --endpoint-url $AWS_ENDPOINT_URL delete-user --access "$1") || local delete_result=$?
if [[ $delete_result -ne 0 ]]; then
echo "error deleting user: $error"
return 1
fi
return 0
}

change_bucket_owner() {
if [[ $# -ne 4 ]]; then
echo "change bucket owner command requires ID, key, bucket name, and new owner"
return 1
fi
error=$($VERSITY_EXE admin --allow-insecure --access "$1" --secret "$2" --endpoint-url "$AWS_ENDPOINT_URL" change-bucket-owner --bucket "$3" --owner "$4" 2>&1) || local change_result=$?
if [[ $change_result -ne 0 ]]; then
echo "error changing bucket owner: $error"
return 1
fi
return 0
}
@@ -44,6 +44,9 @@ check_exe_params() {
elif [[ $RUN_VERSITYGW != "true" ]] && [[ $RUN_VERSITYGW != "false" ]]; then
echo "RUN_VERSITYGW must be 'true' or 'false'"
return 1
elif [ -z "$USERS_FOLDER" ]; then
echo "No users folder parameter set"
return 1
fi
if [[ -r $GOCOVERDIR ]]; then
export GOCOVERDIR=$GOCOVERDIR
@@ -89,7 +92,7 @@ start_versity() {
fi
fi

export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_REGION AWS_PROFILE AWS_ENDPOINT_URL
export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_REGION AWS_PROFILE AWS_ENDPOINT_URL VERSITY_EXE
}

start_versity_process() {
@@ -128,7 +131,7 @@ run_versity_app_posix() {
echo "run versity app w/posix command requires access ID, secret key, process number"
return 1
fi
base_command=("$VERSITY_EXE" --access="$1" --secret="$2" --region="$AWS_REGION")
base_command=("$VERSITY_EXE" --access="$1" --secret="$2" --region="$AWS_REGION" --iam-dir="$USERS_FOLDER")
if [ -n "$CERT" ] && [ -n "$KEY" ]; then
base_command+=(--cert "$CERT" --key "$KEY")
fi
Block a user