Mirror of https://github.com/versity/versitygw.git
Synced 2026-01-31 07:22:04 +00:00

Compare commits: 69 commits
1c9da2629d
fd4bb8ffbc
f35cdfb20c
888fdd3688
646dc674b0
86fe01ede0
4add647501
748912fb3d
30ad7111a6
f2c2f18ac7
c72ab8ee27
22cfacb987
da3c6211bd
d14c1141f9
94d399775e
c29fc6a261
7f3302ba9e
ca2c8d2f48
da721b77f0
5dc3cd6889
fad19579ad
a0f67936d0
c094086d83
cca9fee532
fcd50d2d1f
ac6e17fd70
c37a22ffe1
9f78b94464
fe7d5d58cc
cdd12dab5a
fe584a8f63
04e71c44e9
5e35f89aa2
4025172897
ee315276f6
cc7ea685d9
2d75ef2d55
b983dadce9
5e83419e2c
34cadaeca0
4e5586d729
a1eb66db91
1aaf99dafd
02b907bc90
f487d2602c
5529796ccd
66ed32baca
ea2184077a
0a72c65b76
5932fd5e4e
ba561f55ab
b0c310aca6
47cf2a69c1
7c5258e6e9
59a4eab6ae
1bfe331edf
f471e39f4b
8d4a8fc5e0
60700e3fa7
847993a514
10decc0d2c
c17db864cd
7e530ee8ae
94d23cce9a
3e99c0de0c
2a61489e4c
5358abca3f
0704069262
70b700d577
.github/workflows/docker-bats.yml (vendored): 1 change

@@ -14,6 +14,7 @@ jobs:
       run: |
         cp tests/.env.docker.default tests/.env.docker
         cp tests/.secrets.default tests/.secrets
+        # see https://github.com/versity/versitygw/issues/1034
         docker build \
           --build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
           --build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" \
.github/workflows/system.yml (vendored): 30 changes

@@ -73,16 +73,17 @@ jobs:
         RUN_SET: "s3api-user"
         RECREATE_BUCKETS: "false"
         BACKEND: "posix"
-      - set: "s3api, s3, multipart|object, non-static, folder IAM"
-        IAM_TYPE: folder
-        RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
-        RECREATE_BUCKETS: "true"
-        BACKEND: "s3"
-      - set: "s3api, s3, policy|user, non-static, folder IAM"
-        IAM_TYPE: folder
-        RUN_SET: "s3api-policy,s3api-user"
-        RECREATE_BUCKETS: "true"
-        BACKEND: "s3"
+      # TODO fix/debug s3 gateway
+      #- set: "s3api, s3, multipart|object, non-static, folder IAM"
+      #  IAM_TYPE: folder
+      #  RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
+      #  RECREATE_BUCKETS: "true"
+      #  BACKEND: "s3"
+      #- set: "s3api, s3, policy|user, non-static, folder IAM"
+      #  IAM_TYPE: folder
+      #  RUN_SET: "s3api-policy,s3api-user"
+      #  RECREATE_BUCKETS: "true"
+      #  BACKEND: "s3"
       - set: "s3cmd, posix, file count, non-static, folder IAM"
         IAM_TYPE: folder
         RUN_SET: "s3cmd-file-count"

@@ -132,6 +133,14 @@ jobs:
         run: |
           sudo apt-get install libxml2-utils

+      # see https://github.com/versity/versitygw/issues/1034
+      - name: Install AWS cli
+        run: |
+          curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.22.35.zip" -o "awscliv2.zip"
+          unzip -o awscliv2.zip
+          ./aws/install -i ${{ github.workspace }}/aws-cli -b ${{ github.workspace }}/bin
+          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
+
       - name: Build and run
         env:
           IAM_TYPE: ${{ matrix.IAM_TYPE }}

@@ -170,6 +179,7 @@ jobs:
           export AWS_REGION=us-east-1
           export AWS_ACCESS_KEY_ID_TWO=user
           export AWS_SECRET_ACCESS_KEY_TWO=pass
+          export AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED
           aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile versity
           aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile versity
           aws configure set aws_region $AWS_REGION --profile versity
auth/acl.go: 159 changes

@@ -17,6 +17,7 @@ package auth
 import (
     "context"
     "encoding/json"
     "encoding/xml"
+    "errors"
     "fmt"
     "strings"

@@ -33,12 +34,13 @@ type ACL struct {
 }

 type Grantee struct {
-    Permission types.Permission
+    Permission Permission
     Access     string
     Type       types.Type
 }

 type GetBucketAclOutput struct {
     XMLName           xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlPolicy"`
     Owner             *types.Owner
     AccessControlList AccessControlList
 }

@@ -59,20 +61,124 @@ type AccessControlPolicy struct {
     Owner *types.Owner
 }

+func (acp *AccessControlPolicy) Validate() error {
+    if !acp.AccessControlList.isValid() {
+        return s3err.GetAPIError(s3err.ErrMalformedACL)
+    }
+
+    // The Owner can't be nil
+    if acp.Owner == nil {
+        return s3err.GetAPIError(s3err.ErrMalformedACL)
+    }
+
+    // The Owner ID can't be empty
+    if acp.Owner.ID == nil || *acp.Owner.ID == "" {
+        return s3err.GetAPIError(s3err.ErrMalformedACL)
+    }
+
+    return nil
+}
+
 type AccessControlList struct {
     Grants []Grant `xml:"Grant"`
 }

+// Validates the AccessControlList
+func (acl *AccessControlList) isValid() bool {
+    for _, el := range acl.Grants {
+        if !el.isValid() {
+            return false
+        }
+    }
+
+    return true
+}
+
+type Permission string
+
+const (
+    PermissionFullControl Permission = "FULL_CONTROL"
+    PermissionWrite       Permission = "WRITE"
+    PermissionWriteAcp    Permission = "WRITE_ACP"
+    PermissionRead        Permission = "READ"
+    PermissionReadAcp     Permission = "READ_ACP"
+)
+
+// Check if the permission is valid
+func (p Permission) isValid() bool {
+    return p == PermissionFullControl ||
+        p == PermissionRead ||
+        p == PermissionReadAcp ||
+        p == PermissionWrite ||
+        p == PermissionWriteAcp
+}
+
 type Grant struct {
-    Grantee    *Grt
-    Permission types.Permission
+    Grantee    *Grt       `xml:"Grantee"`
+    Permission Permission `xml:"Permission"`
 }

+// Checks if Grant is valid
+func (g *Grant) isValid() bool {
+    return g.Permission.isValid() && g.Grantee.isValid()
+}
+
 type Grt struct {
-    XMLNS  string     `xml:"xmlns:xsi,attr"`
-    XMLXSI types.Type `xml:"xsi:type,attr"`
-    Type   types.Type `xml:"Type"`
-    ID     string     `xml:"ID"`
+    XMLNS string     `xml:"xmlns:xsi,attr"`
+    Type  types.Type `xml:"xsi:type,attr"`
+    ID    string     `xml:"ID"`
 }

+// Custom Unmarshalling for Grt to parse xsi:type properly
+func (g *Grt) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+    // Iterate through the XML tokens to process the attributes
+    for _, attr := range start.Attr {
+        // Check if the attribute is xsi:type and belongs to the xsi namespace
+        if attr.Name.Space == "http://www.w3.org/2001/XMLSchema-instance" && attr.Name.Local == "type" {
+            g.Type = types.Type(attr.Value)
+        }
+        // Handle xmlns:xsi
+        if attr.Name.Local == "xmlns:xsi" {
+            g.XMLNS = attr.Value
+        }
+    }
+
+    // Decode the inner XML elements like ID
+    for {
+        t, err := d.Token()
+        if err != nil {
+            return err
+        }
+
+        switch se := t.(type) {
+        case xml.StartElement:
+            if se.Name.Local == "ID" {
+                if err := d.DecodeElement(&g.ID, &se); err != nil {
+                    return err
+                }
+            }
+        case xml.EndElement:
+            if se.Name.Local == start.Name.Local {
+                return nil
+            }
+        }
+    }
+}
+
+// Validates Grt
+func (g *Grt) isValid() bool {
+    // Validate the Type
+    // Only these 2 types are supported in the gateway
+    if g.Type != types.TypeCanonicalUser && g.Type != types.TypeGroup {
+        return false
+    }
+
+    // The ID prop shouldn't be empty
+    if g.ID == "" {
+        return false
+    }
+
+    return true
+}
+
 func ParseACL(data []byte) (ACL, error) {

@@ -99,10 +205,9 @@ func ParseACLOutput(data []byte) (GetBucketAclOutput, error) {
         acs := elem.Access
         grants = append(grants, Grant{
             Grantee: &Grt{
-                XMLNS:  "http://www.w3.org/2001/XMLSchema-instance",
-                XMLXSI: elem.Type,
-                ID:     acs,
-                Type:   elem.Type,
+                XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
+                ID:    acs,
+                Type:  elem.Type,
             },
             Permission: elem.Permission,
         })

@@ -125,7 +230,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
     defaultGrantees := []Grantee{
         {
-            Permission: types.PermissionFullControl,
+            Permission: PermissionFullControl,
             Access:     acl.Owner,
             Type:       types.TypeCanonicalUser,
         },

@@ -136,19 +241,19 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
     switch input.ACL {
     case types.BucketCannedACLPublicRead:
         defaultGrantees = append(defaultGrantees, Grantee{
-            Permission: types.PermissionRead,
+            Permission: PermissionRead,
             Access:     "all-users",
             Type:       types.TypeGroup,
         })
     case types.BucketCannedACLPublicReadWrite:
         defaultGrantees = append(defaultGrantees, []Grantee{
             {
-                Permission: types.PermissionRead,
+                Permission: PermissionRead,
                 Access:     "all-users",
                 Type:       types.TypeGroup,
             },
             {
-                Permission: types.PermissionWrite,
+                Permission: PermissionWrite,
                 Access:     "all-users",
                 Type:       types.TypeGroup,
             },

@@ -165,7 +270,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
     for _, str := range fullControlList {
         defaultGrantees = append(defaultGrantees, Grantee{
             Access:     str,
-            Permission: types.PermissionFullControl,
+            Permission: PermissionFullControl,
             Type:       types.TypeCanonicalUser,
         })
     }

@@ -175,7 +280,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
     for _, str := range readList {
         defaultGrantees = append(defaultGrantees, Grantee{
             Access:     str,
-            Permission: types.PermissionRead,
+            Permission: PermissionRead,
             Type:       types.TypeCanonicalUser,
         })
     }

@@ -185,7 +290,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
     for _, str := range readACPList {
         defaultGrantees = append(defaultGrantees, Grantee{
             Access:     str,
-            Permission: types.PermissionReadAcp,
+            Permission: PermissionReadAcp,
             Type:       types.TypeCanonicalUser,
         })
     }

@@ -195,7 +300,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
     for _, str := range writeList {
         defaultGrantees = append(defaultGrantees, Grantee{
             Access:     str,
-            Permission: types.PermissionWrite,
+            Permission: PermissionWrite,
             Type:       types.TypeCanonicalUser,
         })
     }

@@ -205,7 +310,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
     for _, str := range writeACPList {
         defaultGrantees = append(defaultGrantees, Grantee{
             Access:     str,
-            Permission: types.PermissionWriteAcp,
+            Permission: PermissionWriteAcp,
             Type:       types.TypeCanonicalUser,
         })
     }

@@ -262,8 +367,8 @@ func CheckIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
             result = append(result, acc)
             continue
         }
-        if err == ErrNotSupported {
-            return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
+        if errors.Is(err, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)) {
+            return nil, err
         }
         return nil, fmt.Errorf("check user account: %w", err)
     }

@@ -286,7 +391,7 @@ func splitUnique(s, divider string) []string {
     return result
 }

-func verifyACL(acl ACL, access string, permission types.Permission) error {
+func verifyACL(acl ACL, access string, permission Permission) error {
     grantee := Grantee{
         Access:     access,
         Permission: permission,

@@ -294,7 +399,7 @@ func verifyACL(acl ACL, access string, permission Permission) error {
     }
     granteeFullCtrl := Grantee{
         Access:     access,
-        Permission: types.PermissionFullControl,
+        Permission: PermissionFullControl,
         Type:       types.TypeCanonicalUser,
     }
     granteeAllUsers := Grantee{

@@ -353,7 +458,7 @@ func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {

 type AccessOptions struct {
     Acl           ACL
-    AclPermission types.Permission
+    AclPermission Permission
     IsRoot        bool
     Acc           Account
     Bucket        string

@@ -364,7 +469,7 @@ type AccessOptions struct {

 func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
     if opts.Readonly {
-        if opts.AclPermission == types.PermissionWrite || opts.AclPermission == types.PermissionWriteAcp {
+        if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
             return s3err.GetAPIError(s3err.ErrAccessDenied)
         }
     }

@@ -422,7 +527,7 @@ func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource

     if err := VerifyAccess(ctx, be, AccessOptions{
         Acl:           srcBucketAcl,
-        AclPermission: types.PermissionRead,
+        AclPermission: PermissionRead,
         IsRoot:        opts.IsRoot,
         Acc:           opts.Acc,
         Bucket:        srcBucket,
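Note: taken together, the acl.go changes replace types.Permission with a gateway-local Permission type and add strict validation of client-supplied ACL documents. Below is a minimal in-package sketch of how the new Validate path could be exercised; the test file name, payload, and harness are illustrative, not part of the diff.

// acl_validate_sketch_test.go (hypothetical), package auth
package auth

import (
    "encoding/xml"
    "testing"
)

func TestAccessControlPolicyValidateSketch(t *testing.T) {
    payload := []byte(`<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Owner><ID>root</ID></Owner>
  <AccessControlList>
    <Grant>
      <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">
        <ID>user1</ID>
      </Grantee>
      <Permission>READ</Permission>
    </Grant>
  </AccessControlList>
</AccessControlPolicy>`)

    // Grt.UnmarshalXML picks the grantee type out of the xsi:type attribute.
    var acp AccessControlPolicy
    if err := xml.Unmarshal(payload, &acp); err != nil {
        t.Fatal(err)
    }
    // Validate rejects malformed ACLs: unknown permissions, grantee types
    // other than CanonicalUser/Group, an empty grantee ID, or a missing owner.
    if err := acp.Validate(); err != nil {
        t.Fatalf("expected a valid ACL, got %v", err)
    }
}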
@@ -124,6 +124,12 @@ type Opts struct {
     CacheDisable bool
     CacheTTL     int
     CachePrune   int
+    IpaHost      string
+    IpaVaultName string
+    IpaUser      string
+    IpaPassword  string
+    IpaInsecure  bool
+    IpaDebug     bool
 }

 func New(o *Opts) (IAMService, error) {

@@ -149,6 +155,9 @@ func New(o *Opts) (IAMService, error) {
             o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
             o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
         fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
+    case o.IpaHost != "":
+        svc, err = NewIpaIAMService(o.RootAccount, o.IpaHost, o.IpaVaultName, o.IpaUser, o.IpaPassword, o.IpaInsecure, o.IpaDebug)
+        fmt.Printf("initializing IPA IAM with %q\n", o.IpaHost)
     default:
         // if no iam options selected, default to the single user mode
         fmt.Println("No IAM service configured, enabling single account mode")
auth/iam_ipa.go (new file): 446 lines

@@ -0,0 +1,446 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "crypto/rsa"
    "crypto/tls"
    "crypto/x509"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "log"
    "net/http"
    "net/http/cookiejar"
    "net/url"
    "strconv"
    "strings"
)

const IpaVersion = "2.254"

type IpaIAMService struct {
    client          http.Client
    id              int
    version         string
    host            string
    vaultName       string
    username        string
    password        string
    kraTransportKey *rsa.PublicKey
    debug           bool
    rootAcc         Account
}

var _ IAMService = &IpaIAMService{}

func NewIpaIAMService(rootAcc Account, host, vaultName, username, password string, isInsecure, debug bool) (*IpaIAMService, error) {
    ipa := IpaIAMService{
        id:        0,
        version:   IpaVersion,
        host:      host,
        vaultName: vaultName,
        username:  username,
        password:  password,
        debug:     debug,
        rootAcc:   rootAcc,
    }
    jar, err := cookiejar.New(nil)
    if err != nil {
        // this should never happen
        return nil, fmt.Errorf("cookie jar creation: %w", err)
    }

    mTLSConfig := &tls.Config{InsecureSkipVerify: isInsecure}
    tr := &http.Transport{
        TLSClientConfig: mTLSConfig,
    }
    ipa.client = http.Client{Jar: jar, Transport: tr}

    err = ipa.login()
    if err != nil {
        return nil, fmt.Errorf("ipa login failed: %w", err)
    }

    req, err := ipa.newRequest("vaultconfig_show/1", []string{}, map[string]any{"all": true})
    if err != nil {
        return nil, fmt.Errorf("ipa vaultconfig_show: %w", err)
    }
    vaultConfig := struct {
        Kra_Server_Server             []string
        Transport_Cert                Base64EncodedWrapped
        Wrapping_default_algorithm    string
        Wrapping_supported_algorithms []string
    }{}
    err = ipa.rpc(req, &vaultConfig)
    if err != nil {
        return nil, fmt.Errorf("ipa vault config: %w", err)
    }

    cert, err := x509.ParseCertificate(vaultConfig.Transport_Cert)
    if err != nil {
        return nil, fmt.Errorf("ipa cannot parse vault certificate: %w", err)
    }

    ipa.kraTransportKey = cert.PublicKey.(*rsa.PublicKey)

    isSupported := false
    for _, algo := range vaultConfig.Wrapping_supported_algorithms {
        if algo == "aes-128-cbc" {
            isSupported = true
            break
        }
    }

    if !isSupported {
        return nil,
            fmt.Errorf("IPA vault does not support aes-128-cbc. Only %v supported",
                vaultConfig.Wrapping_supported_algorithms)
    }
    return &ipa, nil
}

func (ipa *IpaIAMService) CreateAccount(account Account) error {
    return fmt.Errorf("not implemented")
}

func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
    if access == ipa.rootAcc.Access {
        return ipa.rootAcc, nil
    }

    req, err := ipa.newRequest("user_show/1", []string{access}, map[string]any{})
    if err != nil {
        return Account{}, fmt.Errorf("ipa user_show: %w", err)
    }

    userResult := struct {
        Gidnumber []string
        Uidnumber []string
    }{}

    err = ipa.rpc(req, &userResult)
    if err != nil {
        return Account{}, err
    }

    uid, err := strconv.Atoi(userResult.Uidnumber[0])
    if err != nil {
        return Account{}, fmt.Errorf("ipa uid invalid: %w", err)
    }
    gid, err := strconv.Atoi(userResult.Gidnumber[0])
    if err != nil {
        return Account{}, fmt.Errorf("ipa gid invalid: %w", err)
    }

    account := Account{
        Access:  access,
        Role:    RoleUser,
        UserID:  uid,
        GroupID: gid,
    }

    session_key := make([]byte, 16)

    _, err = rand.Read(session_key)
    if err != nil {
        return account, fmt.Errorf("ipa cannot generate session key: %w", err)
    }

    encryptedKey, err := rsa.EncryptPKCS1v15(rand.Reader, ipa.kraTransportKey, session_key)
    if err != nil {
        return account, fmt.Errorf("ipa vault secret retrieval: %w", err)
    }

    req, err = ipa.newRequest("vault_retrieve_internal/1", []string{ipa.vaultName},
        map[string]any{"username": access,
            "session_key":   Base64EncodedWrapped(encryptedKey),
            "wrapping_algo": "aes-128-cbc"})
    if err != nil {
        return Account{}, fmt.Errorf("ipa vault_retrieve_internal: %w", err)
    }

    data := struct {
        Vault_data Base64EncodedWrapped
        Nonce      Base64EncodedWrapped
    }{}

    err = ipa.rpc(req, &data)
    if err != nil {
        return account, err
    }

    aes, err := aes.NewCipher(session_key)
    if err != nil {
        return account, fmt.Errorf("ipa cannot create AES cipher: %w", err)
    }
    cbc := cipher.NewCBCDecrypter(aes, data.Nonce)
    cbc.CryptBlocks(data.Vault_data, data.Vault_data)
    secretUnpaddedJson, err := pkcs7Unpad(data.Vault_data, 16)
    if err != nil {
        return account, fmt.Errorf("ipa cannot unpad decrypted result: %w", err)
    }

    secret := struct {
        Data Base64Encoded
    }{}
    json.Unmarshal(secretUnpaddedJson, &secret)
    account.Secret = string(secret.Data)

    return account, nil
}

func (ipa *IpaIAMService) UpdateUserAccount(access string, props MutableProps) error {
    return fmt.Errorf("not implemented")
}

func (ipa *IpaIAMService) DeleteUserAccount(access string) error {
    return fmt.Errorf("not implemented")
}

func (ipa *IpaIAMService) ListUserAccounts() ([]Account, error) {
    return []Account{}, fmt.Errorf("not implemented")
}

func (ipa *IpaIAMService) Shutdown() error {
    return nil
}

// Implementation

func (ipa *IpaIAMService) login() error {
    form := url.Values{}
    form.Set("user", ipa.username)
    form.Set("password", ipa.password)

    req, err := http.NewRequest(
        "POST",
        fmt.Sprintf("%s/ipa/session/login_password", ipa.host),
        strings.NewReader(form.Encode()))
    if err != nil {
        return err
    }

    req.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

    resp, err := ipa.client.Do(req)
    if err != nil {
        return err
    }

    if resp.StatusCode == 401 {
        return errors.New("cannot login to FreeIPA: invalid credentials")
    }

    if resp.StatusCode != 200 {
        return fmt.Errorf("cannot login to FreeIPA: status code %d", resp.StatusCode)
    }

    return nil
}

type rpcRequest = string

type rpcResponse struct {
    Result    json.RawMessage
    Principal string
    Id        int
    Version   string
}

func (p rpcResponse) String() string {
    return string(p.Result)
}

var errRpc = errors.New("IPA RPC error")

func (ipa *IpaIAMService) rpc(req rpcRequest, value any) error {
    err := ipa.login()
    if err != nil {
        return err
    }

    res, err := ipa.rpcInternal(req)
    if err != nil {
        return err
    }

    return json.Unmarshal(res.Result, value)
}

func (ipa *IpaIAMService) rpcInternal(req rpcRequest) (rpcResponse, error) {
    httpReq, err := http.NewRequest("POST",
        fmt.Sprintf("%s/ipa/session/json", ipa.host),
        strings.NewReader(req))
    if err != nil {
        return rpcResponse{}, err
    }

    ipa.log(fmt.Sprintf("%v", req))
    httpReq.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
    httpReq.Header.Set("Content-Type", "application/json")

    httpResp, err := ipa.client.Do(httpReq)
    if err != nil {
        return rpcResponse{}, err
    }

    bytes, err := io.ReadAll(httpResp.Body)
    ipa.log(string(bytes))
    if err != nil {
        return rpcResponse{}, err
    }

    result := struct {
        Result struct {
            Json    json.RawMessage `json:"result"`
            Value   string          `json:"value"`
            Summary any             `json:"summary"`
        } `json:"result"`
        Error     json.RawMessage `json:"error"`
        Id        int             `json:"id"`
        Principal string          `json:"principal"`
        Version   string          `json:"version"`
    }{}

    err = json.Unmarshal(bytes, &result)
    if err != nil {
        return rpcResponse{}, err
    }
    if string(result.Error) != "null" {
        return rpcResponse{}, fmt.Errorf("%s: %w", string(result.Error), errRpc)
    }

    return rpcResponse{
        Result:    result.Result.Json,
        Principal: result.Principal,
        Id:        result.Id,
        Version:   result.Version,
    }, nil
}

func (ipa *IpaIAMService) newRequest(method string, args []string, dict map[string]any) (rpcRequest, error) {
    id := ipa.id
    ipa.id++

    dict["version"] = ipa.version

    jmethod, errMethod := json.Marshal(method)
    jargs, errArgs := json.Marshal(args)
    jdict, errDict := json.Marshal(dict)

    err := errors.Join(errMethod, errArgs, errDict)
    if err != nil {
        return "", fmt.Errorf("ipa request invalid: %w", err)
    }

    request := map[string]interface{}{
        "id":     id,
        "method": json.RawMessage(jmethod),
        "params": []json.RawMessage{json.RawMessage(jargs), json.RawMessage(jdict)},
    }

    requestJSON, err := json.Marshal(request)
    if err != nil {
        return "", fmt.Errorf("failed to marshal request: %w", err)
    }

    return string(requestJSON), nil
}

// pkcs7Unpad validates and unpads data from the given bytes slice.
// The returned value will be 1 to n bytes smaller depending on the
// amount of padding, where n is the block size.
func pkcs7Unpad(b []byte, blocksize int) ([]byte, error) {
    if blocksize <= 0 {
        return nil, errors.New("invalid blocksize")
    }
    if len(b) == 0 {
        return nil, errors.New("invalid PKCS7 data (empty or not padded)")
    }
    if len(b)%blocksize != 0 {
        return nil, errors.New("invalid padding on input")
    }
    c := b[len(b)-1]
    n := int(c)
    if n == 0 || n > len(b) {
        return nil, errors.New("invalid padding on input")
    }
    for i := 0; i < n; i++ {
        if b[len(b)-n+i] != c {
            return nil, errors.New("invalid padding on input")
        }
    }
    return b[:len(b)-n], nil
}

/*
e.g.

"value" {
    "__base64__": "aGVsbG93b3JsZAo="
}
*/
type Base64EncodedWrapped []byte

func (b *Base64EncodedWrapped) UnmarshalJSON(data []byte) error {
    intermediate := struct {
        Base64 string `json:"__base64__"`
    }{}
    err := json.Unmarshal(data, &intermediate)
    if err != nil {
        return err
    }
    *b, err = base64.StdEncoding.DecodeString(intermediate.Base64)
    return err
}

func (b *Base64EncodedWrapped) MarshalJSON() ([]byte, error) {
    intermediate := struct {
        Base64 string `json:"__base64__"`
    }{Base64: base64.StdEncoding.EncodeToString(*b)}
    return json.Marshal(intermediate)
}

/*
e.g.

"value": "aGVsbG93b3JsZAo="
*/
type Base64Encoded []byte

func (b *Base64Encoded) UnmarshalJSON(data []byte) error {
    var intermediate string
    err := json.Unmarshal(data, &intermediate)
    if err != nil {
        return err
    }
    *b, err = base64.StdEncoding.DecodeString(intermediate)
    return err
}

func (ipa *IpaIAMService) log(msg string) {
    if ipa.debug {
        log.Println(msg)
    }
}
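Note: the vault retrieval in GetUserAccount hinges on a small unwrap step: the KRA returns the vault payload encrypted with AES-128-CBC under the client-generated session key, and pkcs7Unpad strips the block padding after decryption. A standalone round-trip sketch of just that step; pkcs7Pad and the payload are illustrative, and pkcs7Unpad is condensed from the new file.

package main

import (
    "bytes"
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "errors"
    "fmt"
)

// pkcs7Unpad condensed from auth/iam_ipa.go: validate and strip PKCS#7 padding.
func pkcs7Unpad(b []byte, blocksize int) ([]byte, error) {
    if blocksize <= 0 || len(b) == 0 || len(b)%blocksize != 0 {
        return nil, errors.New("invalid padding on input")
    }
    n := int(b[len(b)-1])
    if n == 0 || n > len(b) {
        return nil, errors.New("invalid padding on input")
    }
    for i := 0; i < n; i++ {
        if b[len(b)-n+i] != b[len(b)-1] {
            return nil, errors.New("invalid padding on input")
        }
    }
    return b[:len(b)-n], nil
}

// pkcs7Pad is the encrypt-side counterpart (illustrative, not in the diff).
func pkcs7Pad(b []byte, blocksize int) []byte {
    n := blocksize - len(b)%blocksize
    return append(b, bytes.Repeat([]byte{byte(n)}, n)...)
}

func main() {
    sessionKey := make([]byte, 16) // 16 bytes: aes-128-cbc, as negotiated with the KRA
    rand.Read(sessionKey)
    iv := make([]byte, aes.BlockSize) // stands in for the nonce returned by vault_retrieve_internal
    rand.Read(iv)

    plain := pkcs7Pad([]byte(`{"data":"aGVsbG93b3JsZAo="}`), aes.BlockSize)

    block, _ := aes.NewCipher(sessionKey)
    ct := make([]byte, len(plain))
    cipher.NewCBCEncrypter(block, iv).CryptBlocks(ct, plain)

    // Decrypt side, mirroring GetUserAccount: CBC-decrypt in place, then unpad.
    cipher.NewCBCDecrypter(block, iv).CryptBlocks(ct, ct)
    out, err := pkcs7Unpad(ct, aes.BlockSize)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // {"data":"aGVsbG93b3JsZAo="}
}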
@@ -15,7 +15,7 @@
 package auth

 import (
-    "errors"
+    "github.com/versity/versitygw/s3err"
 )

 // IAMServiceSingle manages the single tenant (root-only) IAM service

@@ -23,31 +23,29 @@ type IAMServiceSingle struct{}

 var _ IAMService = &IAMServiceSingle{}

-var ErrNotSupported = errors.New("method is not supported")
-
 // CreateAccount not valid in single tenant mode
 func (IAMServiceSingle) CreateAccount(account Account) error {
-    return ErrNotSupported
+    return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
 }

 // GetUserAccount no accounts in single tenant mode
 func (IAMServiceSingle) GetUserAccount(access string) (Account, error) {
-    return Account{}, ErrNoSuchUser
+    return Account{}, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
 }

 // UpdateUserAccount no accounts in single tenant mode
 func (IAMServiceSingle) UpdateUserAccount(access string, props MutableProps) error {
-    return ErrNotSupported
+    return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
 }

 // DeleteUserAccount no accounts in single tenant mode
 func (IAMServiceSingle) DeleteUserAccount(access string) error {
-    return ErrNotSupported
+    return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
 }

 // ListUserAccounts no accounts in single tenant mode
 func (IAMServiceSingle) ListUserAccounts() ([]Account, error) {
-    return []Account{}, nil
+    return []Account{}, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
 }

 // Shutdown graceful termination of service
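Note: moving from the ErrNotSupported sentinel to s3err API errors is what lets callers such as CheckIfAccountsExist in acl.go branch with errors.Is. A hypothetical caller-side probe; the accountsSupported helper and probe user are illustrative, not part of the diff.

package main

import (
    "errors"
    "fmt"

    "github.com/versity/versitygw/auth"
    "github.com/versity/versitygw/s3err"
)

// accountsSupported reports whether the configured IAM service implements the
// multi-account admin methods; IAMServiceSingle now answers every one of them
// with ErrAdminMethodNotSupported.
func accountsSupported(iamSvc auth.IAMService) bool {
    _, err := iamSvc.GetUserAccount("probe-user")
    return !errors.Is(err, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported))
}

func main() {
    fmt.Println(accountsSupported(auth.IAMServiceSingle{})) // false in single-tenant mode
}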
@@ -47,7 +47,7 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath, mountPath,
     tls.ServerCertificate.FromBytes = []byte(serverCert)
     if clientCert != "" {
         if clientCertKey == "" {
-            return nil, fmt.Errorf("client certificate and client certificate should both be specified")
+            return nil, fmt.Errorf("client certificate and client certificate key should both be specified")
         }

         tls.ClientCertificate.FromBytes = []byte(clientCert)
backend/meta/none.go (new file): 54 lines

@@ -0,0 +1,54 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package meta

import (
    "os"
)

// NoMeta is a metadata storer that does not store metadata.
// This can be useful for read only mounts where attempting to store metadata
// would fail.
type NoMeta struct{}

// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
// always returns ErrNoSuchKey
func (NoMeta) RetrieveAttribute(_ *os.File, _, _, _ string) ([]byte, error) {
    return nil, ErrNoSuchKey
}

// StoreAttribute stores the value of a specific attribute for an object or a bucket.
// always returns nil without storing the attribute
func (NoMeta) StoreAttribute(_ *os.File, _, _, _ string, _ []byte) error {
    return nil
}

// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
// always returns nil without deleting the attribute
func (NoMeta) DeleteAttribute(_, _, _ string) error {
    return nil
}

// ListAttributes lists all attributes for an object or a bucket.
// always returns an empty list of attributes
func (NoMeta) ListAttributes(_, _ string) ([]string, error) {
    return []string{}, nil
}

// DeleteAttributes removes all attributes for an object or a bucket.
// always returns nil without deleting any attributes
func (NoMeta) DeleteAttributes(bucket, object string) error {
    return nil
}
backend/meta/sidecar.go (new file): 139 lines

@@ -0,0 +1,139 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package meta

import (
    "errors"
    "fmt"
    "os"
    "path/filepath"
)

// SideCar is a metadata storer that uses sidecar files to store metadata.
type SideCar struct {
    dir string
}

const (
    sidecarmeta = "meta"
)

// NewSideCar creates a new SideCar metadata storer.
func NewSideCar(dir string) (SideCar, error) {
    fi, err := os.Lstat(dir)
    if err != nil {
        return SideCar{}, fmt.Errorf("failed to stat directory: %v", err)
    }
    if !fi.IsDir() {
        return SideCar{}, fmt.Errorf("not a directory")
    }

    return SideCar{dir: dir}, nil
}

// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
func (s SideCar) RetrieveAttribute(_ *os.File, bucket, object, attribute string) ([]byte, error) {
    metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
    if object == "" {
        metadir = filepath.Join(s.dir, bucket, sidecarmeta)
    }
    attr := filepath.Join(metadir, attribute)

    value, err := os.ReadFile(attr)
    if errors.Is(err, os.ErrNotExist) {
        return nil, ErrNoSuchKey
    }
    if err != nil {
        return nil, fmt.Errorf("failed to read attribute: %v", err)
    }

    return value, nil
}

// StoreAttribute stores the value of a specific attribute for an object or a bucket.
func (s SideCar) StoreAttribute(_ *os.File, bucket, object, attribute string, value []byte) error {
    metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
    if object == "" {
        metadir = filepath.Join(s.dir, bucket, sidecarmeta)
    }
    err := os.MkdirAll(metadir, 0777)
    if err != nil {
        return fmt.Errorf("failed to create metadata directory: %v", err)
    }

    attr := filepath.Join(metadir, attribute)
    err = os.WriteFile(attr, value, 0666)
    if err != nil {
        return fmt.Errorf("failed to write attribute: %v", err)
    }

    return nil
}

// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
func (s SideCar) DeleteAttribute(bucket, object, attribute string) error {
    metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
    if object == "" {
        metadir = filepath.Join(s.dir, bucket, sidecarmeta)
    }
    attr := filepath.Join(metadir, attribute)

    err := os.Remove(attr)
    if errors.Is(err, os.ErrNotExist) {
        return ErrNoSuchKey
    }
    if err != nil {
        return fmt.Errorf("failed to remove attribute: %v", err)
    }

    return nil
}

// ListAttributes lists all attributes for an object or a bucket.
func (s SideCar) ListAttributes(bucket, object string) ([]string, error) {
    metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
    if object == "" {
        metadir = filepath.Join(s.dir, bucket, sidecarmeta)
    }

    ents, err := os.ReadDir(metadir)
    if errors.Is(err, os.ErrNotExist) {
        return []string{}, nil
    }
    if err != nil {
        return nil, fmt.Errorf("failed to list attributes: %v", err)
    }

    var attrs []string
    for _, ent := range ents {
        attrs = append(attrs, ent.Name())
    }

    return attrs, nil
}

// DeleteAttributes removes all attributes for an object or a bucket.
func (s SideCar) DeleteAttributes(bucket, object string) error {
    metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
    if object == "" {
        metadir = filepath.Join(s.dir, bucket, sidecarmeta)
    }

    err := os.RemoveAll(metadir)
    if err != nil && !errors.Is(err, os.ErrNotExist) {
        return fmt.Errorf("failed to remove attributes: %v", err)
    }
    return nil
}
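Note: the sidecar storer lays metadata out as plain files under its own root, one file per attribute: <sidecar-dir>/<bucket>/<object>/meta/<attribute>, or <sidecar-dir>/<bucket>/meta/<attribute> for bucket-level attributes. A hypothetical usage sketch; the directory and values are illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/versity/versitygw/backend/meta"
)

func main() {
    // The sidecar root must already exist and be a directory.
    sc, err := meta.NewSideCar("/var/lib/vgw-meta")
    if err != nil {
        log.Fatal(err)
    }

    // Writes /var/lib/vgw-meta/mybucket/a/b/obj/meta/etag
    err = sc.StoreAttribute(nil, "mybucket", "a/b/obj", "etag", []byte("d41d8cd9"))
    if err != nil {
        log.Fatal(err)
    }

    // Reads the same file back; a missing attribute yields meta.ErrNoSuchKey.
    val, err := sc.RetrieveAttribute(nil, "mybucket", "a/b/obj", "etag")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(val))
}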
@@ -107,9 +107,14 @@ type PosixOpts struct {
     BucketLinks   bool
     VersioningDir string
     NewDirPerm    fs.FileMode
+    SideCarDir    string
 }

 func New(rootdir string, meta meta.MetadataStorer, opts PosixOpts) (*Posix, error) {
+    if opts.SideCarDir != "" && strings.HasPrefix(opts.SideCarDir, rootdir) {
+        return nil, fmt.Errorf("sidecar directory cannot be inside the gateway root directory")
+    }
+
     err := os.Chdir(rootdir)
     if err != nil {
         return nil, fmt.Errorf("chdir %v: %w", rootdir, err)

@@ -120,46 +125,36 @@ func New(rootdir string, meta meta.MetadataStorer, opts PosixOpts) (*Posix, error) {
         return nil, fmt.Errorf("open %v: %w", rootdir, err)
     }

+    rootdirAbs, err := filepath.Abs(rootdir)
+    if err != nil {
+        return nil, fmt.Errorf("get absolute path of %v: %w", rootdir, err)
+    }
+
     var verioningdirAbs string
     // Ensure the versioning directory isn't within the root directory
     if opts.VersioningDir != "" {
-        rootdirAbs, err := filepath.Abs(rootdir)
+        verioningdirAbs, err = validateSubDir(rootdirAbs, opts.VersioningDir)
         if err != nil {
-            return nil, fmt.Errorf("get absolute path of %v: %w", rootdir, err)
-        }
-
-        verioningdirAbs, err = filepath.Abs(opts.VersioningDir)
-        if err != nil {
-            return nil, fmt.Errorf("get absolute path of %v: %w", opts.VersioningDir, err)
-        }
-
-        // Ensure the paths end with a separator
-        if !strings.HasSuffix(rootdirAbs, string(filepath.Separator)) {
-            rootdirAbs += string(filepath.Separator)
-        }
-
-        if !strings.HasSuffix(verioningdirAbs, string(filepath.Separator)) {
-            verioningdirAbs += string(filepath.Separator)
-        }
-
-        // Ensure the posix root directory doesn't contain the versioning directory
-        if strings.HasPrefix(verioningdirAbs, rootdirAbs) {
-            return nil, fmt.Errorf("the root directory %v contains the versioning directory %v", rootdir, opts.VersioningDir)
-        }
-
-        vDir, err := os.Stat(verioningdirAbs)
-        if err != nil {
-            return nil, fmt.Errorf("stat versioning dir: %w", err)
-        }
-
-        // Check the versioning path to be a directory
-        if !vDir.IsDir() {
-            return nil, fmt.Errorf("versioning path should be a directory")
+            return nil, err
         }
     }

-    fmt.Printf("Bucket versioning enabled with directory: %v\n", verioningdirAbs)
+    var sidecardirAbs string
+    // Ensure the sidecar directory isn't within the root directory
+    if opts.SideCarDir != "" {
+        sidecardirAbs, err = validateSubDir(rootdirAbs, opts.SideCarDir)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    if verioningdirAbs != "" {
+        fmt.Println("Bucket versioning enabled with directory:", verioningdirAbs)
+    }
+
+    if sidecardirAbs != "" {
+        fmt.Println("Using sidecar directory for metadata:", sidecardirAbs)
+    }

     return &Posix{
         meta: meta,

@@ -175,6 +170,48 @@ func New(rootdir string, meta meta.MetadataStorer, opts PosixOpts) (*Posix, error) {
     }, nil
 }

+func validateSubDir(root, dir string) (string, error) {
+    absDir, err := filepath.Abs(dir)
+    if err != nil {
+        return "", fmt.Errorf("get absolute path of %v: %w",
+            dir, err)
+    }
+
+    if isDirBelowRoot(root, absDir) {
+        return "", fmt.Errorf("the root directory %v contains the directory %v",
+            root, dir)
+    }
+
+    vDir, err := os.Stat(absDir)
+    if err != nil {
+        return "", fmt.Errorf("stat %q: %w", absDir, err)
+    }
+
+    if !vDir.IsDir() {
+        return "", fmt.Errorf("path %q is not a directory", absDir)
+    }
+
+    return absDir, nil
+}
+
+func isDirBelowRoot(root, dir string) bool {
+    // Ensure the paths ends with a separator
+    if !strings.HasSuffix(root, string(filepath.Separator)) {
+        root += string(filepath.Separator)
+    }
+
+    if !strings.HasSuffix(dir, string(filepath.Separator)) {
+        dir += string(filepath.Separator)
+    }
+
+    // Ensure the root directory doesn't contain the directory
+    if strings.HasPrefix(dir, root) {
+        return true
+    }
+
+    return false
+}
+
 func (p *Posix) Shutdown() {
     p.rootfd.Close()
 }
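Note: the separator-terminated comparison in isDirBelowRoot avoids false positives on sibling directories that merely share a name prefix. A standalone sketch of the check; the logic is lifted from the diff, the paths are illustrative.

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// isDirBelowRoot as in the diff: both paths are terminated with a
// separator before the prefix test.
func isDirBelowRoot(root, dir string) bool {
    if !strings.HasSuffix(root, string(filepath.Separator)) {
        root += string(filepath.Separator)
    }
    if !strings.HasSuffix(dir, string(filepath.Separator)) {
        dir += string(filepath.Separator)
    }
    return strings.HasPrefix(dir, root)
}

func main() {
    fmt.Println(isDirBelowRoot("/srv/gwroot", "/srv/gwroot/versions")) // true: rejected
    fmt.Println(isDirBelowRoot("/srv/gwroot", "/srv/gwroot2"))         // false: a naive prefix test would say true
    fmt.Println(isDirBelowRoot("/srv/gwroot", "/srv/versions"))        // false: allowed
}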
@@ -74,6 +74,9 @@ var (
     metricsService  string
     statsdServers   string
     dogstatsServers string
+    ipaHost, ipaVaultName string
+    ipaUser, ipaPassword  string
+    ipaInsecure, ipaDebug bool
 )

 var (

@@ -206,6 +209,7 @@ func initFlags() []cli.Flag {
     &cli.BoolFlag{
         Name:        "debug",
         Usage:       "enable debug output",
         Value:       false,
         EnvVars:     []string{"VGW_DEBUG"},
         Destination: &debug,
     },

@@ -506,6 +510,42 @@ func initFlags() []cli.Flag {
         Aliases:     []string{"mds"},
         Destination: &dogstatsServers,
     },
+    &cli.StringFlag{
+        Name:        "ipa-host",
+        Usage:       "FreeIPA server url e.g. https://ipa.example.test",
+        EnvVars:     []string{"VGW_IPA_HOST"},
+        Destination: &ipaHost,
+    },
+    &cli.StringFlag{
+        Name:        "ipa-vault-name",
+        Usage:       "A name of the user vault containing their secret",
+        EnvVars:     []string{"VGW_IPA_VAULT_NAME"},
+        Destination: &ipaVaultName,
+    },
+    &cli.StringFlag{
+        Name:        "ipa-user",
+        Usage:       "Username used to connect to FreeIPA. Needs permissions to read user vault contents",
+        EnvVars:     []string{"VGW_IPA_USER"},
+        Destination: &ipaUser,
+    },
+    &cli.StringFlag{
+        Name:        "ipa-password",
+        Usage:       "Password of the user used to connect to FreeIPA.",
+        EnvVars:     []string{"VGW_IPA_PASSWORD"},
+        Destination: &ipaPassword,
+    },
+    &cli.BoolFlag{
+        Name:        "ipa-insecure",
+        Usage:       "Verify TLS certificate of FreeIPA server. Default is 'true'.",
+        EnvVars:     []string{"VGW_IPA_INSECURE"},
+        Destination: &ipaInsecure,
+    },
+    &cli.BoolFlag{
+        Name:        "ipa-debug",
+        Usage:       "FreeIPA IAM debug output",
+        EnvVars:     []string{"VGW_IPA_DEBUG"},
+        Destination: &ipaDebug,
+    },
     }
 }

@@ -623,6 +663,12 @@ func runGateway(ctx context.Context, be backend.Backend) error {
     CacheDisable: iamCacheDisable,
     CacheTTL:     iamCacheTTL,
     CachePrune:   iamCachePrune,
+    IpaHost:      ipaHost,
+    IpaVaultName: ipaVaultName,
+    IpaUser:      ipaUser,
+    IpaPassword:  ipaPassword,
+    IpaInsecure:  ipaInsecure,
+    IpaDebug:     ipaDebug,
 })
 if err != nil {
     return fmt.Errorf("setup iam: %w", err)
@@ -29,6 +29,8 @@ var (
     bucketlinks   bool
     versioningDir string
     dirPerms      uint
+    sidecar       string
+    nometa        bool
 )

 func posixCommand() *cli.Command {

@@ -79,6 +81,18 @@ will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
         DefaultText: "0755",
         Value:       0755,
     },
+    &cli.StringFlag{
+        Name:        "sidecar",
+        Usage:       "use provided sidecar directory to store metadata",
+        EnvVars:     []string{"VGW_META_SIDECAR"},
+        Destination: &sidecar,
+    },
+    &cli.BoolFlag{
+        Name:        "nometa",
+        Usage:       "disable metadata storage",
+        EnvVars:     []string{"VGW_META_NONE"},
+        Destination: &nometa,
+    },
     },
 }
}

@@ -89,24 +103,45 @@ func runPosix(ctx *cli.Context) error {
     }

     gwroot := (ctx.Args().Get(0))
-    err := meta.XattrMeta{}.Test(gwroot)
-    if err != nil {
-        return fmt.Errorf("posix xattr check: %v", err)
-    }

     if dirPerms > math.MaxUint32 {
         return fmt.Errorf("invalid directory permissions: %d", dirPerms)
     }

-    be, err := posix.New(gwroot, meta.XattrMeta{}, posix.PosixOpts{
+    if nometa && sidecar != "" {
+        return fmt.Errorf("cannot use both nometa and sidecar metadata")
+    }
+
+    opts := posix.PosixOpts{
         ChownUID:      chownuid,
         ChownGID:      chowngid,
         BucketLinks:   bucketlinks,
         VersioningDir: versioningDir,
         NewDirPerm:    fs.FileMode(dirPerms),
-    })
+    }
+
+    var ms meta.MetadataStorer
+    switch {
+    case sidecar != "":
+        sc, err := meta.NewSideCar(sidecar)
+        if err != nil {
+            return fmt.Errorf("failed to init sidecar metadata: %w", err)
+        }
+        ms = sc
+        opts.SideCarDir = sidecar
+    case nometa:
+        ms = meta.NoMeta{}
+    default:
+        ms = meta.XattrMeta{}
+        err := meta.XattrMeta{}.Test(gwroot)
+        if err != nil {
+            return fmt.Errorf("xattr check failed: %w", err)
+        }
+    }
+
+    be, err := posix.New(gwroot, ms, opts)
     if err != nil {
-        return fmt.Errorf("init posix: %v", err)
+        return fmt.Errorf("failed to init posix backend: %w", err)
     }

     return runGateway(ctx.Context, be)
@@ -38,6 +38,7 @@ var (
     checksumDisable   bool
     versioningEnabled bool
     azureTests        bool
+    tlsStatus         bool
 )

 func testCommand() *cli.Command {

@@ -79,6 +80,12 @@ func initTestFlags() []cli.Flag {
         Aliases:     []string{"d"},
         Destination: &debug,
     },
+    &cli.BoolFlag{
+        Name:        "allow-insecure",
+        Usage:       "skip tls verification",
+        Aliases:     []string{"ai"},
+        Destination: &tlsStatus,
+    },
     }
 }

@@ -211,6 +218,7 @@ func initTestCommands() []*cli.Command {
     integration.WithEndpoint(endpoint),
     integration.WithConcurrency(concurrency),
     integration.WithPartSize(partSize),
+    integration.WithTLSStatus(tlsStatus),
 }
 if debug {
     opts = append(opts, integration.WithDebug())

@@ -271,6 +279,7 @@ func initTestCommands() []*cli.Command {
     integration.WithRegion(region),
     integration.WithEndpoint(endpoint),
     integration.WithConcurrency(concurrency),
+    integration.WithTLSStatus(tlsStatus),
 }
 if debug {
     opts = append(opts, integration.WithDebug())

@@ -296,6 +305,7 @@ func getAction(tf testFunc) func(*cli.Context) error {
     integration.WithSecret(awsSecret),
     integration.WithRegion(region),
     integration.WithEndpoint(endpoint),
+    integration.WithTLSStatus(tlsStatus),
 }
 if debug {
     opts = append(opts, integration.WithDebug())

@@ -333,6 +343,7 @@ func extractIntTests() (commands []*cli.Command) {
     integration.WithSecret(awsSecret),
     integration.WithRegion(region),
     integration.WithEndpoint(endpoint),
+    integration.WithTLSStatus(tlsStatus),
 }
 if debug {
     opts = append(opts, integration.WithDebug())
go.mod: 74 changes

@@ -3,28 +3,28 @@ module github.com/versity/versitygw
 go 1.21.0

 require (
-    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
-    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
-    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0
-    github.com/DataDog/datadog-go/v5 v5.5.0
-    github.com/aws/aws-sdk-go-v2 v1.32.6
-    github.com/aws/aws-sdk-go-v2/service/s3 v1.70.0
-    github.com/aws/smithy-go v1.22.1
-    github.com/go-ldap/ldap/v3 v3.4.8
-    github.com/gofiber/fiber/v2 v2.52.5
+    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
+    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1
+    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
+    github.com/DataDog/datadog-go/v5 v5.6.0
+    github.com/aws/aws-sdk-go-v2 v1.34.0
+    github.com/aws/aws-sdk-go-v2/service/s3 v1.74.1
+    github.com/aws/smithy-go v1.22.2
+    github.com/go-ldap/ldap/v3 v3.4.10
+    github.com/gofiber/fiber/v2 v2.52.6
     github.com/google/go-cmp v0.6.0
     github.com/google/uuid v1.6.0
     github.com/hashicorp/vault-client-go v0.4.3
-    github.com/nats-io/nats.go v1.37.0
+    github.com/nats-io/nats.go v1.38.0
     github.com/oklog/ulid/v2 v2.1.0
     github.com/pkg/xattr v0.4.10
    github.com/segmentio/kafka-go v0.4.47
     github.com/smira/go-statsd v1.3.4
     github.com/urfave/cli/v2 v2.27.5
-    github.com/valyala/fasthttp v1.57.0
+    github.com/valyala/fasthttp v1.58.0
     github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
-    golang.org/x/sync v0.9.0
-    golang.org/x/sys v0.27.0
+    golang.org/x/sync v0.10.0
+    golang.org/x/sys v0.29.0
 )

@@ -32,11 +32,11 @@ require (
     github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
     github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
     github.com/Microsoft/go-winio v0.6.2 // indirect
-    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
-    github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect
-    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect
-    github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect
+    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25 // indirect
+    github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
+    github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 // indirect
+    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 // indirect
+    github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 // indirect
     github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
     github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
     github.com/hashicorp/go-cleanhttp v0.5.2 // indirect

@@ -45,33 +45,33 @@ require (
     github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
     github.com/kylelemons/godebug v1.1.0 // indirect
     github.com/mitchellh/go-homedir v1.1.0 // indirect
-    github.com/nats-io/nkeys v0.4.8 // indirect
+    github.com/nats-io/nkeys v0.4.9 // indirect
     github.com/nats-io/nuid v1.0.1 // indirect
-    github.com/pierrec/lz4/v4 v4.1.21 // indirect
+    github.com/pierrec/lz4/v4 v4.1.22 // indirect
     github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
     github.com/ryanuber/go-glob v1.0.0 // indirect
-    golang.org/x/crypto v0.29.0 // indirect
-    golang.org/x/net v0.31.0 // indirect
-    golang.org/x/text v0.20.0 // indirect
-    golang.org/x/time v0.8.0 // indirect
+    golang.org/x/crypto v0.32.0 // indirect
+    golang.org/x/net v0.34.0 // indirect
+    golang.org/x/text v0.21.0 // indirect
+    golang.org/x/time v0.9.0 // indirect
 )

 require (
     github.com/andybalholm/brotli v1.1.1 // indirect
-    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
-    github.com/aws/aws-sdk-go-v2/config v1.28.6
-    github.com/aws/aws-sdk-go-v2/credentials v1.17.47
-    github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.42
-    github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 // indirect
-    github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
+    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect
+    github.com/aws/aws-sdk-go-v2/config v1.29.2
+    github.com/aws/aws-sdk-go-v2/credentials v1.17.55
+    github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.54
+    github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29 // indirect
+    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29 // indirect
+    github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10 // indirect
+    github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
     github.com/klauspost/compress v1.17.11 // indirect
-    github.com/mattn/go-colorable v0.1.13 // indirect
+    github.com/mattn/go-colorable v0.1.14 // indirect
     github.com/mattn/go-isatty v0.0.20 // indirect
     github.com/mattn/go-runewidth v0.0.16 // indirect
     github.com/rivo/uniseg v0.4.7 // indirect
189
go.sum
189
go.sum
@@ -1,23 +1,23 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 h1:1mvYtZfWQAnwNah/C+Z+Jb9rQH95LPE2vlmMuWAHJk8=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1/go.mod h1:75I/mXtme1JyWFtz8GocPHVFyH421IBoZErnO16dd0k=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1 h1:Bk5uOhSAenHyR5P61D/NzeQCv+4fEVV8mOkJ82NqpWw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1/go.mod h1:QZ4pw3or1WPmRBxf0cHd1tknzrT54WPBOQoGutCPvSU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw=
github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
@@ -25,48 +25,48 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4=
github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc=
github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo=
github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko=
github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw=
github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.42 h1:vEnk9vtjJ62OO2wOhEmgKMZgNcn1w0aF7XCiNXO5rK0=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.42/go.mod h1:GUOPbPJWRZsdt1OJ355upCrry4d3ZFgdX6rhT7gtkto=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 h1:r67ps7oHCYnflpgDy2LZU0MAQtQbYIOqNNnqGO6xQkE=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25/go.mod h1:GrGY+Q4fIokYLtjCVB/aFfCVL6hhGUFl8inD18fDalE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 h1:HCpPsWqmYQieU7SS6E9HXfdAMSud0pteVXieJmcpIRI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6/go.mod h1:ngUiVRCco++u+soRRVBIvBZxSMMvOVMXA4PJ36JLfSw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 h1:BbGDtTi0T1DYlmjBiCr/le3wzhA37O8QTC5/Ab8+EXk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6/go.mod h1:hLMJt7Q8ePgViKupeymbqI0la+t9/iYFBjxQCFwuAwI=
github.com/aws/aws-sdk-go-v2/service/s3 v1.70.0 h1:HrHFR8RoS4l4EvodRMFcJMYQ8o3UhmALn2nbInXaxZA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.70.0/go.mod h1:sT/iQz8JK3u/5gZkT+Hmr7GzVZehUMkRZpOaAwYXeGY=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8=
github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro=
github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/aws/aws-sdk-go-v2 v1.34.0 h1:9iyL+cjifckRGEVpRKZP3eIxVlL06Qk1Tk13vreaVQU=
github.com/aws/aws-sdk-go-v2 v1.34.0/go.mod h1:JgstGg0JjWU1KpVJjD5H0y0yyAIpSdKEq556EI6yOOM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
github.com/aws/aws-sdk-go-v2/config v1.29.2 h1:JuIxOEPcSKpMB0J+khMjznG9LIhIBdmqNiEcPclnwqc=
github.com/aws/aws-sdk-go-v2/config v1.29.2/go.mod h1:HktTHregOZwNSM/e7WTfVSu9RCX+3eOv+6ij27PtaYs=
github.com/aws/aws-sdk-go-v2/credentials v1.17.55 h1:CDhKnDEaGkLA5ZszV/qw5uwN5M8rbv9Cl0JRN+PRsaM=
github.com/aws/aws-sdk-go-v2/credentials v1.17.55/go.mod h1:kPD/vj+RB5MREDUky376+zdnjZpR+WgdBBvwrmnlmKE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25 h1:kU7tmXNaJ07LsyN3BUgGqAmVmQtq0w6duVIHAKfp0/w=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25/go.mod h1:OiC8+OiqrURb1wrwmr/UbOVLFSWEGxjinj5C299VQdo=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.54 h1:6BWOAho3Cgdy4cmNJ4HWY8VZgqODEU7Gw78XXireNZI=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.54/go.mod h1:n+t/oyYErOV3jf/GxNTVlizSM9RMV1yH7jvcIvld3Do=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29 h1:Ej0Rf3GMv50Qh4G4852j2djtoDb7AzQ7MuQeFHa3D70=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29/go.mod h1:oeNTC7PwJNoM5AznVr23wxhLnuJv0ZDe5v7w0wqIs9M=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29 h1:6e8a71X+9GfghragVevC5bZqvATtc3mAMgxpSNbgzF0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29/go.mod h1:c4jkZiQ+BWpNqq7VtrxjwISrLrt/VvPq3XiopkUIolI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29 h1:g9OUETuxA8i/Www5Cby0R3WSTe7ppFTZXHVLNskNS4w=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29/go.mod h1:CQk+koLR1QeY1+vm7lqNfFii07DEderKq6T3F1L2pyc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3 h1:EP1ITDgYVPM2dL1bBBntJ7AW5yTjuWGz9XO+CZwpALU=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3/go.mod h1:5lWNWeAgWenJ/BZ/CP9k9DjLbC0pjnM045WjXRPPi14=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10 h1:hN4yJBGswmFTOVYqmbz1GBs9ZMtQe8SrYxPwrkrlRv8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10/go.mod h1:TsxON4fEZXyrKY+D+3d2gSTyJkGORexIYab9PTf56DA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10 h1:fXoWC2gi7tdJYNTPnnlSGzEVwewUchOi8xVq/dkg8Qs=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10/go.mod h1:cvzBApD5dVazHU8C2rbBQzzzsKc8m5+wNJ9mCRZLKPc=
github.com/aws/aws-sdk-go-v2/service/s3 v1.74.1 h1:9LawY3cDJ3HE+v2GMd5SOkNLDwgN4K7TsCjyVBYu/L4=
github.com/aws/aws-sdk-go-v2/service/s3 v1.74.1/go.mod h1:hHnELVnIHltd8EOF3YzahVX6F6y2C6dNqpRj1IMkS5I=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 h1:kznaW4f81mNMlREkU9w3jUuJvU5g/KsqDV43ab7Rp6s=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.12/go.mod h1:bZy9r8e0/s0P7BSDHgMLXK2KvdyRRBIQ2blKlvLt0IU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 h1:mUwIpAvILeKFnRx4h1dEgGEFGuV8KJ3pEScZWVFYuZA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11/go.mod h1:JDJtD+b8HNVv71axz8+S5492KM8wTzHRFpMKQbPlYxw=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 h1:g9d+TOsu3ac7SgmY2dUf1qMgu/uJVTlQ4VCbH6hRxSw=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.10/go.mod h1:WZfNmntu92HO44MVZAubQaz3qCuIdeOdog2sADfU6hU=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -74,13 +74,12 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.8 h1:loKJyspcRezt2Q3ZRMq2p/0v8iOurlmeXDPw6fikSvQ=
github.com/go-ldap/ldap/v3 v3.4.8/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk=
github.com/gofiber/fiber/v2 v2.52.5 h1:tWoP1MJQjGEe4GB5TUGOi7P2E0ZMMRx5ZTG4rT+yGMo=
github.com/gofiber/fiber/v2 v2.52.5/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU=
github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY=
github.com/gofiber/fiber/v2 v2.52.6 h1:Rfp+ILPiYSvvVuIPvxrBns+HJp8qGLDnLJawAu27XVI=
github.com/gofiber/fiber/v2 v2.52.6/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
@@ -124,27 +123,26 @@ github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IX
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.8 h1:+wee30071y3vCZAYRsnrmIPaOe47A/SkK/UBDPdIV70=
github.com/nats-io/nkeys v0.4.8/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nats.go v1.38.0 h1:A7P+g7Wjp4/NWqDOOP/K6hfhr54DvdDQUznt5JFg9XA=
github.com/nats-io/nats.go v1.38.0/go.mod h1:IGUM++TwokGnXPs82/wCuiHS02/aKrdYUQkU8If6yjw=
github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -152,8 +150,8 @@ github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -175,14 +173,14 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.57.0 h1:Xw8SjWGEP/+wAAgyy5XTvgrWlOD1+TxbbvNADYCm1Tg=
github.com/valyala/fasthttp v1.57.0/go.mod h1:h6ZBaPRlzpZ6O3H5t2gEk1Qi33+TmLvfwgLLp0t9CpE=
github.com/valyala/fasthttp v1.58.0 h1:GGB2dWxSbEprU9j0iMJHgdKYJVDyjrOwF9RE59PbRuE=
github.com/valyala/fasthttp v1.58.0/go.mod h1:SYXvHHaFp7QZHGKSHmoMipInhrI5StHrhDTYVEjK/Kw=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44 h1:Wx1o3pNrCzsHIIDyZ2MLRr6tF/1FhAr7HNDn80QqDWE=
@@ -203,14 +201,19 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -220,17 +223,22 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -242,23 +250,27 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -267,15 +279,18 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
89
runtests.sh
@@ -10,6 +10,14 @@ mkdir /tmp/versioning.covdata
rm -rf /tmp/versioningdir
mkdir /tmp/versioningdir

# setup tls certificate and key
echo "Generating TLS certificate and key in the cert.pem and key.pem files"

openssl genpkey -algorithm RSA -out key.pem -pkeyopt rsa_keygen_bits:2048
openssl req -new -x509 -key key.pem -out cert.pem -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"
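# optional sanity check, not part of the commit: the self-signed cert
# generated above can be inspected with the standard openssl x509 tool
# openssl x509 -in cert.pem -noout -subject -enddate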

echo "Running the sdk test over http"
# run server in background not versioning-enabled
# port: 7070 (default)
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass --iam-dir /tmp/gw posix /tmp/gw &
@@ -17,7 +25,7 @@ GW_PID=$!

sleep 1

# check if versioning-enabled gateway process is still running
# check if gateway process is still running
if ! kill -0 $GW_PID; then
  echo "server no longer running"
  exit 1
@@ -45,9 +53,48 @@ fi

kill $GW_PID

echo "Running the sdk test over https"

# run server in background with TLS certificate
# port: 7071
GOCOVERDIR=/tmp/https.covdata ./versitygw --cert "$PWD/cert.pem" --key "$PWD/key.pem" -p :7071 -a user -s pass --iam-dir /tmp/gw posix /tmp/gw &
GW_HTTPS_PID=$!

sleep 1

# check if https gateway process is still running
if ! kill -0 $GW_HTTPS_PID; then
  echo "server no longer running"
  exit 1
fi

# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 full-flow; then
  echo "full flow tests failed"
  kill $GW_HTTPS_PID
  exit 1
fi
# posix tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 posix; then
  echo "posix tests failed"
  kill $GW_HTTPS_PID
  exit 1
fi
# iam tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 iam; then
  echo "iam tests failed"
  kill $GW_HTTPS_PID
  exit 1
fi

kill $GW_HTTPS_PID


echo "Running the sdk test over http against the versioning-enabled gateway"
# run server in background versioning-enabled
# port: 7071
GOCOVERDIR=/tmp/versioning.covdata ./versitygw -p :7071 -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
# port: 7072
GOCOVERDIR=/tmp/versioning.covdata ./versitygw -p :7072 -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_VS_PID=$!

# wait a second for server to start up
@@ -61,13 +108,13 @@ fi

# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7071 full-flow -vs; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 full-flow -vs; then
  echo "versioning-enabled full-flow tests failed"
  kill $GW_VS_PID
  exit 1
fi
# posix tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7071 posix -vs; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 posix -vs; then
  echo "versioning-enabled posix tests failed"
  kill $GW_VS_PID
  exit 1
@@ -76,6 +123,38 @@ fi
# kill off server
kill $GW_VS_PID

echo "Running the sdk test over https against the versioning-enabled gateway"
# run server in background versioning-enabled
# port: 7073
GOCOVERDIR=/tmp/versioning.https.covdata ./versitygw --cert "$PWD/cert.pem" --key "$PWD/key.pem" -p :7073 -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_VS_HTTPS_PID=$!

# wait a second for server to start up
sleep 1

# check if versioning-enabled gateway process is still running
if ! kill -0 $GW_VS_HTTPS_PID; then
  echo "versioning-enabled server no longer running"
  exit 1
fi

# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 full-flow -vs; then
  echo "versioning-enabled full-flow tests failed"
  kill $GW_VS_HTTPS_PID
  exit 1
fi
# posix tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 posix -vs; then
  echo "versioning-enabled posix tests failed"
  kill $GW_VS_HTTPS_PID
  exit 1
fi

# kill off server
kill $GW_VS_HTTPS_PID

exit 0

# if the above binary was built with -cover enabled (make testbin),
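Each GOCOVERDIR run above drops binary coverage counters into its own directory; with Go 1.20 or newer these can be inspected or merged after the fact, e.g. `go tool covdata percent -i=/tmp/covdata,/tmp/https.covdata,/tmp/versioning.covdata,/tmp/versioning.https.covdata` (the usual covdata workflow, not a line from the script itself).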
@@ -167,7 +167,7 @@ func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
    Owner: owner,
    Grantees: []auth.Grantee{
        {
            Permission: types.PermissionFullControl,
            Permission: auth.PermissionFullControl,
            Access: owner,
            Type: types.TypeCanonicalUser,
        },
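The recurring edit in this and the following hunks swaps the AWS SDK's types.Permission* constants for gateway-local auth.Permission* ones. A minimal sketch of what such a local declaration could look like (hypothetical names and values, not confirmed contents of versitygw's auth package; the string values mirror S3's ACL permission names, which would keep the later types.Permission(grt.Permission) conversion a plain string cast):

    // package auth -- hypothetical excerpt
    type Permission string

    const (
        PermissionFullControl Permission = "FULL_CONTROL"
        PermissionRead        Permission = "READ"
        PermissionReadAcp     Permission = "READ_ACP"
        PermissionWrite       Permission = "WRITE"
        PermissionWriteAcp    Permission = "WRITE_ACP"
    )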
@@ -126,7 +126,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -175,7 +175,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -217,7 +217,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -282,7 +282,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
    err = auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -319,7 +319,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionReadAcp,
        AclPermission: auth.PermissionReadAcp,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -352,7 +352,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -468,7 +468,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -589,7 +589,16 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
    }

    if res.Body != nil {
        ctx.Response().SetBodyStream(res.Body, int(getint64(res.ContentLength)))
        err := utils.StreamResponseBody(ctx, res.Body)
        if err != nil {
            SendResponse(ctx, nil,
                &MetaOpts{
                    Logger: c.logger,
                    MetricsMng: c.mm,
                    Action: metrics.ActionGetObject,
                    BucketOwner: parsedAcl.Owner,
                })
        }
    }

    return SendResponse(ctx, nil,
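With SetBodyStream and a fixed length, the body is read only later in fasthttp's response write path, so the handler itself never sees a failed read; routing the copy through a helper that returns an error lets it record the failed GetObject. A minimal sketch of what such a helper might look like (hypothetical; the real utils.StreamResponseBody in versitygw may differ):

    package utils // hypothetical sketch

    import (
        "io"

        "github.com/gofiber/fiber/v2"
    )

    // StreamResponseBody copies the backend object stream into the
    // response body and reports any read/write error to the caller.
    func StreamResponseBody(ctx *fiber.Ctx, body io.Reader) error {
        // close the backend stream if it is closable
        if c, ok := body.(io.Closer); ok {
            defer c.Close()
        }
        _, err := io.Copy(ctx.Response().BodyWriter(), body)
        return err
    }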
@@ -637,7 +646,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -685,7 +694,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -722,7 +731,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -762,7 +771,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -792,7 +801,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -845,7 +854,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -886,7 +895,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionReadAcp,
        AclPermission: auth.PermissionReadAcp,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -926,7 +935,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -977,7 +986,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -1027,7 +1036,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionRead,
        AclPermission: auth.PermissionRead,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -1127,7 +1136,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
    err = auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionWrite,
        AclPermission: auth.PermissionWrite,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -1180,7 +1189,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
    if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionWrite,
        AclPermission: auth.PermissionWrite,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -1210,7 +1219,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionWrite,
        AclPermission: auth.PermissionWrite,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -1272,7 +1281,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
    if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionWrite,
        AclPermission: auth.PermissionWrite,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -1313,7 +1322,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
    err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionWrite,
        AclPermission: auth.PermissionWrite,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -1384,7 +1393,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWriteAcp,
            AclPermission: auth.PermissionWriteAcp,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -1415,13 +1424,12 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
        })
    }

    if accessControlPolicy.Owner == nil ||
        accessControlPolicy.Owner.ID == nil ||
        *accessControlPolicy.Owner.ID == "" {
    err = accessControlPolicy.Validate()
    if err != nil {
        if c.debug {
            log.Println("empty access control policy owner")
            log.Printf("invalid access control policy: %v\n", err)
        }
        return SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedACL),
        return SendResponse(ctx, err,
            &MetaOpts{
                Logger: c.logger,
                Action: metrics.ActionPutBucketAcl,
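Folding the owner checks into a Validate method lets the handler forward whatever API-shaped error the policy produces instead of hard-coding ErrMalformedACL. A plausible shape for the validator, reconstructed from the checks it replaces (hypothetical, not the confirmed versitygw implementation):

    // hypothetical sketch of the validator this hunk introduces
    func (acp *AccessControlPolicy) Validate() error {
        if acp.Owner == nil || acp.Owner.ID == nil || *acp.Owner.ID == "" {
            return s3err.GetAPIError(s3err.ErrMalformedACL)
        }
        // further grant/permission validation would live here
        return nil
    }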
@@ -1733,7 +1741,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
    err = auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionWrite,
        AclPermission: auth.PermissionWrite,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -1766,7 +1774,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
    if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionWrite,
        AclPermission: auth.PermissionWrite,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -1839,7 +1847,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
    if err := auth.VerifyAccess(ctx.Context(), c.be, auth.AccessOptions{
        Readonly: c.readonly,
        Acl: parsedAcl,
        AclPermission: types.PermissionWrite,
        AclPermission: auth.PermissionWrite,
        IsRoot: isRoot,
        Acc: acct,
        Bucket: bucket,
@@ -1903,7 +1911,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
    err = auth.VerifyObjectCopyAccess(ctx.Context(), c.be, copySource,
        auth.AccessOptions{
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -1967,7 +1975,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2073,7 +2081,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
            ID: &grt.Grantee.ID,
            Type: grt.Grantee.Type,
        },
        Permission: grt.Permission,
        Permission: types.Permission(grt.Permission),
    })
}

@@ -2174,7 +2182,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
    err = auth.VerifyObjectCopyAccess(ctx.Context(), c.be, copySource,
        auth.AccessOptions{
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2306,7 +2314,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2437,7 +2445,7 @@ func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2469,7 +2477,7 @@ func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2501,7 +2509,7 @@ func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2532,7 +2540,7 @@ func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2585,7 +2593,7 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2658,7 +2666,7 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2696,7 +2704,7 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2737,7 +2745,7 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2826,7 +2834,7 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionRead,
            AclPermission: auth.PermissionRead,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -2909,7 +2917,7 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionRead,
            AclPermission: auth.PermissionRead,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -3080,7 +3088,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -3135,7 +3143,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionRead,
            AclPermission: auth.PermissionRead,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -3189,11 +3197,25 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
        })
    }

    if len(data.Parts) == 0 {
        if c.debug {
            log.Println("empty parts provided for complete multipart upload")
        }
        return SendXMLResponse(ctx, nil,
            s3err.GetAPIError(s3err.ErrEmptyParts),
            &MetaOpts{
                Logger: c.logger,
                MetricsMng: c.mm,
                Action: metrics.ActionCompleteMultipartUpload,
                BucketOwner: parsedAcl.Owner,
            })
    }

    err = auth.VerifyAccess(ctx.Context(), c.be,
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,
@@ -3253,7 +3275,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
        auth.AccessOptions{
            Readonly: c.readonly,
            Acl: parsedAcl,
            AclPermission: types.PermissionWrite,
            AclPermission: auth.PermissionWrite,
            IsRoot: isRoot,
            Acc: acct,
            Bucket: bucket,

@@ -941,12 +941,12 @@ func TestS3ApiController_PutActions(t *testing.T) {
    </Tagging>
    `

    retentionBody := `
    <Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    <Mode>GOVERNANCE</Mode>
    <RetainUntilDate>2025-01-01T00:00:00Z</RetainUntilDate>
    </Retention>
    `
    //retentionBody := `
    //<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    // <Mode>GOVERNANCE</Mode>
    // <RetainUntilDate>2025-01-01T00:00:00Z</RetainUntilDate>
    //</Retention>
    //`
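    // The retention fixture and its "success" case below are commented out
    // rather than deleted; a plausible reason (not stated in the commit) is
    // that the hard-coded RetainUntilDate of 2025-01-01 is no longer in the
    // future, which would make the GOVERNANCE retention request invalid
    // whenever the suite runs after that date.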

    legalHoldBody := `
    <LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
@@ -1076,15 +1076,15 @@ func TestS3ApiController_PutActions(t *testing.T) {
        wantErr: false,
        statusCode: 400,
    },
    {
        name: "put-object-retention-success",
        app: app,
        args: args{
            req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?retention", strings.NewReader(retentionBody)),
        },
        wantErr: false,
        statusCode: 200,
    },
    //{
    // name: "put-object-retention-success",
    // app: app,
    // args: args{
    // req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?retention", strings.NewReader(retentionBody)),
    // },
    // wantErr: false,
    // statusCode: 200,
    //},
    {
        name: "put-legal-hold-invalid-request",
        app: app,
@@ -1713,6 +1713,19 @@ func TestS3ApiController_CreateActions(t *testing.T) {
    </SelectObjectContentRequest>
    `

    completMpBody := `
    <CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    <Part>
    <ETag>etag</ETag>
    <PartNumber>1</PartNumber>
    </Part>
    </CompleteMultipartUpload>
    `

    completMpEmptyBody := `
    <CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/"></CompleteMultipartUpload>
    `

    app.Use(func(ctx *fiber.Ctx) error {
        ctx.Locals("account", auth.Account{Access: "valid access"})
        ctx.Locals("isRoot", true)
@@ -1765,11 +1778,20 @@ func TestS3ApiController_CreateActions(t *testing.T) {
        wantErr: false,
        statusCode: 400,
    },
    {
        name: "Complete-multipart-upload-empty-parts",
        app: app,
        args: args{
            req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(completMpEmptyBody)),
        },
        wantErr: false,
        statusCode: 400,
    },
    {
        name: "Complete-multipart-upload-success",
        app: app,
        args: args{
            req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(`<root><key>body</key></root>`)),
            req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(completMpBody)),
        },
        wantErr: false,
        statusCode: 200,

@@ -47,7 +47,7 @@ func ProcessChunkedBody(root RootUserConfig, iam auth.IAMService, logger s3log.A
    if utils.IsBigDataAction(ctx) {
        var err error
        wrapBodyReader(ctx, func(r io.Reader) io.Reader {
            var cr *utils.ChunkReader
            var cr io.Reader
            cr, err = utils.NewChunkReader(ctx, r, authData, region, acct.Secret, date)
            return cr
        })
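The local variable's declared type widens here from the concrete *utils.ChunkReader to the io.Reader interface, presumably so the chunk-reader constructor is free to hand back different reader implementations for the newly supported unsigned and trailer payload types listed below.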
@@ -260,19 +260,3 @@ func removeSpace(str string) string {
    }
    return b.String()
}

var (
    specialValues = map[string]bool{
        "UNSIGNED-PAYLOAD": true,
        "STREAMING-UNSIGNED-PAYLOAD-TRAILER": true,
        "STREAMING-AWS4-HMAC-SHA256-PAYLOAD": true,
        "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER": true,
        "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD": true,
        "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER": true,
    }
)

// IsSpecialPayload checks for streaming/unsigned authorization types
func IsSpecialPayload(str string) bool {
    return specialValues[str]
}

@@ -15,260 +15,85 @@
package utils

import (
    "bytes"
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "errors"
    "fmt"
    "hash"
    "io"
    "math"
    "strconv"
    "time"

    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/s3err"
)

// chunked uploads described in:
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
type payloadType string

const (
    chunkHdrStr = ";chunk-signature="
    chunkHdrDelim = "\r\n"
    zeroLenSig = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
    awsV4 = "AWS4"
    awsS3Service = "s3"
    awsV4Request = "aws4_request"
    streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
    payloadTypeUnsigned payloadType = "UNSIGNED-PAYLOAD"
    payloadTypeStreamingUnsignedTrailer payloadType = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
    payloadTypeStreamingSigned payloadType = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
    payloadTypeStreamingSignedTrailer payloadType = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
    payloadTypeStreamingEcdsa payloadType = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
    payloadTypeStreamingEcdsaTrailer payloadType = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
)
// ChunkReader reads from chunked upload request body, and returns
|
||||
// object data stream
|
||||
type ChunkReader struct {
|
||||
r io.Reader
|
||||
signingKey []byte
|
||||
prevSig string
|
||||
parsedSig string
|
||||
currentChunkSize int64
|
||||
chunkDataLeft int64
|
||||
trailerExpected int
|
||||
stash []byte
|
||||
chunkHash hash.Hash
|
||||
strToSignPrefix string
|
||||
skipcheck bool
|
||||
}
|
||||
|
||||
// NewChunkReader reads from request body io.Reader and parses out the
|
||||
// chunk metadata in stream. The headers are validated for proper signatures.
|
||||
// Reading from the chunk reader will read only the object data stream
|
||||
// without the chunk headers/trailers.
|
||||
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (*ChunkReader, error) {
|
||||
return &ChunkReader{
|
||||
r: r,
|
||||
signingKey: getSigningKey(secret, region, date),
|
||||
// the authdata.Signature is validated in the auth-reader,
|
||||
// so we can use that here without any other checks
|
||||
prevSig: authdata.Signature,
|
||||
chunkHash: sha256.New(),
|
||||
strToSignPrefix: getStringToSignPrefix(date, region),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Read satisfies the io.Reader for this type
|
||||
func (cr *ChunkReader) Read(p []byte) (int, error) {
|
||||
n, err := cr.r.Read(p)
|
||||
if err != nil && err != io.EOF {
|
||||
return n, err
|
||||
}
|
||||
|
||||
if cr.chunkDataLeft < int64(n) {
|
||||
chunkSize := cr.chunkDataLeft
|
||||
if chunkSize > 0 {
|
||||
cr.chunkHash.Write(p[:chunkSize])
|
||||
}
|
||||
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
|
||||
n += int(chunkSize)
|
||||
return n, err
|
||||
}
|
||||
|
||||
cr.chunkDataLeft -= int64(n)
|
||||
cr.chunkHash.Write(p[:n])
|
||||
return n, err
|
||||
}
|
||||
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
|
||||
// This part is the same for all chunks,
|
||||
// only the previous signature and hash of current chunk changes
|
||||
func getStringToSignPrefix(date time.Time, region string) string {
|
||||
credentialScope := fmt.Sprintf("%s/%s/%s/%s",
|
||||
date.Format("20060102"),
|
||||
region,
|
||||
awsS3Service,
|
||||
awsV4Request)
|
||||
|
||||
return fmt.Sprintf("%s\n%s\n%s",
|
||||
streamPayloadAlgo,
|
||||
date.Format("20060102T150405Z"),
|
||||
credentialScope)
|
||||
}
|
||||
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
|
||||
// signature For each chunk, you calculate the signature using the following
|
||||
// string to sign. For the first chunk, you use the seed-signature as the
|
||||
// previous signature.
|
||||
func getChunkStringToSign(prefix, prevSig string, chunkHash []byte) string {
|
||||
return fmt.Sprintf("%s\n%s\n%s\n%s",
|
||||
prefix,
|
||||
prevSig,
|
||||
zeroLenSig,
|
||||
hex.EncodeToString(chunkHash))
|
||||
}
|
||||
|
||||
// The provided p should have all of the previous chunk data and trailer
|
||||
// consumed already. The positioning here is expected that p[0] starts the
|
||||
// new chunk size with the ";chunk-signature=" following. The only exception
|
||||
// is if we started consuming the trailer, but hit the end of the read buffer.
|
||||
// In this case, parseAndRemoveChunkInfo is called with skipcheck=true to
|
||||
// finish consuming the final trailer bytes.
|
||||
// This parses the chunk metadata in situ without allocating an extra buffer.
|
||||
// It will just read and validate the chunk metadata and then move the
|
||||
// following chunk data to overwrite the metadata in the provided buffer.
|
||||
func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
|
||||
n := len(p)
|
||||
|
||||
if !cr.skipcheck && cr.parsedSig != "" {
|
||||
chunkhash := cr.chunkHash.Sum(nil)
|
||||
cr.chunkHash.Reset()
|
||||
|
||||
sigstr := getChunkStringToSign(cr.strToSignPrefix, cr.prevSig, chunkhash)
|
||||
cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))
|
||||
|
||||
if cr.currentChunkSize != 0 && cr.prevSig != cr.parsedSig {
|
||||
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
|
||||
}
|
||||
}
|
||||
|
||||
if cr.trailerExpected != 0 {
|
||||
if len(p) < len(chunkHdrDelim) {
|
||||
// This is the special case where we need to consume the
|
||||
// trailer, but instead hit the end of the buffer. The
|
||||
// subsequent call will finish consuming the trailer.
|
||||
cr.chunkDataLeft = 0
|
||||
cr.trailerExpected -= len(p)
|
||||
cr.skipcheck = true
|
||||
return 0, nil
|
||||
}
|
||||
// move data up to remove trailer
|
||||
copy(p, p[cr.trailerExpected:])
|
||||
n -= cr.trailerExpected
|
||||
}
|
||||
|
||||
cr.skipcheck = false
|
||||
|
||||
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
|
||||
cr.currentChunkSize = chunkSize
|
||||
cr.parsedSig = sig
|
||||
if err == errskipHeader {
|
||||
cr.chunkDataLeft = 0
|
||||
return 0, nil
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if chunkSize == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
cr.trailerExpected = len(chunkHdrDelim)
|
||||
|
||||
// move data up to remove chunk header
|
||||
copy(p, p[bufOffset:n])
|
||||
n -= bufOffset
|
||||
|
||||
// if remaining buffer larger than chunk data,
|
||||
// parse next header in buffer
|
||||
if int64(n) > chunkSize {
|
||||
cr.chunkDataLeft = 0
|
||||
cr.chunkHash.Write(p[:chunkSize])
|
||||
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
|
||||
if (chunkSize + int64(n)) > math.MaxInt {
|
||||
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
|
||||
}
|
||||
return n + int(chunkSize), err
|
||||
}
|
||||
|
||||
cr.chunkDataLeft = chunkSize - int64(n)
|
||||
cr.chunkHash.Write(p[:n])
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
|
||||
// Task 3: Calculate Signature
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html#signing-request-intro
|
||||
func getSigningKey(secret, region string, date time.Time) []byte {
|
||||
dateKey := hmac256([]byte(awsV4+secret), []byte(date.Format(yyyymmdd)))
|
||||
dateRegionKey := hmac256(dateKey, []byte(region))
|
||||
dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
|
||||
signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
|
||||
return signingKey
|
||||
}
|
||||
|
||||
func hmac256(key []byte, data []byte) []byte {
|
||||
hash := hmac.New(sha256.New, key)
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
var (
|
||||
errInvalidChunkFormat = errors.New("invalid chunk header format")
|
||||
errskipHeader = errors.New("skip to next header")
|
||||
specialValues = map[payloadType]bool{
|
||||
payloadTypeUnsigned: true,
|
||||
payloadTypeStreamingUnsignedTrailer: true,
|
||||
payloadTypeStreamingSigned: true,
|
||||
payloadTypeStreamingSignedTrailer: true,
|
||||
payloadTypeStreamingEcdsa: true,
|
||||
payloadTypeStreamingEcdsaTrailer: true,
|
||||
}
|
||||
)
|
||||
|
||||
func (pt payloadType) isValid() bool {
|
||||
return pt == payloadTypeUnsigned ||
|
||||
pt == payloadTypeStreamingUnsignedTrailer ||
|
||||
pt == payloadTypeStreamingSigned ||
|
||||
pt == payloadTypeStreamingSignedTrailer ||
|
||||
pt == payloadTypeStreamingEcdsa ||
|
||||
pt == payloadTypeStreamingEcdsaTrailer
|
||||
}
|
||||
|
||||
type checksumType string
|
||||
|
||||
const (
|
||||
maxHeaderSize = 1024
|
||||
checksumTypeCrc32 checksumType = "x-amz-checksum-crc32"
|
||||
checksumTypeCrc32c checksumType = "x-amz-checksum-crc32c"
|
||||
checksumTypeSha1 checksumType = "x-amz-checksum-sha1"
|
||||
checksumTypeSha256 checksumType = "x-amz-checksum-sha256"
|
||||
checksumTypeCrc64nvme checksumType = "x-amz-checksum-crc64nvme"
|
||||
)
|
||||
|
||||
// Theis returns the chunk payload size, signature, data start offset, and
|
||||
// error if any. See the AWS documentation for the chunk header format. The
|
||||
// header[0] byte is expected to be the first byte of the chunk size here.
|
||||
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
|
||||
stashLen := len(cr.stash)
|
||||
if cr.stash != nil {
|
||||
tmp := make([]byte, maxHeaderSize)
|
||||
copy(tmp, cr.stash)
|
||||
copy(tmp[len(cr.stash):], header)
|
||||
header = tmp
|
||||
cr.stash = nil
|
||||
}
|
||||
|
||||
semicolonIndex := bytes.Index(header, []byte(chunkHdrStr))
|
||||
if semicolonIndex == -1 {
|
||||
cr.stash = make([]byte, len(header))
|
||||
copy(cr.stash, header)
|
||||
cr.trailerExpected = 0
|
||||
return 0, "", 0, errskipHeader
|
||||
}
|
||||
|
||||
sigIndex := semicolonIndex + len(chunkHdrStr)
|
||||
sigEndIndex := bytes.Index(header[sigIndex:], []byte(chunkHdrDelim))
|
||||
if sigEndIndex == -1 {
|
||||
cr.stash = make([]byte, len(header))
|
||||
copy(cr.stash, header)
|
||||
cr.trailerExpected = 0
|
||||
return 0, "", 0, errskipHeader
|
||||
}
|
||||
|
||||
chunkSizeBytes := header[:semicolonIndex]
|
||||
chunkSize, err := strconv.ParseInt(string(chunkSizeBytes), 16, 64)
|
||||
if err != nil {
|
||||
return 0, "", 0, errInvalidChunkFormat
|
||||
}
|
||||
|
||||
signature := string(header[sigIndex:(sigIndex + sigEndIndex)])
|
||||
dataStartOffset := sigIndex + sigEndIndex + len(chunkHdrDelim)
|
||||
|
||||
return chunkSize, signature, dataStartOffset - stashLen, nil
|
||||
func (c checksumType) isValid() bool {
|
||||
return c == checksumTypeCrc32 ||
|
||||
c == checksumTypeCrc32c ||
|
||||
c == checksumTypeSha1 ||
|
||||
c == checksumTypeSha256 ||
|
||||
c == checksumTypeCrc64nvme
|
||||
}
|
||||
|
||||
// IsSpecialPayload checks for streaming/unsigned authorization types
|
||||
func IsSpecialPayload(str string) bool {
|
||||
return specialValues[payloadType(str)]
|
||||
}
|
||||
|
||||
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
|
||||
contentSha256 := payloadType(ctx.Get("X-Amz-Content-Sha256"))
|
||||
if !contentSha256.isValid() {
|
||||
//TODO: Add proper APIError
|
||||
return nil, fmt.Errorf("invalid x-amz-content-sha256: %v", string(contentSha256))
|
||||
}
|
||||
|
||||
checksumType := checksumType(ctx.Get("X-Amz-Trailer"))
|
||||
if checksumType != "" && !checksumType.isValid() {
|
||||
//TODO: Add proper APIError
|
||||
return nil, fmt.Errorf("invalid X-Amz-Trailer: %v", checksumType)
|
||||
}
|
||||
|
||||
switch contentSha256 {
|
||||
case payloadTypeStreamingUnsignedTrailer:
|
||||
return NewUnsignedChunkReader(r, checksumType)
|
||||
//TODO: Add other chunk readers
|
||||
}
|
||||
|
||||
return NewSignedChunkReader(r, authdata, region, secret, date)
|
||||
}
|
||||
|
||||
s3api/utils/signed-chunk-reader.go (new file, 276 lines)
@@ -0,0 +1,276 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"math"
	"strconv"
	"time"

	"github.com/versity/versitygw/s3err"
)

// chunked uploads described in:
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html

const (
	chunkHdrStr       = ";chunk-signature="
	chunkHdrDelim     = "\r\n"
	zeroLenSig        = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	awsV4             = "AWS4"
	awsS3Service      = "s3"
	awsV4Request      = "aws4_request"
	streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
)

// ChunkReader reads from chunked upload request body, and returns
// object data stream
type ChunkReader struct {
	r                io.Reader
	signingKey       []byte
	prevSig          string
	parsedSig        string
	currentChunkSize int64
	chunkDataLeft    int64
	trailerExpected  int
	stash            []byte
	chunkHash        hash.Hash
	strToSignPrefix  string
	skipcheck        bool
}

// NewSignedChunkReader reads from request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
	return &ChunkReader{
		r:          r,
		signingKey: getSigningKey(secret, region, date),
		// the authdata.Signature is validated in the auth-reader,
		// so we can use that here without any other checks
		prevSig:         authdata.Signature,
		chunkHash:       sha256.New(),
		strToSignPrefix: getStringToSignPrefix(date, region),
	}, nil
}

// Read satisfies the io.Reader for this type
func (cr *ChunkReader) Read(p []byte) (int, error) {
	n, err := cr.r.Read(p)
	if err != nil && err != io.EOF {
		return n, err
	}

	if cr.chunkDataLeft < int64(n) {
		chunkSize := cr.chunkDataLeft
		if chunkSize > 0 {
			cr.chunkHash.Write(p[:chunkSize])
		}
		n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
		n += int(chunkSize)
		return n, err
	}

	cr.chunkDataLeft -= int64(n)
	cr.chunkHash.Write(p[:n])
	return n, err
}

// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// This part is the same for all chunks,
// only the previous signature and hash of current chunk changes
func getStringToSignPrefix(date time.Time, region string) string {
	credentialScope := fmt.Sprintf("%s/%s/%s/%s",
		date.Format("20060102"),
		region,
		awsS3Service,
		awsV4Request)

	return fmt.Sprintf("%s\n%s\n%s",
		streamPayloadAlgo,
		date.Format("20060102T150405Z"),
		credentialScope)
}

// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// signature For each chunk, you calculate the signature using the following
// string to sign. For the first chunk, you use the seed-signature as the
// previous signature.
func getChunkStringToSign(prefix, prevSig string, chunkHash []byte) string {
	return fmt.Sprintf("%s\n%s\n%s\n%s",
		prefix,
		prevSig,
		zeroLenSig,
		hex.EncodeToString(chunkHash))
}

// The provided p should have all of the previous chunk data and trailer
// consumed already. The positioning here is expected that p[0] starts the
// new chunk size with the ";chunk-signature=" following. The only exception
// is if we started consuming the trailer, but hit the end of the read buffer.
// In this case, parseAndRemoveChunkInfo is called with skipcheck=true to
// finish consuming the final trailer bytes.
// This parses the chunk metadata in situ without allocating an extra buffer.
// It will just read and validate the chunk metadata and then move the
// following chunk data to overwrite the metadata in the provided buffer.
func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
	n := len(p)

	if !cr.skipcheck && cr.parsedSig != "" {
		chunkhash := cr.chunkHash.Sum(nil)
		cr.chunkHash.Reset()

		sigstr := getChunkStringToSign(cr.strToSignPrefix, cr.prevSig, chunkhash)
		cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))

		if cr.currentChunkSize != 0 && cr.prevSig != cr.parsedSig {
			return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
		}
	}

	if cr.trailerExpected != 0 {
		if len(p) < len(chunkHdrDelim) {
			// This is the special case where we need to consume the
			// trailer, but instead hit the end of the buffer. The
			// subsequent call will finish consuming the trailer.
			cr.chunkDataLeft = 0
			cr.trailerExpected -= len(p)
			cr.skipcheck = true
			return 0, nil
		}
		// move data up to remove trailer
		copy(p, p[cr.trailerExpected:])
		n -= cr.trailerExpected
	}

	cr.skipcheck = false

	chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
	cr.currentChunkSize = chunkSize
	cr.parsedSig = sig
	if err == errskipHeader {
		cr.chunkDataLeft = 0
		return 0, nil
	}
	if err != nil {
		return 0, err
	}
	if chunkSize == 0 {
		return 0, io.EOF
	}

	cr.trailerExpected = len(chunkHdrDelim)

	// move data up to remove chunk header
	copy(p, p[bufOffset:n])
	n -= bufOffset

	// if remaining buffer larger than chunk data,
	// parse next header in buffer
	if int64(n) > chunkSize {
		cr.chunkDataLeft = 0
		cr.chunkHash.Write(p[:chunkSize])
		n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
		if (chunkSize + int64(n)) > math.MaxInt {
			return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
		}
		return n + int(chunkSize), err
	}

	cr.chunkDataLeft = chunkSize - int64(n)
	cr.chunkHash.Write(p[:n])

	return n, nil
}

// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
// Task 3: Calculate Signature
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html#signing-request-intro
func getSigningKey(secret, region string, date time.Time) []byte {
	dateKey := hmac256([]byte(awsV4+secret), []byte(date.Format(yyyymmdd)))
	dateRegionKey := hmac256(dateKey, []byte(region))
	dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
	signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
	return signingKey
}

func hmac256(key []byte, data []byte) []byte {
	hash := hmac.New(sha256.New, key)
	hash.Write(data)
	return hash.Sum(nil)
}

var (
	errInvalidChunkFormat = errors.New("invalid chunk header format")
	errskipHeader         = errors.New("skip to next header")
)

const (
	maxHeaderSize = 1024
)

// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
	stashLen := len(cr.stash)
	if stashLen > maxHeaderSize {
		return 0, "", 0, errInvalidChunkFormat
	}
	if cr.stash != nil {
		tmp := make([]byte, maxHeaderSize)
		copy(tmp, cr.stash)
		copy(tmp[len(cr.stash):], header)
		header = tmp
		cr.stash = nil
	}

	semicolonIndex := bytes.Index(header, []byte(chunkHdrStr))
	if semicolonIndex == -1 {
		cr.stash = make([]byte, len(header))
		copy(cr.stash, header)
		cr.trailerExpected = 0
		return 0, "", 0, errskipHeader
	}

	sigIndex := semicolonIndex + len(chunkHdrStr)
	sigEndIndex := bytes.Index(header[sigIndex:], []byte(chunkHdrDelim))
	if sigEndIndex == -1 {
		cr.stash = make([]byte, len(header))
		copy(cr.stash, header)
		cr.trailerExpected = 0
		return 0, "", 0, errskipHeader
	}

	chunkSizeBytes := header[:semicolonIndex]
	chunkSize, err := strconv.ParseInt(string(chunkSizeBytes), 16, 64)
	if err != nil {
		return 0, "", 0, errInvalidChunkFormat
	}

	signature := string(header[sigIndex:(sigIndex + sigEndIndex)])
	dataStartOffset := sigIndex + sigEndIndex + len(chunkHdrDelim)

	return chunkSize, signature, dataStartOffset - stashLen, nil
}
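To make the framing this reader parses concrete, here is a small sketch (not repo code) of how a client lays out a sigv4 streaming body. The signature values are placeholders; a real client computes each one from the chunk string-to-sign and signing key shown above.

package main

import (
	"fmt"
	"strings"
)

// buildSignedChunk frames data the way parseChunkHeaderBytes expects:
// hex chunk size, ";chunk-signature=", 64-hex-char signature, CRLF,
// the chunk data, then a trailing CRLF.
func buildSignedChunk(data, sig string) string {
	return fmt.Sprintf("%x;chunk-signature=%s\r\n%s\r\n", len(data), sig, data)
}

func main() {
	var body strings.Builder
	// placeholder signatures; real ones chain from the seed-signature
	body.WriteString(buildSignedChunk("hello world", strings.Repeat("ab", 32)))
	// a zero-length final chunk terminates the stream (Read returns io.EOF)
	body.WriteString(fmt.Sprintf("0;chunk-signature=%s\r\n\r\n", strings.Repeat("cd", 32)))
	fmt.Printf("%q\n", body.String())
}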
s3api/utils/unsigned-chunk-reader.go (new file, 235 lines)
@@ -0,0 +1,235 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package utils

import (
	"bufio"
	"bytes"
	"crypto/sha1"
	"crypto/sha256"
	"encoding/base64"
	"errors"
	"fmt"
	"hash"
	"hash/crc32"
	"hash/crc64"
	"io"
	"math/bits"
	"strconv"
	"strings"
)

var (
	trailerDelim         = []byte{'\n', '\r', '\n'}
	errMalformedEncoding = errors.New("malformed chunk encoding")
)

type UnsignedChunkReader struct {
	reader           *bufio.Reader
	checksumType     checksumType
	expectedChecksum string
	hasher           hash.Hash
	stash            []byte
	chunkCounter     int
	offset           int
}

func NewUnsignedChunkReader(r io.Reader, ct checksumType) (*UnsignedChunkReader, error) {
	hasher, err := getHasher(ct)
	if err != nil {
		return nil, err
	}
	return &UnsignedChunkReader{
		reader:       bufio.NewReader(r),
		checksumType: ct,
		stash:        make([]byte, 0),
		hasher:       hasher,
		chunkCounter: 1,
	}, nil
}

func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
	// First read any stashed data
	if len(ucr.stash) != 0 {
		n := copy(p, ucr.stash)
		ucr.offset += n

		if n < len(ucr.stash) {
			ucr.stash = ucr.stash[n:]
			ucr.offset = 0
			return n, nil
		}
	}

	for {
		// Read the chunk size
		chunkSize, err := ucr.extractChunkSize()
		if err != nil {
			return 0, err
		}

		if chunkSize == 0 {
			// Stop parsing payloads once the 0-sized chunk is reached
			break
		}
		rdr := io.TeeReader(ucr.reader, ucr.hasher)
		payload := make([]byte, chunkSize)
		// Read and cache the payload
		_, err = io.ReadFull(rdr, payload)
		if err != nil {
			return 0, err
		}

		// Skip the trailing "\r\n"
		if err := ucr.readAndSkip('\r', '\n'); err != nil {
			return 0, err
		}

		// Copy the payload into the io.Reader buffer
		n := copy(p[ucr.offset:], payload)
		ucr.offset += n
		ucr.chunkCounter++

		if int64(n) < chunkSize {
			// stash the remaining data
			ucr.stash = payload[n:]
			dataRead := ucr.offset
			ucr.offset = 0
			return dataRead, nil
		}
	}

	// Read and validate trailers
	if err := ucr.readTrailer(); err != nil {
		return 0, err
	}

	return ucr.offset, io.EOF
}

// Reads and validates the bytes provided from the underlying io.Reader
func (ucr *UnsignedChunkReader) readAndSkip(data ...byte) error {
	for _, d := range data {
		b, err := ucr.reader.ReadByte()
		if err != nil {
			if err == io.EOF {
				return io.ErrUnexpectedEOF
			}
			return err
		}

		if b != d {
			return errMalformedEncoding
		}
	}

	return nil
}

// Extracts the chunk size from the payload
func (ucr *UnsignedChunkReader) extractChunkSize() (int64, error) {
	line, err := ucr.reader.ReadString('\n')
	if err != nil {
		return 0, errMalformedEncoding
	}
	line = strings.TrimSpace(line)

	chunkSize, err := strconv.ParseInt(line, 16, 64)
	if err != nil {
		return 0, errMalformedEncoding
	}

	return chunkSize, nil
}

// Reads and validates the trailer at the end
func (ucr *UnsignedChunkReader) readTrailer() error {
	var trailerBuffer bytes.Buffer

	for {
		v, err := ucr.reader.ReadByte()
		if err != nil {
			if err == io.EOF {
				return io.ErrUnexpectedEOF
			}
			return err
		}
		if v != '\r' {
			trailerBuffer.WriteByte(v)
			continue
		}
		var tmp [3]byte
		_, err = io.ReadFull(ucr.reader, tmp[:])
		if err != nil {
			if err == io.EOF {
				return io.ErrUnexpectedEOF
			}
			return err
		}
		if !bytes.Equal(tmp[:], trailerDelim) {
			return errMalformedEncoding
		}
		break
	}

	// Parse the trailer
	trailerHeader := trailerBuffer.String()
	trailerHeader = strings.TrimSpace(trailerHeader)
	trailerHeaderParts := strings.Split(trailerHeader, ":")
	if len(trailerHeaderParts) != 2 {
		return errMalformedEncoding
	}

	if trailerHeaderParts[0] != string(ucr.checksumType) {
		//TODO: handle the error
		return errMalformedEncoding
	}

	ucr.expectedChecksum = trailerHeaderParts[1]

	// Validate checksum
	return ucr.validateChecksum()
}

// Validates the trailing checksum sent at the end
func (ucr *UnsignedChunkReader) validateChecksum() error {
	csum := ucr.hasher.Sum(nil)
	checksum := base64.StdEncoding.EncodeToString(csum)

	if checksum != ucr.expectedChecksum {
		return fmt.Errorf("actual checksum: %v, expected checksum: %v", checksum, ucr.expectedChecksum)
	}

	return nil
}

// Returns the hash calculator based on the hash type provided
func getHasher(ct checksumType) (hash.Hash, error) {
	switch ct {
	case checksumTypeCrc32:
		return crc32.NewIEEE(), nil
	case checksumTypeCrc32c:
		return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil
	case checksumTypeCrc64nvme:
		table := crc64.MakeTable(bits.Reverse64(0xad93d23594c93659))
		return crc64.New(table), nil
	case checksumTypeSha1:
		return sha1.New(), nil
	case checksumTypeSha256:
		return sha256.New(), nil
	default:
		return nil, errors.New("unsupported checksum type")
	}
}
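A minimal in-package usage sketch (assuming the unexported names above; not part of the commit) showing the wire format the unsigned reader consumes: hex size line, payload, CRLF, a zero-size chunk, then the checksum trailer terminated by "\r\n\r\n".

func ExampleUnsignedChunkReader() {
	payload := "hello"
	// crc32 trailer value: base64 of the 4 big-endian digest bytes,
	// matching what validateChecksum computes via hasher.Sum(nil)
	sum := crc32.ChecksumIEEE([]byte(payload))
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, sum)
	checksum := base64.StdEncoding.EncodeToString(b)

	body := fmt.Sprintf("%x\r\n%s\r\n0\r\nx-amz-checksum-crc32:%s\r\n\r\n",
		len(payload), payload, checksum)

	r, err := NewUnsignedChunkReader(strings.NewReader(body), checksumTypeCrc32)
	if err != nil {
		panic(err)
	}
	data, err := io.ReadAll(r) // ReadAll treats the final (n, io.EOF) as success
	fmt.Println(string(data), err)
	// Output: hello <nil>
}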
@@ -196,6 +196,30 @@ func SetResponseHeaders(ctx *fiber.Ctx, headers []CustomHeader) {
	}
}

// Streams the response body by chunks
func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser) error {
	buf := make([]byte, 4096) // 4KB chunks
	defer rdr.Close()
	for {
		n, err := rdr.Read(buf)
		if n > 0 {
			_, writeErr := ctx.Write(buf[:n])
			if writeErr != nil {
				return fmt.Errorf("write chunk: %w", writeErr)
			}
		}
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}

			return fmt.Errorf("read chunk: %w", err)
		}
	}

	return nil
}

func IsValidBucketName(bucket string) bool {
	if len(bucket) < 3 || len(bucket) > 63 {
		return false
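A hedged sketch of how a handler might call StreamResponseBody; openObject is a hypothetical helper standing in for wherever the object data actually comes from:

app.Get("/:bucket/:key", func(ctx *fiber.Ctx) error {
	// openObject is assumed, returning an io.ReadCloser for the object
	rdr, err := openObject(ctx.Params("bucket"), ctx.Params("key"))
	if err != nil {
		return err
	}
	// StreamResponseBody closes rdr and writes the body in 4KB chunks
	return StreamResponseBody(ctx, rdr)
})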
@@ -72,6 +72,7 @@ const (
	ErrInvalidPartNumberMarker
	ErrInvalidObjectAttributes
	ErrInvalidPart
	ErrEmptyParts
	ErrInvalidPartNumber
	ErrInternalError
	ErrInvalidCopyDest
@@ -154,6 +155,7 @@ const (
	ErrAdminUserExists
	ErrAdminInvalidUserRole
	ErrAdminMissingUserAcess
	ErrAdminMethodNotSupported
)

var errorCodeResponse = map[ErrorCode]APIError{
@@ -252,6 +254,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEmptyParts: {
		Code:           "InvalidRequest",
		Description:    "You must specify at least one part",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidPartNumber: {
		Code:           "InvalidArgument",
		Description:    "Part number must be an integer between 1 and 10000, inclusive.",
@@ -636,6 +643,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "User access key ID is missing.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminMethodNotSupported: {
		Code:           "XAdminMethodNotSupported",
		Description:    "The method is not supported in single root user mode.",
		HTTPStatusCode: http.StatusNotImplemented,
	},
}

// GetAPIError provides API Error for input API error code.
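For reference, an S3-style XML serialization of the new ErrEmptyParts entry might look roughly like the following; the Resource and RequestId fields are illustrative, and the exact shape depends on the gateway's error writer:

<?xml version="1.0" encoding="UTF-8"?>
<Error>
  <Code>InvalidRequest</Code>
  <Message>You must specify at least one part</Message>
  <Resource>/my-bucket/my-key</Resource>
  <RequestId>example-request-id</RequestId>
</Error>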
@@ -25,4 +25,5 @@ USERNAME_TWO=HIJKLMN
PASSWORD_TWO=OPQRSTU
TEST_FILE_FOLDER=$PWD/versity-gwtest-files
RECREATE_BUCKETS=true
REMOVE_TEST_FILE_FOLDER=true
VERSIONING_DIR=/tmp/versioning
@@ -3,8 +3,9 @@ FROM ubuntu:latest
ARG DEBIAN_FRONTEND=noninteractive
ARG SECRETS_FILE=tests/.secrets
ARG CONFIG_FILE=tests/.env.docker
-ARG GO_LIBRARY=go1.23.1.linux-arm64.tar.gz
-ARG AWS_CLI=awscli-exe-linux-aarch64.zip
+ARG GO_LIBRARY=go1.21.13.linux-arm64.tar.gz
+# see https://github.com/versity/versitygw/issues/1034
+ARG AWS_CLI=awscli-exe-linux-aarch64-2.22.35.zip
ARG MC_FOLDER=linux-arm64

ENV TZ=Etc/UTC
@@ -85,6 +86,7 @@ RUN openssl genpkey -algorithm RSA -out versitygw-docker.pem -pkeyopt rsa_keygen

ENV WORKSPACE=.
ENV VERSITYGW_TEST_ENV=$CONFIG_FILE
+#ENV AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED

ENTRYPOINT ["tests/run.sh"]
CMD ["s3api,s3,s3cmd,mc,rest"]
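A hedged build example overriding the arm defaults above for amd64; the x86_64 artifact names are assumptions patterned on the pinned arm values:

docker build -t versitygw-bats -f Dockerfile_test_bats \
  --build-arg="GO_LIBRARY=go1.21.13.linux-amd64.tar.gz" \
  --build-arg="AWS_CLI=awscli-exe-linux-x86_64-2.22.35.zip" \
  --build-arg="MC_FOLDER=linux-amd64" .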
@@ -1,5 +1,19 @@
# Command-Line Tests

## Table of Contents

[Instructions - Running Locally](#instructions---running-locally)<br>
[* Posix Backend](#posix-backend)<br>
[* Static Bucket Mode](#static-bucket-mode)<br>
[* S3 Backend](#s3-backend)<br>
[* Direct Mode](#direct-mode)<br>
[Instructions - Running With Docker](#instructions---running-with-docker)<br>
[Instructions - Running With Docker-Compose](#instructions---running-with-docker-compose)<br>
[Environment Parameters](#environment-parameters)<br>
[* Secret](#secret)<br>
[* Non-Secret](#non-secret)<br>
[REST Scripts](#rest-scripts)<br>

## Instructions - Running Locally

### Posix Backend
@@ -61,10 +75,11 @@ To communicate directly with s3, in order to compare the gateway results to dire
1. Copy `.secrets.default` to `.secrets` in the `tests` folder and change the parameters, adding the additional s3 fields explained in the **S3 Backend** section above if running with the s3 backend.
2. By default, the dockerfile uses the **arm** architecture (usually modern Mac). If using **amd** (usually earlier Mac or Linux), you can either replace the corresponding `ARG` values directly or override them with `--build-arg="<param>=<amd library or folder>"`. You can determine which architecture your OS uses with `uname -a`.
3. Build and run the `Dockerfile_test_bats` file. Change the `SECRETS_FILE` and `CONFIG_FILE` parameters to point to your secrets and config file, respectively, if not using the defaults. Example: `docker build -t <tag> -f Dockerfile_test_bats --build-arg="SECRETS_FILE=<file>" --build-arg="CONFIG_FILE=<file>" .`.
4. To run the entire suite, run `docker run -it <image name>`. To run an individual suite, pass in the name of the suite as defined in `tests/run.sh` (e.g. REST tests -> `docker run -it <image name> rest`). Multiple suites can also be run by separating them with commas.

## Instructions - Running with docker-compose

-A file named `docker-compose-bats.yml` is provided in the root folder. Four configurations are provided:
+A file named `docker-compose-bats.yml` is provided in the root folder. A few configurations are provided, and you can also create your own, provided you have a secrets and config file:
* insecure (without certificates), with creation/removal of buckets
* secure, posix backend, with static buckets
* secure, posix backend, with creation/removal of buckets
@@ -85,8 +100,18 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up

## Environment Parameters

### Secret

**AWS_PROFILE**, **AWS_ENDPOINT_URL**, **AWS_REGION**, **AWS_ACCESS_KEY_ID**, **AWS_SECRET_ACCESS_KEY**: identical to the same parameters in **s3**.

**AWS_CANONICAL_ID**: for direct mode, the canonical ID for the main user (owner).

**ACL_AWS_CANONICAL_ID**: for direct mode, the canonical ID for the user used to test ACL changes and access by non-owners.

**ACL_AWS_ACCESS_KEY_ID**, **ACL_AWS_ACCESS_SECRET_KEY**: for direct mode, the ID and key for the S3 user in the **ACL_AWS_CANONICAL_ID** account.

### Non-Secret

**VERSITY_EXE**: location of the versity executable relative to the test folder.

**RUN_VERSITYGW**: whether to run the versitygw executable; should be set to **false** when running tests directly against **s3**.
@@ -134,3 +159,21 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up
**VERSIONING_DIR**: where to put gateway file versioning info.

**COMMAND_LOG**: where to store the list of client commands, which, if set, will be reported during test failures.

**TIME_LOG**: optional log showing the duration of individual tests.

**DIRECT_S3_ROOT_ACCOUNT_NAME**: for direct mode, the S3 username.

**DELETE_BUCKETS_AFTER_TEST**: whether or not to delete buckets after individual tests; useful for debugging if the post-test bucket state needs to be checked.

## REST Scripts

REST scripts are included for calls to S3's REST API in the `./tests/rest_scripts/` folder. To call a script, the following parameters are needed:
* **AWS_ACCESS_KEY_ID**, **AWS_SECRET_ACCESS_KEY**, etc.
* **AWS_ENDPOINT_URL** (default: `https://localhost:7070`)
* **OUTPUT_FILE**: the file where the command's response data is written
* Any other parameters specified at the top of the script file, such as payloads and variables. Defaults are sometimes included.

Upon success, the script returns a response code and writes the data to the **OUTPUT_FILE** location.

Example: `AWS_ACCESS_KEY_ID={id} AWS_SECRET_ACCESS_KEY={key} AWS_ENDPOINT_URL=https://s3.amazonaws.com OUTPUT_FILE=./output_file.xml ./tests/rest_scripts/list_buckets.sh`
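Another example, using the delete-object-tagging script that the test helpers in this change call (parameter names taken from that helper):

`AWS_ACCESS_KEY_ID={id} AWS_SECRET_ACCESS_KEY={key} BUCKET_NAME=my-bucket OBJECT_KEY=my-key OUTPUT_FILE=./response.txt ./tests/rest_scripts/delete_object_tagging.sh`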
@@ -43,38 +43,12 @@ delete_object_tagging_rest() {
    log 2 "'delete_object_tagging' requires bucket, key"
    return 1
  fi

-  generate_hash_for_payload ""
-
-  current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
-  aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
-  header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
-  # shellcheck disable=SC2154
-  canonical_request="DELETE
-/$1/$2
-tagging=
-host:$aws_endpoint_url_address
-x-amz-content-sha256:$payload_hash
-x-amz-date:$current_date_time
-
-host;x-amz-content-sha256;x-amz-date
-$payload_hash"
-
-  if ! generate_sts_string "$current_date_time" "$canonical_request"; then
-    log 2 "error generating sts string"
+  if ! result=$(BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/response.txt" ./tests/rest_scripts/delete_object_tagging.sh); then
+    log 2 "error sending delete object tagging REST command: $result"
    return 1
  fi
-  get_signature
-  # shellcheck disable=SC2154
-  reply=$(send_command curl -ks -w "%{http_code}" -X DELETE "$header://$aws_endpoint_url_address/$1/$2?tagging" \
-    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-    -H "x-amz-content-sha256: $payload_hash" \
-    -H "x-amz-date: $current_date_time" \
-    -d "$tagging" -o "$TEST_FILE_FOLDER"/delete_tagging_error.txt 2>&1)
-  log 5 "reply status code: $reply"
-  if [[ "$reply" != "204" ]]; then
-    log 2 "reply error: $reply"
-    log 2 "put object tagging command returned error: $(cat "$TEST_FILE_FOLDER"/delete_tagging_error.txt)"
+  if [ "$result" != "204" ]; then
+    log 2 "delete-object-tagging returned code $result (response: $(cat "$TEST_FILE_FOLDER/response.txt"))"
    return 1
  fi
  return 0
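A hedged usage sketch inside a bats test (run/assert_success come from the bats and bats-assert tooling the suites use; the key variable is illustrative):

run delete_object_tagging_rest "$BUCKET_ONE_NAME" "$test_key"
assert_success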
@@ -34,30 +34,10 @@ list_object_versions_rest() {
    log 2 "'list_object_versions_rest' requires bucket name"
    return 1
  fi
-  generate_hash_for_payload ""
-
-  current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
-  # shellcheck disable=SC2154
-  canonical_request="GET
-/$1
-versions=
-host:${AWS_ENDPOINT_URL#*//}
-x-amz-content-sha256:$payload_hash
-x-amz-date:$current_date_time
-
-host;x-amz-content-sha256;x-amz-date
-$payload_hash"
-
-  if ! generate_sts_string "$current_date_time" "$canonical_request"; then
-    log 2 "error generating sts string"
+  log 5 "list object versions REST"
+  if ! result=$(BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/object_versions.txt" ./tests/rest_scripts/list_object_versions.sh); then
+    log 2 "error listing object versions: $result"
    return 1
  fi
-
-  get_signature
-  # shellcheck disable=SC2034,SC2154
-  reply=$(send_command curl -ks "$AWS_ENDPOINT_URL/$1?versions" \
-    -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-    -H "x-amz-content-sha256: $payload_hash" \
-    -H "x-amz-date: $current_date_time" \
-    -o "$TEST_FILE_FOLDER/object_versions.txt" 2>&1)
  return 0
}
@@ -58,22 +58,15 @@ reset_bucket_acl() {
    return 1
  fi
-  # shellcheck disable=SC2154
-  cat <<EOF > "$TEST_FILE_FOLDER/$acl_file"
-{
-  "Grants": [
-    {
-      "Grantee": {
-        "ID": "$AWS_ACCESS_KEY_ID",
-        "Type": "CanonicalUser"
-      },
-      "Permission": "FULL_CONTROL"
-    }
-  ],
-  "Owner": {
-    "ID": "$AWS_ACCESS_KEY_ID"
-  }
-}
-EOF
+  if [ "$DIRECT" != "true" ]; then
+    if ! setup_acl_json "$TEST_FILE_FOLDER/$acl_file" "CanonicalUser" "$AWS_ACCESS_KEY_ID" "FULL_CONTROL" "$AWS_ACCESS_KEY_ID"; then
+      log 2 "error resetting versitygw ACL"
+      return 1
+    fi
+  elif ! setup_acl_json "$TEST_FILE_FOLDER/$acl_file" "CanonicalUser" "$AWS_CANONICAL_ID" "FULL_CONTROL" "$AWS_CANONICAL_ID"; then
+    log 2 "error resetting direct ACL"
+    return 1
+  fi
  if ! put_bucket_acl_s3api "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$acl_file"; then
    log 2 "error putting bucket acl (s3api)"
    return 1
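For reference, the removed heredoc above suggests the JSON that setup_acl_json presumably generates for these arguments (output file, grantee type, grantee ID, permission, owner ID):

{
  "Grants": [
    {
      "Grantee": {
        "ID": "$AWS_ACCESS_KEY_ID",
        "Type": "CanonicalUser"
      },
      "Permission": "FULL_CONTROL"
    }
  ],
  "Owner": {
    "ID": "$AWS_ACCESS_KEY_ID"
  }
}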
tests/env.sh (14 changes)
@@ -123,18 +123,30 @@ check_universal_vars() {
  if [ "$RECREATE_BUCKETS" != "true" ] && [ "$RECREATE_BUCKETS" != "false" ]; then
    log 1 "RECREATE_BUCKETS must be 'true' or 'false'"
    exit 1
  fi
+  if [ "$RECREATE_BUCKETS" == "false" ] && [ "$DELETE_BUCKETS_AFTER_TEST" == "true" ]; then
+    log 1 "cannot set DELETE_BUCKETS_AFTER_TEST to 'true' if RECREATE_BUCKETS is 'false'"
+    return 1
+  fi
  if [ -z "$TEST_FILE_FOLDER" ]; then
    log 1 "TEST_FILE_FOLDER missing"
    exit 1
  fi
  if [ ! -d "$TEST_FILE_FOLDER" ]; then
-    if ! error=$(mkdir -p "$TEST_FILE_FOLDER"); then
+    if ! error=$(mkdir -p "$TEST_FILE_FOLDER" 2>&1); then
      log 2 "error creating test folder: $error"
      exit 1
    fi
  fi
+  # exporting these since they're needed for subshells
+  export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_REGION AWS_PROFILE AWS_ENDPOINT_URL
+  if [ -n "$AWS_CANONICAL_ID" ]; then
+    log 5 "canonical ID: $AWS_CANONICAL_ID"
+    export AWS_CANONICAL_ID
+  fi
}

delete_command_log() {
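A hedged example of an env combination that passes these checks (variable names from this file; values illustrative). Note that DELETE_BUCKETS_AFTER_TEST=true is only valid alongside RECREATE_BUCKETS=true:

RECREATE_BUCKETS=true
DELETE_BUCKETS_AFTER_TEST=true
TEST_FILE_FOLDER=$PWD/versity-gwtest-files
AWS_ACCESS_KEY_ID=ABCDEFG
AWS_SECRET_ACCESS_KEY=123456
AWS_REGION=us-east-1
AWS_ENDPOINT_URL=https://localhost:7070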
@@ -172,6 +172,7 @@ func TestGetObject(s *S3Conf) {
	GetObject_invalid_ranges(s)
	GetObject_invalid_parent(s)
	GetObject_with_meta(s)
	GetObject_large_object(s)
	GetObject_success(s)
	GetObject_directory_success(s)
	GetObject_by_range_success(s)
@@ -320,6 +321,7 @@ func TestCompleteMultipartUpload(s *S3Conf) {
	CompletedMultipartUpload_non_existing_bucket(s)
	CompleteMultipartUpload_invalid_part_number(s)
	CompleteMultipartUpload_invalid_ETag(s)
	CompleteMultipartUpload_empty_parts(s)
	CompleteMultipartUpload_success(s)
	if !s.azureTests {
		CompleteMultipartUpload_racey_success(s)
@@ -335,6 +337,10 @@ func TestPutBucketAcl(s *S3Conf) {
	PutBucketAcl_invalid_acl_acp_and_grants(s)
	PutBucketAcl_invalid_owner(s)
	PutBucketAcl_invalid_owner_not_in_body(s)
	PutBucketAcl_invalid_empty_owner_id_in_body(s)
	PutBucketAcl_invalid_permission_in_body(s)
	PutBucketAcl_invalid_grantee_type_in_body(s)
	PutBucketAcl_empty_grantee_ID_in_body(s)
	PutBucketAcl_success_access_denied(s)
	PutBucketAcl_success_grants(s)
	PutBucketAcl_success_canned_acl(s)
@@ -447,8 +453,8 @@ func TestGetObjectLegalHold(s *S3Conf) {
func TestWORMProtection(s *S3Conf) {
	WORMProtection_bucket_object_lock_configuration_compliance_mode(s)
	WORMProtection_bucket_object_lock_configuration_governance_mode(s)
-	// WORMProtection_bucket_object_lock_governance_bypass_delete(s)
-	// WORMProtection_bucket_object_lock_governance_bypass_delete_multiple
+	WORMProtection_bucket_object_lock_governance_bypass_delete(s)
+	WORMProtection_bucket_object_lock_governance_bypass_delete_multiple(s)
	WORMProtection_object_lock_retention_compliance_locked(s)
	WORMProtection_object_lock_retention_governance_locked(s)
	WORMProtection_object_lock_retention_governance_bypass_overwrite(s)
@@ -748,6 +754,7 @@ func GetIntTests() IntTests {
	"GetObject_invalid_ranges":    GetObject_invalid_ranges,
	"GetObject_invalid_parent":    GetObject_invalid_parent,
	"GetObject_with_meta":         GetObject_with_meta,
	"GetObject_large_object":      GetObject_large_object,
	"GetObject_success":           GetObject_success,
	"GetObject_directory_success": GetObject_directory_success,
	"GetObject_by_range_success":  GetObject_by_range_success,
@@ -849,6 +856,7 @@ func GetIntTests() IntTests {
	"CompletedMultipartUpload_non_existing_bucket": CompletedMultipartUpload_non_existing_bucket,
	"CompleteMultipartUpload_invalid_part_number":  CompleteMultipartUpload_invalid_part_number,
	"CompleteMultipartUpload_invalid_ETag":         CompleteMultipartUpload_invalid_ETag,
	"CompleteMultipartUpload_empty_parts":          CompleteMultipartUpload_empty_parts,
	"CompleteMultipartUpload_success":              CompleteMultipartUpload_success,
	"CompleteMultipartUpload_racey_success":        CompleteMultipartUpload_racey_success,
	"PutBucketAcl_non_existing_bucket":             PutBucketAcl_non_existing_bucket,
@@ -859,6 +867,10 @@ func GetIntTests() IntTests {
	"PutBucketAcl_invalid_acl_acp_and_grants":     PutBucketAcl_invalid_acl_acp_and_grants,
	"PutBucketAcl_invalid_owner":                  PutBucketAcl_invalid_owner,
	"PutBucketAcl_invalid_owner_not_in_body":      PutBucketAcl_invalid_owner_not_in_body,
	"PutBucketAcl_invalid_empty_owner_id_in_body": PutBucketAcl_invalid_empty_owner_id_in_body,
	"PutBucketAcl_invalid_permission_in_body":     PutBucketAcl_invalid_permission_in_body,
	"PutBucketAcl_invalid_grantee_type_in_body":   PutBucketAcl_invalid_grantee_type_in_body,
	"PutBucketAcl_empty_grantee_ID_in_body":       PutBucketAcl_empty_grantee_ID_in_body,
	"PutBucketAcl_success_access_denied":          PutBucketAcl_success_access_denied,
	"PutBucketAcl_success_grants":                 PutBucketAcl_success_grants,
	"PutBucketAcl_success_canned_acl":             PutBucketAcl_success_canned_acl,
@@ -16,6 +16,7 @@ package integration

import (
	"context"
	"crypto/tls"
	"io"
	"log"
	"net/http"
@@ -42,6 +43,8 @@ type S3Conf struct {
	debug             bool
	versioningEnabled bool
	azureTests        bool
	tlsStatus         bool
	httpClient        *http.Client
}

func NewS3Conf(opts ...Option) *S3Conf {
@@ -50,6 +53,20 @@ func NewS3Conf(opts ...Option) *S3Conf {
	for _, opt := range opts {
		opt(s)
	}

	customTransport := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: s.tlsStatus,
		},
	}

	customHTTPClient := &http.Client{
		Transport: customTransport,
		Timeout:   shortTimeout,
	}

	s.httpClient = customHTTPClient

	return s
}

@@ -88,6 +105,9 @@ func WithVersioningEnabled() Option {
func WithAzureMode() Option {
	return func(s *S3Conf) { s.azureTests = true }
}
func WithTLSStatus(ts bool) Option {
	return func(s *S3Conf) { s.tlsStatus = ts }
}

func (c *S3Conf) getCreds() credentials.StaticCredentialsProvider {
	// TODO support token/IAM
@@ -118,6 +138,8 @@ func (c *S3Conf) Config() aws.Config {
		config.WithRetryMaxAttempts(1),
	}

	opts = append(opts, config.WithHTTPClient(c.httpClient))

	if c.checksumDisable {
		opts = append(opts,
			config.WithAPIOptions([]func(*middleware.Stack) error{v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware}))
@@ -168,3 +190,11 @@ func (c *S3Conf) DownloadData(w io.WriterAt, bucket, object string) (int64, erro

	return downloader.Download(context.Background(), w, downinfo)
}

func (c *S3Conf) getAdminCommand(args ...string) []string {
	if c.tlsStatus {
		return append([]string{"admin", "--allow-insecure"}, args...)
	}

	return append([]string{"admin"}, args...)
}
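A brief in-package usage sketch of the new option and shared client, grounded in the code above (the admin subcommand name is illustrative):

// construct a config whose shared httpClient skips TLS verification,
// e.g. for a gateway running with a self-signed certificate
s := NewS3Conf(
	WithTLSStatus(true), // sets InsecureSkipVerify on the shared transport
)
cfg := s.Config() // aws.Config is routed through s.httpClient via config.WithHTTPClient
_ = cfg

// admin CLI invocations gain --allow-insecure automatically
args := s.getAdminCommand("list-users") // ["admin", "--allow-insecure", "list-users"]
_ = args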
@@ -40,6 +40,7 @@ import (
|
||||
|
||||
var (
|
||||
shortTimeout = 10 * time.Second
|
||||
longTimeout = 60 * time.Second
|
||||
iso8601Format = "20060102T150405Z"
|
||||
nullVersionId = "null"
|
||||
)
|
||||
@@ -55,11 +56,8 @@ func Authentication_empty_auth_header(s *S3Conf) error {
|
||||
date: time.Now(),
|
||||
}, func(req *http.Request) error {
|
||||
req.Header.Set("Authorization", "")
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -83,11 +81,8 @@ func Authentication_invalid_auth_header(s *S3Conf) error {
|
||||
date: time.Now(),
|
||||
}, func(req *http.Request) error {
|
||||
req.Header.Set("Authorization", "invalid header")
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -114,11 +109,7 @@ func Authentication_unsupported_signature_version(s *S3Conf) error {
|
||||
authHdr = strings.Replace(authHdr, "AWS4-HMAC-SHA256", "AWS2-HMAC-SHA1", 1)
|
||||
req.Header.Set("Authorization", authHdr)
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -146,11 +137,7 @@ func Authentication_malformed_credentials(s *S3Conf) error {
|
||||
hdr := regExp.ReplaceAllString(authHdr, "Credential-access/32234/us-east-1/s3/aws4_request,")
|
||||
req.Header.Set("Authorization", hdr)
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -178,11 +165,7 @@ func Authentication_malformed_credentials_invalid_parts(s *S3Conf) error {
|
||||
hdr := regExp.ReplaceAllString(authHdr, "Credential=access/32234/us-east-1/s3,")
|
||||
req.Header.Set("Authorization", hdr)
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -210,11 +193,7 @@ func Authentication_credentials_terminated_string(s *S3Conf) error {
|
||||
hdr := regExp.ReplaceAllString(authHdr, "Credential=access/32234/us-east-1/s3/aws_request,")
|
||||
req.Header.Set("Authorization", hdr)
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -237,11 +216,8 @@ func Authentication_credentials_incorrect_service(s *S3Conf) error {
|
||||
service: "ec2",
|
||||
date: time.Now(),
|
||||
}, func(req *http.Request) error {
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -270,16 +246,13 @@ func Authentication_credentials_incorrect_region(s *S3Conf) error {
|
||||
service: "s3",
|
||||
date: time.Now(),
|
||||
}, func(req *http.Request) error {
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
apiErr := s3err.APIError{
|
||||
Code: "SignatureDoesNotMatch",
|
||||
Description: fmt.Sprintf("Credential should be scoped to a valid Region, not %v", cfg.awsRegion),
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -307,11 +280,7 @@ func Authentication_credentials_invalid_date(s *S3Conf) error {
|
||||
hdr := regExp.ReplaceAllString(authHdr, "Credential=access/3223423234/us-east-1/s3/aws4_request,")
|
||||
req.Header.Set("Authorization", hdr)
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -334,11 +303,8 @@ func Authentication_credentials_future_date(s *S3Conf) error {
|
||||
service: "s3",
|
||||
date: time.Now().Add(time.Duration(5) * 24 * time.Hour),
|
||||
}, func(req *http.Request) error {
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -375,11 +341,8 @@ func Authentication_credentials_past_date(s *S3Conf) error {
|
||||
service: "s3",
|
||||
date: time.Now().Add(time.Duration(-5) * 24 * time.Hour),
|
||||
}, func(req *http.Request) error {
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -421,11 +384,7 @@ func Authentication_credentials_non_existing_access_key(s *S3Conf) error {
|
||||
hdr := regExp.ReplaceAllString(authHdr, "Credential=a_rarely_existing_access_key_id_a7s86df78as6df89790a8sd7f")
|
||||
req.Header.Set("Authorization", hdr)
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -453,11 +412,7 @@ func Authentication_invalid_signed_headers(s *S3Conf) error {
|
||||
hdr := regExp.ReplaceAllString(authHdr, "SignedHeaders-host;x-amz-content-sha256;x-amz-date,")
|
||||
req.Header.Set("Authorization", hdr)
|
||||
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -480,12 +435,9 @@ func Authentication_missing_date_header(s *S3Conf) error {
|
||||
service: "s3",
|
||||
date: time.Now(),
|
||||
}, func(req *http.Request) error {
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
req.Header.Set("X-Amz-Date", "")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -508,12 +460,9 @@ func Authentication_invalid_date_header(s *S3Conf) error {
|
||||
service: "s3",
|
||||
date: time.Now(),
|
||||
}, func(req *http.Request) error {
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
req.Header.Set("X-Amz-Date", "03032006")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -536,12 +485,9 @@ func Authentication_date_mismatch(s *S3Conf) error {
|
||||
service: "s3",
|
||||
date: time.Now(),
|
||||
}, func(req *http.Request) error {
|
||||
client := http.Client{
|
||||
Timeout: shortTimeout,
|
||||
}
|
||||
req.Header.Set("X-Amz-Date", "20220830T095525Z")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -564,12 +510,9 @@ func Authentication_incorrect_payload_hash(s *S3Conf) error {
 		service: "s3",
 		date:    time.Now(),
 	}, func(req *http.Request) error {
-		client := http.Client{
-			Timeout: shortTimeout,
-		}
 		req.Header.Set("X-Amz-Content-Sha256", "7sa6df576dsa5f675sad67f")
 
-		resp, err := client.Do(req)
+		resp, err := s.httpClient.Do(req)
 		if err != nil {
 			return err
 		}

@@ -592,13 +535,10 @@ func Authentication_incorrect_md5(s *S3Conf) error {
 		service: "s3",
 		date:    time.Now(),
 	}, func(req *http.Request) error {
-		client := http.Client{
-			Timeout: shortTimeout,
-		}
 
 		req.Header.Set("Content-Md5", "sadfasdf87sad6f87==")
 
-		resp, err := client.Do(req)
+		resp, err := s.httpClient.Do(req)
 		if err != nil {
 			return err
 		}

@@ -623,11 +563,8 @@ func Authentication_signature_error_incorrect_secret_key(s *S3Conf) error {
 		service: "s3",
 		date:    time.Now(),
 	}, func(req *http.Request) error {
-		client := http.Client{
-			Timeout: shortTimeout,
-		}
 
-		resp, err := client.Do(req)
+		resp, err := s.httpClient.Do(req)
 		if err != nil {
 			return err
 		}
@@ -650,10 +587,6 @@ func PresignedAuth_missing_algo_query_param(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	urlParsed, err := url.Parse(v4req.URL)
 	if err != nil {
 		return err

@@ -668,7 +601,7 @@ func PresignedAuth_missing_algo_query_param(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -691,10 +624,6 @@ func PresignedAuth_unsupported_algorithm(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	uri := strings.Replace(v4req.URL, "AWS4-HMAC-SHA256", "AWS4-SHA256", 1)
 
 	req, err := http.NewRequest(v4req.Method, uri, nil)

@@ -702,7 +631,7 @@ func PresignedAuth_unsupported_algorithm(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -725,10 +654,6 @@ func PresignedAuth_missing_credentials_query_param(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	urlParsed, err := url.Parse(v4req.URL)
 	if err != nil {
 		return err

@@ -743,7 +668,7 @@ func PresignedAuth_missing_credentials_query_param(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}
@@ -766,10 +691,6 @@ func PresignedAuth_malformed_creds_invalid_parts(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	urlParsed, err := url.Parse(v4req.URL)
 	if err != nil {
 		return err

@@ -784,7 +705,7 @@ func PresignedAuth_malformed_creds_invalid_parts(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -807,10 +728,6 @@ func PresignedAuth_creds_invalid_terminator(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	uri, err := changeAuthCred(v4req.URL, "aws5_request", credTerminator)
 	if err != nil {
 		return err

@@ -821,7 +738,7 @@ func PresignedAuth_creds_invalid_terminator(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -844,10 +761,6 @@ func PresignedAuth_creds_incorrect_service(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	uri, err := changeAuthCred(v4req.URL, "sns", credService)
 	if err != nil {
 		return err

@@ -858,7 +771,7 @@ func PresignedAuth_creds_incorrect_service(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}
@@ -889,16 +802,12 @@ func PresignedAuth_creds_incorrect_region(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	req, err := http.NewRequest(v4req.Method, v4req.URL, nil)
 	if err != nil {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -925,10 +834,6 @@ func PresignedAuth_creds_invalid_date(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	uri, err := changeAuthCred(v4req.URL, "32234Z34", credDate)
 	if err != nil {
 		return err

@@ -939,7 +844,7 @@ func PresignedAuth_creds_invalid_date(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -962,10 +867,6 @@ func PresignedAuth_non_existing_access_key_id(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	uri, err := changeAuthCred(v4req.URL, "a_rarely_existing_access_key_id890asd6f807as6ydf870say", credAccess)
 	if err != nil {
 		return err

@@ -976,7 +877,7 @@ func PresignedAuth_non_existing_access_key_id(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}
@@ -999,10 +900,6 @@ func PresignedAuth_missing_date_query(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	urlParsed, err := url.Parse(v4req.URL)
 	if err != nil {
 		return err

@@ -1017,7 +914,7 @@ func PresignedAuth_missing_date_query(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1040,10 +937,6 @@ func PresignedAuth_dates_mismatch(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	uri, err := changeAuthCred(v4req.URL, "20060102", credDate)
 	if err != nil {
 		return err

@@ -1054,7 +947,7 @@ func PresignedAuth_dates_mismatch(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1077,10 +970,6 @@ func PresignedAuth_missing_signed_headers_query_param(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	urlParsed, err := url.Parse(v4req.URL)
 	if err != nil {
 		return err

@@ -1095,7 +984,7 @@ func PresignedAuth_missing_signed_headers_query_param(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}
@@ -1118,10 +1007,6 @@ func PresignedAuth_missing_expiration_query_param(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	urlParsed, err := url.Parse(v4req.URL)
 	if err != nil {
 		return err

@@ -1136,7 +1021,7 @@ func PresignedAuth_missing_expiration_query_param(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1159,10 +1044,6 @@ func PresignedAuth_invalid_expiration_query_param(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	urlParsed, err := url.Parse(v4req.URL)
 	if err != nil {
 		return err

@@ -1177,7 +1058,7 @@ func PresignedAuth_invalid_expiration_query_param(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1200,10 +1081,6 @@ func PresignedAuth_negative_expiration_query_param(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	urlParsed, err := url.Parse(v4req.URL)
 	if err != nil {
 		return err

@@ -1218,7 +1095,7 @@ func PresignedAuth_negative_expiration_query_param(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}
@@ -1241,10 +1118,6 @@ func PresignedAuth_exceeding_expiration_query_param(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	urlParsed, err := url.Parse(v4req.URL)
 	if err != nil {
 		return err

@@ -1259,7 +1132,7 @@ func PresignedAuth_exceeding_expiration_query_param(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1282,10 +1155,6 @@ func PresignedAuth_expired_request(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	urlParsed, err := url.Parse(v4req.URL)
 	if err != nil {
 		return err

@@ -1307,7 +1176,7 @@ func PresignedAuth_expired_request(s *S3Conf) error {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1332,16 +1201,12 @@ func PresignedAuth_incorrect_secret_key(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	req, err := http.NewRequest(v4req.Method, v4req.URL, nil)
 	if err != nil {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}
@@ -1369,16 +1234,12 @@ func PresignedAuth_PutObject_success(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	req, err := http.NewRequest(http.MethodPut, v4req.URL, nil)
 	if err != nil {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1415,10 +1276,6 @@ func PresignedAuth_Put_GetObject_with_data(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	req, err := http.NewRequest(v4req.Method, v4req.URL, body)
 	if err != nil {
 		return err

@@ -1426,7 +1283,7 @@ func PresignedAuth_Put_GetObject_with_data(s *S3Conf) error {
 
 	req.Header = v4req.SignedHeader
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1447,7 +1304,7 @@ func PresignedAuth_Put_GetObject_with_data(s *S3Conf) error {
 		return err
 	}
 
-	resp, err = httpClient.Do(req)
+	resp, err = s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1491,10 +1348,6 @@ func PresignedAuth_Put_GetObject_with_UTF8_chars(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	req, err := http.NewRequest(v4req.Method, v4req.URL, nil)
 	if err != nil {
 		return err

@@ -1502,7 +1355,7 @@ func PresignedAuth_Put_GetObject_with_UTF8_chars(s *S3Conf) error {
 
 	req.Header = v4req.SignedHeader
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1523,7 +1376,7 @@ func PresignedAuth_Put_GetObject_with_UTF8_chars(s *S3Conf) error {
 		return err
 	}
 
-	resp, err = httpClient.Do(req)
+	resp, err = s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -1564,16 +1417,12 @@ func PresignedAuth_UploadPart(s *S3Conf) error {
 		return err
 	}
 
-	httpClient := http.Client{
-		Timeout: shortTimeout,
-	}
-
 	req, err := http.NewRequest(v4req.Method, v4req.URL, nil)
 	if err != nil {
 		return err
 	}
 
-	resp, err := httpClient.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}
@@ -2394,11 +2243,7 @@ func DeleteBucket_success_status_code(s *S3Conf) error {
 		return fmt.Errorf("%v: %w", testName, err)
 	}
 
-	client := http.Client{
-		Timeout: shortTimeout,
-	}
-
-	resp, err := client.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		failF("%v: %v", testName, err)
 		return fmt.Errorf("%v: %w", testName, err)

@@ -2713,11 +2558,7 @@ func PutBucketTagging_success_status(s *S3Conf) error {
 		return fmt.Errorf("err signing the request: %w", err)
 	}
 
-	client := http.Client{
-		Timeout: shortTimeout,
-	}
-
-	resp, err := client.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return fmt.Errorf("err sending request: %w", err)
 	}

@@ -2833,11 +2674,7 @@ func DeleteBucketTagging_success_status(s *S3Conf) error {
 		return err
 	}
 
-	client := http.Client{
-		Timeout: shortTimeout,
-	}
-
-	resp, err := client.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}
@@ -3788,6 +3625,50 @@ func GetObject_with_meta(s *S3Conf) error {
 	})
 }
 
+func GetObject_large_object(s *S3Conf) error {
+	testName := "GetObject_large_object"
+	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
+		//FIXME: make the object size larger after
+		// resolving the context deadline exceeding issue
+		// in the github actions
+		dataLength, obj := int64(100*1024*1024), "my-obj"
+		ctype := defaultContentType
+
+		r, err := putObjectWithData(dataLength, &s3.PutObjectInput{
+			Bucket:      &bucket,
+			Key:         &obj,
+			ContentType: &ctype,
+		}, s3client)
+		if err != nil {
+			return err
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), longTimeout)
+		out, err := s3client.GetObject(ctx, &s3.GetObjectInput{
+			Bucket: &bucket,
+			Key:    &obj,
+		})
+		defer cancel()
+		if err != nil {
+			return err
+		}
+		if *out.ContentLength != dataLength {
+			return fmt.Errorf("expected content-length %v, instead got %v", dataLength, out.ContentLength)
+		}
+
+		bdy, err := io.ReadAll(out.Body)
+		if err != nil {
+			return err
+		}
+		defer out.Body.Close()
+		outCsum := sha256.Sum256(bdy)
+		if outCsum != r.csum {
+			return fmt.Errorf("expected the output data checksum to be %v, instead got %v", r.csum, outCsum)
+		}
+		return nil
+	})
+}
+
 func GetObject_success(s *S3Conf) error {
 	testName := "GetObject_success"
 	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -3973,11 +3854,7 @@ func GetObject_by_range_resp_status(s *S3Conf) error {
 		return err
 	}
 
-	client := http.Client{
-		Timeout: shortTimeout,
-	}
-
-	resp, err := client.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -4916,11 +4793,7 @@ func DeleteObject_success_status_code(s *S3Conf) error {
 		return err
 	}
 
-	client := http.Client{
-		Timeout: shortTimeout,
-	}
-
-	resp, err := client.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -5644,11 +5517,7 @@ func DeleteObjectTagging_success_status(s *S3Conf) error {
 		return err
 	}
 
-	client := http.Client{
-		Timeout: shortTimeout,
-	}
-
-	resp, err := client.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}

@@ -7203,11 +7072,7 @@ func AbortMultipartUpload_success_status_code(s *S3Conf) error {
 		return err
 	}
 
-	client := http.Client{
-		Timeout: shortTimeout,
-	}
-
-	resp, err := client.Do(req)
+	resp, err := s.httpClient.Do(req)
 	if err != nil {
 		return err
 	}
@@ -7327,6 +7192,38 @@ func CompleteMultipartUpload_invalid_ETag(s *S3Conf) error {
 	})
 }
 
+func CompleteMultipartUpload_empty_parts(s *S3Conf) error {
+	testName := "CompleteMultipartUpload_empty_parts"
+	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
+		obj := "my-obj"
+		mp, err := createMp(s3client, bucket, obj)
+		if err != nil {
+			return err
+		}
+
+		_, _, err = uploadParts(s3client, 5*1024*1024, 1, bucket, obj, *mp.UploadId)
+		if err != nil {
+			return err
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
+		_, err = s3client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
+			Bucket:   &bucket,
+			Key:      &obj,
+			UploadId: mp.UploadId,
+			MultipartUpload: &types.CompletedMultipartUpload{
+				Parts: []types.CompletedPart{}, // empty parts list
+			},
+		})
+		cancel()
+		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrEmptyParts)); err != nil {
+			return err
+		}
+
+		return nil
+	})
+}
+
 func CompleteMultipartUpload_success(s *S3Conf) error {
 	testName := "CompleteMultipartUpload_success"
 	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -7621,6 +7518,7 @@ func PutBucketAcl_invalid_acl_canned_and_grants(s *S3Conf) error {
 					ID:   getPtr("awsID"),
+					Type: types.TypeCanonicalUser,
 				},
 				Permission: types.PermissionFullControl,
 			},
 		},
 		Owner: &types.Owner{

@@ -7651,6 +7549,7 @@ func PutBucketAcl_invalid_acl_acp_and_grants(s *S3Conf) error {
 					ID:   getPtr("awsID"),
+					Type: types.TypeCanonicalUser,
 				},
 				Permission: types.PermissionFullControl,
 			},
 		},
 		Owner: &types.Owner{

@@ -7696,6 +7595,7 @@ func PutBucketAcl_invalid_owner(s *S3Conf) error {
 					ID:   getPtr(usr.access),
+					Type: types.TypeCanonicalUser,
 				},
 				Permission: types.PermissionRead,
 			},
 		},
 		Owner: &types.Owner{
@@ -7743,6 +7643,124 @@ func PutBucketAcl_invalid_owner_not_in_body(s *S3Conf) error {
 	}, withOwnership(types.ObjectOwnershipBucketOwnerPreferred))
 }
 
+func PutBucketAcl_invalid_empty_owner_id_in_body(s *S3Conf) error {
+	testName := "PutBucketAcl_invalid_empty_owner_id_in_body"
+	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
+		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
+		_, err := s3client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
+			Bucket: &bucket,
+			AccessControlPolicy: &types.AccessControlPolicy{
+				Grants: []types.Grant{
+					{
+						Grantee: &types.Grantee{
+							Type: types.TypeCanonicalUser,
+							ID:   getPtr("grt1"),
+						},
+						Permission: types.PermissionRead,
+					},
+				},
+				// Empty owner ID
+				Owner: &types.Owner{},
+			},
+		})
+		cancel()
+		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrMalformedACL)); err != nil {
+			return err
+		}
+
+		return nil
+	}, withOwnership(types.ObjectOwnershipBucketOwnerPreferred))
+}
+
+func PutBucketAcl_invalid_permission_in_body(s *S3Conf) error {
+	testName := "PutBucketAcl_invalid_permission_in_body"
+	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
+		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
+		_, err := s3client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
+			Bucket: &bucket,
+			AccessControlPolicy: &types.AccessControlPolicy{
+				Grants: []types.Grant{
+					{
+						Grantee: &types.Grantee{
+							Type: types.TypeCanonicalUser,
+							ID:   getPtr("grt1"),
+						},
+						Permission: types.Permission("invalid_permission"),
+					},
+				},
+				Owner: &types.Owner{
+					ID: &s.awsID,
+				},
+			},
+		})
+		cancel()
+		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrMalformedACL)); err != nil {
+			return err
+		}
+
+		return nil
+	}, withOwnership(types.ObjectOwnershipBucketOwnerPreferred))
+}
+
+func PutBucketAcl_invalid_grantee_type_in_body(s *S3Conf) error {
+	testName := "PutBucketAcl_invalid_grantee_type_in_body"
+	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
+		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
+		_, err := s3client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
+			Bucket: &bucket,
+			AccessControlPolicy: &types.AccessControlPolicy{
+				Grants: []types.Grant{
+					{
+						Grantee: &types.Grantee{
+							Type: types.Type("invalid_type"),
+							ID:   getPtr("grt1"),
+						},
+						Permission: types.PermissionRead,
+					},
+				},
+				Owner: &types.Owner{
+					ID: &s.awsID,
+				},
+			},
+		})
+		cancel()
+		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrMalformedACL)); err != nil {
+			return err
+		}
+
+		return nil
+	}, withOwnership(types.ObjectOwnershipBucketOwnerPreferred))
+}
+
+func PutBucketAcl_empty_grantee_ID_in_body(s *S3Conf) error {
+	testName := "PutBucketAcl_empty_grantee_ID_in_body"
+	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
+		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
+		_, err := s3client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
+			Bucket: &bucket,
+			AccessControlPolicy: &types.AccessControlPolicy{
+				Grants: []types.Grant{
+					{
+						Grantee: &types.Grantee{
+							Type: types.TypeCanonicalUser,
+						},
+						Permission: types.PermissionRead,
+					},
+				},
+				Owner: &types.Owner{
+					ID: &s.awsID,
+				},
+			},
+		})
+		cancel()
+		if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrMalformedACL)); err != nil {
+			return err
+		}
+
+		return nil
+	}, withOwnership(types.ObjectOwnershipBucketOwnerPreferred))
+}
+
 func PutBucketAcl_success_access_denied(s *S3Conf) error {
 	testName := "PutBucketAcl_success_access_denied"
 	return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
@@ -10927,7 +10945,7 @@ func IAM_user_access_denied(s *S3Conf) error {
 		return fmt.Errorf("%v: %w", testName, err)
 	}
 
-	out, err := execCommand("admin", "-a", usr.access, "-s", usr.secret, "-er", s.endpoint, "delete-user", "-a", "random_access")
+	out, err := execCommand(s.getAdminCommand("-a", usr.access, "-s", usr.secret, "-er", s.endpoint, "delete-user", "-a", "random_access")...)
 	if err == nil {
 		failF("%v: expected cmd error", testName)
 		return fmt.Errorf("%v: expected cmd error", testName)

@@ -10958,7 +10976,7 @@ func IAM_userplus_access_denied(s *S3Conf) error {
 		return fmt.Errorf("%v: %w", testName, err)
 	}
 
-	out, err := execCommand("admin", "-a", usr.access, "-s", usr.secret, "-er", s.endpoint, "delete-user", "-a", "random_access")
+	out, err := execCommand(s.getAdminCommand("-a", usr.access, "-s", usr.secret, "-er", s.endpoint, "delete-user", "-a", "random_access")...)
 	if err == nil {
 		failF("%v: expected cmd error", testName)
 		return fmt.Errorf("%v: expected cmd error", testName)

@@ -766,7 +766,7 @@ func createUsers(s *S3Conf, users []user) error {
 		if err != nil {
 			return err
 		}
-		out, err := execCommand("admin", "-a", s.awsID, "-s", s.awsSecret, "-er", s.endpoint, "create-user", "-a", usr.access, "-s", usr.secret, "-r", usr.role)
+		out, err := execCommand(s.getAdminCommand("-a", s.awsID, "-s", s.awsSecret, "-er", s.endpoint, "create-user", "-a", usr.access, "-s", usr.secret, "-r", usr.role)...)
 		if err != nil {
 			return err
 		}

@@ -778,7 +778,7 @@ func createUsers(s *S3Conf, users []user) error {
 }
 
 func deleteUser(s *S3Conf, access string) error {
-	out, err := execCommand("admin", "-a", s.awsID, "-s", s.awsSecret, "-er", s.endpoint, "delete-user", "-a", access)
+	out, err := execCommand(s.getAdminCommand("-a", s.awsID, "-s", s.awsSecret, "-er", s.endpoint, "delete-user", "-a", access)...)
 	if err != nil {
 		return err
 	}

@@ -791,7 +791,7 @@ func deleteUser(s *S3Conf, access string) error {
 
 func changeBucketsOwner(s *S3Conf, buckets []string, owner string) error {
 	for _, bucket := range buckets {
-		out, err := execCommand("admin", "-a", s.awsID, "-s", s.awsSecret, "-er", s.endpoint, "change-bucket-owner", "-b", bucket, "-o", owner)
+		out, err := execCommand(s.getAdminCommand("-a", s.awsID, "-s", s.awsSecret, "-er", s.endpoint, "change-bucket-owner", "-b", bucket, "-o", owner)...)
 		if err != nil {
 			return err
 		}

@@ -804,7 +804,7 @@ func changeBucketsOwner(s *S3Conf, buckets []string, owner string) error {
 }
 
 func listBuckets(s *S3Conf) error {
-	out, err := execCommand("admin", "-a", s.awsID, "-s", s.awsSecret, "-er", s.endpoint, "list-buckets")
+	out, err := execCommand(s.getAdminCommand("-a", s.awsID, "-s", s.awsSecret, "-er", s.endpoint, "list-buckets")...)
 	if err != nil {
 		return err
 	}

@@ -55,7 +55,7 @@ log_mask() {
     return 1
   fi
 
-  log_message "$log_level" "${masked_args[*]}"
+  log_message "$log_level" "$masked_data"
 }
 
 mask_args() {

@@ -63,23 +63,38 @@ mask_args() {
    echo "'mask_args' requires string"
    return 1
  fi
-  IFS=' ' read -r -a array <<< "$1"
+  unmasked_array=()
+  masked_data=""
+  while IFS= read -r line; do
+    unmasked_array+=("$line")
+  done <<< "$1"
 
-  if ! mask_arg_array "${array[@]}"; then
-    echo "error masking arg array"
-    return 1
-  fi
-  # shellcheck disable=SC2068
+  first_line=true
+  for line in "${unmasked_array[@]}"; do
+    if ! mask_arg_array "$line"; then
+      echo "error masking arg array"
+      return 1
+    fi
+    if [ "$first_line" == "true" ]; then
+      masked_data="${masked_args[*]}"
+      first_line="false"
+    else
+      masked_data+=$(printf "\n%s" "${masked_args[*]}")
+    fi
+  done
 }
 
 mask_arg_array() {
+  masked_args=() # Initialize an array to hold the masked arguments
   if [ $# -eq 0 ]; then
     echo "'mask_arg_array' requires parameters"
     return 1
   fi
+  mask_next=false
+  is_access=false
+  for arg in "$@"; do
-  masked_args=() # Initialize an array to hold the masked arguments
-  # shellcheck disable=SC2068
-  for arg in $@; do
    if ! check_arg_for_mask "$arg"; then
      echo "error checking arg for mask"
      return 1

@@ -135,4 +150,5 @@ log_message() {
   if [[ -n "$TEST_LOG_FILE" ]]; then
     echo "$now $1 $2" >> "$TEST_LOG_FILE.tmp"
   fi
+  sync
 }

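For reference, a hedged sketch of the call pattern the reworked mask_args is built for — masking a multi-line string line by line into masked_data (the command text here is invented for illustration, and the exact masked output depends on check_arg_for_mask, which is not shown in this diff):

    # hypothetical two-line input; each line is passed through mask_arg_array
    mask_args $'admin -a my_access -s my_secret list-buckets\nadmin -a my_access -s my_secret list-users'
    echo "$masked_data"  # prints both commands, one per line, with key/secret values masked
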
@@ -15,7 +15,7 @@
 # under the License.
 
-source ./tests/setup.sh
+source ./tests/util/util.sh
 source ./tests/util/util_object.sh
 
 delete_bucket_if_exists() {
   if [[ $# -ne 2 ]]; then

@@ -28,26 +28,23 @@ delete_bucket_if_exists() {
     return 1
   fi
   if [[ $exists_result -eq 1 ]]; then
-    log 5 "bucket '$2' doesn't exist, skipping"
+    echo "bucket '$2' doesn't exist, skipping"
     return 0
   fi
   if ! delete_bucket_recursive "$1" "$2"; then
     log 2 "error deleting bucket"
     return 1
   fi
-  log 5 "bucket '$2' successfully deleted"
+  echo "bucket '$2' successfully deleted"
   return 0
 }
 
-if ! setup; then
-  log 2 "error starting versity to set up static buckets"
-  exit 1
-fi
-if ! delete_bucket_if_exists "s3api" "$BUCKET_ONE_NAME"; then
+base_setup
+if ! RECREATE_BUCKETS=true delete_bucket_if_exists "s3api" "$BUCKET_ONE_NAME"; then
   log 2 "error deleting static bucket one"
-elif ! delete_bucket_if_exists "s3api" "$BUCKET_TWO_NAME"; then
+elif ! RECREATE_BUCKETS=true delete_bucket_if_exists "s3api" "$BUCKET_TWO_NAME"; then
   log 2 "error deleting static bucket two"
 fi
-if ! teardown; then
+if ! stop_versity; then
   log 2 "error stopping versity"
 fi
50
tests/rest_scripts/copy_object.sh
Executable file
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# Fields
+
+# shellcheck disable=SC2153
+bucket_name="$BUCKET_NAME"
+# shellcheck disable=SC2153
+key="$OBJECT_KEY"
+# shellcheck disable=SC2153
+copy_source="$COPY_SOURCE"
+
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+
+canonical_request="PUT
+/$bucket_name/$key
+
+host:$host
+x-amz-content-sha256:UNSIGNED-PAYLOAD
+x-amz-copy-source:$copy_source
+x-amz-date:$current_date_time
+
+host;x-amz-content-sha256;x-amz-copy-source;x-amz-date
+UNSIGNED-PAYLOAD"
+
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ks -w "\"%{http_code}\"" -X PUT "$AWS_ENDPOINT_URL/$bucket_name/$key")
+curl_command+=(-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-copy-source;x-amz-date,Signature=$signature\"")
+curl_command+=(-H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
+  -H "\"x-amz-copy-source: $copy_source\""
+  -H "\"x-amz-date: $current_date_time\"")
+curl_command+=(-o "$OUTPUT_FILE")
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
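A sketch of how a test might invoke this script — the bucket, key, and copy-source values below are illustrative, not taken from the suite; the script reads BUCKET_NAME, OBJECT_KEY, COPY_SOURCE, and OUTPUT_FILE from the environment, writes the response body to OUTPUT_FILE, and prints the HTTP status code to stdout:

    # hypothetical invocation (values are examples only)
    BUCKET_NAME=dest-bucket OBJECT_KEY=my-copy COPY_SOURCE=src-bucket/my-obj \
    OUTPUT_FILE=/tmp/copy_object_result.txt ./tests/rest_scripts/copy_object.sh
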
47
tests/rest_scripts/delete_object_tagging.sh
Executable file
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# Fields
+
+# shellcheck disable=SC2153
+bucket_name="$BUCKET_NAME"
+# shellcheck disable=SC2153
+key="$OBJECT_KEY"
+
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+
+canonical_request="DELETE
+/$bucket_name/$key
+tagging=
+host:$host
+x-amz-content-sha256:UNSIGNED-PAYLOAD
+x-amz-date:$current_date_time
+
+host;x-amz-content-sha256;x-amz-date
+UNSIGNED-PAYLOAD"
+
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ks -w "\"%{http_code}\"" -X DELETE "$AWS_ENDPOINT_URL/$bucket_name/$key?tagging"
+  -H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
+  -H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
+  -H "\"x-amz-date: $current_date_time\""
+  -o "$OUTPUT_FILE")
+
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
70
tests/rest_scripts/delete_objects.sh
Executable file
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# Fields
+
+# shellcheck disable=SC2153,SC2154
+payload="$PAYLOAD"
+# shellcheck disable=SC2153,SC2154
+bucket_name="$BUCKET_NAME"
+has_content_md5="${HAS_CONTENT_MD5:="true"}"
+
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+payload_hash="$(echo -n "$payload" | sha256sum | awk '{print $1}')"
+if [ "$has_content_md5" == "true" ]; then
+  content_md5=$(echo -n "$payload" | openssl dgst -binary -md5 | openssl base64)
+fi
+
+canonical_request="POST
+/$bucket_name
+delete=
+"
+if [ "$has_content_md5" == "true" ]; then
+  canonical_request+="content-md5:$content_md5
+"
+fi
+canonical_request+="host:$host
+x-amz-content-sha256:$payload_hash
+x-amz-date:$current_date_time
+
+"
+if [ "$has_content_md5" == "true" ]; then
+  canonical_request+="content-md5;"
+fi
+canonical_request+="host;x-amz-content-sha256;x-amz-date
+$payload_hash"
+
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ks -w "\"%{http_code}\"" -X POST "$AWS_ENDPOINT_URL/$bucket_name?delete")
+signed_headers=""
+if [ "$has_content_md5" == "true" ]; then
+  signed_headers+="content-md5;"
+fi
+signed_headers+="host;x-amz-content-sha256;x-amz-date"
+curl_command+=(-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=$signed_headers,Signature=$signature\"")
+curl_command+=(-H "\"Content-Type: application/xml\"")
+if [ "$has_content_md5" == "true" ]; then
+  curl_command+=(-H "\"content-md5: $content_md5\"")
+fi
+curl_command+=(-H "\"x-amz-content-sha256: $payload_hash\""
+  -H "\"x-amz-date: $current_date_time\"")
+curl_command+=(-o "$OUTPUT_FILE")
+curl_command+=(-d "\"${payload//\"/\\\"}\"")
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
47
tests/rest_scripts/get_object.sh
Executable file
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# Fields
+
+# shellcheck disable=SC2153
+bucket_name="$BUCKET_NAME"
+# shellcheck disable=SC2154
+key="$OBJECT_KEY"
+
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+
+#x-amz-object-attributes:ETag
+canonical_request="GET
+/$bucket_name/$key
+
+host:$host
+x-amz-content-sha256:UNSIGNED-PAYLOAD
+x-amz-date:$current_date_time
+
+host;x-amz-content-sha256;x-amz-date
+UNSIGNED-PAYLOAD"
+
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ks -w "\"%{http_code}\"" "$AWS_ENDPOINT_URL/$bucket_name/$key"
+  -H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
+  -H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
+  -H "\"x-amz-date: $current_date_time\""
+  -o "$OUTPUT_FILE")
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
44
tests/rest_scripts/get_public_access_block.sh
Executable file
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# Fields
+
+# shellcheck disable=SC2153
+bucket_name="$BUCKET_NAME"
+
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+
+canonical_request="GET
+/$bucket_name
+publicAccessBlock=
+host:$host
+x-amz-content-sha256:UNSIGNED-PAYLOAD
+x-amz-date:$current_date_time
+
+host;x-amz-content-sha256;x-amz-date
+UNSIGNED-PAYLOAD"
+
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ks -w "\"%{http_code}\"" "$AWS_ENDPOINT_URL/$bucket_name?publicAccessBlock="
  -H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
+  -H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
+  -H "\"x-amz-date: $current_date_time\""
+  -o "$OUTPUT_FILE")
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
47
tests/rest_scripts/head_object.sh
Executable file
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# Fields
+
+# shellcheck disable=SC2153
+bucket_name="$BUCKET_NAME"
+# shellcheck disable=SC2154
+key="$OBJECT_KEY"
+
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+
+#x-amz-object-attributes:ETag
+canonical_request="HEAD
+/$bucket_name/$key
+
+host:$host
+x-amz-content-sha256:UNSIGNED-PAYLOAD
+x-amz-date:$current_date_time
+
+host;x-amz-content-sha256;x-amz-date
+UNSIGNED-PAYLOAD"
+
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ksI -w "\"%{http_code}\"" "$AWS_ENDPOINT_URL/$bucket_name/$key"
+  -H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
+  -H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
+  -H "\"x-amz-date: $current_date_time\""
+  -o "$OUTPUT_FILE")
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
45
tests/rest_scripts/list_object_versions.sh
Executable file
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# Fields
+
+# shellcheck disable=SC2153
+bucket_name="$BUCKET_NAME"
+
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+
+# shellcheck disable=SC2034
+canonical_request="GET
+/$bucket_name
+versions=
+host:$host
+x-amz-content-sha256:UNSIGNED-PAYLOAD
+x-amz-date:$current_date_time
+
+host;x-amz-content-sha256;x-amz-date
+UNSIGNED-PAYLOAD"
+
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ks -w "\"%{http_code}\"" "https://$host/$bucket_name?versions"
+  -H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
+  -H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
+  -H "\"x-amz-date: $current_date_time\""
+  -o "$OUTPUT_FILE")
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
74
tests/rest_scripts/list_objects.sh
Executable file
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# Fields
+
+# shellcheck disable=SC2153
+bucket_name="$BUCKET_NAME"
+version_two="${VERSION_TWO:-FALSE}"
+max_keys="${MAX_KEYS:-0}"
+# shellcheck disable=SC2153
+marker="$MARKER"
+# shellcheck disable=SC2153
+if [ "$CONTINUATION_TOKEN" != "" ]; then
+  continuation_token=$(jq -rn --arg token "$CONTINUATION_TOKEN" '$token | @uri')
+fi
+
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+
+#x-amz-object-attributes:ETag
+canonical_request="GET
+/$bucket_name
+"
+
+queries=""
+if [ "$MARKER" != "" ]; then
+  queries=$(add_parameter "$queries" "marker=$marker")
+fi
+if [ "$CONTINUATION_TOKEN" != "" ]; then
+  queries=$(add_parameter "$queries" "continuation-token=$continuation_token")
+fi
+if [ "$version_two" != "FALSE" ]; then
+  queries=$(add_parameter "$queries" "list-type=2")
+fi
+if [ "$max_keys" -ne 0 ]; then
+  queries=$(add_parameter "$queries" "max-keys=$max_keys")
+fi
+
+canonical_request+="
+host:$host
+x-amz-content-sha256:UNSIGNED-PAYLOAD
+x-amz-date:$current_date_time
+
+host;x-amz-content-sha256;x-amz-date
+UNSIGNED-PAYLOAD"
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ks -w "\"%{http_code}\"")
+url="'$AWS_ENDPOINT_URL/$bucket_name"
+if [ "$queries" != "" ]; then
+  url+="?$queries"
+fi
+url+="'"
+curl_command+=("$url")
+curl_command+=(-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
+  -H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
+  -H "\"x-amz-date: $current_date_time\""
+  -o "$OUTPUT_FILE")
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
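Note the continuation token above is percent-encoded with jq's @uri filter before being appended as a query parameter, since S3 continuation tokens can contain reserved URL characters. For example (assuming jq is installed):

    jq -rn --arg token 'abc+/def=' '$token | @uri'  # prints abc%2B%2Fdef%3D
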
77
tests/rest_scripts/put_bucket_acl.sh
Executable file
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# Fields
+
+# shellcheck disable=SC2153
+bucket_name="$BUCKET_NAME"
+# shellcheck disable=SC2153
+acl_file="$ACL_FILE"
+# shellcheck disable=SC2153
+canned_acl="$CANNED_ACL"
+
+if [ -n "$ACL_FILE" ]; then
+  payload="$(cat "$acl_file")"
+else
+  payload=""
+fi
+
+payload_hash="$(echo -n "$payload" | sha256sum | awk '{print $1}')"
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+
+canonical_request="PUT
+/$bucket_name
+acl=
+host:$host
+"
+if [ -n "$CANNED_ACL" ]; then
+  canonical_request+="x-amz-acl:$canned_acl
+"
+fi
+canonical_request+="x-amz-content-sha256:$payload_hash
+x-amz-date:$current_date_time
+
+"
+canonical_request+="host;"
+if [ -n "$CANNED_ACL" ]; then
+  canonical_request+="x-amz-acl;"
+fi
+canonical_request+="x-amz-content-sha256;x-amz-date
+$payload_hash"
+
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ks -w "\"%{http_code}\"" -X PUT "$AWS_ENDPOINT_URL/$bucket_name?acl=")
+if [ -n "$CANNED_ACL" ]; then
+  acl_header="x-amz-acl;"
+else
+  acl_header=""
+fi
+curl_command+=(-H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;${acl_header}x-amz-content-sha256;x-amz-date,Signature=$signature\"")
+if [ -n "$CANNED_ACL" ]; then
+  curl_command+=(-H "\"x-amz-acl: $canned_acl\"")
+fi
+curl_command+=(-H "\"x-amz-content-sha256: $payload_hash\""
+  -H "\"x-amz-date: $current_date_time\"")
+if [ -n "$ACL_FILE" ]; then
+  curl_command+=(-d "\"${payload//\"/\\\"}\"")
+fi
+curl_command+=(-o "$OUTPUT_FILE")
+
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
58
tests/rest_scripts/put_public_access_block.sh
Executable file
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# Fields
+
+# shellcheck disable=SC2153
+bucket_name="$BUCKET_NAME"
+block_public_acls="${BLOCK_PUBLIC_ACLS:-TRUE}"
+ignore_public_acls="${IGNORE_PUBLIC_ACLS:-TRUE}"
+block_public_policy="${BLOCK_PUBLIC_POLICY:-TRUE}"
+restrict_public_buckets="${RESTRICT_PUBLIC_BUCKETS:-TRUE}"
+
+payload="<?xml version=\"1.0\" encoding=\"UTF-8\"?>
+<PublicAccessBlockConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">
+  <BlockPublicAcls>$block_public_acls</BlockPublicAcls>
+  <IgnorePublicAcls>$ignore_public_acls</IgnorePublicAcls>
+  <BlockPublicPolicy>$block_public_policy</BlockPublicPolicy>
+  <RestrictPublicBuckets>$restrict_public_buckets</RestrictPublicBuckets>
+</PublicAccessBlockConfiguration>"
+
+payload_hash="$(echo -n "$payload" | sha256sum | awk '{print $1}')"
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+
+canonical_request="PUT
+/$bucket_name
+publicAccessBlock=
+host:$host
+x-amz-content-sha256:$payload_hash
+x-amz-date:$current_date_time
+
+host;x-amz-content-sha256;x-amz-date
+$payload_hash"
+
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ks -w "\"%{http_code}\"" -X PUT "$AWS_ENDPOINT_URL/$bucket_name?publicAccessBlock="
+  -H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
+  -H "\"x-amz-content-sha256: $payload_hash\""
+  -H "\"x-amz-date: $current_date_time\""
+  -d "\"${payload//\"/\\\"}\""
+  -o "$OUTPUT_FILE")
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
@@ -65,3 +65,15 @@ $canonical_request_hash"
   curl_command=()
   add_command_recording_if_enabled
 }
+
+add_parameter() {
+  if [ "$#" -ne 2 ]; then
+    return
+  fi
+  current_string="$1"
+  if [ "$current_string" != "" ]; then
+    current_string+="&"
+  fi
+  current_string+="$2"
+  echo "$current_string"
+}

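The new add_parameter helper appends one query parameter to a growing query string, inserting "&" only when the string already has content. A minimal usage sketch, mirroring the calls in list_objects.sh above:

    queries=""
    queries=$(add_parameter "$queries" "list-type=2")   # queries is now "list-type=2"
    queries=$(add_parameter "$queries" "max-keys=10")   # queries is now "list-type=2&max-keys=10"
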
@@ -46,8 +46,6 @@ $payload_hash"
 
 create_canonical_hash_sts_and_signature
 
-sleep 5
-
 curl_command+=(curl -isk -w "\"%{http_code}\"" "\"$AWS_ENDPOINT_URL/$bucket_name/$key?partNumber=$part_number&uploadId=$upload_id\""
   -H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature\""
   -H "\"x-amz-content-sha256: $payload_hash\""

55
tests/rest_scripts/upload_part_copy.sh
Executable file
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 Versity Software
+# This file is licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http:#www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+source ./tests/rest_scripts/rest.sh
+
+# shellcheck disable=SC2153
+bucket_name="$BUCKET_NAME"
+# shellcheck disable=SC2153
+key="$OBJECT_KEY"
+# shellcheck disable=SC2153
+part_number="$PART_NUMBER"
+# shellcheck disable=SC2153
+upload_id="$UPLOAD_ID"
+# shellcheck disable=SC2153
+part_location=$PART_LOCATION
+
+current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
+aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
+# shellcheck disable=SC2034
+header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
+# shellcheck disable=SC2154
+canonical_request="PUT
+/$bucket_name/$key
+partNumber=$part_number&uploadId=$upload_id
+host:$aws_endpoint_url_address
+x-amz-content-sha256:UNSIGNED-PAYLOAD
+x-amz-copy-source:$part_location
+x-amz-date:$current_date_time
+
+host;x-amz-content-sha256;x-amz-copy-source;x-amz-date
+UNSIGNED-PAYLOAD"
+
+create_canonical_hash_sts_and_signature
+
+curl_command+=(curl -ks -w "\"%{http_code}\"" -X PUT "\"$AWS_ENDPOINT_URL/$bucket_name/$key?partNumber=$part_number&uploadId=$upload_id\""
+  -H "\"Authorization: AWS4-HMAC-SHA256 Credential=$aws_access_key_id/$year_month_day/$aws_region/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-copy-source;x-amz-date,Signature=$signature\""
+  -H "\"x-amz-content-sha256: UNSIGNED-PAYLOAD\""
+  -H "\"x-amz-copy-source: $part_location\""
+  -H "\"x-amz-date: $current_date_time\""
+  -o "\"$OUTPUT_FILE\"")
+# shellcheck disable=SC2154
+eval "${curl_command[*]}" 2>&1
@@ -141,7 +141,11 @@ run_suite() {
       ;;
     rest)
       echo "Running rest tests ..."
-      "$HOME"/bin/bats ./tests/test_rest.sh || exit_code=$?
+      if "$HOME"/bin/bats ./tests/test_rest.sh; then
+        "$HOME"/bin/bats ./tests/test_rest_acl.sh || exit_code=$?
+      else
+        exit_code=1
+      fi
       ;;
     s3api-user)
      echo "Running s3api user tests ..."

@@ -14,13 +14,10 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/env.sh
source ./tests/report.sh
source ./tests/setup_mc.sh
source ./tests/util/util.sh
source ./tests/util/util_object.sh
source ./tests/versity.sh

# bats setup function
@@ -53,6 +50,7 @@ setup() {
  fi

  export AWS_PROFILE
  log 4 "********** END SETUP **********"
}

delete_temp_log_if_exists() {
@@ -68,11 +66,15 @@
# bats teardown function
teardown() {
  # shellcheck disable=SC2154
  if ! bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"; then
    log 3 "error deleting bucket $BUCKET_ONE_NAME or contents"
  fi
  if ! bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_TWO_NAME"; then
    log 3 "error deleting bucket $BUCKET_TWO_NAME or contents"
  log 4 "********** BEGIN TEARDOWN **********"
  if [ "$DELETE_BUCKETS_AFTER_TEST" != "false" ]; then
    log 5 "deleting or clearing buckets"
    if ! bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"; then
      log 3 "error deleting bucket $BUCKET_ONE_NAME or contents"
    fi
    if ! bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_TWO_NAME"; then
      log 3 "error deleting bucket $BUCKET_TWO_NAME or contents"
    fi
  fi
  if user_exists "$USERNAME_ONE" && ! delete_user "$USERNAME_ONE"; then
    log 3 "error deleting user $USERNAME_ONE"
@@ -86,6 +88,18 @@ teardown() {
    log 3 "unable to remove test file folder: $error"
  fi
  fi
  stop_versity
  if [[ $LOG_LEVEL -ge 5 ]] || [[ -n "$TIME_LOG" ]]; then
    end_time=$(date +%s)
    total_time=$((end_time - start_time))
    log 4 "Total test time: $total_time"
    if [[ -n "$TIME_LOG" ]]; then
      echo "$BATS_TEST_NAME: ${total_time}s" >> "$TIME_LOG"
    fi
  fi
  if [[ -n "$COVERAGE_DB" ]]; then
    record_result
  fi
  if [[ "$BATS_TEST_COMPLETED" -ne 1 ]]; then
    if [[ -e "$COMMAND_LOG" ]]; then
      cat "$COMMAND_LOG"
@@ -108,16 +122,4 @@ teardown() {
      log 2 "error deleting temp log"
    fi
  fi
  stop_versity
  if [[ $LOG_LEVEL -ge 5 ]] || [[ -n "$TIME_LOG" ]]; then
    end_time=$(date +%s)
    total_time=$((end_time - start_time))
    log 4 "Total test time: $total_time"
    if [[ -n "$TIME_LOG" ]]; then
      echo "$BATS_TEST_NAME: ${total_time}s" >> "$TIME_LOG"
    fi
  fi
  if [[ -n "$COVERAGE_DB" ]]; then
    record_result
  fi
}

@@ -15,7 +15,7 @@
# under the License.

source ./tests/env.sh
source ./tests/util/util.sh
source ./tests/util/util_object.sh
source ./tests/commands/create_bucket.sh

create_bucket_if_not_exists() {
@@ -29,14 +29,14 @@ create_bucket_if_not_exists() {
    return 1
  fi
  if [[ $exists_result -eq 0 ]]; then
    log 5 "bucket '$2' already exists, skipping"
    echo "bucket '$2' already exists, skipping"
    return 0
  fi
  if ! create_bucket_object_lock_enabled "$2"; then
    log 2 "error creating bucket"
    return 1
  fi
  log 5 "bucket '$2' successfully created"
  echo "bucket '$2' successfully created"
  return 0
}

@@ -1,32 +0,0 @@
#!/bin/bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

source ./tests/setup.sh
source ./tests/util/util.sh

if ! base_setup; then
  log 2 "error starting versity to set up static buckets"
  exit 1
fi
if ! delete_bucket_recursive "s3" "$BUCKET_ONE_NAME"; then
  log 2 "error deleting static bucket one"
elif ! delete_bucket_recursive "s3" "$BUCKET_TWO_NAME"; then
  log 2 "error deleting static bucket two"
fi
log 4 "buckets deleted successfully"
if ! stop_versity; then
  log 2 "error stopping versity"
fi
@@ -15,11 +15,11 @@
# under the License.

source ./tests/setup.sh
source ./tests/util/util.sh
source ./tests/util/util_acl.sh
source ./tests/util/util_bucket_location.sh
source ./tests/util/util_file.sh
source ./tests/util/util_list_buckets.sh
source ./tests/util/util_object.sh
source ./tests/util/util_policy.sh
source ./tests/util/util_presigned_url.sh
source ./tests/commands/copy_object.sh
@@ -303,7 +303,8 @@ test_common_presigned_url_utf8_chars() {

  run create_test_file "$bucket_file"
  assert_success
  dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1 || fail "error creating test file"
  run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1
  assert_success

  run setup_bucket "$1" "$BUCKET_ONE_NAME"
  assert_success

@@ -52,8 +52,7 @@ test_common_put_bucket_acl() {
  run put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred"
  assert_success

  username=$USERNAME_ONE
  run setup_user "$username" "HIJKLMN" "user"
  run setup_user "$USERNAME_ONE" "$PASSWORD_ONE" "user"
  assert_success

  run get_check_acl_id "$1" "$BUCKET_ONE_NAME"
@@ -64,47 +63,24 @@
  assert_success

  if [[ $DIRECT == "true" ]]; then
    grantee="{\"Type\": \"Group\", \"URI\": \"http://acs.amazonaws.com/groups/global/AllUsers\"}"
    grantee_type="Group"
    grantee_id="http://acs.amazonaws.com/groups/global/AllUsers"
  else
    grantee="{\"ID\": \"$username\", \"Type\": \"CanonicalUser\"}"
    grantee_type="CanonicalUser"
    grantee_id="$USERNAME_ONE"
  fi
  run setup_acl_json "$TEST_FILE_FOLDER/$acl_file" "$grantee_type" "$grantee_id" "READ" "$AWS_ACCESS_KEY_ID"
  assert_success

  cat <<EOF > "$TEST_FILE_FOLDER"/"$acl_file"
{
  "Grants": [
    {
      "Grantee": $grantee,
      "Permission": "READ"
    }
  ],
  "Owner": {
    "ID": "$AWS_ACCESS_KEY_ID"
  }
}
EOF

  log 5 "acl: $(cat "$TEST_FILE_FOLDER/$acl_file")"
  run put_bucket_acl_s3api "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER"/"$acl_file"
  assert_success

  run get_check_acl_after_first_put "$1" "$BUCKET_ONE_NAME"
  assert_success

  cat <<EOF > "$TEST_FILE_FOLDER"/"$acl_file"
{
  "Grants": [
    {
      "Grantee": {
        "ID": "$username",
        "Type": "CanonicalUser"
      },
      "Permission": "FULL_CONTROL"
    }
  ],
  "Owner": {
    "ID": "$AWS_ACCESS_KEY_ID"
  }
}
EOF
  run setup_acl_json "$TEST_FILE_FOLDER/$acl_file" "CanonicalUser" "$USERNAME_ONE" "FULL_CONTROL" "$AWS_ACCESS_KEY_ID"
  assert_success

  run put_bucket_acl_s3api "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER"/"$acl_file"
  assert_success

@@ -14,6 +14,9 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/test_common.sh
source ./tests/setup.sh
source ./tests/util/util_create_bucket.sh

@@ -14,6 +14,9 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/test_common.sh

export RUN_MC=true

@@ -14,6 +14,9 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/commands/create_multipart_upload.sh
source ./tests/commands/delete_object_tagging.sh
source ./tests/commands/get_bucket_versioning.sh
@@ -28,20 +31,25 @@ source ./tests/commands/put_object_retention.sh
source ./tests/commands/put_object_tagging.sh
source ./tests/logger.sh
source ./tests/setup.sh
source ./tests/util/util.sh
source ./tests/util/util_acl.sh
source ./tests/util/util_attributes.sh
source ./tests/util/util_delete_object.sh
source ./tests/util/util_head_object.sh
source ./tests/util/util_legal_hold.sh
source ./tests/util/util_list_buckets.sh
source ./tests/util/util_list_objects.sh
source ./tests/util/util_list_parts.sh
source ./tests/util/util_lock_config.sh
source ./tests/util/util_multipart_before_completion.sh
source ./tests/util/util_object.sh
source ./tests/util/util_ownership.sh
source ./tests/util/util_policy.sh
source ./tests/util/util_public_access_block.sh
source ./tests/util/util_rest.sh
source ./tests/util/util_tags.sh
source ./tests/util/util_time.sh
source ./tests/util/util_versioning.sh
source ./tests/util/util_xml.sh

export RUN_USERS=true

@@ -244,9 +252,6 @@ export RUN_USERS=true
}

@test "versioning - retrieve after delete" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/888"
  fi
  test_file="test_file"

  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
@@ -269,9 +274,6 @@ export RUN_USERS=true
}

@test "REST - legal hold, get without config" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/883"
  fi
  test_file="test_file"

  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
@@ -322,7 +324,7 @@ export RUN_USERS=true

@test "REST - get object attributes" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/916"
    skip "https://github.com/versity/versitygw/issues/1000"
  fi
  test_file="test_file"

@@ -344,7 +346,7 @@ export RUN_USERS=true

@test "REST - attributes - invalid param" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/917"
    skip "https://github.com/versity/versitygw/issues/1001"
  fi
  test_file="test_file"

@@ -363,7 +365,7 @@ export RUN_USERS=true

@test "REST - attributes - checksum" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/928"
    skip "https://github.com/versity/versitygw/issues/1006"
  fi
  test_file="test_file"

@@ -386,9 +388,6 @@ export RUN_USERS=true
}

@test "REST - bucket tagging - tags" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/932"
  fi
  test_key="testKey"
  test_value="testValue"

@@ -443,13 +442,157 @@ export RUN_USERS=true
  assert_success
}

@test "REST - get ACL" {
@test "REST - list objects v2 - invalid continuation token" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/971"
    skip "https://github.com/versity/versitygw/issues/993"
  fi
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  run get_and_check_acl_rest "$BUCKET_ONE_NAME"
  test_file="test_file"
  test_file_two="test_file_2"
  test_file_three="test_file_3"
  run create_test_files "$test_file" "$test_file_two" "$test_file_three"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file_two" "$BUCKET_ONE_NAME" "$test_file_two"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file_three" "$BUCKET_ONE_NAME" "$test_file_three"
  assert_success

  run list_objects_check_params_get_token "$BUCKET_ONE_NAME" "$test_file" "$test_file_two" "TRUE"
  assert_success
  continuation_token=$output

  # interestingly, AWS appears to accept continuation tokens that are a few characters off, so have to remove three chars
  run list_objects_check_continuation_error "$BUCKET_ONE_NAME" "${continuation_token:0:${#continuation_token}-3}"
  assert_success
}

@test "REST - list objects v1 - no NextMarker without delimiter" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/999"
  fi
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  test_file="test_file"
  test_file_two="test_file_2"
  run create_test_files "$test_file" "$test_file_two"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file_two" "$BUCKET_ONE_NAME" "$test_file_two"
  assert_success

  run list_objects_v1_check_nextmarker_empty "$BUCKET_ONE_NAME"
  assert_success
}

@test "REST - complete upload - invalid part" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/1008"
  fi
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  test_file="test_file"
  run create_large_file "$test_file"
  assert_success

  run create_upload_finish_wrong_etag "$BUCKET_ONE_NAME" "$test_file"
  assert_success
}

@test "REST - upload part copy" {
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  test_file="test_file"
  run create_large_file "$test_file"
  assert_success

  run create_upload_part_copy_rest "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file"
  assert_success

  run download_and_compare_file "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$TEST_FILE_FOLDER/$test_file-copy"
  assert_success
}

@test "REST - head object" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/1018"
  fi
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  test_file="test_file"
  run create_test_file "$test_file"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
  assert_success

  run get_etag_rest "$BUCKET_ONE_NAME" "$test_file"
  assert_success
  expected_etag=$output

  run get_etag_attribute_rest "$BUCKET_ONE_NAME" "$test_file" "$expected_etag"
  assert_success
}

@test "REST - POST call on root endpoint" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/1036"
  fi
  run delete_object_empty_bucket_check_error
  assert_success
}

@test "REST - delete objects - no content-md5 header" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/1040"
  fi
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  run delete_objects_no_content_md5_header "$BUCKET_ONE_NAME"
  assert_success
}

@test "REST - delete objects command" {
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  test_file_one="test_file"
  test_file_two="test_file_two"
  run create_test_files "$test_file_one" "$test_file_two"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file_one" "$BUCKET_ONE_NAME" "$test_file_one"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file_two" "$BUCKET_ONE_NAME" "$test_file_two"
  assert_success

  run verify_object_exists "$BUCKET_ONE_NAME" "$test_file_one"
  assert_success

  run verify_object_exists "$BUCKET_ONE_NAME" "$test_file_two"
  assert_success

  run delete_objects_verify_success "$BUCKET_ONE_NAME" "$test_file_one" "$test_file_two"
  assert_success

  run verify_object_not_found "$BUCKET_ONE_NAME" "$test_file_one"
  assert_success

  run verify_object_not_found "$BUCKET_ONE_NAME" "$test_file_two"
  assert_success
}
167
tests/test_rest_acl.sh
Executable file
@@ -0,0 +1,167 @@
#!/usr/bin/env bats

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/commands/put_object.sh
source ./tests/logger.sh
source ./tests/setup.sh
source ./tests/util/util_acl.sh
source ./tests/util/util_object.sh

export RUN_USERS=true

@test "REST - get ACL" {
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  run get_and_check_acl_rest "$BUCKET_ONE_NAME"
  assert_success
}

@test "REST - put ACL" {
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  test_file="test_file"
  run create_test_files "$test_file"
  assert_success

  run put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
  assert_success

  run create_versitygw_acl_user_or_get_direct_user "$USERNAME_ONE" "$PASSWORD_ONE"
  assert_success
  canonical_id=${lines[0]}
  user_canonical_id=${lines[1]}
  username=${lines[2]}
  password=${lines[3]}

  run setup_acl "$TEST_FILE_FOLDER/acl-file.txt" "$user_canonical_id" "READ" "$canonical_id"
  assert_success

  run list_objects_with_user_rest_verify_access_denied "$BUCKET_ONE_NAME" "$username" "$password"
  assert_success

  run put_acl_rest "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/acl-file.txt"
  assert_success

  if [ "$DIRECT" == "true" ]; then
    sleep 5
  fi

  run list_objects_with_user_rest_verify_success "$BUCKET_ONE_NAME" "$username" "$password" "$test_file"
  assert_success
}

@test "REST - put public-read canned acl" {
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  test_file="test_file"
  run create_test_files "$test_file"
  assert_success

  run put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
  assert_success

  run create_versitygw_acl_user_or_get_direct_user "$USERNAME_ONE" "$PASSWORD_ONE"
  assert_success
  canonical_id=${lines[0]}
  user_canonical_id=${lines[1]}
  username=${lines[2]}
  password=${lines[3]}

  run list_objects_with_user_rest_verify_access_denied "$BUCKET_ONE_NAME" "$username" "$password"
  assert_success

  if [ "$DIRECT" == "true" ]; then
    run allow_public_access "$BUCKET_ONE_NAME"
    assert_success
  fi
  run put_canned_acl_rest "$BUCKET_ONE_NAME" "public-read"
  assert_success

  run list_objects_with_user_rest_verify_success "$BUCKET_ONE_NAME" "$username" "$password" "$test_file"
  assert_success
}

@test "REST - put invalid ACL" {
  if [ "$DIRECT" != "true" ]; then
    skip "https://github.com/versity/versitygw/issues/986"
  fi
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  run put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred"
  assert_success

  run create_versitygw_acl_user_or_get_direct_user "$USERNAME_ONE" "$PASSWORD_ONE"
  assert_success
  canonical_id=${lines[0]}
  user_canonical_id=${lines[1]}
  username=${lines[2]}
  password=${lines[3]}

  run setup_acl "$TEST_FILE_FOLDER/acl-file.txt" "$user_canonical_id" "READD" "$canonical_id"
  assert_success

  if [ "$DIRECT" == "true" ]; then
    run allow_public_access "$BUCKET_ONE_NAME"
    assert_success
  fi
  run put_invalid_acl_rest_verify_failure "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/acl-file.txt"
  assert_success
}

@test "REST - put public-read-write canned acl" {
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  test_file="test_file"
  run create_test_files "$test_file"
  assert_success

  run put_bucket_ownership_controls "$BUCKET_ONE_NAME" "BucketOwnerPreferred"
  assert_success

  run create_versitygw_acl_user_or_get_direct_user "$USERNAME_ONE" "$PASSWORD_ONE"
  assert_success
  canonical_id=${lines[0]}
  user_canonical_id=${lines[1]}
  username=${lines[2]}
  password=${lines[3]}

  run put_object_with_user "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password"
  assert_failure

  if [ "$DIRECT" == "true" ]; then
    run allow_public_access "$BUCKET_ONE_NAME"
    assert_success
  fi
  run put_canned_acl_rest "$BUCKET_ONE_NAME" "public-read-write"
  assert_success

  run put_object_with_user "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file" "$username" "$password"
  assert_success
}
@@ -14,6 +14,9 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/test_common.sh
source ./tests/util/util_file.sh

@@ -14,6 +14,9 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/test_common.sh
source ./tests/util/util_file.sh

@@ -14,12 +14,15 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/setup.sh
source ./tests/util/util.sh
source ./tests/util/util_create_bucket.sh
source ./tests/util/util_file.sh
source ./tests/util/util_head_bucket.sh
source ./tests/util/util_lock_config.sh
source ./tests/util/util_object.sh
source ./tests/util/util_tags.sh
source ./tests/util/util_users.sh
source ./tests/test_s3api_root_inner.sh

@@ -14,11 +14,15 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/setup.sh
source ./tests/test_s3api_root_inner.sh
source ./tests/util/util_file.sh
source ./tests/util/util_multipart.sh
source ./tests/util/util_multipart_abort.sh
source ./tests/util/util_multipart_before_completion.sh
source ./tests/util/util_tags.sh
source ./tests/commands/get_object.sh
source ./tests/commands/put_object.sh
@@ -75,28 +79,9 @@ source ./tests/commands/list_multipart_uploads.sh
  local expected_tag_key="TestTag"
  local expected_tag_val="TestTagVal"

  os_name="$(uname)"
  if [[ "$os_name" == "Darwin" ]]; then
    now=$(date -u +"%Y-%m-%dT%H:%M:%S")
    later=$(date -j -v +15S -f "%Y-%m-%dT%H:%M:%S" "$now" +"%Y-%m-%dT%H:%M:%S")
  else
    now=$(date +"%Y-%m-%dT%H:%M:%S")
    later=$(date -d "$now 15 seconds" +"%Y-%m-%dT%H:%M:%S")
  fi

  run create_test_files "$bucket_file"
  run setup_multipart_upload_with_params "$BUCKET_ONE_NAME" "$bucket_file"
  assert_success

  run dd if=/dev/urandom of="$TEST_FILE_FOLDER/$bucket_file" bs=5M count=1
  assert_success

  run bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"
  assert_success
  # in static bucket config, bucket will still exist
  if ! bucket_exists "s3api" "$BUCKET_ONE_NAME"; then
    run create_bucket_object_lock_enabled "$BUCKET_ONE_NAME"
    assert_success
  fi
  later=${lines[${#lines[@]}-1]}

  run multipart_upload_with_params "$BUCKET_ONE_NAME" "$bucket_file" "$TEST_FILE_FOLDER"/"$bucket_file" 4 \
    "$expected_content_type" \

@@ -14,11 +14,14 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/setup.sh
source ./tests/util/util.sh
source ./tests/util/util_create_bucket.sh
source ./tests/util/util_file.sh
source ./tests/util/util_lock_config.sh
source ./tests/util/util_object.sh
source ./tests/util/util_tags.sh
source ./tests/util/util_users.sh
source ./tests/test_s3api_root_inner.sh
@@ -69,7 +72,7 @@ export RUN_USERS=true
# delete-objects
@test "test_delete_objects" {
  if [ "$RECREATE_BUCKETS" == "false" ]; then
    skip "https://github.com/versity/versitygw/issues/888"
    skip "https://github.com/versity/versitygw/issues/1029"
  fi
  test_delete_objects_s3api_root
}
@@ -128,14 +131,14 @@ export RUN_USERS=true
# test adding and removing an object on versitygw
@test "test_put_object_with_data" {
  if [ "$RECREATE_BUCKETS" == "false" ]; then
    skip "https://github.com/versity/versitygw/issues/888"
    skip "https://github.com/versity/versitygw/issues/1029"
  fi
  test_common_put_object_with_data "s3api"
}

@test "test_put_object_no_data" {
  if [ "$RECREATE_BUCKETS" == "false" ]; then
    skip "https://github.com/versity/versitygw/issues/888"
    skip "https://github.com/versity/versitygw/issues/1029"
  fi
  test_common_put_object_no_data "s3api"
}
@@ -233,3 +236,54 @@ export RUN_USERS=true
  test_common_ls_directory_object "s3api"
}

@test "directory objects can't contain data" {
  if [ "$DIRECT" == "true" ]; then
    skip
  fi
  test_file="a"

  run create_test_file "$test_file"
  assert_success

  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file/"
  assert_failure
  assert_output -p "Directory object contains data payload"
}

#@test "objects containing data can't be copied to directory objects" {
#  # TODO finish test after https://github.com/versity/versitygw/issues/1021
#  skip "https://github.com/versity/versitygw/issues/1021"
#  test_file="a"
#
#  run create_test_file "$test_file" 0
#  assert_success
#
#  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
#  assert_success
#
#  run put_object "s3api" "$TEST_FILE_FOLDER/$test_file" "$BUCKET_ONE_NAME" "$test_file"
#  assert_success
#
#  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$BUCKET_ONE_NAME" OBJECT_KEY="$test_file/" COPY_SOURCE="$BUCKET_ONE_NAME/$test_file" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/copy_object.sh); then
#    log 2 "error copying object: $result"
#    return 1
#  fi
#  if [ "$result" != "400" ]; then
#    log 2 "response code '$result': $(cat "$TEST_FILE_FOLDER/result.txt")"
#    return 1
#  fi
#  return 0
#}

@test "directory object - create multipart upload" {
  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  run create_multipart_upload "$BUCKET_ONE_NAME" "test_file/"
  assert_failure
  assert_output -p "Directory object contains data payload"
}

@@ -14,6 +14,9 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/logger.sh
source ./tests/setup.sh
source ./tests/test_s3api_policy_bucket.sh
@@ -21,6 +24,7 @@ source ./tests/test_s3api_policy_multipart.sh
source ./tests/test_s3api_policy_object.sh
source ./tests/util/util_multipart.sh
source ./tests/util/util_multipart_abort.sh
source ./tests/util/util_multipart_before_completion.sh
source ./tests/util/util_file.sh
source ./tests/util/util_policy.sh
source ./tests/util/util_tags.sh

@@ -323,15 +323,10 @@ test_s3api_policy_put_wildcard() {
  username=${lines[0]}
  password=${lines[1]}

  effect="Allow"
  principal="$username"
  action="s3:PutObject"
  resource="arn:aws:s3:::$BUCKET_ONE_NAME/$test_folder/*"

  run setup_bucket "s3api" "$BUCKET_ONE_NAME"
  assert_success

  run setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "$effect" "$principal" "$action" "$resource"
  run setup_policy_with_single_statement "$TEST_FILE_FOLDER/$policy_file" "dummy" "Allow" "$username" "s3:PutObject" "arn:aws:s3:::$BUCKET_ONE_NAME/$test_folder/*"
  assert_success

  run put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$policy_file"

@@ -14,11 +14,14 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/setup.sh
source ./tests/test_common.sh
source ./tests/test_common_acl.sh
source ./tests/util/util.sh
source ./tests/util/util_create_bucket.sh
source ./tests/util/util_object.sh
source ./tests/util/util_users.sh
source ./tests/commands/delete_bucket_policy.sh
source ./tests/commands/get_bucket_policy.sh

@@ -14,6 +14,9 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/test_common.sh

export RUN_S3CMD=true

@@ -14,6 +14,9 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/test_user_common.sh
source ./tests/util/util_users.sh
source ./tests/commands/get_object.sh
@@ -125,9 +128,6 @@ export RUN_USERS=true
}

@test "test_admin_put_get_object" {
  if [ "$RECREATE_BUCKETS" == "false" ]; then
    skip "https://github.com/versity/versitygw/issues/888"
  fi
  test_file="test_file"

  run setup_user_versitygw_or_direct "$USERNAME_ONE" "$PASSWORD_ONE" "admin" "$BUCKET_ONE_NAME"

@@ -15,9 +15,9 @@
# under the License.

source ./tests/setup.sh
source ./tests/util/util.sh
source ./tests/util/util_create_bucket.sh
source ./tests/util/util_list_buckets.sh
source ./tests/util/util_object.sh
source ./tests/util/util_users.sh
source ./tests/commands/list_buckets.sh

@@ -45,6 +45,8 @@ test_admin_user() {
  assert_success

  if [ "$RECREATE_BUCKETS" == "true" ]; then
    run bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_TWO_NAME"
    assert_success
    run create_bucket_with_user "s3api" "$BUCKET_TWO_NAME" "$admin_username" "$admin_password"
    assert_success
  else

@@ -14,6 +14,9 @@
# specific language governing permissions and limitations
# under the License.

load ./bats-support/load
load ./bats-assert/load

source ./tests/test_user_common.sh

export RUN_S3CMD=true

@@ -1,5 +1,21 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

source ./tests/util/util_users.sh

get_check_default_acl_s3cmd() {
  if [ $# -ne 1 ]; then
    log 2 "'get_check_default_acl_s3cmd' requires bucket name"
@@ -243,3 +259,189 @@ get_and_check_acl_rest() {
  fi
  return 0
}

setup_acl() {
  if [ $# -ne 4 ]; then
    log 2 "'setup_acl' requires acl file, grantee, permission, owner ID"
    return 1
  fi
  cat <<EOF > "$1"
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Owner>
    <ID>$4</ID>
  </Owner>
  <AccessControlList>
    <Grant>
      <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">
        <ID>$2</ID>
      </Grantee>
      <Permission>$3</Permission>
    </Grant>
  </AccessControlList>
</AccessControlPolicy>
EOF
}

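A usage sketch (illustrative, not part of the diff): setup_acl writes the grant XML to a file, which put_acl_rest below then sends to the gateway; the IDs are whatever create_versitygw_acl_user_or_get_direct_user returned.

run setup_acl "$TEST_FILE_FOLDER/acl-file.txt" "$user_canonical_id" "READ" "$canonical_id"
assert_success
run put_acl_rest "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/acl-file.txt"
assert_success
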
setup_acl_json() {
  if [ $# -ne 5 ]; then
    log 2 "'setup_acl_json' requires acl file, grantee type, grantee ID, permission, owner ID"
    return 1
  fi
  cat <<EOF > "$1"
{
  "Grants": [
    {
      "Grantee": {
        "Type": "$2",
        "ID": "$3"
      },
      "Permission": "$4"
    }
  ],
  "Owner": {
    "ID": "$5"
  }
}
EOF
}

create_versitygw_acl_user_or_get_direct_user() {
  if [ $# -ne 2 ]; then
    log 2 "'create_versitygw_acl_user_or_get_direct_user' requires username, password"
    return 1
  fi
  if [ "$DIRECT" == "true" ]; then
    if [ -z "$AWS_CANONICAL_ID" ] || [ -z "$ACL_AWS_CANONICAL_ID" ] || [ -z "$ACL_AWS_ACCESS_KEY_ID" ] || [ -z "$ACL_AWS_SECRET_ACCESS_KEY" ]; then
      log 2 "direct ACL calls require the following env vars: AWS_CANONICAL_ID, ACL_AWS_CANONICAL_ID, ACL_AWS_ACCESS_KEY_ID, ACL_AWS_SECRET_ACCESS_KEY"
      return 1
    fi
    echo "$AWS_CANONICAL_ID"
    echo "$ACL_AWS_CANONICAL_ID"
    echo "$ACL_AWS_ACCESS_KEY_ID"
    echo "$ACL_AWS_SECRET_ACCESS_KEY"
  else
    echo "$AWS_ACCESS_KEY_ID"
    if ! create_user_versitygw "$1" "$2" "user"; then
      log 2 "error creating versitygw user"
      return 1
    fi
    # shellcheck disable=SC2154
    echo "$1"
    echo "$1"
    # shellcheck disable=SC2154
    echo "$2"
  fi
}

put_acl_rest() {
  if [ $# -ne 2 ]; then
    log 2 "'put_acl_rest' requires bucket name, ACL file"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" ACL_FILE="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/response.txt" ./tests/rest_scripts/put_bucket_acl.sh); then
    log 2 "error attempting to put bucket acl: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    log 5 "response returned code: $result (error: $(cat "$TEST_FILE_FOLDER/response.txt"))"
    return 1
  fi
  return 0
}

put_invalid_acl_rest_verify_failure() {
  if [ $# -ne 2 ]; then
    log 2 "'put_invalid_acl_rest_verify_failure' requires bucket name, ACL file"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" ACL_FILE="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/response.txt" ./tests/rest_scripts/put_bucket_acl.sh); then
    log 2 "error attempting to put bucket acl: $result"
    return 1
  fi
  if [ "$result" != "400" ]; then
    log 2 "response returned code: $result (error: $(cat "$TEST_FILE_FOLDER/response.txt"))"
    return 1
  fi
  if ! error_code=$(xmllint --xpath '//*[local-name()="Code"]/text()' "$TEST_FILE_FOLDER/response.txt" 2>&1); then
    log 2 "error getting error code: $error_code"
    return 1
  fi
  if [ "$error_code" != "MalformedACLError" ]; then
    log 2 "invalid error code, expected 'MalformedACLError', was '$error_code'"
    return 1
  fi
  return 0
}

put_canned_acl_rest() {
  if [ $# -ne 2 ]; then
    log 2 "'put_canned_acl_rest' requires bucket name, canned acl"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" CANNED_ACL="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/response.txt" ./tests/rest_scripts/put_bucket_acl.sh); then
    log 2 "error attempting to put bucket acl: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    log 2 "response code '$result' (message: $(cat "$TEST_FILE_FOLDER/response.txt"))"
    return 1
  fi
  return 0
}

# param: bucket name
# return 0 for success, 1 for failure
check_ownership_rule_and_reset_acl() {
  if [ $# -ne 1 ]; then
    log 2 "'check_ownership_rule_and_reset_acl' requires bucket name"
    return 1
  fi
  if ! get_bucket_ownership_controls "$1"; then
    log 2 "error getting bucket ownership controls"
    return 1
  fi
  # shellcheck disable=SC2154
  if ! object_ownership_rule=$(echo "$bucket_ownership_controls" | jq -r ".OwnershipControls.Rules[0].ObjectOwnership" 2>&1); then
    log 2 "error getting object ownership rule: $object_ownership_rule"
    return 1
  fi
  if [[ $object_ownership_rule != "BucketOwnerEnforced" ]] && ! reset_bucket_acl "$1"; then
    log 2 "error resetting bucket ACL"
    return 1
  fi
}

# param: bucket name
# return 1 for failure, 0 for success
get_object_ownership_rule_and_update_acl() {
  if [ $# -ne 1 ]; then
    log 2 "'get_object_ownership_rule_and_update_acl' requires bucket name"
    return 1
  fi
  if ! get_object_ownership_rule "$1"; then
    log 2 "error getting object ownership rule"
    return 1
  fi
  log 5 "object ownership rule: $object_ownership_rule"
  if [[ "$object_ownership_rule" != "BucketOwnerEnforced" ]] && ! put_bucket_canned_acl "$1" "private"; then
    log 2 "error resetting bucket ACLs"
    return 1
  fi
}

# get object acl
# param: bucket name, object path
# export acl for success, return 1 for error
get_object_acl() {
  if [ $# -ne 2 ]; then
    log 2 "'get_object_acl' requires bucket name, object key"
    return 1
  fi
  local exit_code=0
  acl=$(aws --no-verify-ssl s3api get-object-acl --bucket "$1" --key "$2" 2>&1) || exit_code="$?"
  if [ $exit_code -ne 0 ]; then
    log 2 "Error getting object ACLs: $acl"
    return 1
  fi
  export acl
}

@@ -25,6 +25,7 @@ check_attributes_after_upload() {
    log 2 "'check_attributes_after_upload' requires file size"
    return 1
  fi
  log 5 "attributes: $(cat "$TEST_FILE_FOLDER/attributes.txt")"
  if ! object_size=$(xmllint --xpath '//*[local-name()="ObjectSize"]/text()' "$TEST_FILE_FOLDER/attributes.txt" 2>&1); then
    log 2 "error getting object size: $object_size"
    return 1
@@ -54,10 +55,11 @@ check_attributes_after_upload() {
    log 2 "unexpected parts count, expected 4, was $parts_count"
    return 1
  fi
  return 1
}

check_attributes_invalid_param() {
  if [ "$1" -ne 1 ]; then
  if [ $# -ne 1 ]; then
    log 2 "'check_attributes_invalid_param' requires test file"
    return 1
  fi
@@ -110,4 +112,21 @@ add_and_check_checksum() {
    log 2 "empty checksum"
    return 1
  fi
}

get_etag_attribute_rest() {
  if [ $# -ne 3 ]; then
    log 2 "'get_etag_attribute_rest' requires bucket name, object key, expected etag"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" ATTRIBUTES="ETag" OUTPUT_FILE="$TEST_FILE_FOLDER/attributes.txt" ./tests/rest_scripts/get_object_attributes.sh); then
    log 2 "error attempting to get object info: $result"
    return 1
  fi
  log 5 "attributes: $(cat "$TEST_FILE_FOLDER/attributes.txt")"
  if ! check_xml_element "$TEST_FILE_FOLDER/attributes.txt" "$3" "GetObjectAttributesResponse" "ETag"; then
    log 2 "etag mismatch"
    return 1
  fi
  return 0
}
@@ -51,4 +51,18 @@ abort_all_multipart_uploads() {
    fi
  done <<< "$lines"
  return 0
}
}

remove_insecure_request_warning() {
  if [[ $# -ne 1 ]]; then
    log 2 "remove insecure request warning requires input lines"
    return 1
  fi
  parsed_output=()
  while IFS= read -r line; do
    if [[ $line != *InsecureRequestWarning* ]]; then
      parsed_output+=("$line")
    fi
  done <<< "$1"
  export parsed_output
}

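A brief usage sketch (illustrative, not from the diff): the helper filters urllib3's InsecureRequestWarning noise out of captured CLI output and exports what remains as the parsed_output array.

remove_insecure_request_warning "$output"
# parsed_output now holds every line except the InsecureRequestWarning ones
for line in "${parsed_output[@]}"; do
  echo "$line"
done
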
@@ -1,6 +1,9 @@
#!/usr/bin/env bash

source ./tests/util/util_acl.sh
source ./tests/util/util_multipart_abort.sh
source ./tests/util/util_policy.sh
source ./tests/util/util_retention.sh

# recursively delete an AWS bucket
# param: client, bucket name
@@ -74,23 +77,21 @@ clear_bucket_s3api() {
    return 1
  fi

  #run check_ownership_rule_and_reset_acl "$1"
  #assert_success "error checking ownership rule and resetting acl"
  if ! check_ownership_rule_and_reset_acl "$1"; then
    log 2 "error checking ownership rule and resetting acl"
    return 1
  fi

  # shellcheck disable=SC2154
  if [[ $lock_config_exists == true ]] && ! put_object_lock_configuration_disabled "$1"; then
    log 2 "error disabling object lock config"
    return 1
  fi
  #if ! put_bucket_versioning "s3api" "$1" "Suspended"; then
  #  log 2 "error suspending bucket versioning"
  #  return 1
  #fi

  #if ! change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$1" "$AWS_ACCESS_KEY_ID"; then
  #  log 2 "error changing bucket owner back to root"
  #  return 1
  #fi
  if [ "$RUN_USERS" == "true" ] && ! change_bucket_owner "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$1" "$AWS_ACCESS_KEY_ID"; then
    log 2 "error changing bucket owner back to root"
    return 1
  fi
}

# params: bucket name
@@ -213,12 +214,12 @@ bucket_cleanup() {
# return 0 for success, 1 for error
bucket_cleanup_if_bucket_exists() {
  log 6 "bucket_cleanup_if_bucket_exists"
  if [ $# -ne 2 ]; then
    log 2 "'bucket_cleanup_if_bucket_exists' requires client, bucket name"
  if [ $# -lt 2 ]; then
    log 2 "'bucket_cleanup_if_bucket_exists' requires client, bucket name, bucket known to exist (optional)"
    return 1
  fi

  if bucket_exists "$1" "$2"; then
  if [ "$3" == "true" ] || bucket_exists "$1" "$2"; then
    if ! bucket_cleanup "$1" "$2"; then
      log 2 "error deleting bucket and/or contents"
      return 1
@@ -254,12 +255,16 @@ setup_bucket() {
    return 1
  fi

  if ! bucket_exists "$1" "$2" && [[ $RECREATE_BUCKETS == "false" ]]; then
    log 2 "When RECREATE_BUCKETS isn't set to \"true\", buckets should be pre-created by user"
    return 1
  bucket_exists="true"
  if ! bucket_exists "$1" "$2"; then
    if [[ $RECREATE_BUCKETS == "false" ]]; then
      log 2 "When RECREATE_BUCKETS isn't set to \"true\", buckets should be pre-created by user"
      return 1
    fi
    bucket_exists="false"
  fi

  if ! bucket_cleanup_if_bucket_exists "$1" "$2"; then
  if ! bucket_cleanup_if_bucket_exists "$1" "$2" "$bucket_exists"; then
    log 2 "error deleting bucket or contents if they exist"
    return 1
  fi

@@ -15,4 +15,82 @@ block_delete_object_without_permission() {
    return 1
  fi
  return 0
}
}

delete_object_empty_bucket_check_error() {
  if ! result=$(OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="" ./tests/rest_scripts/delete_objects.sh); then
    log 2 "error deleting objects: $result"
    return 1
  fi
  log 5 "result: $(cat "$TEST_FILE_FOLDER/result.txt")"
  if ! error=$(xmllint --xpath "Error" "$TEST_FILE_FOLDER/result.txt" 2>&1); then
    log 2 "error getting XML error data: $error"
    return 1
  fi
  if ! check_xml_element <(echo "$error") "MethodNotAllowed" "Code"; then
    log 2 "Code mismatch"
    return 1
  fi
  if ! check_xml_element <(echo "$error") "POST" "Method"; then
    log 2 "Method mismatch"
    return 1
  fi
  if ! check_xml_element <(echo "$error") "SERVICE" "ResourceType"; then
    log 2 "ResourceType mismatch"
    return 1
  fi
  return 0
}

delete_objects_no_content_md5_header() {
  if [ $# -ne 1 ]; then
    log 2 "'delete_objects_no_content_md5_header' requires bucket name"
    return 1
  fi
  data="<Delete xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">
  <Object>
    <Key>dontcare</Key>
  </Object>
  <Object>
    <Key>dontcareeither</Key>
  </Object>
</Delete>"

  if ! result=$(OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" COMMAND_LOG="$COMMAND_LOG" PAYLOAD="$data" BUCKET_NAME="$1" HAS_CONTENT_MD5="false" ./tests/rest_scripts/delete_objects.sh); then
    log 2 "error deleting objects: $result"
    return 1
  fi
  if [ "$result" != "400" ]; then
    log 2 "expected response code '400', actual '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
    return 1
  fi
  if ! check_xml_element "$TEST_FILE_FOLDER/result.txt" "InvalidRequest" "Error" "Code"; then
    log 2 "error checking error element"
    return 1
  fi
}

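For context (an aside, not part of the diff): DeleteObjects is one of the S3 calls that requires a Content-MD5 header, the base64-encoded binary MD5 of the request body, which the helper above deliberately omits to provoke the 400. A compliant request would compute it roughly like this:

content_md5=$(printf '%s' "$data" | openssl md5 -binary | base64)
# ...and send it with the request, e.g. -H "content-md5: $content_md5"
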
delete_objects_verify_success() {
  if [ $# -ne 3 ]; then
    log 2 "'delete_objects_verify_success' requires bucket name, two objects"
    return 1
  fi
  data="<Delete xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">
  <Object>
    <Key>$2</Key>
  </Object>
  <Object>
    <Key>$3</Key>
  </Object>
</Delete>"

  if ! result=$(OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" COMMAND_LOG="$COMMAND_LOG" PAYLOAD="$data" BUCKET_NAME="$1" ./tests/rest_scripts/delete_objects.sh); then
    log 2 "error deleting objects: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    log 2 "expected '200', was '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
    return 1
  fi
  return 0
}

@@ -25,12 +25,6 @@ create_test_files() {
    log 2 "'create_test_files' requires file names"
    return 1
  fi
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
      return 1
    fi
  fi
  for name in "$@"; do
    if ! create_test_file "$name"; then
      log 2 "error creating test file"
@@ -46,12 +40,6 @@ create_test_file() {
    log 2 "'create_test_file' requires filename, size (optional)"
    return 1
  fi
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
      return 1
    fi
  fi
  if [[ -e "$TEST_FILE_FOLDER/$1" ]]; then
    if ! error=$(rm "$TEST_FILE_FOLDER/$1" 2>&1); then
      log 2 "error removing existing file: $error"
@@ -84,12 +72,6 @@ create_test_folder() {
    log 2 "'create_test_folder' requires folder names"
    return 1
  fi
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
      return 1
    fi
  fi
  for name in "$@"; do
    if ! error=$(mkdir -p "$TEST_FILE_FOLDER"/"$name" 2>&1); then
      log 2 "error creating folder $name: $error"
@@ -136,9 +118,7 @@ split_file() {
  fi

  local error
  local split_result
  error=$(split -a 1 -d -b "$part_size" "$1" "$1"-) || split_result=$?
  if [[ $split_result -ne 0 ]]; then
  if ! error=$(split -a 1 -d -b "$part_size" "$1" "$1"- 2>&1); then
    log 2 "error splitting file: $error"
    return 1
  fi
@@ -167,19 +147,6 @@ compare_files() {
  return 1
}

# return 0 on success, 1 on error
create_test_file_folder() {
  log 6 "create_test_file_folder"
  if ! error=$(mkdir -p "$TEST_FILE_FOLDER" 2>&1); then
    # shellcheck disable=SC2035
    if [[ "$error" != *"File exists"* ]]; then
      log 2 "error making test file folder: $error"
      return 1
    fi
  fi
  return 0
}

# generate 160MB file
# input: filename
# fail on error
@@ -189,12 +156,6 @@ create_large_file() {
    log 2 "'create_large_file' requires file name"
    return 1
  fi
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
      return 1
    fi
  fi

  filesize=$((160*1024*1024))
  if ! error=$(dd if=/dev/urandom of="$TEST_FILE_FOLDER"/"$1" bs=1024 count=$((filesize/1024)) 2>&1); then
@@ -211,12 +172,6 @@ create_test_file_count() {
    log 2 "'create_test_file_count' requires number of files"
    return 1
  fi
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
      return 1
    fi
  fi
  for ((i=1;i<=$1;i++)) {
    if ! error=$(touch "$TEST_FILE_FOLDER/file_$i" 2>&1); then
      log 2 "error creating file_$i: $error"

@@ -69,4 +69,54 @@ get_and_verify_metadata() {
    return 1
  fi
  return 0
}
}

get_etag_rest() {
  if [ $# -ne 2 ]; then
    log 2 "'get_etag_rest' requires bucket name, object key"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/head_object.txt" ./tests/rest_scripts/head_object.sh); then
    log 2 "error attempting to get object info: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    log 2 "response code '$result', data: $(cat "$TEST_FILE_FOLDER/head_object.txt")"
    return 1
  fi
  log 5 "head object data: $(cat "$TEST_FILE_FOLDER/head_object.txt")"
  etag_value=$(grep "E[Tt]ag:" "$TEST_FILE_FOLDER/head_object.txt" | sed -n 's/E[Tt]ag: "\([^"]*\)"/\1/p' | tr -d '\r')
  echo "$etag_value"
}

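An illustrative note (not from the diff): the grep/sed pipeline above targets a response header line of the form

ETag: "d41d8cd98f00b204e9800998ecf8427e"

(the hash is a placeholder); sed strips the surrounding quotes, and tr removes the carriage return that raw HTTP responses carry.
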
verify_object_not_found() {
  if [ $# -ne 2 ]; then
    log 2 "'verify_object_not_found' requires bucket name, object key"
    return 1
  fi
  if ! result=$(OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" ./tests/rest_scripts/head_object.sh); then
    log 2 "error getting result: $result"
    return 1
  fi
  if [ "$result" != "404" ]; then
    log 2 "expected '404', was '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
    return 1
  fi
  return 0
}

verify_object_exists() {
  if [ $# -ne 2 ]; then
    log 2 "'verify_object_exists' requires bucket name, object key"
    return 1
  fi
  if ! result=$(OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" ./tests/rest_scripts/head_object.sh); then
    log 2 "error getting result: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    log 2 "expected '200', was '$result' ($(cat "$TEST_FILE_FOLDER/result.txt"))"
    return 1
  fi
  return 0
}

@@ -1,6 +1,7 @@
#!/usr/bin/env bash

source ./tests/commands/list_objects_v2.sh
source ./tests/util/util_xml.sh

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
@@ -198,3 +199,132 @@ check_object_listing_with_prefixes() {
  fi
  return 0
}

list_objects_with_user_rest_verify_access_denied() {
  if [ $# -ne 3 ]; then
    log 2 "'list_objects_with_user_rest_verify_access_denied' requires bucket, username, password"
    return 1
  fi
  if ! result=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/objects.txt" ./tests/rest_scripts/list_objects.sh); then
    log 2 "error attempting to list objects: $result"
    return 1
  fi
  if [ "$result" != "403" ]; then
    log 2 "expected response code of '403', was '$result'"
    return 1
  fi
  error_message="$(cat "$TEST_FILE_FOLDER/objects.txt")"
  if [[ "$error_message" != *"Access Denied"* ]]; then
    log 2 "unexpected error message: $error_message"
    return 1
  fi
  return 0
}

list_objects_with_user_rest_verify_success() {
  if [ $# -ne 4 ]; then
    log 2 "'list_objects_with_user_rest_verify_success' requires bucket, username, password, expected object"
    return 1
  fi
  if ! result=$(AWS_ACCESS_KEY_ID="$2" AWS_SECRET_ACCESS_KEY="$3" COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/objects.txt" ./tests/rest_scripts/list_objects.sh); then
    log 2 "error attempting to list objects: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    log 2 "expected response code of '200', was '$result' (error: $(cat "$TEST_FILE_FOLDER/objects.txt"))"
    return 1
  fi
  if ! key=$(xmllint --xpath '//*[local-name()="Key"]/text()' "$TEST_FILE_FOLDER/objects.txt" 2>&1); then
    log 2 "error getting object key: $key"
    return 1
  fi
  if [ "$key" != "$4" ]; then
    log 2 "expected '$4', was '$key'"
    return 1
  fi
  return 0
}

list_objects_check_params_get_token() {
  if [ $# -ne 4 ]; then
    log 2 "'list_objects_check_params_get_token' requires bucket name, two object keys, version-two flag"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" VERSION_TWO="$4" MAX_KEYS=1 OUTPUT_FILE="$TEST_FILE_FOLDER/objects.txt" ./tests/rest_scripts/list_objects.sh); then
    log 2 "error attempting to list objects: $result"
    return 1
  fi
  log 5 "objects: $(cat "$TEST_FILE_FOLDER/objects.txt")"
  if ! list_bucket_result=$(xmllint --xpath '//*[local-name()="ListBucketResult"]' "$TEST_FILE_FOLDER/objects.txt" 2>&1); then
    log 2 "error getting list bucket result: $list_bucket_result"
    return 1
  fi
  if ! check_xml_element <(echo "$list_bucket_result") "$2" "Key"; then
    log 2 "key mismatch"
    return 1
  fi
  if ! check_xml_element <(echo "$list_bucket_result") "1" "MaxKeys"; then
    log 2 "max keys mismatch"
    return 1
  fi
  if ! check_xml_element <(echo "$list_bucket_result") "1" "KeyCount"; then
    log 2 "key count mismatch"
    return 1
  fi
  if ! check_xml_element <(echo "$list_bucket_result") "true" "IsTruncated"; then
    log 2 "IsTruncated mismatch"
    return 1
  fi
  if ! continuation_token=$(xmllint --xpath '//*[local-name()="NextContinuationToken"]/text()' <(echo "$list_bucket_result") 2>&1); then
    log 2 "error getting next continuation token: $continuation_token"
    return 1
  fi
  echo "$continuation_token"
  return 0
}

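# Usage sketch (illustrative, not part of the diff): a caller can capture the
# echoed NextContinuationToken and resume the V2 listing from it, e.g.
#   token=$(list_objects_check_params_get_token "$BUCKET_ONE_NAME" "$key_one" "$key_two" "TRUE")
#   CONTINUATION_TOKEN="$token" BUCKET_NAME="$BUCKET_ONE_NAME" VERSION_TWO="TRUE" \
#     OUTPUT_FILE="$TEST_FILE_FOLDER/objects.txt" ./tests/rest_scripts/list_objects.sh
# "$key_one" and "$key_two" are hypothetical keys in listing order.
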
list_objects_check_continuation_error() {
  if [ $# -ne 2 ]; then
    log 2 "'list_objects_check_continuation_error' requires bucket name, continuation token"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" VERSION_TWO="TRUE" MAX_KEYS=1 CONTINUATION_TOKEN="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/objects.txt" ./tests/rest_scripts/list_objects.sh); then
    log 2 "error attempting to list objects: $result"
    return 1
  fi
  if [ "$result" != "400" ]; then
    log 2 "expected result code of '400', was '$result'"
    return 1
  fi
  if ! check_xml_element "$TEST_FILE_FOLDER/objects.txt" "InvalidArgument" "Error" "Code"; then
    log 2 "invalid error code"
    return 1
  fi
}

list_objects_v1_check_nextmarker_empty() {
  if [ $# -ne 1 ]; then
    log 2 "'list_objects_v1_check_nextmarker_empty' requires bucket name"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" VERSION_TWO="FALSE" MAX_KEYS=1 OUTPUT_FILE="$TEST_FILE_FOLDER/objects.txt" ./tests/rest_scripts/list_objects.sh); then
    log 2 "error attempting to list objects: $result"
    return 1
  fi
  log 5 "output: $(cat "$TEST_FILE_FOLDER/objects.txt")"
  if ! next_marker=$(xmllint --xpath '//*[local-name()="NextMarker"]' "$TEST_FILE_FOLDER/objects.txt" 2>&1); then
    if [[ "$next_marker" != *"XPath set is empty"* ]]; then
      log 2 "unexpected error: $next_marker"
      return 1
    fi
    return 0
  fi
  log 5 "next marker: $next_marker"
  marker_text=$(xmllint --xpath 'string(/NextMarker)' <(echo "$next_marker") 2>&1)
  log 5 "marker text: $marker_text"
  if [[ "$marker_text" != *"Document is empty"* ]]; then
    log 2 "NextMarker text should be empty, but is $marker_text"
    return 1
  fi
  return 0
}

@@ -14,208 +14,6 @@
# specific language governing permissions and limitations
# under the License.

create_upload_and_test_parts_listing() {
  if [ $# -ne 2 ]; then
    log 2 "'create_upload_and_test_parts_listing' requires test file, policy_file"
    return 1
  fi
  if ! create_multipart_upload_with_user "$BUCKET_ONE_NAME" "$1" "$USERNAME_ONE" "$PASSWORD_ONE"; then
    log 2 "error creating multipart upload with user"
    return 1
  fi

  # shellcheck disable=SC2154
  if list_parts_with_user "$USERNAME_ONE" "$PASSWORD_ONE" "$BUCKET_ONE_NAME" "$1" "$upload_id"; then
    log 2 "list parts with user succeeded despite lack of policy permissions"
    return 1
  fi

  if ! setup_policy_with_single_statement "$TEST_FILE_FOLDER/$2" "2012-10-17" "Allow" "$USERNAME_ONE" "s3:ListMultipartUploadParts" "arn:aws:s3:::$BUCKET_ONE_NAME/*"; then
    log 2 "error setting up policy"
    return 1
  fi

  if ! put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$2"; then
    log 2 "error putting policy"
    return 1
  fi

  if ! list_parts_with_user "$USERNAME_ONE" "$PASSWORD_ONE" "$BUCKET_ONE_NAME" "$1" "$upload_id"; then
    log 2 "error listing parts after policy add"
    return 1
  fi
  return 0
}

start_multipart_upload_list_check_parts() {
  if [ $# -ne 3 ]; then
    log 2 "'start_multipart_upload_list_check_parts' requires bucket, key, original source"
    return 1
  fi
  if ! start_multipart_upload_and_list_parts "$1" "$2" "$3" 4; then
    log 2 "error starting upload"
    return 1
  fi

  declare -a parts_map
  # shellcheck disable=SC2154
  log 5 "parts: $parts"
  for i in {0..3}; do
    if ! parse_parts_and_etags "$i"; then
      log 2 "error parsing part $i"
      return 1
    fi
  done
  if [[ ${#parts_map[@]} -eq 0 ]]; then
    log 2 "error loading multipart upload parts to check"
    return 1
  fi

  for i in {0..3}; do
    if ! compare_parts_to_listed_parts "$i"; then
      log 2 "error comparing parts to listed parts"
      return 1
    fi
  done
  return 0
}

parse_parts_and_etags() {
  if [ $# -ne 1 ]; then
    log 2 "'parse_parts_and_etags' requires part id"
    return 1
  fi
  local part_number
  local etag
  # shellcheck disable=SC2154
  if ! part=$(echo "$parts" | grep -v "InsecureRequestWarning" | jq -r ".[$1]" 2>&1); then
    log 2 "error getting part: $part"
    return 1
  fi
  if ! part_number=$(echo "$part" | jq ".PartNumber" 2>&1); then
    log 2 "error parsing part number: $part_number"
    return 1
  fi
  if [[ $part_number == "" ]]; then
    log 2 "error: blank part number"
    return 1
  fi
  if ! etag=$(echo "$part" | jq ".ETag" 2>&1); then
    log 2 "error parsing etag: $etag"
    return 1
  fi
  if [[ $etag == "" ]]; then
    log 2 "error: blank etag"
    return 1
  fi
  # shellcheck disable=SC2004
  parts_map[$part_number]=$etag
}

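# Illustrative note (not part of the diff): "$parts" above is assumed to be the
# JSON array exported by multipart_upload_before_completion, shaped like
#   [{"ETag": "\"etag-1\"", "PartNumber": 1}, {"ETag": "\"etag-2\"", "PartNumber": 2}]
# so ".[0]" selects the first part and parts_map ends up keyed by part number.
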
compare_parts_to_listed_parts() {
  if [ $# -ne 1 ]; then
    log 2 "'compare_parts_to_listed_parts' requires part number"
    return 1
  fi
  local part_number
  local etag
  # shellcheck disable=SC2154
  if ! listed_part=$(echo "$listed_parts" | grep -v "InsecureRequestWarning" | jq -r ".Parts[$1]" 2>&1); then
    log 2 "error parsing listed part: $listed_part"
    return 1
  fi
  if ! part_number=$(echo "$listed_part" | jq ".PartNumber" 2>&1); then
    log 2 "error parsing listed part number: $part_number"
    return 1
  fi
  if ! etag=$(echo "$listed_part" | jq ".ETag" 2>&1); then
    log 2 "error getting listed etag: $etag"
    return 1
  fi
  if [[ ${parts_map[$part_number]} != "$etag" ]]; then
    log 2 "error: etags don't match (part number: $part_number, etags ${parts_map[$part_number]},$etag)"
    return 1
  fi
}

# list parts of an unfinished multipart upload
# params: bucket, key, local file location, and parts to split into before upload
# export parts on success, return 1 for error
start_multipart_upload_and_list_parts() {
  if [ $# -ne 4 ]; then
    log 2 "list multipart upload parts command requires bucket, key, file, and part count"
    return 1
  fi

  if ! multipart_upload_before_completion "$1" "$2" "$3" "$4"; then
    log 2 "error performing pre-completion multipart upload"
    return 1
  fi

  if ! list_parts "$1" "$2" "$upload_id"; then
    log 2 "Error listing multipart upload parts: $listed_parts"
    return 1
  fi
  export listed_parts
}

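# Usage sketch (illustrative, not part of the diff): callers consume the
# exported "$listed_parts" JSON after starting the upload, e.g.
#   start_multipart_upload_and_list_parts "$BUCKET_ONE_NAME" "$key" "$TEST_FILE_FOLDER/$key" 4
#   first_etag=$(echo "$listed_parts" | jq -r '.Parts[0].ETag')
# "$key" is a hypothetical object key created earlier in the test.
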
create_list_check_multipart_uploads() {
  if [ $# -ne 3 ]; then
    log 2 "list multipart uploads command requires bucket and two keys"
    return 1
  fi
  if ! create_and_list_multipart_uploads "$1" "$2" "$3"; then
    log 2 "error creating and listing multipart uploads"
    return 1
  fi
  # shellcheck disable=SC2154
  log 5 "Uploads: $uploads"
  raw_uploads=$(echo "$uploads" | grep -v "InsecureRequestWarning")
  if ! key_one=$(echo "$raw_uploads" | jq -r '.Uploads[0].Key' 2>&1); then
    log 2 "error getting key one: $key_one"
    return 1
  fi
  if ! key_two=$(echo "$raw_uploads" | jq -r '.Uploads[1].Key' 2>&1); then
    log 2 "error getting key two: $key_two"
    return 1
  fi
  if [[ "$2" != "$key_one" ]]; then
    log 2 "Key mismatch ($2, $key_one)"
    return 1
  fi
  if [[ "$3" != "$key_two" ]]; then
    log 2 "Key mismatch ($3, $key_two)"
    return 1
  fi
  return 0
}

# list unfinished multipart uploads
# params: bucket, key one, key two
# export current two uploads on success, return 1 for error
create_and_list_multipart_uploads() {
  if [ $# -ne 3 ]; then
    log 2 "list multipart uploads command requires bucket and two keys"
    return 1
  fi

  if ! create_multipart_upload "$1" "$2"; then
    log 2 "error creating multipart upload"
    return 1
  fi

  if ! create_multipart_upload "$1" "$3"; then
    log 2 "error creating multipart upload two"
    return 1
  fi

  if ! list_multipart_uploads "$1"; then
    log 2 "error listing uploads"
    return 1
  fi
  return 0
}

multipart_upload_from_bucket() {
  if [ $# -ne 4 ]; then
    log 2 "multipart upload from bucket command missing bucket, copy source, key, and/or part count"
@@ -236,16 +34,18 @@ multipart_upload_from_bucket() {
}

  if ! create_multipart_upload "$1" "$2-copy"; then
    log 2 "error running first multpart upload"
    log 2 "error running first multipart upload"
    return 1
  fi

  parts="["
  for ((i = 1; i <= $4; i++)); do
    # shellcheck disable=SC2154
    if ! upload_part_copy "$1" "$2-copy" "$upload_id" "$2" "$i"; then
      log 2 "error uploading part $i"
      return 1
    fi
    # shellcheck disable=SC2154
    parts+="{\"ETag\": $etag, \"PartNumber\": $i}"
    if [[ $i -ne $4 ]]; then
      parts+=","
@@ -260,6 +60,25 @@ multipart_upload_from_bucket() {
  return 0
}

split_and_put_file() {
  if [ $# -ne 4 ]; then
    log 2 "'split_and_put_file' requires bucket, key, copy source, part count"
    return 1
  fi
  if ! split_file "$3" "$4"; then
    log 2 "error splitting file"
    return 1
  fi
  for ((i=0;i<$4;i++)) {
    log 5 "key: $2, file info: $(ls -l "$3"-"$i")"
    if ! put_object "s3api" "$3-$i" "$1" "$2-$i"; then
      log 2 "error copying object"
      return 1
    fi
  }
  return 0
}

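# Illustrative note (not part of the diff): split_file is assumed to name its
# chunks "<source>-0" through "<source>-(n-1)", so the loop above stores them as
# "$2-0" ... "$2-3" for a part count of 4, e.g.
#   split_and_put_file "$BUCKET_ONE_NAME" "$key" "$TEST_FILE_FOLDER/$key" 4
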
multipart_upload_from_bucket_range() {
  if [ $# -ne 5 ]; then
    log 2 "multipart upload from bucket with range command requires bucket, copy source, key, part count, and range"
@@ -301,114 +120,6 @@ multipart_upload_from_bucket_range() {
  return 0
}

# perform all parts of a multipart upload before completion command
# params: bucket, key, file to split and upload, number of file parts to upload
# return: 0 for success, 1 for failure
multipart_upload_before_completion() {
  if [ $# -ne 4 ]; then
    log 2 "multipart upload pre-completion command missing bucket, key, file, and/or part count"
    return 1
  fi

  if ! split_file "$3" "$4"; then
    log 2 "error splitting file"
    return 1
  fi

  if ! create_multipart_upload "$1" "$2"; then
    log 2 "error creating multipart upload"
    return 1
  fi

  parts="["
  for ((i = 1; i <= $4; i++)); do
    # shellcheck disable=SC2154
    if ! upload_part "$1" "$2" "$upload_id" "$3" "$i"; then
      log 2 "error uploading part $i"
      return 1
    fi
    parts+="{\"ETag\": $etag, \"PartNumber\": $i}"
    if [[ $i -ne $4 ]]; then
      parts+=","
    fi
  done
  parts+="]"

  export parts
}

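# Illustrative note (not part of the diff): for a four-part upload, the exported
# "$parts" string is a JSON array such as
#   [{"ETag": "\"etag-1\"", "PartNumber": 1}, ..., {"ETag": "\"etag-4\"", "PartNumber": 4}]
# ready to be passed to complete_multipart_upload.
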
multipart_upload_before_completion_with_params() {
  if [ $# -ne 10 ]; then
    log 2 "multipart upload command missing bucket, key, file, part count, content type, metadata, hold status, lock mode, retain until date, tagging"
    return 1
  fi

  split_file "$3" "$4" || split_result=$?
  if [[ $split_result -ne 0 ]]; then
    log 2 "error splitting file"
    return 1
  fi

  create_multipart_upload_params "$1" "$2" "$5" "$6" "$7" "$8" "$9" "${10}" || local create_result=$?
  if [[ $create_result -ne 0 ]]; then
    log 2 "error creating multipart upload"
    return 1
  fi

  parts="["
  for ((i = 1; i <= $4; i++)); do
    upload_part "$1" "$2" "$upload_id" "$3" "$i" || local upload_result=$?
    if [[ $upload_result -ne 0 ]]; then
      log 2 "error uploading part $i"
      return 1
    fi
    parts+="{\"ETag\": $etag, \"PartNumber\": $i}"
    if [[ $i -ne $4 ]]; then
      parts+=","
    fi
  done
  parts+="]"

  export parts
}

multipart_upload_before_completion_custom() {
  if [ $# -lt 4 ]; then
    log 2 "multipart upload custom command missing bucket, key, file, part count, and/or optional params"
    return 1
  fi

  split_file "$3" "$4" || local split_result=$?
  if [[ $split_result -ne 0 ]]; then
    log 2 "error splitting file"
    return 1
  fi

  # shellcheck disable=SC2086 disable=SC2048
  create_multipart_upload_custom "$1" "$2" ${*:5} || local create_result=$?
  if [[ $create_result -ne 0 ]]; then
    log 2 "error creating multipart upload"
    return 1
  fi
  log 5 "upload ID: $upload_id"

  parts="["
  for ((i = 1; i <= $4; i++)); do
    upload_part "$1" "$2" "$upload_id" "$3" "$i" || local upload_result=$?
    if [[ $upload_result -ne 0 ]]; then
      log 2 "error uploading part $i"
      return 1
    fi
    parts+="{\"ETag\": $etag, \"PartNumber\": $i}"
    if [[ $i -ne $4 ]]; then
      parts+=","
    fi
  done
  parts+="]"

  export parts
}

multipart_upload_custom() {
  if [ $# -lt 4 ]; then
    log 2 "multipart upload custom command missing bucket, key, file, part count, and/or optional additional params"
@@ -416,15 +127,13 @@ multipart_upload_custom() {
  fi

  # shellcheck disable=SC2086 disable=SC2048
  multipart_upload_before_completion_custom "$1" "$2" "$3" "$4" ${*:5} || local result=$?
  if [[ $result -ne 0 ]]; then
  if ! multipart_upload_before_completion_custom "$1" "$2" "$3" "$4" ${*:5}; then
    log 2 "error performing pre-completion multipart upload"
    return 1
  fi

  log 5 "upload ID: $upload_id, parts: $parts"
  complete_multipart_upload "$1" "$2" "$upload_id" "$parts" || local completed=$?
  if [[ $completed -ne 0 ]]; then
  if ! complete_multipart_upload "$1" "$2" "$upload_id" "$parts"; then
    log 2 "Error completing upload"
    return 1
  fi
@@ -437,14 +146,12 @@ multipart_upload() {
    return 1
  fi

  multipart_upload_before_completion "$1" "$2" "$3" "$4" || local result=$?
  if [[ $result -ne 0 ]]; then
  if ! multipart_upload_before_completion "$1" "$2" "$3" "$4"; then
    log 2 "error performing pre-completion multipart upload"
    return 1
  fi

  complete_multipart_upload "$1" "$2" "$upload_id" "$parts" || local completed=$?
  if [[ $completed -ne 0 ]]; then
  if ! complete_multipart_upload "$1" "$2" "$upload_id" "$parts"; then
    log 2 "Error completing upload"
    return 1
  fi
@@ -461,101 +168,20 @@ multipart_upload_with_params() {
  fi
  log 5 "1: $1, 2: $2, 3: $3, 4: $4, 5: $5, 6: $6, 7: $7, 8: $8, 9: $9, 10: ${10}"

  multipart_upload_before_completion_with_params "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9" "${10}" || result=$?
  if [[ $result -ne 0 ]]; then
  if ! multipart_upload_before_completion_with_params "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9" "${10}"; then
    log 2 "error performing pre-completion multipart upload"
    return 1
  fi
  log 5 "Upload parts: $parts"

  complete_multipart_upload "$1" "$2" "$upload_id" "$parts" || local completed=$?
  if [[ $completed -ne 0 ]]; then
  if ! complete_multipart_upload "$1" "$2" "$upload_id" "$parts"; then
    log 2 "Error completing upload"
    return 1
  fi
  return 0
}

|
||||
if [ $# -ne 2 ]; then
|
||||
log 2 "'create_upload_and_get_id_rest' requires bucket, key"
|
||||
return 1
|
||||
fi
|
||||
if ! result=$(COMMAND_LOG=$COMMAND_LOG BUCKET_NAME=$1 OBJECT_KEY=$2 OUTPUT_FILE="$TEST_FILE_FOLDER/output.txt" ./tests/rest_scripts/create_multipart_upload.sh); then
|
||||
log 2 "error creating multipart upload: $result"
|
||||
return 1
|
||||
fi
|
||||
if [ "$result" != "200" ]; then
|
||||
log 2 "error: response code: $result, output: $(cat "$TEST_FILE_FOLDER/output.txt")"
|
||||
return 1
|
||||
fi
|
||||
log 5 "multipart upload create info: $(cat "$TEST_FILE_FOLDER/output.txt")"
|
||||
if ! upload_id=$(xmllint --xpath '//*[local-name()="UploadId"]/text()' "$TEST_FILE_FOLDER/output.txt" 2>&1); then
|
||||
log 2 "error getting upload ID: $upload_id"
|
||||
return 1
|
||||
fi
|
||||
log 5 "upload ID: $upload_id"
|
||||
return 0
|
||||
}
|
||||
|
||||
multipart_upload_range_too_large() {
|
||||
if [ $# -ne 3 ]; then
|
||||
log 2 "'multipart_upload_range_too_large' requires bucket name, key, file location"
|
||||
return 1
|
||||
fi
|
||||
if multipart_upload_from_bucket_range "$1" "$2" "$3" 4 "bytes=0-1000000000"; then
|
||||
log 2 "multipart upload succeeded despite overly large range"
|
||||
return 1
|
||||
fi
|
||||
log 5 "error: $upload_part_copy_error"
|
||||
if [[ $upload_part_copy_error != *"Range specified is not valid"* ]] && [[ $upload_part_copy_error != *"InvalidRange"* ]]; then
|
||||
log 2 "unexpected error: $upload_part_copy_error"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
list_and_check_upload() {
|
||||
if [ $# -lt 2 ]; then
|
||||
log 2 "'list_and_check_upload' requires bucket, key, upload ID (optional)"
|
||||
return 1
|
||||
fi
|
||||
if ! uploads=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/uploads.txt" ./tests/rest_scripts/list_multipart_uploads.sh); then
|
||||
log 2 "error listing multipart uploads before upload: $result"
|
||||
return 1
|
||||
fi
|
||||
if ! upload_count=$(xmllint --xpath 'count(//*[local-name()="Upload"])' "$TEST_FILE_FOLDER/uploads.txt" 2>&1); then
|
||||
log 2 "error retrieving upload count: $upload_count"
|
||||
return 1
|
||||
fi
|
||||
if [[ (( $# == 2 ) && ( $upload_count != 0 )) ]]; then
|
||||
log 2 "upload count mismatch (expected 0, actual $upload_count)"
|
||||
return 1
|
||||
elif [[ (( $# == 3 ) && ( $upload_count != 1 )) ]]; then
|
||||
log 2 "upload count mismatch (expected 1, actual $upload_count)"
|
||||
return 1
|
||||
fi
|
||||
if [ $# -eq 2 ]; then
|
||||
return 0
|
||||
fi
|
||||
if ! key=$(xmllint --xpath '//*[local-name()="Key"]/text()' "$TEST_FILE_FOLDER/uploads.txt" 2>&1); then
|
||||
log 2 "error retrieving key: $key"
|
||||
return 1
|
||||
fi
|
||||
if [ "$key" != "$2" ]; then
|
||||
log 2 "key mismatch (expected '$2', actual '$key')"
|
||||
return 1
|
||||
fi
|
||||
if ! upload_id=$(xmllint --xpath '//*[local-name()="UploadId"]/text()' "$TEST_FILE_FOLDER/uploads.txt" 2>&1); then
|
||||
log 2 "error retrieving upload ID: $upload_id"
|
||||
return 1
|
||||
fi
|
||||
if [ "$upload_id" != "$3" ]; then
|
||||
log 2 "upload ID mismatch (expected '$3', actual '$upload_id')"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
run_and_verify_multipart_upload_with_valid_range() {
|
||||
if [ $# -ne 3 ]; then
|
||||
@@ -583,24 +209,129 @@ run_and_verify_multipart_upload_with_valid_range() {
|
||||
return 0
|
||||
}
|
||||
|
||||
list_check_multipart_upload_key() {
|
||||
if [ $# -ne 4 ]; then
|
||||
log 2 "'list_check_multipart_upload_key' requires bucket, username, password, expected key"
|
||||
create_upload_part_copy_rest() {
|
||||
if [ $# -ne 3 ]; then
|
||||
log 2 "'run_and_verify_multipart_upload_with_valid_range' requires bucket, key, >20MB file"
|
||||
return 1
|
||||
fi
|
||||
if ! list_multipart_uploads_with_user "$1" "$2" "$3"; then
|
||||
log 2 "error listing multipart uploads with user"
|
||||
if ! split_and_put_file "$1" "$2" "$3" 4; then
|
||||
log 2 "error splitting and putting file"
|
||||
return 1
|
||||
fi
|
||||
# shellcheck disable=SC2154
|
||||
log 5 "$uploads"
|
||||
if ! upload_key=$(echo "$uploads" | grep -v "InsecureRequestWarning" | jq -r ".Uploads[0].Key" 2>&1); then
|
||||
log 2 "error parsing upload key from uploads message: $upload_key"
|
||||
if ! create_upload_and_get_id_rest "$1" "$2"; then
|
||||
log 2 "error creating upload and getting ID"
|
||||
return 1
|
||||
fi
|
||||
if [[ "$4" != "$upload_key" ]]; then
|
||||
log 2 "upload key doesn't match file marked as being uploaded (expected: '$4', actual: '$upload_key')"
|
||||
parts_payload=""
|
||||
for ((i=0; i<=3; i++)); do
|
||||
part_number=$((i+1))
|
||||
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" PART_NUMBER="$part_number" UPLOAD_ID="$upload_id" PART_LOCATION="$1/$2-$i" OUTPUT_FILE="$TEST_FILE_FOLDER/response.txt" ./tests/rest_scripts/upload_part_copy.sh); then
|
||||
# shellcheck disable=SC2154
|
||||
log 2 "error uploading part $i: $result"
|
||||
return 1
|
||||
fi
|
||||
log 5 "result: $result"
|
||||
if [ "$result" != "200" ]; then
|
||||
log 2 "error uploading part $i: $(cat "$TEST_FILE_FOLDER/response.txt")"
|
||||
return 1
|
||||
fi
|
||||
if ! etag=$(xmllint --xpath '//*[local-name()="ETag"]/text()' "$TEST_FILE_FOLDER/response.txt" 2>&1); then
|
||||
log 2 "error retrieving etag: $etag"
|
||||
return 1
|
||||
fi
|
||||
parts_payload+="<Part><ETag>$etag</ETag><PartNumber>$part_number</PartNumber></Part>"
|
||||
done
|
||||
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$upload_id" PARTS="$parts_payload" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/complete_multipart_upload.sh); then
|
||||
log 2 "error completing multipart upload: $result"
|
||||
return 1
|
||||
fi
|
||||
if [ "$result" != "200" ]; then
|
||||
log 2 "complete multipart upload returned code $result: $(cat "$TEST_FILE_FOLDER/result.txt")"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
create_upload_finish_wrong_etag() {
  if [ $# -ne 2 ]; then
    log 2 "'create_upload_finish_wrong_etag' requires bucket, key"
    return 1
  fi

  etag="gibberish"
  part_number=1
  if ! create_upload_and_get_id_rest "$1" "$2"; then
    log 2 "error creating upload and getting ID"
    return 1
  fi
  parts_payload="<Part><ETag>$etag</ETag><PartNumber>$part_number</PartNumber></Part>"
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OBJECT_KEY="$2" UPLOAD_ID="$upload_id" PARTS="$parts_payload" OUTPUT_FILE="$TEST_FILE_FOLDER/result.txt" ./tests/rest_scripts/complete_multipart_upload.sh); then
    log 2 "error completing multipart upload: $result"
    return 1
  fi
  if [ "$result" != "400" ]; then
    log 2 "complete multipart upload returned code $result: $(cat "$TEST_FILE_FOLDER/result.txt")"
    return 1
  fi
  if ! error=$(xmllint --xpath '//*[local-name()="Error"]' "$TEST_FILE_FOLDER/result.txt" 2>&1); then
    log 2 "error retrieving error info: $error"
    return 1
  fi
  if ! check_xml_element <(echo "$error") "InvalidPart" "Code"; then
    log 2 "code mismatch"
    return 1
  fi
  if ! check_xml_element <(echo "$error") "$upload_id" "UploadId"; then
    log 2 "upload ID mismatch"
    return 1
  fi
  if ! check_xml_element <(echo "$error") "$part_number" "PartNumber"; then
    log 2 "part number mismatch"
    return 1
  fi
  if ! check_xml_element <(echo "$error") "$etag" "ETag"; then
    log 2 "etag mismatch"
    return 1
  fi
  return 0
}

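# Illustrative note (not part of the diff): "$parts_payload" is the <Part> list
# for CompleteMultipartUpload, e.g.
#   <Part><ETag>"etag-1"</ETag><PartNumber>1</PartNumber></Part>
# which complete_multipart_upload.sh is assumed to wrap in a
# <CompleteMultipartUpload> envelope before sending.
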
setup_multipart_upload_with_params() {
  if [ $# -ne 2 ]; then
    log 2 "'setup_multipart_upload_with_params' requires bucket name, file name"
    return 1
  fi
  os_name="$(uname)"
  if [[ "$os_name" == "Darwin" ]]; then
    now=$(date -u +"%Y-%m-%dT%H:%M:%S")
    later=$(date -j -v +15S -f "%Y-%m-%dT%H:%M:%S" "$now" +"%Y-%m-%dT%H:%M:%S")
  else
    now=$(date +"%Y-%m-%dT%H:%M:%S")
    later=$(date -d "$now 15 seconds" +"%Y-%m-%dT%H:%M:%S")
  fi

  if ! create_test_files "$2"; then
    log 2 "error creating test file"
    return 1
  fi

  if ! result=$(dd if=/dev/urandom of="$TEST_FILE_FOLDER/$2" bs=5M count=1 2>&1); then
    log 2 "error creating large file: $result"
    return 1
  fi

  if ! bucket_cleanup_if_bucket_exists "s3api" "$BUCKET_ONE_NAME"; then
    log 2 "error cleaning up bucket"
    return 1
  fi
  # in static bucket config, bucket will still exist
  if ! bucket_exists "s3api" "$BUCKET_ONE_NAME"; then
    if ! create_bucket_object_lock_enabled "$BUCKET_ONE_NAME"; then
      log 2 "error creating bucket with object lock enabled"
      return 1
    fi
  fi
  log 5 "later in function: $later"
  echo "$later"
  return 0
}

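# Usage sketch (illustrative, not part of the diff): callers capture the echoed
# timestamp as the retain-until date for the object-lock upload, e.g.
#   retain_until=$(setup_multipart_upload_with_params "$BUCKET_ONE_NAME" "$test_file")
# "$test_file" is a hypothetical test file name.
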
423
tests/util/util_multipart_before_completion.sh
Normal file
@@ -0,0 +1,423 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

create_upload_and_test_parts_listing() {
  if [ $# -ne 2 ]; then
    log 2 "'create_upload_and_test_parts_listing' requires test file, policy_file"
    return 1
  fi
  if ! create_multipart_upload_with_user "$BUCKET_ONE_NAME" "$1" "$USERNAME_ONE" "$PASSWORD_ONE"; then
    log 2 "error creating multipart upload with user"
    return 1
  fi

  # shellcheck disable=SC2154
  if list_parts_with_user "$USERNAME_ONE" "$PASSWORD_ONE" "$BUCKET_ONE_NAME" "$1" "$upload_id"; then
    log 2 "list parts with user succeeded despite lack of policy permissions"
    return 1
  fi

  if ! setup_policy_with_single_statement "$TEST_FILE_FOLDER/$2" "2012-10-17" "Allow" "$USERNAME_ONE" "s3:ListMultipartUploadParts" "arn:aws:s3:::$BUCKET_ONE_NAME/*"; then
    log 2 "error setting up policy"
    return 1
  fi

  if ! put_bucket_policy "s3api" "$BUCKET_ONE_NAME" "$TEST_FILE_FOLDER/$2"; then
    log 2 "error putting policy"
    return 1
  fi

  if ! list_parts_with_user "$USERNAME_ONE" "$PASSWORD_ONE" "$BUCKET_ONE_NAME" "$1" "$upload_id"; then
    log 2 "error listing parts after policy add"
    return 1
  fi
  return 0
}

start_multipart_upload_list_check_parts() {
  if [ $# -ne 3 ]; then
    log 2 "'start_multipart_upload_list_check_parts' requires bucket, key, original source"
    return 1
  fi
  if ! start_multipart_upload_and_list_parts "$1" "$2" "$3" 4; then
    log 2 "error starting upload"
    return 1
  fi

  declare -a parts_map
  # shellcheck disable=SC2154
  log 5 "parts: $parts"
  for i in {0..3}; do
    if ! parse_parts_and_etags "$i"; then
      log 2 "error parsing part $i"
      return 1
    fi
  done
  if [[ ${#parts_map[@]} -eq 0 ]]; then
    log 2 "error loading multipart upload parts to check"
    return 1
  fi

  for i in {0..3}; do
    if ! compare_parts_to_listed_parts "$i"; then
      log 2 "error comparing parts to listed parts"
      return 1
    fi
  done
  return 0
}

parse_parts_and_etags() {
  if [ $# -ne 1 ]; then
    log 2 "'parse_parts_and_etags' requires part id"
    return 1
  fi
  local part_number
  local etag
  # shellcheck disable=SC2154
  if ! part=$(echo "$parts" | grep -v "InsecureRequestWarning" | jq -r ".[$1]" 2>&1); then
    log 2 "error getting part: $part"
    return 1
  fi
  if ! part_number=$(echo "$part" | jq ".PartNumber" 2>&1); then
    log 2 "error parsing part number: $part_number"
    return 1
  fi
  if [[ $part_number == "" ]]; then
    log 2 "error: blank part number"
    return 1
  fi
  if ! etag=$(echo "$part" | jq ".ETag" 2>&1); then
    log 2 "error parsing etag: $etag"
    return 1
  fi
  if [[ $etag == "" ]]; then
    log 2 "error: blank etag"
    return 1
  fi
  # shellcheck disable=SC2004
  parts_map[$part_number]=$etag
}

compare_parts_to_listed_parts() {
  if [ $# -ne 1 ]; then
    log 2 "'compare_parts_to_listed_parts' requires part number"
    return 1
  fi
  local part_number
  local etag
  # shellcheck disable=SC2154
  if ! listed_part=$(echo "$listed_parts" | grep -v "InsecureRequestWarning" | jq -r ".Parts[$1]" 2>&1); then
    log 2 "error parsing listed part: $listed_part"
    return 1
  fi
  if ! part_number=$(echo "$listed_part" | jq ".PartNumber" 2>&1); then
    log 2 "error parsing listed part number: $part_number"
    return 1
  fi
  if ! etag=$(echo "$listed_part" | jq ".ETag" 2>&1); then
    log 2 "error getting listed etag: $etag"
    return 1
  fi
  if [[ ${parts_map[$part_number]} != "$etag" ]]; then
    log 2 "error: etags don't match (part number: $part_number, etags ${parts_map[$part_number]},$etag)"
    return 1
  fi
}

# list parts of an unfinished multipart upload
# params: bucket, key, local file location, and parts to split into before upload
# export parts on success, return 1 for error
start_multipart_upload_and_list_parts() {
  if [ $# -ne 4 ]; then
    log 2 "list multipart upload parts command requires bucket, key, file, and part count"
    return 1
  fi

  if ! multipart_upload_before_completion "$1" "$2" "$3" "$4"; then
    log 2 "error performing pre-completion multipart upload"
    return 1
  fi

  if ! list_parts "$1" "$2" "$upload_id"; then
    log 2 "Error listing multipart upload parts: $listed_parts"
    return 1
  fi
  export listed_parts
}

create_list_check_multipart_uploads() {
  if [ $# -ne 3 ]; then
    log 2 "list multipart uploads command requires bucket and two keys"
    return 1
  fi
  if ! create_and_list_multipart_uploads "$1" "$2" "$3"; then
    log 2 "error creating and listing multipart uploads"
    return 1
  fi
  # shellcheck disable=SC2154
  log 5 "Uploads: $uploads"
  raw_uploads=$(echo "$uploads" | grep -v "InsecureRequestWarning")
  if ! key_one=$(echo "$raw_uploads" | jq -r '.Uploads[0].Key' 2>&1); then
    log 2 "error getting key one: $key_one"
    return 1
  fi
  if ! key_two=$(echo "$raw_uploads" | jq -r '.Uploads[1].Key' 2>&1); then
    log 2 "error getting key two: $key_two"
    return 1
  fi
  if [[ "$2" != "$key_one" ]]; then
    log 2 "Key mismatch ($2, $key_one)"
    return 1
  fi
  if [[ "$3" != "$key_two" ]]; then
    log 2 "Key mismatch ($3, $key_two)"
    return 1
  fi
  return 0
}

# list unfinished multipart uploads
# params: bucket, key one, key two
# export current two uploads on success, return 1 for error
create_and_list_multipart_uploads() {
  if [ $# -ne 3 ]; then
    log 2 "list multipart uploads command requires bucket and two keys"
    return 1
  fi

  if ! create_multipart_upload "$1" "$2"; then
    log 2 "error creating multipart upload"
    return 1
  fi

  if ! create_multipart_upload "$1" "$3"; then
    log 2 "error creating multipart upload two"
    return 1
  fi

  if ! list_multipart_uploads "$1"; then
    log 2 "error listing uploads"
    return 1
  fi
  return 0
}

# perform all parts of a multipart upload before completion command
# params: bucket, key, file to split and upload, number of file parts to upload
# return: 0 for success, 1 for failure
multipart_upload_before_completion() {
  if [ $# -ne 4 ]; then
    log 2 "multipart upload pre-completion command missing bucket, key, file, and/or part count"
    return 1
  fi

  if ! split_file "$3" "$4"; then
    log 2 "error splitting file"
    return 1
  fi

  if ! create_multipart_upload "$1" "$2"; then
    log 2 "error creating multipart upload"
    return 1
  fi

  parts="["
  for ((i = 1; i <= $4; i++)); do
    # shellcheck disable=SC2154
    if ! upload_part "$1" "$2" "$upload_id" "$3" "$i"; then
      log 2 "error uploading part $i"
      return 1
    fi
    parts+="{\"ETag\": $etag, \"PartNumber\": $i}"
    if [[ $i -ne $4 ]]; then
      parts+=","
    fi
  done
  parts+="]"

  export parts
}

multipart_upload_before_completion_with_params() {
  if [ $# -ne 10 ]; then
    log 2 "multipart upload command missing bucket, key, file, part count, content type, metadata, hold status, lock mode, retain until date, tagging"
    return 1
  fi

  if ! result=$(split_file "$3" "$4" 2>&1); then
    log 2 "error splitting file: $result"
    return 1
  fi

  if ! create_multipart_upload_params "$1" "$2" "$5" "$6" "$7" "$8" "$9" "${10}"; then
    log 2 "error creating multipart upload"
    return 1
  fi

  parts="["
  for ((i = 1; i <= $4; i++)); do
    if ! upload_part "$1" "$2" "$upload_id" "$3" "$i"; then
      log 2 "error uploading part $i"
      return 1
    fi
    parts+="{\"ETag\": $etag, \"PartNumber\": $i}"
    if [[ $i -ne $4 ]]; then
      parts+=","
    fi
  done
  parts+="]"

  export parts
}

multipart_upload_before_completion_custom() {
  if [ $# -lt 4 ]; then
    log 2 "multipart upload custom command missing bucket, key, file, part count, and/or optional params"
    return 1
  fi

  if ! result=$(split_file "$3" "$4" 2>&1); then
    log 2 "error splitting file: $result"
    return 1
  fi

  # shellcheck disable=SC2086 disable=SC2048
  if ! create_multipart_upload_custom "$1" "$2" ${*:5}; then
    log 2 "error creating multipart upload"
    return 1
  fi
  log 5 "upload ID: $upload_id"

  parts="["
  for ((i = 1; i <= $4; i++)); do
    if ! upload_part "$1" "$2" "$upload_id" "$3" "$i"; then
      log 2 "error uploading part $i"
      return 1
    fi
    parts+="{\"ETag\": $etag, \"PartNumber\": $i}"
    if [[ $i -ne $4 ]]; then
      parts+=","
    fi
  done
  parts+="]"

  export parts
}

create_upload_and_get_id_rest() {
  if [ $# -ne 2 ]; then
    log 2 "'create_upload_and_get_id_rest' requires bucket, key"
    return 1
  fi
  if ! result=$(COMMAND_LOG=$COMMAND_LOG BUCKET_NAME=$1 OBJECT_KEY=$2 OUTPUT_FILE="$TEST_FILE_FOLDER/output.txt" ./tests/rest_scripts/create_multipart_upload.sh); then
    log 2 "error creating multipart upload: $result"
    return 1
  fi
  if [ "$result" != "200" ]; then
    log 2 "error: response code: $result, output: $(cat "$TEST_FILE_FOLDER/output.txt")"
    return 1
  fi
  log 5 "multipart upload create info: $(cat "$TEST_FILE_FOLDER/output.txt")"
  if ! upload_id=$(xmllint --xpath '//*[local-name()="UploadId"]/text()' "$TEST_FILE_FOLDER/output.txt" 2>&1); then
    log 2 "error getting upload ID: $upload_id"
    return 1
  fi
  log 5 "upload ID: $upload_id"
  return 0
}

multipart_upload_range_too_large() {
  if [ $# -ne 3 ]; then
    log 2 "'multipart_upload_range_too_large' requires bucket name, key, file location"
    return 1
  fi
  if multipart_upload_from_bucket_range "$1" "$2" "$3" 4 "bytes=0-1000000000"; then
    log 2 "multipart upload succeeded despite overly large range"
    return 1
  fi
  # shellcheck disable=SC2154
  log 5 "error: $upload_part_copy_error"
  if [[ $upload_part_copy_error != *"Range specified is not valid"* ]] && [[ $upload_part_copy_error != *"InvalidRange"* ]]; then
    log 2 "unexpected error: $upload_part_copy_error"
    return 1
  fi
  return 0
}

list_and_check_upload() {
  if [ $# -lt 2 ]; then
    log 2 "'list_and_check_upload' requires bucket, key, upload ID (optional)"
    return 1
  fi
  if ! uploads=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/uploads.txt" ./tests/rest_scripts/list_multipart_uploads.sh); then
    log 2 "error listing multipart uploads before upload: $uploads"
    return 1
  fi
  if ! upload_count=$(xmllint --xpath 'count(//*[local-name()="Upload"])' "$TEST_FILE_FOLDER/uploads.txt" 2>&1); then
    log 2 "error retrieving upload count: $upload_count"
    return 1
  fi
  if [[ (( $# == 2 ) && ( $upload_count != 0 )) ]]; then
    log 2 "upload count mismatch (expected 0, actual $upload_count)"
    return 1
  elif [[ (( $# == 3 ) && ( $upload_count != 1 )) ]]; then
    log 2 "upload count mismatch (expected 1, actual $upload_count)"
    return 1
  fi
  if [ $# -eq 2 ]; then
    return 0
  fi
  if ! key=$(xmllint --xpath '//*[local-name()="Key"]/text()' "$TEST_FILE_FOLDER/uploads.txt" 2>&1); then
    log 2 "error retrieving key: $key"
    return 1
  fi
  if [ "$key" != "$2" ]; then
    log 2 "key mismatch (expected '$2', actual '$key')"
    return 1
  fi
  if ! upload_id=$(xmllint --xpath '//*[local-name()="UploadId"]/text()' "$TEST_FILE_FOLDER/uploads.txt" 2>&1); then
    log 2 "error retrieving upload ID: $upload_id"
    return 1
  fi
  if [ "$upload_id" != "$3" ]; then
    log 2 "upload ID mismatch (expected '$3', actual '$upload_id')"
    return 1
  fi
  return 0
}

list_check_multipart_upload_key() {
  if [ $# -ne 4 ]; then
    log 2 "'list_check_multipart_upload_key' requires bucket, username, password, expected key"
    return 1
  fi
  if ! list_multipart_uploads_with_user "$1" "$2" "$3"; then
    log 2 "error listing multipart uploads with user"
    return 1
  fi
  # shellcheck disable=SC2154
  log 5 "$uploads"
  if ! upload_key=$(echo "$uploads" | grep -v "InsecureRequestWarning" | jq -r ".Uploads[0].Key" 2>&1); then
    log 2 "error parsing upload key from uploads message: $upload_key"
    return 1
  fi
  if [[ "$4" != "$upload_key" ]]; then
    log 2 "upload key doesn't match file marked as being uploaded (expected: '$4', actual: '$upload_key')"
    return 1
  fi
  return 0
}
@@ -47,52 +47,6 @@ source ./tests/commands/upload_part_copy.sh
source ./tests/commands/upload_part.sh
source ./tests/util/util_users.sh

# params: bucket name
# return 0 for success, 1 for error
add_governance_bypass_policy() {
  if [[ $# -ne 1 ]]; then
    log 2 "'add governance bypass policy' command requires bucket name"
    return 1
  fi
  if [[ -z "$GITHUB_ACTIONS" ]]; then
    if ! create_test_file_folder; then
      log 2 "error creating test file folder"
      return 1
    fi
  fi
  cat <<EOF > "$TEST_FILE_FOLDER/policy-bypass-governance.txt"
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": "*",
      "Action": "s3:BypassGovernanceRetention",
      "Resource": "arn:aws:s3:::$1/*"
    }
  ]
}
EOF
  if ! put_bucket_policy "s3api" "$1" "$TEST_FILE_FOLDER/policy-bypass-governance.txt"; then
    log 2 "error putting governance bypass policy"
    return 1
  fi
  return 0
}

log_bucket_policy() {
  if [ $# -ne 1 ]; then
    log 2 "'log_bucket_policy' requires bucket name"
    return
  fi
  if ! get_bucket_policy "s3api" "$1"; then
    log 2 "error getting bucket policy"
    return
  fi
  # shellcheck disable=SC2154
  log 5 "BUCKET POLICY: $bucket_policy"
}

# param: bucket name
# return 0 for success, 1 for failure
list_and_delete_objects() {
@@ -120,28 +74,6 @@ list_and_delete_objects() {
  return 0
}

# param: bucket name
# return 0 for success, 1 for failure
check_ownership_rule_and_reset_acl() {
  if [ $# -ne 1 ]; then
    log 2 "'check_ownership_rule_and_reset_acl' requires bucket name"
    return 1
  fi
  if ! get_bucket_ownership_controls "$1"; then
    log 2 "error getting bucket ownership controls"
    return 1
  fi
  # shellcheck disable=SC2154
  if ! object_ownership_rule=$(echo "$bucket_ownership_controls" | jq -r ".OwnershipControls.Rules[0].ObjectOwnership" 2>&1); then
    log 2 "error getting object ownership rule: $object_ownership_rule"
    return 1
  fi
  if [[ $object_ownership_rule != "BucketOwnerEnforced" ]] && ! reset_bucket_acl "$1"; then
    log 2 "error resetting bucket ACL"
    return 1
  fi
}

check_object_lock_config() {
  if [ $# -ne 1 ]; then
    log 2 "'check_object_lock_config' requires bucket name"
@@ -180,76 +112,6 @@ clear_object_in_bucket() {
  return 0
}

# params: bucket, object, possible WORM error after deletion attempt
# return 0 for success, 1 for no WORM protection, 2 for error
check_for_and_remove_worm_protection() {
  if [ $# -ne 3 ]; then
    log 2 "'check_for_and_remove_worm_protection' command requires bucket, object, error"
    return 2
  fi

  if [[ $3 == *"WORM"* ]]; then
    log 5 "WORM protection found"
    if ! put_object_legal_hold "$1" "$2" "OFF"; then
      log 2 "error removing object legal hold"
      return 2
    fi
    sleep 1
    if [[ $LOG_LEVEL_INT -ge 5 ]]; then
      log_worm_protection "$1" "$2"
    fi
    if ! add_governance_bypass_policy "$1"; then
      log 2 "error adding new governance bypass policy"
      return 2
    fi
    if ! delete_object_bypass_retention "$1" "$2" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"; then
      log 2 "error deleting object after legal hold removal"
      return 2
    fi
  else
    log 5 "no WORM protection found"
    return 1
  fi
  return 0
}

# params: bucket name, object
log_worm_protection() {
  if ! get_object_legal_hold "$1" "$2"; then
    log 2 "error getting object legal hold status"
    return
  fi
  # shellcheck disable=SC2154
  log 5 "LEGAL HOLD: $legal_hold"
  if ! get_object_retention "$1" "$2"; then
    log 2 "error getting object retention"
    # shellcheck disable=SC2154
    if [[ $get_object_retention_error != *"NoSuchObjectLockConfiguration"* ]]; then
      return
    fi
  fi
  # shellcheck disable=SC2154
  log 5 "RETENTION: $retention"
}

# param: bucket name
# return 1 for failure, 0 for success
get_object_ownership_rule_and_update_acl() {
  if [ $# -ne 1 ]; then
    log 2 "'get_object_ownership_rule_and_update_acl' requires bucket name"
    return 1
  fi
  if ! get_object_ownership_rule "$1"; then
    log 2 "error getting object ownership rule"
    return 1
  fi
  log 5 "object ownership rule: $object_ownership_rule"
  if [[ "$object_ownership_rule" != "BucketOwnerEnforced" ]] && ! put_bucket_canned_acl "$1" "private"; then
    log 2 "error resetting bucket ACLs"
    return 1
  fi
}

# check if object exists on S3 via gateway
# param: command, object path
# return 0 for true, 1 for false, 2 for error
@@ -365,20 +227,6 @@ check_and_put_object() {
  return 0
}

remove_insecure_request_warning() {
  if [[ $# -ne 1 ]]; then
    log 2 "remove insecure request warning requires input lines"
    return 1
  fi
  parsed_output=()
  while IFS= read -r line; do
    if [[ $line != *InsecureRequestWarning* ]]; then
      parsed_output+=("$line")
    fi
  done <<< "$1"
  export parsed_output
}

# check if object info (etag) is accessible
# param: path of object
# return 0 for yes, 1 for no, 2 for error
@@ -400,23 +248,6 @@ object_is_accessible() {
  return 0
}

# get object acl
# param: object path
# export acl for success, return 1 for error
get_object_acl() {
  if [ $# -ne 2 ]; then
    log 2 "object ACL command missing object name"
    return 1
  fi
  local exit_code=0
  acl=$(aws --no-verify-ssl s3api get-object-acl --bucket "$1" --key "$2" 2>&1) || exit_code="$?"
  if [ $exit_code -ne 0 ]; then
    log 2 "Error getting object ACLs: $acl"
    return 1
  fi
  export acl
}

# copy a file to/from S3
# params: source, destination
# return 0 for success, 1 for failure
@@ -330,3 +330,16 @@ put_and_check_policy_rest() {
  fi
  return 0
}

log_bucket_policy() {
  if [ $# -ne 1 ]; then
    log 2 "'log_bucket_policy' requires bucket name"
    return
  fi
  if ! get_bucket_policy "s3api" "$1"; then
    log 2 "error getting bucket policy"
    return
  fi
  # shellcheck disable=SC2154
  log 5 "BUCKET POLICY: $bucket_policy"
}

13
tests/util/util_public_access_block.sh
Normal file
@@ -0,0 +1,13 @@
#!/usr/bin/env bash

allow_public_access() {
  if [ $# -ne 1 ]; then
    log 2 "'allow_public_access' requires bucket name"
    return 1
  fi
  if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" BLOCK_PUBLIC_ACLS="FALSE" IGNORE_PUBLIC_ACLS="FALSE" RESTRICT_PUBLIC_BUCKETS="FALSE" OUTPUT_FILE="$TEST_FILE_FOLDER/response.txt" ./tests/rest_scripts/put_public_access_block.sh); then
    log 2 "error putting public access block: $result"
    return 1
  fi
  return 0
}
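
# Usage sketch (illustrative, not part of the diff): run before tests that rely
# on public ACLs or policies so the gateway's public access block settings do
# not reject them, e.g.
#   allow_public_access "$BUCKET_ONE_NAME"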
94
tests/util/util_retention.sh
Normal file
@@ -0,0 +1,94 @@
#!/usr/bin/env bash

# Copyright 2024 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# params: bucket name
# return 0 for success, 1 for error
add_governance_bypass_policy() {
  if [[ $# -ne 1 ]]; then
    log 2 "'add governance bypass policy' command requires bucket name"
    return 1
  fi
  cat <<EOF > "$TEST_FILE_FOLDER/policy-bypass-governance.txt"
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": "*",
      "Action": "s3:BypassGovernanceRetention",
      "Resource": "arn:aws:s3:::$1/*"
    }
  ]
}
EOF
  if ! put_bucket_policy "s3api" "$1" "$TEST_FILE_FOLDER/policy-bypass-governance.txt"; then
    log 2 "error putting governance bypass policy"
    return 1
  fi
  return 0
}

# params: bucket, object, possible WORM error after deletion attempt
# return 0 for success, 1 for no WORM protection, 2 for error
check_for_and_remove_worm_protection() {
  if [ $# -ne 3 ]; then
    log 2 "'check_for_and_remove_worm_protection' command requires bucket, object, error"
    return 2
  fi

  if [[ $3 == *"WORM"* ]]; then
    log 5 "WORM protection found"
    if ! put_object_legal_hold "$1" "$2" "OFF"; then
      log 2 "error removing object legal hold"
      return 2
    fi
    sleep 1
    if [[ $LOG_LEVEL_INT -ge 5 ]]; then
      log_worm_protection "$1" "$2"
    fi
    if ! add_governance_bypass_policy "$1"; then
      log 2 "error adding new governance bypass policy"
      return 2
    fi
    if ! delete_object_bypass_retention "$1" "$2" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"; then
      log 2 "error deleting object after legal hold removal"
      return 2
    fi
  else
    log 5 "no WORM protection found"
    return 1
  fi
  return 0
}

# params: bucket name, object
log_worm_protection() {
  if ! get_object_legal_hold "$1" "$2"; then
    log 2 "error getting object legal hold status"
    return
  fi
  # shellcheck disable=SC2154
  log 5 "LEGAL HOLD: $legal_hold"
  if ! get_object_retention "$1" "$2"; then
    log 2 "error getting object retention"
    # shellcheck disable=SC2154
    if [[ $get_object_retention_error != *"NoSuchObjectLockConfiguration"* ]]; then
      return
    fi
  fi
  # shellcheck disable=SC2154
  log 5 "RETENTION: $retention"
}
@@ -110,10 +110,6 @@ put_user_policy_userplus() {
    log 2 "'put user policy userplus' function requires username"
    return 1
  fi
  if [[ -z "$TEST_FILE_FOLDER" ]] && [[ -z "$GITHUB_ACTIONS" ]] && ! create_test_file_folder; then
    log 2 "unable to create test file folder"
    return 1
  fi

  cat <<EOF > "$TEST_FILE_FOLDER"/user_policy_file
{
@@ -154,10 +150,6 @@ put_user_policy() {
    log 2 "attaching user policy requires user ID, role, bucket name"
    return 1
  fi
  if [[ -z "$TEST_FILE_FOLDER" ]] && [[ -z "$GITHUB_ACTIONS" ]] && ! create_test_file_folder; then
    log 2 "unable to create test file folder"
    return 1
  fi

  case $2 in
    "user")

22
tests/util/util_xml.sh
Normal file
@@ -0,0 +1,22 @@
#!/usr/bin/env bash

check_xml_element() {
  if [ $# -lt 3 ]; then
    log 2 "'check_xml_element' requires data source, expected value, XML tree"
    return 1
  fi
  local xpath='//'
  for tree_val in "${@:3}"; do
    xpath+='*[local-name()="'$tree_val'"]/'
  done
  xpath+='text()'
  if ! xml_val=$(xmllint --xpath "$xpath" "$1" 2>&1); then
    log 2 "error getting XML value matching $xpath: $xml_val"
    return 1
  fi
  if [ "$2" != "$xml_val" ]; then
    log 2 "XML data mismatch, expected '$2', actual '$xml_val'"
    return 1
  fi
  return 0
}
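
# Usage sketch (illustrative, not part of the diff): each trailing argument adds
# one nesting level to the XPath, so
#   check_xml_element "$TEST_FILE_FOLDER/result.txt" "InvalidArgument" "Error" "Code"
# builds //*[local-name()="Error"]/*[local-name()="Code"]/text() and compares
# the text against "InvalidArgument".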
@@ -21,10 +21,6 @@ start_versity_process() {
    log 1 "start versity process function requires number"
    exit 1
  fi
  if ! create_test_file_folder; then
    log 1 "error creating test log folder"
    exit 1
  fi
  build_run_and_log_command
  # shellcheck disable=SC2181
  if [[ $? -ne 0 ]]; then