Compare commits


1 Commit

234 changed files with 6051 additions and 21868 deletions

.github/workflows/betteralign.yml
View File

@@ -0,0 +1,23 @@
name: betteralign
on: pull_request
jobs:
build:
name: Check
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "stable"
id: go
- name: Install betteralign
run: go install github.com/dkorunic/betteralign/cmd/betteralign@latest
- name: Run betteralign
run: betteralign -test_files ./...
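
For context, betteralign reports structs whose field order wastes padding bytes; several hunks below reorder struct fields for exactly this reason. A minimal illustration (not from this repository) of what the tool optimizes, assuming a 64-bit platform:

package main

import (
	"fmt"
	"unsafe"
)

// padded interleaves small and large fields, so the compiler inserts
// 14 bytes of padding: 1 + 7(pad) + 8 + 1 + 7(pad) = 24 bytes.
type padded struct {
	a bool
	b int64
	c bool
}

// aligned places the largest field first, leaving only 6 bytes of
// trailing padding: 8 + 1 + 1 + 6(pad) = 16 bytes.
type aligned struct {
	b int64
	a bool
	c bool
}

func main() {
	fmt.Println(unsafe.Sizeof(padded{}), unsafe.Sizeof(aligned{})) // 24 16
}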

View File

@@ -14,7 +14,6 @@ jobs:
run: |
cp tests/.env.docker.default tests/.env.docker
cp tests/.secrets.default tests/.secrets
# see https://github.com/versity/versitygw/issues/1034
docker build \
--build-arg="GO_LIBRARY=go1.23.1.linux-amd64.tar.gz" \
--build-arg="AWS_CLI=awscli-exe-linux-x86_64.zip" \

View File

@@ -73,17 +73,16 @@ jobs:
RUN_SET: "s3api-user"
RECREATE_BUCKETS: "false"
BACKEND: "posix"
# TODO fix/debug s3 gateway
#- set: "s3api, s3, multipart|object, non-static, folder IAM"
# IAM_TYPE: folder
# RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
# RECREATE_BUCKETS: "true"
# BACKEND: "s3"
#- set: "s3api, s3, policy|user, non-static, folder IAM"
# IAM_TYPE: folder
# RUN_SET: "s3api-policy,s3api-user"
# RECREATE_BUCKETS: "true"
# BACKEND: "s3"
- set: "s3api, s3, multipart|object, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-bucket,s3api-object,s3api-multipart"
RECREATE_BUCKETS: "true"
BACKEND: "s3"
- set: "s3api, s3, policy|user, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3api-policy,s3api-user"
RECREATE_BUCKETS: "true"
BACKEND: "s3"
- set: "s3cmd, posix, file count, non-static, folder IAM"
IAM_TYPE: folder
RUN_SET: "s3cmd-file-count"
@@ -133,14 +132,6 @@ jobs:
run: |
sudo apt-get install libxml2-utils
# see https://github.com/versity/versitygw/issues/1034
- name: Install AWS cli
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.22.35.zip" -o "awscliv2.zip"
unzip -o awscliv2.zip
./aws/install -i ${{ github.workspace }}/aws-cli -b ${{ github.workspace }}/bin
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Build and run
env:
IAM_TYPE: ${{ matrix.IAM_TYPE }}
@@ -172,7 +163,6 @@ jobs:
VERSIONING_DIR: ${{ github.workspace }}/versioning
COMMAND_LOG: command.log
TIME_LOG: time.log
PYTHON_ENV_FOLDER: ${{ github.workspace }}/env
run: |
make testbin
export AWS_ACCESS_KEY_ID=ABCDEFGHIJKLMNOPQRST
@@ -180,7 +170,6 @@ jobs:
export AWS_REGION=us-east-1
export AWS_ACCESS_KEY_ID_TWO=user
export AWS_SECRET_ACCESS_KEY_TWO=pass
export AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile versity
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile versity
aws configure set aws_region $AWS_REGION --profile versity

View File

@@ -40,7 +40,7 @@ Versity Gateway, a simple to use tool for seamless inline translation between AW
The server translates incoming S3 API requests into equivalent operations on the backend service. By leveraging this gateway server, applications can interact with the S3-compatible API on top of existing storage systems. This project enables leveraging existing infrastructure investments while seamlessly integrating with S3-compatible systems, offering increased flexibility and compatibility in managing data storage.
The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage, Versity's open source ScoutFS filesystem, Azure Blob Storage, and other S3 servers.
The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage and Versity's open source ScoutFS filesystem.
The gateway is completely stateless. Multiple Versity Gateway instances may be deployed in a cluster to increase aggregate throughput. The Versity Gateway's stateless architecture allows any request to be serviced by any gateway, thereby distributing workloads and enhancing performance. Load balancers may be used to evenly distribute requests across the cluster of gateways for optimal performance.
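
Because the gateway exposes the S3 API, any stock S3 client can be pointed at it. A minimal sketch with aws-sdk-go-v2 — the endpoint address, port, and path-style setting here are assumptions for illustration, not values taken from this changeset:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("http://localhost:7070") // assumed gateway address
		o.UsePathStyle = true                                // path-style addressing is typical for gateways
	})
	out, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		log.Println(*b.Name)
	}
}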

View File

@@ -17,7 +17,6 @@ package auth
import (
"context"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"strings"
@@ -34,151 +33,46 @@ type ACL struct {
}
type Grantee struct {
Permission Permission
Permission types.Permission
Access string
Type types.Type
}
type GetBucketAclOutput struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlPolicy"`
Owner *types.Owner
AccessControlList AccessControlList
}
type PutBucketAclInput struct {
Bucket *string
ACL types.BucketCannedACL
AccessControlPolicy *AccessControlPolicy
GrantFullControl *string
GrantRead *string
GrantReadACP *string
GrantWrite *string
GrantWriteACP *string
ACL types.BucketCannedACL
}
type AccessControlPolicy struct {
AccessControlList AccessControlList `xml:"AccessControlList"`
Owner *types.Owner
}
func (acp *AccessControlPolicy) Validate() error {
if !acp.AccessControlList.isValid() {
return s3err.GetAPIError(s3err.ErrMalformedACL)
}
// The Owner can't be nil
if acp.Owner == nil {
return s3err.GetAPIError(s3err.ErrMalformedACL)
}
// The Owner ID can't be empty
if acp.Owner.ID == nil || *acp.Owner.ID == "" {
return s3err.GetAPIError(s3err.ErrMalformedACL)
}
return nil
AccessControlList AccessControlList `xml:"AccessControlList"`
}
type AccessControlList struct {
Grants []Grant `xml:"Grant"`
}
// Validates the AccessControlList
func (acl *AccessControlList) isValid() bool {
for _, el := range acl.Grants {
if !el.isValid() {
return false
}
}
return true
}
type Permission string
const (
PermissionFullControl Permission = "FULL_CONTROL"
PermissionWrite Permission = "WRITE"
PermissionWriteAcp Permission = "WRITE_ACP"
PermissionRead Permission = "READ"
PermissionReadAcp Permission = "READ_ACP"
)
// Check if the permission is valid
func (p Permission) isValid() bool {
return p == PermissionFullControl ||
p == PermissionRead ||
p == PermissionReadAcp ||
p == PermissionWrite ||
p == PermissionWriteAcp
}
type Grant struct {
Grantee *Grt `xml:"Grantee"`
Permission Permission `xml:"Permission"`
}
// Checks if Grant is valid
func (g *Grant) isValid() bool {
return g.Permission.isValid() && g.Grantee.isValid()
Grantee *Grt
Permission types.Permission
}
type Grt struct {
XMLNS string `xml:"xmlns:xsi,attr"`
Type types.Type `xml:"xsi:type,attr"`
ID string `xml:"ID"`
}
// Custom Unmarshalling for Grt to parse xsi:type properly
func (g *Grt) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// Iterate through the XML tokens to process the attributes
for _, attr := range start.Attr {
// Check if the attribute is xsi:type and belongs to the xsi namespace
if attr.Name.Space == "http://www.w3.org/2001/XMLSchema-instance" && attr.Name.Local == "type" {
g.Type = types.Type(attr.Value)
}
// Handle xmlns:xsi
if attr.Name.Local == "xmlns:xsi" {
g.XMLNS = attr.Value
}
}
// Decode the inner XML elements like ID
for {
t, err := d.Token()
if err != nil {
return err
}
switch se := t.(type) {
case xml.StartElement:
if se.Name.Local == "ID" {
if err := d.DecodeElement(&g.ID, &se); err != nil {
return err
}
}
case xml.EndElement:
if se.Name.Local == start.Name.Local {
return nil
}
}
}
}
// Validates Grt
func (g *Grt) isValid() bool {
// Validate the Type
// Only these 2 types are supported in the gateway
if g.Type != types.TypeCanonicalUser && g.Type != types.TypeGroup {
return false
}
// The ID prop shouldn't be empty
if g.ID == "" {
return false
}
return true
XMLNS string `xml:"xmlns:xsi,attr"`
XMLXSI types.Type `xml:"xsi:type,attr"`
Type types.Type `xml:"Type"`
ID string `xml:"ID"`
}
func ParseACL(data []byte) (ACL, error) {
@@ -193,32 +87,22 @@ func ParseACL(data []byte) (ACL, error) {
return acl, nil
}
func ParseACLOutput(data []byte, owner string) (GetBucketAclOutput, error) {
grants := []Grant{}
if len(data) == 0 {
return GetBucketAclOutput{
Owner: &types.Owner{
ID: &owner,
},
AccessControlList: AccessControlList{
Grants: grants,
},
}, nil
}
func ParseACLOutput(data []byte) (GetBucketAclOutput, error) {
var acl ACL
if err := json.Unmarshal(data, &acl); err != nil {
return GetBucketAclOutput{}, fmt.Errorf("parse acl: %w", err)
}
grants := []Grant{}
for _, elem := range acl.Grantees {
acs := elem.Access
grants = append(grants, Grant{
Grantee: &Grt{
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
ID: acs,
Type: elem.Type,
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
XMLXSI: elem.Type,
ID: acs,
Type: elem.Type,
},
Permission: elem.Permission,
})
@@ -241,7 +125,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
defaultGrantees := []Grantee{
{
Permission: PermissionFullControl,
Permission: types.PermissionFullControl,
Access: acl.Owner,
Type: types.TypeCanonicalUser,
},
@@ -252,19 +136,19 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
switch input.ACL {
case types.BucketCannedACLPublicRead:
defaultGrantees = append(defaultGrantees, Grantee{
Permission: PermissionRead,
Permission: types.PermissionRead,
Access: "all-users",
Type: types.TypeGroup,
})
case types.BucketCannedACLPublicReadWrite:
defaultGrantees = append(defaultGrantees, []Grantee{
{
Permission: PermissionRead,
Permission: types.PermissionRead,
Access: "all-users",
Type: types.TypeGroup,
},
{
Permission: PermissionWrite,
Permission: types.PermissionWrite,
Access: "all-users",
Type: types.TypeGroup,
},
@@ -281,7 +165,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range fullControlList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionFullControl,
Permission: types.PermissionFullControl,
Type: types.TypeCanonicalUser,
})
}
@@ -291,7 +175,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range readList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionRead,
Permission: types.PermissionRead,
Type: types.TypeCanonicalUser,
})
}
@@ -301,7 +185,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range readACPList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionReadAcp,
Permission: types.PermissionReadAcp,
Type: types.TypeCanonicalUser,
})
}
@@ -311,7 +195,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range writeList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionWrite,
Permission: types.PermissionWrite,
Type: types.TypeCanonicalUser,
})
}
@@ -321,7 +205,7 @@ func UpdateACL(input *PutBucketAclInput, acl ACL, iam IAMService, isAdmin bool)
for _, str := range writeACPList {
defaultGrantees = append(defaultGrantees, Grantee{
Access: str,
Permission: PermissionWriteAcp,
Permission: types.PermissionWriteAcp,
Type: types.TypeCanonicalUser,
})
}
@@ -378,8 +262,8 @@ func CheckIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
result = append(result, acc)
continue
}
if errors.Is(err, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)) {
return nil, err
if err == ErrNotSupported {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
return nil, fmt.Errorf("check user account: %w", err)
}
@@ -402,7 +286,7 @@ func splitUnique(s, divider string) []string {
return result
}
func verifyACL(acl ACL, access string, permission Permission) error {
func verifyACL(acl ACL, access string, permission types.Permission) error {
grantee := Grantee{
Access: access,
Permission: permission,
@@ -410,7 +294,7 @@ func verifyACL(acl ACL, access string, permission Permission) error {
}
granteeFullCtrl := Grantee{
Access: access,
Permission: PermissionFullControl,
Permission: types.PermissionFullControl,
Type: types.TypeCanonicalUser,
}
granteeAllUsers := Grantee{
@@ -468,19 +352,19 @@ func IsAdminOrOwner(acct Account, isRoot bool, acl ACL) error {
}
type AccessOptions struct {
Acl ACL
AclPermission Permission
IsRoot bool
Acc Account
AclPermission types.Permission
Bucket string
Object string
Action Action
Acl ACL
Acc Account
IsRoot bool
Readonly bool
}
func VerifyAccess(ctx context.Context, be backend.Backend, opts AccessOptions) error {
if opts.Readonly {
if opts.AclPermission == PermissionWrite || opts.AclPermission == PermissionWriteAcp {
if opts.AclPermission == types.PermissionWrite || opts.AclPermission == types.PermissionWriteAcp {
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
}
@@ -538,7 +422,7 @@ func VerifyObjectCopyAccess(ctx context.Context, be backend.Backend, copySource
if err := VerifyAccess(ctx, be, AccessOptions{
Acl: srcBucketAcl,
AclPermission: PermissionRead,
AclPermission: types.PermissionRead,
IsRoot: opts.IsRoot,
Acc: opts.Acc,
Bucket: srcBucket,

View File

@@ -22,46 +22,20 @@ import (
"github.com/versity/versitygw/s3err"
)
type policyErr string
func (p policyErr) Error() string {
return string(p)
}
const (
policyErrResourceMismatch = policyErr("Action does not apply to any resource(s) in statement")
policyErrInvalidResource = policyErr("Policy has invalid resource")
policyErrInvalidPrincipal = policyErr("Invalid principal in policy")
policyErrInvalidAction = policyErr("Policy has invalid action")
policyErrInvalidPolicy = policyErr("This policy contains invalid Json")
policyErrInvalidFirstChar = policyErr("Policies must be valid JSON and the first byte must be '{'")
policyErrEmptyStatement = policyErr("Could not parse the policy: Statement is empty!")
policyErrMissingStatementField = policyErr("Missing required field Statement")
var (
errResourceMismatch = errors.New("Action does not apply to any resource(s) in statement")
//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
errInvalidResource = errors.New("Policy has invalid resource")
//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
errInvalidPrincipal = errors.New("Invalid principal in policy")
//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
errInvalidAction = errors.New("Policy has invalid action")
)
type BucketPolicy struct {
Statement []BucketPolicyItem `json:"Statement"`
}
func (bp *BucketPolicy) UnmarshalJSON(data []byte) error {
var tmp struct {
Statement *[]BucketPolicyItem `json:"Statement"`
}
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
// If Statement is nil (not present in JSON), return an error
if tmp.Statement == nil {
return policyErrMissingStatementField
}
// Assign the parsed value to the actual struct
bp.Statement = *tmp.Statement
return nil
}
func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
for _, statement := range bp.Statement {
err := statement.Validate(bucket, iam)
@@ -74,26 +48,25 @@ func (bp *BucketPolicy) Validate(bucket string, iam IAMService) error {
}
func (bp *BucketPolicy) isAllowed(principal string, action Action, resource string) bool {
var isAllowed bool
for _, statement := range bp.Statement {
if statement.findMatch(principal, action, resource) {
switch statement.Effect {
case BucketPolicyAccessTypeAllow:
isAllowed = true
return true
case BucketPolicyAccessTypeDeny:
return false
}
}
}
return isAllowed
return false
}
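
A note on the two isAllowed variants above (illustration, not part of the diff): they differ when an Allow and a Deny statement both match the same principal, action, and resource.

// Given two matching statements in order [Allow, Deny]:
//   - the accumulating variant records the Allow, keeps scanning, hits the
//     Deny, and returns false (an explicit deny overrides any allow);
//   - the early-return variant returns true at the Allow and never reaches
//     the Deny (first matching statement wins).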
type BucketPolicyItem struct {
Effect BucketPolicyAccessType `json:"Effect"`
Principals Principals `json:"Principal"`
Actions Actions `json:"Action"`
Resources Resources `json:"Resource"`
Effect BucketPolicyAccessType `json:"Effect"`
}
func (bpi *BucketPolicyItem) Validate(bucket string, iam IAMService) error {
@@ -116,10 +89,10 @@ func (bpi *BucketPolicyItem) Validate(bucket string, iam IAMService) error {
break
}
if *isObjectAction && !containsObjectAction {
return policyErrResourceMismatch
return errResourceMismatch
}
if !*isObjectAction && !containsBucketAction {
return policyErrResourceMismatch
return errResourceMismatch
}
}
@@ -143,20 +116,14 @@ func getMalformedPolicyError(err error) error {
}
func ValidatePolicyDocument(policyBin []byte, bucket string, iam IAMService) error {
if len(policyBin) == 0 || policyBin[0] != '{' {
return getMalformedPolicyError(policyErrInvalidFirstChar)
}
var policy BucketPolicy
if err := json.Unmarshal(policyBin, &policy); err != nil {
var pe policyErr
if errors.As(err, &pe) {
return getMalformedPolicyError(err)
}
return getMalformedPolicyError(policyErrInvalidPolicy)
return getMalformedPolicyError(err)
}
if len(policy.Statement) == 0 {
return getMalformedPolicyError(policyErrEmptyStatement)
//lint:ignore ST1005 Reason: This error message is intended for end-user clarity and follows their expectations
return getMalformedPolicyError(errors.New("Could not parse the policy: Statement is empty!"))
}
if err := policy.Validate(bucket, iam); err != nil {

View File

@@ -58,8 +58,6 @@ const (
BypassGovernanceRetentionAction Action = "s3:BypassGovernanceRetention"
PutBucketOwnershipControlsAction Action = "s3:PutBucketOwnershipControls"
GetBucketOwnershipControlsAction Action = "s3:GetBucketOwnershipControls"
PutBucketCorsAction Action = "s3:PutBucketCORS"
GetBucketCorsAction Action = "s3:GetBucketCORS"
AllActions Action = "s3:*"
)
@@ -99,8 +97,6 @@ var supportedActionList = map[Action]struct{}{
BypassGovernanceRetentionAction: {},
PutBucketOwnershipControlsAction: {},
GetBucketOwnershipControlsAction: {},
PutBucketCorsAction: {},
GetBucketCorsAction: {},
AllActions: {},
}
@@ -129,7 +125,7 @@ var supportedObjectActionList = map[Action]struct{}{
// Validates Action: it must either wildcard-match against the supported actions list or be present in it
func (a Action) IsValid() error {
if !strings.HasPrefix(string(a), "s3:") {
return policyErrInvalidAction
return errInvalidAction
}
if a == AllActions {
@@ -144,12 +140,12 @@ func (a Action) IsValid() error {
}
}
return policyErrInvalidAction
return errInvalidAction
}
_, found := supportedActionList[a]
if !found {
return policyErrInvalidAction
return errInvalidAction
}
return nil
}
@@ -195,7 +191,7 @@ func (a *Actions) UnmarshalJSON(data []byte) error {
var err error
if err = json.Unmarshal(data, &ss); err == nil {
if len(ss) == 0 {
return policyErrInvalidAction
return errInvalidAction
}
*a = make(Actions)
for _, s := range ss {
@@ -208,7 +204,7 @@ func (a *Actions) UnmarshalJSON(data []byte) error {
var s string
if err = json.Unmarshal(data, &s); err == nil {
if s == "" {
return policyErrInvalidAction
return errInvalidAction
}
*a = make(Actions)
err = a.Add(s)

View File

@@ -36,7 +36,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
if err = json.Unmarshal(data, &ss); err == nil {
if len(ss) == 0 {
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
*p = make(Principals)
for _, s := range ss {
@@ -45,7 +45,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
return nil
} else if err = json.Unmarshal(data, &s); err == nil {
if s == "" {
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
*p = make(Principals)
p.Add(s)
@@ -53,7 +53,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
return nil
} else if err = json.Unmarshal(data, &k); err == nil {
if k.AWS == "" {
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
*p = make(Principals)
p.Add(k.AWS)
@@ -65,7 +65,7 @@ func (p *Principals) UnmarshalJSON(data []byte) error {
}
if err = json.Unmarshal(data, &sk); err == nil {
if len(sk.AWS) == 0 {
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
*p = make(Principals)
for _, s := range sk.AWS {
@@ -97,7 +97,7 @@ func (p Principals) Validate(iam IAMService) error {
if len(p) == 1 {
return nil
}
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
accs, err := CheckIfAccountsExist(p.ToSlice(), iam)
@@ -105,7 +105,7 @@ func (p Principals) Validate(iam IAMService) error {
return err
}
if len(accs) > 0 {
return policyErrInvalidPrincipal
return errInvalidPrincipal
}
return nil

View File

@@ -29,7 +29,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
var err error
if err = json.Unmarshal(data, &ss); err == nil {
if len(ss) == 0 {
return policyErrInvalidResource
return errInvalidResource
}
*r = make(Resources)
for _, s := range ss {
@@ -42,7 +42,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
var s string
if err = json.Unmarshal(data, &s); err == nil {
if s == "" {
return policyErrInvalidResource
return errInvalidResource
}
*r = make(Resources)
err = r.Add(s)
@@ -59,7 +59,7 @@ func (r *Resources) UnmarshalJSON(data []byte) error {
func (r Resources) Add(rc string) error {
ok, pattern := isValidResource(rc)
if !ok {
return policyErrInvalidResource
return errInvalidResource
}
r[pattern] = struct{}{}
@@ -93,7 +93,7 @@ func (r Resources) ContainsBucketPattern() bool {
func (r Resources) Validate(bucket string) error {
for resource := range r {
if !strings.HasPrefix(resource, bucket) {
return policyErrInvalidResource
return errInvalidResource
}
}
@@ -102,45 +102,21 @@ func (r Resources) Validate(bucket string) error {
func (r Resources) FindMatch(resource string) bool {
for res := range r {
if r.Match(res, resource) {
return true
if strings.HasSuffix(res, "*") {
pattern := strings.TrimSuffix(res, "*")
if strings.HasPrefix(resource, pattern) {
return true
}
} else {
if res == resource {
return true
}
}
}
return false
}
// Match checks if the input string matches the given pattern with wildcards (`*`, `?`).
// - `?` matches exactly one occurrence of any character.
// - `*` matches arbitrarily many (including zero) occurrences of any character.
func (r Resources) Match(pattern, input string) bool {
pIdx, sIdx := 0, 0
starIdx, matchIdx := -1, 0
for sIdx < len(input) {
if pIdx < len(pattern) && (pattern[pIdx] == '?' || pattern[pIdx] == input[sIdx]) {
sIdx++
pIdx++
} else if pIdx < len(pattern) && pattern[pIdx] == '*' {
starIdx = pIdx
matchIdx = sIdx
pIdx++
} else if starIdx != -1 {
pIdx = starIdx + 1
matchIdx++
sIdx = matchIdx
} else {
return false
}
}
for pIdx < len(pattern) && pattern[pIdx] == '*' {
pIdx++
}
return pIdx == len(pattern)
}
// Checks that the resource has the arn prefix and does not start with /
func isValidResource(rc string) (isValid bool, pattern string) {
if !strings.HasPrefix(rc, ResourceArnPrefix) {

View File

@@ -1,182 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auth
import (
"encoding/json"
"testing"
)
func TestUnmarshalJSON(t *testing.T) {
var r Resources
cases := []struct {
input string
expected int
wantErr bool
}{
{`"arn:aws:s3:::my-bucket/*"`, 1, false},
{`["arn:aws:s3:::my-bucket/*", "arn:aws:s3:::other-bucket"]`, 2, false},
{`""`, 0, true},
{`[]`, 0, true},
{`["invalid-bucket"]`, 0, true},
}
for _, tc := range cases {
r = Resources{}
err := json.Unmarshal([]byte(tc.input), &r)
if (err != nil) != tc.wantErr {
t.Errorf("Unexpected error status for input %s: %v", tc.input, err)
}
if len(r) != tc.expected {
t.Errorf("Expected %d resources, got %d", tc.expected, len(r))
}
}
}
func TestAdd(t *testing.T) {
r := Resources{}
cases := []struct {
input string
wantErr bool
}{
{"arn:aws:s3:::valid-bucket/*", false},
{"arn:aws:s3:::valid-bucket/object", false},
{"invalid-bucket/*", true},
{"/invalid-start", true},
}
for _, tc := range cases {
err := r.Add(tc.input)
if (err != nil) != tc.wantErr {
t.Errorf("Unexpected error status for input %s: %v", tc.input, err)
}
}
}
func TestContainsObjectPattern(t *testing.T) {
cases := []struct {
resources []string
expected bool
}{
{[]string{"arn:aws:s3:::my-bucket/my-object"}, true},
{[]string{"arn:aws:s3:::my-bucket/*"}, true},
{[]string{"arn:aws:s3:::my-bucket"}, false},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if r.ContainsObjectPattern() != tc.expected {
t.Errorf("Expected object pattern to be %v for %v", tc.expected, tc.resources)
}
}
}
func TestContainsBucketPattern(t *testing.T) {
cases := []struct {
resources []string
expected bool
}{
{[]string{"arn:aws:s3:::my-bucket"}, true},
{[]string{"arn:aws:s3:::my-bucket/*"}, false},
{[]string{"arn:aws:s3:::my-bucket/object"}, false},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if r.ContainsBucketPattern() != tc.expected {
t.Errorf("Expected bucket pattern to be %v for %v", tc.expected, tc.resources)
}
}
}
func TestValidate(t *testing.T) {
cases := []struct {
resources []string
bucket string
expected bool
}{
{[]string{"arn:aws:s3:::valid-bucket/*"}, "valid-bucket", true},
{[]string{"arn:aws:s3:::wrong-bucket/*"}, "valid-bucket", false},
{[]string{"arn:aws:s3:::valid-bucket/*", "arn:aws:s3:::valid-bucket/object/*"}, "valid-bucket", true},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if (r.Validate(tc.bucket) == nil) != tc.expected {
t.Errorf("Expected validation to be %v for bucket %s", tc.expected, tc.bucket)
}
}
}
func TestFindMatch(t *testing.T) {
cases := []struct {
resources []string
input string
expected bool
}{
{[]string{"arn:aws:s3:::my-bucket/*"}, "my-bucket/my-object", true},
{[]string{"arn:aws:s3:::my-bucket/object"}, "other-bucket/my-object", false},
{[]string{"arn:aws:s3:::my-bucket/object"}, "my-bucket/object", true},
{[]string{"arn:aws:s3:::my-bucket/*", "arn:aws:s3:::other-bucket/*"}, "other-bucket/something", true},
}
for _, tc := range cases {
r := Resources{}
for _, res := range tc.resources {
r.Add(res)
}
if r.FindMatch(tc.input) != tc.expected {
t.Errorf("Expected FindMatch to be %v for input %s", tc.expected, tc.input)
}
}
}
func TestMatch(t *testing.T) {
r := Resources{}
cases := []struct {
pattern string
input string
expected bool
}{
{"my-bucket/*", "my-bucket/object", true},
{"my-bucket/?bject", "my-bucket/object", true},
{"my-bucket/*", "other-bucket/object", false},
{"*", "any-bucket/object", true},
{"my-bucket/*", "my-bucket/subdir/object", true},
{"my-bucket/*", "other-bucket", false},
{"my-bucket/*/*", "my-bucket/hello", false},
{"my-bucket/*/*", "my-bucket/hello/world", true},
{"foo/???/bar", "foo/qux/bar", true},
{"foo/???/bar", "foo/quxx/bar", false},
{"foo/???/bar/*/?", "foo/qux/bar/hello/g", true},
{"foo/???/bar/*/?", "foo/qux/bar/hello/smth", false},
}
for _, tc := range cases {
if r.Match(tc.pattern, tc.input) != tc.expected {
t.Errorf("Match(%s, %s) failed, expected %v", tc.pattern, tc.input, tc.expected)
}
}
}

View File

@@ -93,7 +93,6 @@ var (
)
type Opts struct {
RootAccount Account
Dir string
LDAPServerURL string
LDAPBindDN string
@@ -119,17 +118,12 @@ type Opts struct {
S3Region string
S3Bucket string
S3Endpoint string
RootAccount Account
CacheTTL int
CachePrune int
S3DisableSSlVerfiy bool
S3Debug bool
CacheDisable bool
CacheTTL int
CachePrune int
IpaHost string
IpaVaultName string
IpaUser string
IpaPassword string
IpaInsecure bool
IpaDebug bool
}
func New(o *Opts) (IAMService, error) {
@@ -155,13 +149,10 @@ func New(o *Opts) (IAMService, error) {
o.VaultMountPath, o.VaultRootToken, o.VaultRoleId, o.VaultRoleSecret,
o.VaultServerCert, o.VaultClientCert, o.VaultClientCertKey)
fmt.Printf("initializing Vault IAM with %q\n", o.VaultEndpointURL)
case o.IpaHost != "":
svc, err = NewIpaIAMService(o.RootAccount, o.IpaHost, o.IpaVaultName, o.IpaUser, o.IpaPassword, o.IpaInsecure, o.IpaDebug)
fmt.Printf("initializing IPA IAM with %q\n", o.IpaHost)
default:
// if no iam options selected, default to the single user mode
fmt.Println("No IAM service configured, enabling single account mode")
return NewIAMServiceSingle(o.RootAccount), nil
return IAMServiceSingle{}, nil
}
if err != nil {

View File

@@ -36,14 +36,14 @@ type IAMCache struct {
var _ IAMService = &IAMCache{}
type item struct {
value Account
exp time.Time
value Account
}
type icache struct {
sync.RWMutex
expire time.Duration
items map[string]item
expire time.Duration
sync.RWMutex
}
func (i *icache) set(k string, v Account) {

View File

@@ -33,6 +33,8 @@ const (
// IAMServiceInternal manages the internal IAM service
type IAMServiceInternal struct {
dir string
rootAcc Account
// This mutex will help with racing updates to the IAM data
// from multiple requests to this gateway instance, but
// will not help with racing updates to multiple load balanced
@@ -40,8 +42,6 @@ type IAMServiceInternal struct {
// IAM service. All account updates should be sent to a single
// gateway instance if possible.
sync.RWMutex
dir string
rootAcc Account
}
// UpdateAcctFunc accepts the current data and returns the new data to be stored

View File

@@ -1,446 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auth
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/http/cookiejar"
"net/url"
"strconv"
"strings"
)
const IpaVersion = "2.254"
type IpaIAMService struct {
client http.Client
id int
version string
host string
vaultName string
username string
password string
kraTransportKey *rsa.PublicKey
debug bool
rootAcc Account
}
var _ IAMService = &IpaIAMService{}
func NewIpaIAMService(rootAcc Account, host, vaultName, username, password string, isInsecure, debug bool) (*IpaIAMService, error) {
ipa := IpaIAMService{
id: 0,
version: IpaVersion,
host: host,
vaultName: vaultName,
username: username,
password: password,
debug: debug,
rootAcc: rootAcc,
}
jar, err := cookiejar.New(nil)
if err != nil {
// this should never happen
return nil, fmt.Errorf("cookie jar creation: %w", err)
}
mTLSConfig := &tls.Config{InsecureSkipVerify: isInsecure}
tr := &http.Transport{
TLSClientConfig: mTLSConfig,
}
ipa.client = http.Client{Jar: jar, Transport: tr}
err = ipa.login()
if err != nil {
return nil, fmt.Errorf("ipa login failed: %w", err)
}
req, err := ipa.newRequest("vaultconfig_show/1", []string{}, map[string]any{"all": true})
if err != nil {
return nil, fmt.Errorf("ipa vaultconfig_show: %w", err)
}
vaultConfig := struct {
Kra_Server_Server []string
Transport_Cert Base64EncodedWrapped
Wrapping_default_algorithm string
Wrapping_supported_algorithms []string
}{}
err = ipa.rpc(req, &vaultConfig)
if err != nil {
return nil, fmt.Errorf("ipa vault config: %w", err)
}
cert, err := x509.ParseCertificate(vaultConfig.Transport_Cert)
if err != nil {
return nil, fmt.Errorf("ipa cannot parse vault certificate: %w", err)
}
ipa.kraTransportKey = cert.PublicKey.(*rsa.PublicKey)
isSupported := false
for _, algo := range vaultConfig.Wrapping_supported_algorithms {
if algo == "aes-128-cbc" {
isSupported = true
break
}
}
if !isSupported {
return nil,
fmt.Errorf("IPA vault does not support aes-128-cbc. Only %v supported",
vaultConfig.Wrapping_supported_algorithms)
}
return &ipa, nil
}
func (ipa *IpaIAMService) CreateAccount(account Account) error {
return fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) GetUserAccount(access string) (Account, error) {
if access == ipa.rootAcc.Access {
return ipa.rootAcc, nil
}
req, err := ipa.newRequest("user_show/1", []string{access}, map[string]any{})
if err != nil {
return Account{}, fmt.Errorf("ipa user_show: %w", err)
}
userResult := struct {
Gidnumber []string
Uidnumber []string
}{}
err = ipa.rpc(req, &userResult)
if err != nil {
return Account{}, err
}
uid, err := strconv.Atoi(userResult.Uidnumber[0])
if err != nil {
return Account{}, fmt.Errorf("ipa uid invalid: %w", err)
}
gid, err := strconv.Atoi(userResult.Gidnumber[0])
if err != nil {
return Account{}, fmt.Errorf("ipa gid invalid: %w", err)
}
account := Account{
Access: access,
Role: RoleUser,
UserID: uid,
GroupID: gid,
}
session_key := make([]byte, 16)
_, err = rand.Read(session_key)
if err != nil {
return account, fmt.Errorf("ipa cannot generate session key: %w", err)
}
encryptedKey, err := rsa.EncryptPKCS1v15(rand.Reader, ipa.kraTransportKey, session_key)
if err != nil {
return account, fmt.Errorf("ipa vault secret retrieval: %w", err)
}
req, err = ipa.newRequest("vault_retrieve_internal/1", []string{ipa.vaultName},
map[string]any{"username": access,
"session_key": Base64EncodedWrapped(encryptedKey),
"wrapping_algo": "aes-128-cbc"})
if err != nil {
return Account{}, fmt.Errorf("ipa vault_retrieve_internal: %w", err)
}
data := struct {
Vault_data Base64EncodedWrapped
Nonce Base64EncodedWrapped
}{}
err = ipa.rpc(req, &data)
if err != nil {
return account, err
}
aes, err := aes.NewCipher(session_key)
if err != nil {
return account, fmt.Errorf("ipa cannot create AES cipher: %w", err)
}
cbc := cipher.NewCBCDecrypter(aes, data.Nonce)
cbc.CryptBlocks(data.Vault_data, data.Vault_data)
secretUnpaddedJson, err := pkcs7Unpad(data.Vault_data, 16)
if err != nil {
return account, fmt.Errorf("ipa cannot unpad decrypted result: %w", err)
}
secret := struct {
Data Base64Encoded
}{}
json.Unmarshal(secretUnpaddedJson, &secret)
account.Secret = string(secret.Data)
return account, nil
}
func (ipa *IpaIAMService) UpdateUserAccount(access string, props MutableProps) error {
return fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) DeleteUserAccount(access string) error {
return fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) ListUserAccounts() ([]Account, error) {
return []Account{}, fmt.Errorf("not implemented")
}
func (ipa *IpaIAMService) Shutdown() error {
return nil
}
// Implementation
func (ipa *IpaIAMService) login() error {
form := url.Values{}
form.Set("user", ipa.username)
form.Set("password", ipa.password)
req, err := http.NewRequest(
"POST",
fmt.Sprintf("%s/ipa/session/login_password", ipa.host),
strings.NewReader(form.Encode()))
if err != nil {
return err
}
req.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := ipa.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode == 401 {
return errors.New("cannot login to FreeIPA: invalid credentials")
}
if resp.StatusCode != 200 {
return fmt.Errorf("cannot login to FreeIPA: status code %d", resp.StatusCode)
}
return nil
}
type rpcRequest = string
type rpcResponse struct {
Result json.RawMessage
Principal string
Id int
Version string
}
func (p rpcResponse) String() string {
return string(p.Result)
}
var errRpc = errors.New("IPA RPC error")
func (ipa *IpaIAMService) rpc(req rpcRequest, value any) error {
err := ipa.login()
if err != nil {
return err
}
res, err := ipa.rpcInternal(req)
if err != nil {
return err
}
return json.Unmarshal(res.Result, value)
}
func (ipa *IpaIAMService) rpcInternal(req rpcRequest) (rpcResponse, error) {
httpReq, err := http.NewRequest("POST",
fmt.Sprintf("%s/ipa/session/json", ipa.host),
strings.NewReader(req))
if err != nil {
return rpcResponse{}, err
}
ipa.log(fmt.Sprintf("%v", req))
httpReq.Header.Set("referer", fmt.Sprintf("%s/ipa", ipa.host))
httpReq.Header.Set("Content-Type", "application/json")
httpResp, err := ipa.client.Do(httpReq)
if err != nil {
return rpcResponse{}, err
}
bytes, err := io.ReadAll(httpResp.Body)
ipa.log(string(bytes))
if err != nil {
return rpcResponse{}, err
}
result := struct {
Result struct {
Json json.RawMessage `json:"result"`
Value string `json:"value"`
Summary any `json:"summary"`
} `json:"result"`
Error json.RawMessage `json:"error"`
Id int `json:"id"`
Principal string `json:"principal"`
Version string `json:"version"`
}{}
err = json.Unmarshal(bytes, &result)
if err != nil {
return rpcResponse{}, err
}
if string(result.Error) != "null" {
return rpcResponse{}, fmt.Errorf("%s: %w", string(result.Error), errRpc)
}
return rpcResponse{
Result: result.Result.Json,
Principal: result.Principal,
Id: result.Id,
Version: result.Version,
}, nil
}
func (ipa *IpaIAMService) newRequest(method string, args []string, dict map[string]any) (rpcRequest, error) {
id := ipa.id
ipa.id++
dict["version"] = ipa.version
jmethod, errMethod := json.Marshal(method)
jargs, errArgs := json.Marshal(args)
jdict, errDict := json.Marshal(dict)
err := errors.Join(errMethod, errArgs, errDict)
if err != nil {
return "", fmt.Errorf("ipa request invalid: %w", err)
}
request := map[string]interface{}{
"id": id,
"method": json.RawMessage(jmethod),
"params": []json.RawMessage{json.RawMessage(jargs), json.RawMessage(jdict)},
}
requestJSON, err := json.Marshal(request)
if err != nil {
return "", fmt.Errorf("failed to marshal request: %w", err)
}
return string(requestJSON), nil
}
// pkcs7Unpad validates and unpads data from the given bytes slice.
// The returned value will be 1 to n bytes smaller depending on the
// amount of padding, where n is the block size.
func pkcs7Unpad(b []byte, blocksize int) ([]byte, error) {
if blocksize <= 0 {
return nil, errors.New("invalid blocksize")
}
if len(b) == 0 {
return nil, errors.New("invalid PKCS7 data (empty or not padded)")
}
if len(b)%blocksize != 0 {
return nil, errors.New("invalid padding on input")
}
c := b[len(b)-1]
n := int(c)
if n == 0 || n > len(b) {
return nil, errors.New("invalid padding on input")
}
for i := 0; i < n; i++ {
if b[len(b)-n+i] != c {
return nil, errors.New("invalid padding on input")
}
}
return b[:len(b)-n], nil
}
/*
e.g.
"value" {
"__base64__": "aGVsbG93b3JsZAo="
}
*/
type Base64EncodedWrapped []byte
func (b *Base64EncodedWrapped) UnmarshalJSON(data []byte) error {
intermediate := struct {
Base64 string `json:"__base64__"`
}{}
err := json.Unmarshal(data, &intermediate)
if err != nil {
return err
}
*b, err = base64.StdEncoding.DecodeString(intermediate.Base64)
return err
}
func (b *Base64EncodedWrapped) MarshalJSON() ([]byte, error) {
intermediate := struct {
Base64 string `json:"__base64__"`
}{Base64: base64.StdEncoding.EncodeToString(*b)}
return json.Marshal(intermediate)
}
/*
e.g.
"value": "aGVsbG93b3JsZAo="
*/
type Base64Encoded []byte
func (b *Base64Encoded) UnmarshalJSON(data []byte) error {
var intermediate string
err := json.Unmarshal(data, &intermediate)
if err != nil {
return err
}
*b, err = base64.StdEncoding.DecodeString(intermediate)
return err
}
func (ipa *IpaIAMService) log(msg string) {
if ipa.debug {
log.Println(msg)
}
}
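
For reference, the envelope newRequest builds is a plain FreeIPA JSON-RPC call. Derived from the code above (the user name "alice" is hypothetical), a first call to newRequest("user_show/1", []string{"alice"}, map[string]any{}) marshals to:

{"id":0,"method":"user_show/1","params":[["alice"],{"version":"2.254"}]}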

View File

@@ -111,13 +111,11 @@ func (ld *LdapIAMService) GetUserAccount(access string) (Account, error) {
entry := result.Entries[0]
groupId, err := strconv.Atoi(entry.GetAttributeValue(ld.groupIdAtr))
if err != nil {
return Account{}, fmt.Errorf("invalid entry value for group-id %q: %w",
entry.GetAttributeValue(ld.groupIdAtr), err)
return Account{}, fmt.Errorf("invalid entry value for group-id: %v", entry.GetAttributeValue(ld.groupIdAtr))
}
userId, err := strconv.Atoi(entry.GetAttributeValue(ld.userIdAtr))
if err != nil {
return Account{}, fmt.Errorf("invalid entry value for user-id %q: %w",
entry.GetAttributeValue(ld.userIdAtr), err)
return Account{}, fmt.Errorf("invalid entry value for group-id: %v", entry.GetAttributeValue(ld.userIdAtr))
}
return Account{
Access: entry.GetAttributeValue(ld.accessAtr),
@@ -185,13 +183,11 @@ func (ld *LdapIAMService) ListUserAccounts() ([]Account, error) {
for _, el := range resp.Entries {
groupId, err := strconv.Atoi(el.GetAttributeValue(ld.groupIdAtr))
if err != nil {
return nil, fmt.Errorf("invalid entry value for group-id %q: %w",
el.GetAttributeValue(ld.groupIdAtr), err)
return nil, fmt.Errorf("invalid entry value for group-id: %v", el.GetAttributeValue(ld.groupIdAtr))
}
userId, err := strconv.Atoi(el.GetAttributeValue(ld.userIdAtr))
if err != nil {
return nil, fmt.Errorf("invalid entry value for user-id %q: %w",
el.GetAttributeValue(ld.userIdAtr), err)
return nil, fmt.Errorf("invalid entry value for group-id: %v", el.GetAttributeValue(ld.userIdAtr))
}
result = append(result, Account{
Access: el.GetAttributeValue(ld.accessAtr),

View File

@@ -42,6 +42,14 @@ import (
// coming from iAMConfig and iamFile in iam_internal.
type IAMServiceS3 struct {
client *s3.Client
access string
secret string
region string
bucket string
endpoint string
rootAcc Account
// This mutex will help with racing updates to the IAM data
// from multiple requests to this gateway instance, but
// will not help with racing updates to multiple load balanced
@@ -50,15 +58,8 @@ type IAMServiceS3 struct {
// gateway instance if possible.
sync.RWMutex
access string
secret string
region string
bucket string
endpoint string
sslSkipVerify bool
debug bool
rootAcc Account
client *s3.Client
}
var _ IAMService = &IAMServiceS3{}

View File

@@ -15,49 +15,39 @@
package auth
import (
"github.com/versity/versitygw/s3err"
"errors"
)
// IAMServiceSingle manages the single tenant (root-only) IAM service
type IAMServiceSingle struct {
root Account
}
type IAMServiceSingle struct{}
var _ IAMService = &IAMServiceSingle{}
func NewIAMServiceSingle(r Account) IAMService {
return &IAMServiceSingle{
root: r,
}
}
var ErrNotSupported = errors.New("method is not supported")
// CreateAccount not valid in single tenant mode
func (IAMServiceSingle) CreateAccount(account Account) error {
return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return ErrNotSupported
}
// GetUserAccount returns the root account if the root access key
// is provided, and "ErrAdminUserNotFound" otherwise
func (s IAMServiceSingle) GetUserAccount(access string) (Account, error) {
if access == s.root.Access {
return s.root, nil
}
return Account{}, s3err.GetAPIError(s3err.ErrAdminUserNotFound)
// GetUserAccount no accounts in single tenant mode
func (IAMServiceSingle) GetUserAccount(access string) (Account, error) {
return Account{}, ErrNoSuchUser
}
// UpdateUserAccount no accounts in single tenant mode
func (IAMServiceSingle) UpdateUserAccount(access string, props MutableProps) error {
return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return ErrNotSupported
}
// DeleteUserAccount no accounts in single tenant mode
func (IAMServiceSingle) DeleteUserAccount(access string) error {
return s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return ErrNotSupported
}
// ListUserAccounts no accounts in single tenant mode
func (IAMServiceSingle) ListUserAccounts() ([]Account, error) {
return []Account{}, s3err.GetAPIError(s3err.ErrAdminMethodNotSupported)
return []Account{}, nil
}
// Shutdown graceful termination of service

View File

@@ -47,7 +47,7 @@ func NewVaultIAMService(rootAcc Account, endpoint, secretStoragePath, mountPath,
tls.ServerCertificate.FromBytes = []byte(serverCert)
if clientCert != "" {
if clientCertKey == "" {
return nil, fmt.Errorf("client certificate and client certificate key should both be specified")
return nil, fmt.Errorf("client certificate and client certificate should both be specified")
}
tls.ClientCertificate.FromBytes = []byte(clientCert)

View File

@@ -29,9 +29,9 @@ import (
)
type BucketLockConfig struct {
Enabled bool
DefaultRetention *types.DefaultRetention
CreatedAt *time.Time
Enabled bool
}
func ParseBucketLockConfigurationInput(input []byte) ([]byte, error) {
@@ -95,7 +95,7 @@ func ParseBucketLockConfigurationOutput(input []byte) (*types.ObjectLockConfigur
func ParseObjectLockRetentionInput(input []byte) ([]byte, error) {
var retention s3response.PutObjectRetentionInput
if err := xml.Unmarshal(input, &retention); err != nil {
return nil, s3err.GetAPIError(s3err.ErrMalformedXML)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if retention.RetainUntilDate.Before(time.Now()) {
@@ -120,18 +120,18 @@ func ParseObjectLockRetentionOutput(input []byte) (*types.ObjectLockRetention, e
return &retention, nil
}
func ParseObjectLegalHoldOutput(status *bool) *s3response.GetObjectLegalHoldResult {
func ParseObjectLegalHoldOutput(status *bool) *types.ObjectLockLegalHold {
if status == nil {
return nil
}
if *status {
return &s3response.GetObjectLegalHoldResult{
return &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOn,
}
}
return &s3response.GetObjectLegalHoldResult{
return &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOff,
}
}

View File

@@ -85,6 +85,10 @@ type keyDerivator interface {
// SignerOptions is the SigV4 Signer options.
type SignerOptions struct {
// The logger to send log messages to.
Logger logging.Logger
// Disables the Signer's moving HTTP header key/value pairs from the HTTP
// request header to the request's query string. This is most commonly used
// with pre-signed requests preventing headers from being added to the
@@ -100,9 +104,6 @@ type SignerOptions struct {
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
DisableURIPathEscaping bool
// The logger to send log messages to.
Logger logging.Logger
// Enable logging of signed requests.
// This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
// presigned URL.
@@ -117,8 +118,8 @@ type SignerOptions struct {
// Signer applies AWS v4 signing to given request. Use this to sign requests
// that need to be signed with AWS V4 Signatures.
type Signer struct {
options SignerOptions
keyDerivator keyDerivator
options SignerOptions
}
// NewSigner returns a new SigV4 Signer
@@ -133,17 +134,19 @@ func NewSigner(optFns ...func(signer *SignerOptions)) *Signer {
}
type httpSigner struct {
KeyDerivator keyDerivator
Request *http.Request
Credentials aws.Credentials
Time v4Internal.SigningTime
ServiceName string
Region string
Time v4Internal.SigningTime
Credentials aws.Credentials
KeyDerivator keyDerivator
IsPreSign bool
SignedHdrs []string
PayloadHash string
SignedHdrs []string
IsPreSign bool
DisableHeaderHoisting bool
DisableURIPathEscaping bool
DisableSessionToken bool

View File

@@ -27,6 +27,7 @@ import (
"math"
"os"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
@@ -63,7 +64,6 @@ const (
keyBucketLock key = "Bucketlock"
keyObjRetention key = "Objectretention"
keyObjLegalHold key = "Objectlegalhold"
keyExpires key = "Vgwexpires"
onameAttr key = "Objname"
onameAttrLower key = "objname"
metaTmpMultipartPrefix key = ".sgwtmp" + "/multipart"
@@ -77,7 +77,6 @@ func (key) Table() map[string]struct{} {
"policy": {},
"bucketlock": {},
"objectretention": {},
"vgwexpires": {},
"objectlegalhold": {},
"objname": {},
".sgwtmp/multipart": {},
@@ -197,6 +196,7 @@ func (az *Azure) CreateBucket(ctx context.Context, input *s3.CreateBucketInput,
}
func (az *Azure) ListBuckets(ctx context.Context, input s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
fmt.Printf("%+v\n", input)
pager := az.client.NewListContainersPager(
&service.ListContainersOptions{
Include: service.ListContainersInclude{
@@ -294,27 +294,14 @@ func (az *Azure) DeleteBucketOwnershipControls(ctx context.Context, bucket strin
return az.deleteContainerMetaData(ctx, bucket, string(keyOwnership))
}
func (az *Azure) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
func (az *Azure) PutObject(ctx context.Context, po *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
tags, err := parseTags(po.Tagging)
if err != nil {
return s3response.PutObjectOutput{}, err
}
metadata := parseMetadata(po.Metadata)
// Store the "Expires" property in the object metadata
if getString(po.Expires) != "" {
if metadata == nil {
metadata = map[string]*string{
string(keyExpires): po.Expires,
}
} else {
metadata[string(keyExpires)] = po.Expires
}
}
opts := &blockblob.UploadStreamOptions{
Metadata: metadata,
Metadata: parseMetadata(po.Metadata),
Tags: tags,
}
@@ -322,8 +309,6 @@ func (az *Azure) PutObject(ctx context.Context, po s3response.PutObjectInput) (s
opts.HTTPHeaders.BlobContentEncoding = po.ContentEncoding
opts.HTTPHeaders.BlobContentLanguage = po.ContentLanguage
opts.HTTPHeaders.BlobContentDisposition = po.ContentDisposition
opts.HTTPHeaders.BlobContentLanguage = po.ContentLanguage
opts.HTTPHeaders.BlobCacheControl = po.CacheControl
if strings.HasSuffix(*po.Key, "/") {
// Hardcode "application/x-directory" for direcoty objects
opts.HTTPHeaders.BlobContentType = backend.GetPtrFromString(backend.DirContentType)
@@ -406,29 +391,17 @@ func (az *Azure) DeleteBucketTagging(ctx context.Context, bucket string) error {
}
func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
client, err := az.getBlobClient(*input.Bucket, *input.Key)
if err != nil {
return nil, err
}
resp, err := client.GetProperties(ctx, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
var opts *azblob.DownloadStreamOptions
if *input.Range != "" {
offset, count, isValid, err := backend.ParseGetObjectRange(*resp.ContentLength, *input.Range)
offset, count, err := backend.ParseRange(0, *input.Range)
if err != nil {
return nil, err
}
if isValid {
opts = &azblob.DownloadStreamOptions{
Range: blob.HTTPRange{
Count: count,
Offset: offset,
},
}
opts = &azblob.DownloadStreamOptions{
Range: blob.HTTPRange{
Count: count,
Offset: offset,
},
}
}
blobDownloadResponse, err := az.client.DownloadStream(ctx, *input.Bucket, *input.Key, opts)
@@ -447,21 +420,17 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.G
}
return &s3.GetObjectOutput{
AcceptRanges: backend.GetPtrFromString("bytes"),
ContentLength: blobDownloadResponse.ContentLength,
ContentEncoding: blobDownloadResponse.ContentEncoding,
ContentType: contentType,
ContentDisposition: blobDownloadResponse.ContentDisposition,
ContentLanguage: blobDownloadResponse.ContentLanguage,
CacheControl: blobDownloadResponse.CacheControl,
ExpiresString: blobDownloadResponse.Metadata[string(keyExpires)],
ETag: (*string)(blobDownloadResponse.ETag),
LastModified: blobDownloadResponse.LastModified,
Metadata: parseAndFilterAzMetadata(blobDownloadResponse.Metadata),
TagCount: &tagcount,
ContentRange: blobDownloadResponse.ContentRange,
Body: blobDownloadResponse.Body,
StorageClass: types.StorageClassStandard,
AcceptRanges: input.Range,
ContentLength: blobDownloadResponse.ContentLength,
ContentEncoding: blobDownloadResponse.ContentEncoding,
ContentType: contentType,
ETag: (*string)(blobDownloadResponse.ETag),
LastModified: blobDownloadResponse.LastModified,
Metadata: parseAzMetadata(blobDownloadResponse.Metadata),
TagCount: &tagcount,
ContentRange: blobDownloadResponse.ContentRange,
Body: blobDownloadResponse.Body,
StorageClass: types.StorageClassStandard,
}, nil
}
@@ -515,11 +484,10 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3
ContentEncoding: resp.ContentEncoding,
ContentLanguage: resp.ContentLanguage,
ContentDisposition: resp.ContentDisposition,
CacheControl: resp.CacheControl,
ExpiresString: resp.Metadata[string(keyExpires)],
ETag: (*string)(resp.ETag),
LastModified: resp.LastModified,
Metadata: parseAndFilterAzMetadata(resp.Metadata),
Metadata: parseAzMetadata(resp.Metadata),
Expires: resp.ExpiresOn,
StorageClass: types.StorageClassStandard,
}
@@ -554,7 +522,7 @@ func (az *Azure) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAtt
}
return s3response.GetObjectAttributesResponse{
ETag: backend.TrimEtag(data.ETag),
ETag: data.ETag,
ObjectSize: data.ContentLength,
StorageClass: data.StorageClass,
LastModified: data.LastModified,
@@ -584,18 +552,6 @@ func (az *Azure) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s
maxKeys = *input.MaxKeys
}
// Retrieve the bucket acl to get the bucket owner
// All the objects in the bucket are owned by the bucket owner
aclBytes, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyAclCapital))
if err != nil {
return s3response.ListObjectsResult{}, azureErrToS3Err(err)
}
var acl auth.ACL
if err := json.Unmarshal(aclBytes, &acl); err != nil {
return s3response.ListObjectsResult{}, fmt.Errorf("unmarshal acl: %w", err)
}
Pager:
for pager.More() {
resp, err := pager.NextPage(ctx)
@@ -609,14 +565,11 @@ Pager:
break Pager
}
objects = append(objects, s3response.Object{
ETag: backend.GetPtrFromString(fmt.Sprintf("%q", *v.Properties.ETag)),
ETag: (*string)(v.Properties.ETag),
Key: v.Name,
LastModified: v.Properties.LastModified,
Size: v.Properties.ContentLength,
StorageClass: types.ObjectStorageClassStandard,
Owner: &types.Owner{
ID: &acl.Owner,
},
})
}
for _, v := range resp.Segment.BlobPrefixes {
@@ -676,28 +629,10 @@ func (az *Azure) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input
var nextMarker *string
var isTruncated bool
var maxKeys int32 = math.MaxInt32
var fetchOwner bool
if input.MaxKeys != nil {
maxKeys = *input.MaxKeys
}
if input.FetchOwner != nil {
fetchOwner = *input.FetchOwner
}
// Retrieve the bucket acl to get the bucket owner, if "fetchOwner" is true
// All the objects in the bucket are owned by the bucket owner
var acl auth.ACL
if fetchOwner {
aclBytes, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyAclCapital))
if err != nil {
return s3response.ListObjectsV2Result{}, azureErrToS3Err(err)
}
if err := json.Unmarshal(aclBytes, &acl); err != nil {
return s3response.ListObjectsV2Result{}, fmt.Errorf("unmarshal acl: %w", err)
}
}
Pager:
for pager.More() {
@@ -711,20 +646,13 @@ Pager:
isTruncated = true
break Pager
}
obj := s3response.Object{
ETag: backend.GetPtrFromString(fmt.Sprintf("%q", *v.Properties.ETag)),
objects = append(objects, s3response.Object{
ETag: (*string)(v.Properties.ETag),
Key: v.Name,
LastModified: v.Properties.LastModified,
Size: v.Properties.ContentLength,
StorageClass: types.ObjectStorageClassStandard,
}
if fetchOwner {
obj.Owner = &types.Owner{
ID: &acl.Owner,
}
}
objects = append(objects, obj)
})
}
for _, v := range resp.Segment.BlobPrefixes {
if *v.Name <= marker {
@@ -807,158 +735,41 @@ func (az *Azure) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput
}, nil
}
func (az *Azure) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
dstClient, err := az.getBlobClient(*input.Bucket, *input.Key)
func (az *Azure) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
bclient, err := az.getBlobClient(*input.Bucket, *input.Key)
if err != nil {
return nil, err
}
if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource {
if input.MetadataDirective != types.MetadataDirectiveReplace {
props, err := bclient.GetProperties(ctx, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
mdmap := props.Metadata
if isMetaSame(mdmap, input.Metadata) {
return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest)
}
// Set object meta http headers
res, err := dstClient.SetHTTPHeaders(ctx, blob.HTTPHeaders{
BlobCacheControl: input.CacheControl,
BlobContentDisposition: input.ContentDisposition,
BlobContentEncoding: input.ContentEncoding,
BlobContentLanguage: input.ContentLanguage,
BlobContentType: input.ContentType,
}, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
meta := input.Metadata
if meta == nil {
meta = make(map[string]string)
}
// Embed "Expires" in object metadata
if getString(input.Expires) != "" {
meta[string(keyExpires)] = *input.Expires
}
// Set object metadata
_, err = dstClient.SetMetadata(ctx, parseMetadata(meta), nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
// Set object legal hold
if input.ObjectLockLegalHoldStatus != "" {
err = az.PutObjectLegalHold(ctx, *input.Bucket, *input.Key, "", input.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn)
if err != nil {
return nil, azureErrToS3Err(err)
}
}
// Set object retention
if input.ObjectLockMode != "" && input.ObjectLockRetainUntilDate != nil {
retention := s3response.PutObjectRetentionInput{
Mode: types.ObjectLockRetentionMode(input.ObjectLockMode),
RetainUntilDate: s3response.AmzDate{
Time: *input.ObjectLockRetainUntilDate,
},
}
retParsed, err := json.Marshal(retention)
if err != nil {
return nil, fmt.Errorf("parse object retention: %w", err)
}
err = az.PutObjectRetention(ctx, *input.Bucket, *input.Key, "", true, retParsed)
if err != nil {
return nil, azureErrToS3Err(err)
}
}
// Set object Tagging, if tagging directive is "REPLACE"
if input.TaggingDirective == types.TaggingDirectiveReplace {
tags, err := parseTags(input.Tagging)
if err != nil {
return nil, err
}
_, err = dstClient.SetTags(ctx, tags, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
}
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{
LastModified: res.LastModified,
ETag: (*string)(res.ETag),
},
}, nil
}
srcBucket, srcObj, _, err := backend.ParseCopySource(*input.CopySource)
tags, err := parseTags(input.Tagging)
if err != nil {
return nil, err
}
// Get the source object
downloadResp, err := az.client.DownloadStream(ctx, srcBucket, srcObj, nil)
resp, err := bclient.CopyFromURL(ctx, az.serviceURL+"/"+*input.CopySource, &blob.CopyFromURLOptions{
BlobTags: tags,
Metadata: parseMetadata(input.Metadata),
})
if err != nil {
return nil, azureErrToS3Err(err)
}
pInput := s3response.PutObjectInput{
Body: downloadResp.Body,
Bucket: input.Bucket,
Key: input.Key,
ContentLength: downloadResp.ContentLength,
ContentType: input.ContentType,
ContentEncoding: input.ContentEncoding,
ContentDisposition: input.ContentDisposition,
ContentLanguage: input.ContentLanguage,
CacheControl: input.CacheControl,
Expires: input.Expires,
Metadata: input.Metadata,
ObjectLockRetainUntilDate: input.ObjectLockRetainUntilDate,
ObjectLockMode: input.ObjectLockMode,
ObjectLockLegalHoldStatus: input.ObjectLockLegalHoldStatus,
}
if input.MetadataDirective == types.MetadataDirectiveCopy {
// Expires is in downloadResp.Metadata
pInput.Expires = nil
pInput.CacheControl = downloadResp.CacheControl
pInput.ContentDisposition = downloadResp.ContentDisposition
pInput.ContentEncoding = downloadResp.ContentEncoding
pInput.ContentLanguage = downloadResp.ContentLanguage
pInput.ContentType = downloadResp.ContentType
pInput.Metadata = parseAzMetadata(downloadResp.Metadata)
}
if input.TaggingDirective == types.TaggingDirectiveReplace {
pInput.Tagging = input.Tagging
}
// Create the destination object
resp, err := az.PutObject(ctx, pInput)
if err != nil {
return nil, err
}
// Copy the object tagging, if tagging directive is "COPY"
if input.TaggingDirective == types.TaggingDirectiveCopy {
srcClient, err := az.getBlobClient(srcBucket, srcObj)
if err != nil {
return nil, err
}
res, err := srcClient.GetTags(ctx, nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
_, err = dstClient.SetTags(ctx, parseAzTags(res.BlobTagSet), nil)
if err != nil {
return nil, azureErrToS3Err(err)
}
}
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{
ETag: &resp.ETag,
ETag: (*string)(resp.ETag),
LastModified: resp.LastModified,
},
}, nil
}
@@ -1005,7 +816,7 @@ func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object string)
return nil
}
func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
if input.ObjectLockLegalHoldStatus != "" || input.ObjectLockMode != "" {
bucketLock, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyBucketLock))
if err != nil {
@@ -1029,10 +840,6 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
meta := parseMetadata(input.Metadata)
meta[string(onameAttr)] = input.Key
if getString(input.Expires) != "" {
meta[string(keyExpires)] = input.Expires
}
// parse object tags
tagsStr := getString(input.Tagging)
tags := map[string]string{}
@@ -1075,13 +882,12 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
opts := &blockblob.UploadBufferOptions{
Metadata: meta,
Tags: tags,
HTTPHeaders: &blob.HTTPHeaders{
BlobContentType: input.ContentType,
BlobContentEncoding: input.ContentEncoding,
BlobCacheControl: input.CacheControl,
BlobContentDisposition: input.ContentDisposition,
BlobContentLanguage: input.ContentLanguage,
},
}
if getString(input.ContentType) != "" {
opts.HTTPHeaders = &blob.HTTPHeaders{
BlobContentType: input.ContentType,
BlobContentEncoding: input.ContentEncoding,
}
}
// Create an empty blob in .sgwtmp/multipart/<uploadId>/<object hash>
@@ -1100,9 +906,9 @@ func (az *Azure) CreateMultipartUpload(ctx context.Context, input s3response.Cre
}
// Each part is translated into an uncommitted block in a newly created blob in the staging area
func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (etag string, err error) {
if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil {
return nil, err
return "", err
}
// TODO: request streamable version of StageBlock()
@@ -1111,34 +917,32 @@ func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3
// the body in memory to create an io.ReadSeekCloser
rdr, err := getReadSeekCloser(input.Body)
if err != nil {
return nil, err
return "", err
}
client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
if err != nil {
return nil, err
return "", err
}
// block id serves as etag here
etag := blockIDInt32ToBase64(*input.PartNumber)
etag = blockIDInt32ToBase64(*input.PartNumber)
_, err = client.StageBlock(ctx, etag, rdr, nil)
if err != nil {
return nil, parseMpError(err)
return "", parseMpError(err)
}
return &s3.UploadPartOutput{
ETag: &etag,
}, nil
return etag, nil
}
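
The comment above notes that the block ID doubles as the part ETag. Below is a minimal standalone sketch of that encoding, assuming a fixed-width big-endian int32 plus standard base64 (Azure requires all block IDs within a blob to be equal-length base64 strings); the gateway's actual blockIDInt32ToBase64/decodeBlockId may differ in detail:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// blockIDInt32ToBase64 encodes a part number as a fixed-width base64 block ID.
func blockIDInt32ToBase64(partNumber int32) string {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(partNumber))
	return base64.StdEncoding.EncodeToString(buf)
}

// decodeBlockId recovers the part number from a block ID.
func decodeBlockId(blockID string) (int, error) {
	buf, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil || len(buf) != 4 {
		return 0, fmt.Errorf("invalid block id %q", blockID)
	}
	return int(binary.BigEndian.Uint32(buf)), nil
}

func main() {
	id := blockIDInt32ToBase64(7)
	n, _ := decodeBlockId(id)
	fmt.Println(id, n) // AAAABw== 7
}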
func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
if err != nil {
return s3response.CopyPartResult{}, err
return s3response.CopyObjectResult{}, err
}
if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil {
return s3response.CopyPartResult{}, err
return s3response.CopyObjectResult{}, err
}
eTag := blockIDInt32ToBase64(*input.PartNumber)
@@ -1146,10 +950,10 @@ func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInp
// TODO: this action returns not implemented on Azurite; it may work in production
_, err = client.StageBlockFromURL(ctx, eTag, *input.CopySource, nil)
if err != nil {
return s3response.CopyPartResult{}, parseMpError(err)
return s3response.CopyObjectResult{}, parseMpError(err)
}
return s3response.CopyPartResult{}, nil
return s3response.CopyObjectResult{}, nil
}
// Lists all uncommitted parts from the blob
@@ -1392,63 +1196,34 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
uncommittedBlocks := map[int32]*blockblob.Block{}
for _, el := range blockList.UncommittedBlocks {
ptNumber, err := decodeBlockId(backend.GetStringFromPtr(el.Name))
slices.SortFunc(blockList.UncommittedBlocks, func(a *blockblob.Block, b *blockblob.Block) int {
ptNumber, _ := decodeBlockId(*a.Name)
nextPtNumber, _ := decodeBlockId(*b.Name)
return ptNumber - nextPtNumber
})
for i, block := range blockList.UncommittedBlocks {
ptNumber, err := decodeBlockId(*block.Name)
if err != nil {
return nil, fmt.Errorf("invalid block name: %w", err)
}
uncommittedBlocks[int32(ptNumber)] = el
}
// The initial value is the lower limit of partNumber: 0
var totalSize int64
var partNumber int32
last := len(blockList.UncommittedBlocks) - 1
for i, part := range input.MultipartUpload.Parts {
if part.PartNumber == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.PartNumber < 1 {
return nil, s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
}
if *part.PartNumber <= partNumber {
return nil, s3err.GetAPIError(s3err.ErrInvalidPartOrder)
}
partNumber = *part.PartNumber
block, ok := uncommittedBlocks[*part.PartNumber]
if !ok {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.ETag != *block.Name {
if *input.MultipartUpload.Parts[i].ETag != *block.Name {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
// all parts except the last must be at least
// the minimum allowed size (5 MiB)
if i < last && *block.Size < backend.MinPartSize {
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
if *input.MultipartUpload.Parts[i].PartNumber != int32(ptNumber) {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
totalSize += *block.Size
blockIds = append(blockIds, *block.Name)
}
if input.MpuObjectSize != nil && totalSize != *input.MpuObjectSize {
return nil, s3err.GetIncorrectMpObjectSizeErr(totalSize, *input.MpuObjectSize)
}
opts := &blockblob.CommitBlockListOptions{
Metadata: props.Metadata,
Tags: parseAzTags(tags.BlobTagSet),
}
opts.HTTPHeaders = &blob.HTTPHeaders{
BlobContentType: props.ContentType,
BlobContentEncoding: props.ContentEncoding,
BlobContentDisposition: props.ContentDisposition,
BlobContentLanguage: props.ContentLanguage,
BlobCacheControl: props.CacheControl,
BlobContentType: props.ContentType,
BlobContentEncoding: props.ContentEncoding,
}
resp, err := client.CommitBlockList(ctx, blockIds, opts)
@@ -1684,10 +1459,7 @@ func (az *Azure) ChangeBucketOwner(ctx context.Context, bucket string, acl []byt
// The action actually returns the containers owned by the user who initialized the gateway
// TODO: Not sure if there's a way to list all the containers and owners?
func (az *Azure) ListBucketsAndOwners(ctx context.Context) (buckets []s3response.Bucket, err error) {
opts := &service.ListContainersOptions{
Include: service.ListContainersInclude{Metadata: true},
}
pager := az.client.NewListContainersPager(opts)
pager := az.client.NewListContainersPager(nil)
for pager.More() {
resp, err := pager.NextPage(ctx)
@@ -1786,7 +1558,7 @@ func parseMetadata(m map[string]string) map[string]*string {
return meta
}
func parseAndFilterAzMetadata(m map[string]*string) map[string]string {
func parseAzMetadata(m map[string]*string) map[string]string {
if m == nil {
return nil
}
@@ -1805,19 +1577,6 @@ func parseAndFilterAzMetadata(m map[string]*string) map[string]string {
return meta
}
func parseAzMetadata(m map[string]*string) map[string]string {
if m == nil {
return nil
}
meta := make(map[string]string)
for k, v := range m {
meta[k] = *v
}
return meta
}
func parseTags(tagstr *string) (map[string]string, error) {
tagsStr := getString(tagstr)
tags := make(map[string]string)
@@ -1976,11 +1735,9 @@ func (az *Azure) deleteContainerMetaData(ctx context.Context, bucket, key string
}
// getAclFromMetadata decodes the ACL stored in the container metadata under the given key.
func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
var acl auth.ACL
data, ok := meta[string(key)]
if !ok {
return &acl, nil
return nil, s3err.GetAPIError(s3err.ErrInternalError)
}
value, err := decodeString(*data)
@@ -1988,6 +1745,7 @@ func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
return nil, err
}
var acl auth.ACL
if len(value) == 0 {
return &acl, nil
}
@@ -2000,6 +1758,24 @@ func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) {
return &acl, nil
}
// isMetaSame reports whether the Azure metadata matches the AWS metadata,
// ignoring the internal ACL keys.
func isMetaSame(azMeta map[string]*string, awsMeta map[string]string) bool {
if len(azMeta) != len(awsMeta) {
return false
}
for key, val := range azMeta {
if key == string(keyAclCapital) || key == string(keyAclLower) {
continue
}
awsVal, ok := awsMeta[key]
if !ok || awsVal != *val {
return false
}
}
return true
}
// createMetaTmpPath builds the temporary multipart metadata path from the
// upload id and the sha256 hash of the object name.
func createMetaTmpPath(obj, uploadId string) string {
objNameSum := sha256.Sum256([]byte(obj))
return filepath.Join(string(metaTmpMultipartPrefix), uploadId, fmt.Sprintf("%x", objNameSum))
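
For orientation, a small sketch of the resulting path, assuming the ".sgwtmp/multipart" prefix mentioned in the CreateMultipartUpload comment above:

package main

import (
	"crypto/sha256"
	"fmt"
	"path/filepath"
)

func main() {
	// mirrors createMetaTmpPath above; the ".sgwtmp/multipart" prefix is
	// assumed from the earlier comment about the multipart staging layout
	objNameSum := sha256.Sum256([]byte("photos/cat.png"))
	fmt.Println(filepath.Join(".sgwtmp/multipart", "upload123",
		fmt.Sprintf("%x", objNameSum)))
	// Output: .sgwtmp/multipart/upload123/<64 hex chars>
}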


@@ -46,26 +46,23 @@ type Backend interface {
PutBucketOwnershipControls(_ context.Context, bucket string, ownership types.ObjectOwnership) error
GetBucketOwnershipControls(_ context.Context, bucket string) (types.ObjectOwnership, error)
DeleteBucketOwnershipControls(_ context.Context, bucket string) error
PutBucketCors(context.Context, []byte) error
GetBucketCors(_ context.Context, bucket string) ([]byte, error)
DeleteBucketCors(_ context.Context, bucket string) error
// multipart operations
CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error
ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)
ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error)
UploadPart(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error)
UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error)
UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error)
UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)
// standard object operations
PutObject(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error)
PutObject(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error)
HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
GetObject(context.Context, *s3.GetObjectInput) (*s3.GetObjectOutput, error)
GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error)
CopyObject(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error)
CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error)
ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error)
DeleteObject(context.Context, *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
@@ -100,10 +97,6 @@ type Backend interface {
ListBucketsAndOwners(context.Context) ([]s3response.Bucket, error)
}
// InterfaceVersion tracks changes to the Backend interface for plugins.
// Increment this when the Backend interface changes.
const InterfaceVersion = 1
type BackendUnsupported struct{}
var _ Backend = &BackendUnsupported{}
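
Because BackendUnsupported satisfies the full interface, a custom backend can embed it and override only the calls it supports; everything else keeps returning ErrNotImplemented. A sketch with a hypothetical type:

package mybackend

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/backend"
)

// MyBackend implements only HeadObject; every other Backend method falls
// through to the embedded BackendUnsupported and returns ErrNotImplemented.
type MyBackend struct {
	backend.BackendUnsupported
}

func (MyBackend) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
	// a real lookup would go here
	return &s3.HeadObjectOutput{}, nil
}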
@@ -157,17 +150,8 @@ func (BackendUnsupported) GetBucketOwnershipControls(_ context.Context, bucket s
func (BackendUnsupported) DeleteBucketOwnershipControls(_ context.Context, bucket string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketCors(context.Context, []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketCors(_ context.Context, bucket string) ([]byte, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucketCors(_ context.Context, bucket string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CreateMultipartUpload(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
func (BackendUnsupported) CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
@@ -182,14 +166,14 @@ func (BackendUnsupported) ListMultipartUploads(context.Context, *s3.ListMultipar
func (BackendUnsupported) ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error) {
return s3response.ListPartsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
return s3response.CopyPartResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObject(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
func (BackendUnsupported) PutObject(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
return s3response.PutObjectOutput{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
@@ -204,7 +188,7 @@ func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
return s3response.GetObjectAttributesResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
func (BackendUnsupported) CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {


@@ -19,6 +19,7 @@ import (
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
@@ -33,9 +34,6 @@ const (
// this is the media type for directories in AWS and Nextcloud
DirContentType = "application/x-directory"
DefaultContentType = "binary/octet-stream"
// this is the minimum allowed size for multipart parts
MinPartSize = 5 * 1024 * 1024
)
func IsValidBucketName(name string) bool { return true }
@@ -70,118 +68,45 @@ func GetTimePtr(t time.Time) *time.Time {
return &t
}
func TrimEtag(etag *string) *string {
if etag == nil {
return nil
}
return GetPtrFromString(strings.Trim(*etag, "\""))
}
var (
errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
errInvalidCopySourceRange = s3err.GetAPIError(s3err.ErrInvalidCopySourceRange)
errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
)
// ParseGetObjectRange parses the input range header and returns startoffset,
// length, isValid, and error. If no endoffset is specified, then length is
// set to the object size. For invalid inputs it returns no error, but
// isValid=false. An `InvalidRange` error is returned only if startoffset is
// greater than the object size.
func ParseGetObjectRange(size int64, acceptRange string) (int64, int64, bool, error) {
if acceptRange == "" {
return 0, size, false, nil
}
rangeKv := strings.Split(acceptRange, "=")
if len(rangeKv) != 2 {
return 0, size, false, nil
}
if rangeKv[0] != "bytes" {
return 0, size, false, nil
}
bRange := strings.Split(rangeKv[1], "-")
if len(bRange) != 2 {
return 0, size, false, nil
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil {
return 0, size, false, nil
}
if startOffset >= size {
return 0, 0, false, errInvalidRange
}
if bRange[1] == "" {
return startOffset, size - startOffset, true, nil
}
endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, size, false, nil
}
if endOffset < startOffset {
return 0, size, false, nil
}
if endOffset >= size {
return startOffset, size - startOffset, true, nil
}
return startOffset, endOffset - startOffset + 1, true, nil
}
// ParseCopySourceRange parses input range header and returns startoffset, length
// and error. If no endoffset specified, then length is set to the object size
func ParseCopySourceRange(size int64, acceptRange string) (int64, int64, error) {
// ParseRange parses the input range header and returns startoffset, length,
// and error. If no endoffset is specified, then length is set to -1.
func ParseRange(size int64, acceptRange string) (int64, int64, error) {
if acceptRange == "" {
return 0, size, nil
}
rangeKv := strings.Split(acceptRange, "=")
if len(rangeKv) != 2 {
return 0, 0, errInvalidCopySourceRange
}
if rangeKv[0] != "bytes" {
return 0, 0, errInvalidCopySourceRange
if len(rangeKv) < 2 {
return 0, 0, errInvalidRange
}
bRange := strings.Split(rangeKv[1], "-")
if len(bRange) != 2 {
return 0, 0, errInvalidCopySourceRange
if len(bRange) < 1 || len(bRange) > 2 {
return 0, 0, errInvalidRange
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil {
return 0, 0, errInvalidCopySourceRange
return 0, 0, errInvalidRange
}
if startOffset >= size {
return 0, 0, s3err.CreateExceedingRangeErr(size)
endOffset := int64(-1)
if len(bRange) == 1 || bRange[1] == "" {
return startOffset, endOffset, nil
}
if bRange[1] == "" {
return startOffset, size - startOffset + 1, nil
}
endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
endOffset, err = strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, 0, errInvalidCopySourceRange
return 0, 0, errInvalidRange
}
if endOffset < startOffset {
return 0, 0, errInvalidCopySourceRange
}
if endOffset >= size {
return 0, 0, s3err.CreateExceedingRangeErr(size)
return 0, 0, errInvalidRange
}
return startOffset, endOffset - startOffset + 1, nil
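
Assuming ParseRange keeps the signature above, its behavior for a 1000-byte object would be as follows (an illustrative sketch, not a test from the repo):

package main

import (
	"fmt"

	"github.com/versity/versitygw/backend"
)

func main() {
	// explicit end: length = end - start + 1
	fmt.Println(backend.ParseRange(1000, "bytes=0-499")) // 0 500 <nil>

	// open-ended range: length -1 means "read to the end"
	fmt.Println(backend.ParseRange(1000, "bytes=500-")) // 500 -1 <nil>

	// end offset at or past the object size is rejected
	fmt.Println(backend.ParseRange(1000, "bytes=900-1100")) // 0 0 InvalidRange
}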
@@ -211,28 +136,12 @@ func ParseCopySource(copySourceHeader string) (string, string, string, error) {
return srcBucket, srcObject, versionId, nil
}
// ParseObjectTags parses the url-encoded input string into a
// map[string]string key-value tag set
func ParseObjectTags(t string) (map[string]string, error) {
if t == "" {
return nil, nil
func CreateExceedingRangeErr(objSize int64) s3err.APIError {
return s3err.APIError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Range specified is not valid for source object of size: %d", objSize),
HTTPStatusCode: http.StatusBadRequest,
}
tagging := make(map[string]string)
tagParts := strings.Split(t, "&")
for _, prt := range tagParts {
p := strings.Split(prt, "=")
if len(p) != 2 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
}
if len(p[0]) > 128 || len(p[1]) > 256 {
return nil, s3err.GetAPIError(s3err.ErrInvalidTag)
}
tagging[p[0]] = p[1]
}
return tagging, nil
}
func GetMultipartMD5(parts []types.CompletedPart) string {
@@ -240,8 +149,8 @@ func GetMultipartMD5(parts []types.CompletedPart) string {
for _, part := range parts {
partsEtagBytes = append(partsEtagBytes, getEtagBytes(*part.ETag)...)
}
return fmt.Sprintf("\"%s-%d\"", md5String(partsEtagBytes), len(parts))
s3MD5 := fmt.Sprintf("%s-%d", md5String(partsEtagBytes), len(parts))
return s3MD5
}
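
The scheme above is the standard S3 multipart ETag: MD5 over the concatenated binary part digests, suffixed with the part count. A self-contained sketch:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	// MD5 of each part's data, as S3 clients compute per-part ETags
	part1 := md5.Sum([]byte("part one data"))
	part2 := md5.Sum([]byte("part two data"))

	// concatenate the raw digests and hash the result
	all := append(part1[:], part2[:]...)
	final := md5.Sum(all)

	// the "-2" suffix records the number of parts
	fmt.Printf("%s-%d\n", hex.EncodeToString(final[:]), 2)
}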
func getEtagBytes(etag string) []byte {


@@ -1,54 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package meta
import (
"os"
)
// NoMeta is a metadata storer that does not store metadata.
// This can be useful for read-only mounts where attempting to store metadata
// would fail.
type NoMeta struct{}
// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
// always returns ErrNoSuchKey
func (NoMeta) RetrieveAttribute(_ *os.File, _, _, _ string) ([]byte, error) {
return nil, ErrNoSuchKey
}
// StoreAttribute stores the value of a specific attribute for an object or a bucket.
// always returns nil without storing the attribute
func (NoMeta) StoreAttribute(_ *os.File, _, _, _ string, _ []byte) error {
return nil
}
// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
// always returns nil without deleting the attribute
func (NoMeta) DeleteAttribute(_, _, _ string) error {
return nil
}
// ListAttributes lists all attributes for an object or a bucket.
// always returns an empty list of attributes
func (NoMeta) ListAttributes(_, _ string) ([]string, error) {
return []string{}, nil
}
// DeleteAttributes removes all attributes for an object or a bucket.
// always returns nil without deleting any attributes
func (NoMeta) DeleteAttributes(bucket, object string) error {
return nil
}
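
Usage is intentionally trivial: writes silently succeed and reads always miss, so read-only mounts never fail on metadata. A sketch using the MetadataStorer interface from this package:

package main

import (
	"errors"
	"fmt"

	"github.com/versity/versitygw/backend/meta"
)

func main() {
	var storer meta.MetadataStorer = meta.NoMeta{}

	// writes are accepted but dropped
	_ = storer.StoreAttribute(nil, "bucket", "obj", "etag", []byte("x"))

	// reads always report a missing key
	_, err := storer.RetrieveAttribute(nil, "bucket", "obj", "etag")
	fmt.Println(errors.Is(err, meta.ErrNoSuchKey)) // true
}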


@@ -1,139 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package meta
import (
"errors"
"fmt"
"os"
"path/filepath"
)
// SideCar is a metadata storer that uses sidecar files to store metadata.
type SideCar struct {
dir string
}
const (
sidecarmeta = "meta"
)
// NewSideCar creates a new SideCar metadata storer.
func NewSideCar(dir string) (SideCar, error) {
fi, err := os.Lstat(dir)
if err != nil {
return SideCar{}, fmt.Errorf("failed to stat directory: %v", err)
}
if !fi.IsDir() {
return SideCar{}, fmt.Errorf("not a directory")
}
return SideCar{dir: dir}, nil
}
// RetrieveAttribute retrieves the value of a specific attribute for an object or a bucket.
func (s SideCar) RetrieveAttribute(_ *os.File, bucket, object, attribute string) ([]byte, error) {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
attr := filepath.Join(metadir, attribute)
value, err := os.ReadFile(attr)
if errors.Is(err, os.ErrNotExist) {
return nil, ErrNoSuchKey
}
if err != nil {
return nil, fmt.Errorf("failed to read attribute: %v", err)
}
return value, nil
}
// StoreAttribute stores the value of a specific attribute for an object or a bucket.
func (s SideCar) StoreAttribute(_ *os.File, bucket, object, attribute string, value []byte) error {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
err := os.MkdirAll(metadir, 0777)
if err != nil {
return fmt.Errorf("failed to create metadata directory: %v", err)
}
attr := filepath.Join(metadir, attribute)
err = os.WriteFile(attr, value, 0666)
if err != nil {
return fmt.Errorf("failed to write attribute: %v", err)
}
return nil
}
// DeleteAttribute removes the value of a specific attribute for an object or a bucket.
func (s SideCar) DeleteAttribute(bucket, object, attribute string) error {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
attr := filepath.Join(metadir, attribute)
err := os.Remove(attr)
if errors.Is(err, os.ErrNotExist) {
return ErrNoSuchKey
}
if err != nil {
return fmt.Errorf("failed to remove attribute: %v", err)
}
return nil
}
// ListAttributes lists all attributes for an object or a bucket.
func (s SideCar) ListAttributes(bucket, object string) ([]string, error) {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
ents, err := os.ReadDir(metadir)
if errors.Is(err, os.ErrNotExist) {
return []string{}, nil
}
if err != nil {
return nil, fmt.Errorf("failed to list attributes: %v", err)
}
var attrs []string
for _, ent := range ents {
attrs = append(attrs, ent.Name())
}
return attrs, nil
}
// DeleteAttributes removes all attributes for an object or a bucket.
func (s SideCar) DeleteAttributes(bucket, object string) error {
metadir := filepath.Join(s.dir, bucket, object, sidecarmeta)
if object == "" {
metadir = filepath.Join(s.dir, bucket, sidecarmeta)
}
err := os.RemoveAll(metadir)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("failed to remove attributes: %v", err)
}
return nil
}
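
Putting the pieces together, a stored attribute lands at <dir>/<bucket>/<object>/meta/<attribute>. A usage sketch with illustrative paths:

package main

import (
	"fmt"
	"log"

	"github.com/versity/versitygw/backend/meta"
)

func main() {
	// the value below is written to /srv/meta/mybucket/myobject/meta/etag
	s, err := meta.NewSideCar("/srv/meta")
	if err != nil {
		log.Fatal(err)
	}
	if err := s.StoreAttribute(nil, "mybucket", "myobject", "etag", []byte(`"abc123"`)); err != nil {
		log.Fatal(err)
	}
	val, err := s.RetrieveAttribute(nil, "mybucket", "myobject", "etag")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(val)) // "abc123"
}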


@@ -23,7 +23,6 @@ import (
"syscall"
"github.com/pkg/xattr"
"github.com/versity/versitygw/s3err"
)
const (
@@ -57,18 +56,10 @@ func (x XattrMeta) RetrieveAttribute(f *os.File, bucket, object, attribute strin
// StoreAttribute stores the value of a specific attribute for an object in a bucket.
func (x XattrMeta) StoreAttribute(f *os.File, bucket, object, attribute string, value []byte) error {
if f != nil {
err := xattr.FSet(f, xattrPrefix+attribute, value)
if errors.Is(err, syscall.EROFS) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return err
return xattr.FSet(f, xattrPrefix+attribute, value)
}
err := xattr.Set(filepath.Join(bucket, object), xattrPrefix+attribute, value)
if errors.Is(err, syscall.EROFS) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return err
return xattr.Set(filepath.Join(bucket, object), xattrPrefix+attribute, value)
}
// DeleteAttribute removes the value of a specific attribute for an object in a bucket.
@@ -77,9 +68,6 @@ func (x XattrMeta) DeleteAttribute(bucket, object, attribute string) error {
if errors.Is(err, xattr.ENOATTR) {
return ErrNoSuchKey
}
if errors.Is(err, syscall.EROFS) {
return s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return err
}


@@ -1,516 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package vgwplugin
import (
"bufio"
"context"
"fmt"
"plugin"
"reflect"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
// The plugin backend is used to dynamically load a Go plugin at runtime.
// It loads the plugin and calls the InitPlugin function to initialize it.
// A config string option is passed to initialize the plugin; the plugin is
// expected to handle its own configuration and initialization from this.
// If the plugin cannot be loaded or initialized, an error is returned.
// The InitPlugin function should be defined in the plugin and should have
// the signature func(configfile string) (version int, err error).
// The plugin should also implement the backend.Backend interface functions.
// However, the plugin does not need to implement all functions of the
// backend.Backend interface. It can implement only the functions it needs.
// Any non-implemented functions will return an error indicating that
// the function is not implemented.
// The plugin must be compiled with the same Go version as the application
// using it, and built with the -buildmode=plugin flag.
// Example: go build -buildmode=plugin -o myplugin.so myplugin.go
// See the following for caveats and details:
// https://pkg.go.dev/plugin#hdr-Warnings
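
Based on the contract described above, a plugin skeleton might look like the following; the function bodies are illustrative assumptions, only the InitPlugin signature and the lookup-by-exported-name mechanism come from the loader in this file:

// myplugin.go — minimal plugin sketch
package main

import (
	"context"

	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3response"
)

// InitPlugin is looked up by the gateway and must return the interface
// version the plugin was built against.
func InitPlugin(configfile string) (int, error) {
	// parse configfile here if the plugin needs configuration
	return backend.InterfaceVersion, nil
}

// ListBuckets is resolved by name via reflection; any Backend function the
// plugin omits simply reports ErrNotImplemented on the gateway side.
func ListBuckets(ctx context.Context, input s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
	return s3response.ListAllMyBucketsResult{}, nil
}

Built as noted above with: go build -buildmode=plugin -o myplugin.so myplugin.go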
// PluginBackend implements the backend.Backend interface using Go plugins.
type PluginBackend struct {
p *plugin.Plugin
}
// NewPluginBackend creates a new PluginBackend. The path parameter should
// point to the compiled plugin file (e.g., .so file).
func NewPluginBackend(path, config string) (*PluginBackend, error) {
p, err := plugin.Open(path)
if err != nil {
return nil, fmt.Errorf("failed to open plugin: %w", err)
}
initSymbol, err := p.Lookup("InitPlugin")
if err != nil {
return nil, fmt.Errorf("failed to lookup InitPlugin symbol: %w", err)
}
initFunc, ok := initSymbol.(func(string) (int, error))
if !ok {
return nil, fmt.Errorf("InitPlugin symbol is not a func() (int, error)")
}
version, err := initFunc(config)
if err != nil {
return nil, fmt.Errorf("InitPlugin failed: %w", err)
}
if version != backend.InterfaceVersion {
return nil, fmt.Errorf("plugin interface version mismatch: gateway %v, plugin %v",
backend.InterfaceVersion, version)
}
return &PluginBackend{p: p}, nil
}
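
Loading it from the gateway side is then a single call (the paths and import path below are hypothetical):

package main

import (
	"log"

	"github.com/versity/versitygw/backend/vgwplugin"
)

func main() {
	be, err := vgwplugin.NewPluginBackend("/usr/lib/versitygw/myplugin.so", "/etc/myplugin.conf")
	if err != nil {
		log.Fatalf("load plugin backend: %v", err)
	}
	log.Println(be.String()) // "Plugin Gateway"
}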
func (p *PluginBackend) callPluginFunc(name string, args []any) ([]reflect.Value, error) {
symbol, err := p.p.Lookup(name)
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
symbolValue := reflect.ValueOf(symbol)
if symbolValue.Kind() != reflect.Func {
return nil, fmt.Errorf("symbol %s is not a function", name)
}
numIn := symbolValue.Type().NumIn()
if len(args) != numIn {
return nil, fmt.Errorf("incorrect number of arguments for function %s, expected %d, got %d", name, numIn, len(args))
}
in := make([]reflect.Value, len(args))
for i := range args {
in[i] = reflect.ValueOf(args[i])
}
return symbolValue.Call(in), nil
}
func (p *PluginBackend) String() string { return "Plugin Gateway" }
func (p *PluginBackend) Shutdown() {}
func (p *PluginBackend) ListBuckets(ctx context.Context, input s3response.ListBucketsInput) (s3response.ListAllMyBucketsResult, error) {
results, err := p.callPluginFunc("ListBuckets", []any{ctx, input})
if err != nil {
return s3response.ListAllMyBucketsResult{}, err
}
return results[0].Interface().(s3response.ListAllMyBucketsResult), convertError(results[1])
}
func (p *PluginBackend) HeadBucket(ctx context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
results, err := p.callPluginFunc("HeadBucket", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.HeadBucketOutput), convertError(results[1])
}
func (p *PluginBackend) GetBucketAcl(ctx context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
results, err := p.callPluginFunc("GetBucketAcl", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) CreateBucket(ctx context.Context, input *s3.CreateBucketInput, defaultACL []byte) error {
_, err := p.callPluginFunc("CreateBucket", []any{ctx, input, defaultACL})
return err
}
func (p *PluginBackend) PutBucketAcl(ctx context.Context, bucket string, data []byte) error {
_, err := p.callPluginFunc("PutBucketAcl", []any{ctx, bucket, data})
return err
}
func (p *PluginBackend) DeleteBucket(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucket", []any{ctx, bucket})
return err
}
func (p *PluginBackend) PutBucketVersioning(ctx context.Context, bucket string, status types.BucketVersioningStatus) error {
_, err := p.callPluginFunc("PutBucketVersioning", []any{ctx, bucket, status})
return err
}
func (p *PluginBackend) GetBucketVersioning(ctx context.Context, bucket string) (s3response.GetBucketVersioningOutput, error) {
results, err := p.callPluginFunc("GetBucketVersioning", []any{ctx, bucket})
if err != nil {
return s3response.GetBucketVersioningOutput{}, err
}
return results[0].Interface().(s3response.GetBucketVersioningOutput), convertError(results[1])
}
func (p *PluginBackend) PutBucketPolicy(ctx context.Context, bucket string, policy []byte) error {
_, err := p.callPluginFunc("PutBucketPolicy", []any{ctx, bucket, policy})
return err
}
func (p *PluginBackend) GetBucketPolicy(ctx context.Context, bucket string) ([]byte, error) {
results, err := p.callPluginFunc("GetBucketPolicy", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) DeleteBucketPolicy(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketPolicy", []any{ctx, bucket})
return err
}
func (p *PluginBackend) PutBucketOwnershipControls(ctx context.Context, bucket string, ownership types.ObjectOwnership) error {
_, err := p.callPluginFunc("PutBucketOwnershipControls", []any{ctx, bucket, ownership})
return err
}
func (p *PluginBackend) GetBucketOwnershipControls(ctx context.Context, bucket string) (types.ObjectOwnership, error) {
results, err := p.callPluginFunc("GetBucketOwnershipControls", []any{ctx, bucket})
if err != nil {
return "", err
}
return results[0].Interface().(types.ObjectOwnership), convertError(results[1])
}
func (p *PluginBackend) DeleteBucketOwnershipControls(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketOwnershipControls", []any{ctx, bucket})
return err
}
func (p *PluginBackend) PutBucketCors(ctx context.Context, data []byte) error {
_, err := p.callPluginFunc("PutBucketCors", []any{ctx, data})
return err
}
func (p *PluginBackend) GetBucketCors(ctx context.Context, bucket string) ([]byte, error) {
results, err := p.callPluginFunc("GetBucketCors", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) DeleteBucketCors(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketCors", []any{ctx, bucket})
return err
}
func (p *PluginBackend) CreateMultipartUpload(ctx context.Context, input s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
results, err := p.callPluginFunc("CreateMultipartUpload", []any{ctx, input})
if err != nil {
return s3response.InitiateMultipartUploadResult{}, err
}
return results[0].Interface().(s3response.InitiateMultipartUploadResult), convertError(results[1])
}
func (p *PluginBackend) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
results, err := p.callPluginFunc("CompleteMultipartUpload", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.CompleteMultipartUploadOutput), convertError(results[1])
}
func (p *PluginBackend) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
_, err := p.callPluginFunc("AbortMultipartUpload", []any{ctx, input})
return err
}
func (p *PluginBackend) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
results, err := p.callPluginFunc("ListMultipartUploads", []any{ctx, input})
if err != nil {
return s3response.ListMultipartUploadsResult{}, err
}
return results[0].Interface().(s3response.ListMultipartUploadsResult), convertError(results[1])
}
func (p *PluginBackend) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
results, err := p.callPluginFunc("ListParts", []any{ctx, input})
if err != nil {
return s3response.ListPartsResult{}, err
}
return results[0].Interface().(s3response.ListPartsResult), convertError(results[1])
}
func (p *PluginBackend) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
results, err := p.callPluginFunc("UploadPart", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.UploadPartOutput), convertError(results[1])
}
func (p *PluginBackend) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
results, err := p.callPluginFunc("UploadPartCopy", []any{ctx, input})
if err != nil {
return s3response.CopyPartResult{}, err
}
return results[0].Interface().(s3response.CopyPartResult), convertError(results[1])
}
func (p *PluginBackend) PutObject(ctx context.Context, input s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
results, err := p.callPluginFunc("PutObject", []any{ctx, input})
if err != nil {
return s3response.PutObjectOutput{}, err
}
return results[0].Interface().(s3response.PutObjectOutput), convertError(results[1])
}
func (p *PluginBackend) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
results, err := p.callPluginFunc("HeadObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.HeadObjectOutput), convertError(results[1])
}
func (p *PluginBackend) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
results, err := p.callPluginFunc("GetObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.GetObjectOutput), convertError(results[1])
}
func (p *PluginBackend) GetObjectAcl(ctx context.Context, input *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
results, err := p.callPluginFunc("GetObjectAcl", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.GetObjectAclOutput), convertError(results[1])
}
func (p *PluginBackend) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAttributesInput) (s3response.GetObjectAttributesResponse, error) {
results, err := p.callPluginFunc("GetObjectAttributes", []any{ctx, input})
if err != nil {
return s3response.GetObjectAttributesResponse{}, err
}
return results[0].Interface().(s3response.GetObjectAttributesResponse), convertError(results[1])
}
func (p *PluginBackend) CopyObject(ctx context.Context, input s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
results, err := p.callPluginFunc("CopyObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.CopyObjectOutput), convertError(results[1])
}
func (p *PluginBackend) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
results, err := p.callPluginFunc("ListObjects", []any{ctx, input})
if err != nil {
return s3response.ListObjectsResult{}, err
}
return results[0].Interface().(s3response.ListObjectsResult), convertError(results[1])
}
func (p *PluginBackend) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
results, err := p.callPluginFunc("ListObjectsV2", []any{ctx, input})
if err != nil {
return s3response.ListObjectsV2Result{}, err
}
return results[0].Interface().(s3response.ListObjectsV2Result), convertError(results[1])
}
func (p *PluginBackend) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
results, err := p.callPluginFunc("DeleteObject", []any{ctx, input})
if err != nil {
return nil, err
}
return results[0].Interface().(*s3.DeleteObjectOutput), convertError(results[1])
}
func (p *PluginBackend) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteResult, error) {
results, err := p.callPluginFunc("DeleteObjects", []any{ctx, input})
if err != nil {
return s3response.DeleteResult{}, err
}
return results[0].Interface().(s3response.DeleteResult), convertError(results[1])
}
func (p *PluginBackend) PutObjectAcl(ctx context.Context, input *s3.PutObjectAclInput) error {
_, err := p.callPluginFunc("PutObjectAcl", []any{ctx, input})
return err
}
func (p *PluginBackend) ListObjectVersions(ctx context.Context, input *s3.ListObjectVersionsInput) (s3response.ListVersionsResult, error) {
results, err := p.callPluginFunc("ListObjectVersions", []any{ctx, input})
if err != nil {
return s3response.ListVersionsResult{}, err
}
return results[0].Interface().(s3response.ListVersionsResult), convertError(results[1])
}
func (p *PluginBackend) RestoreObject(ctx context.Context, input *s3.RestoreObjectInput) error {
_, err := p.callPluginFunc("RestoreObject", []any{ctx, input})
return err
}
func (p *PluginBackend) SelectObjectContent(ctx context.Context, input *s3.SelectObjectContentInput) func(w *bufio.Writer) {
results, err := p.callPluginFunc("SelectObjectContent", []any{ctx, input})
if err != nil {
return func(w *bufio.Writer) {}
}
return results[0].Interface().(func(w *bufio.Writer))
}
func (p *PluginBackend) GetBucketTagging(ctx context.Context, bucket string) (map[string]string, error) {
results, err := p.callPluginFunc("GetBucketTagging", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().(map[string]string), convertError(results[1])
}
func (p *PluginBackend) PutBucketTagging(ctx context.Context, bucket string, tags map[string]string) error {
_, err := p.callPluginFunc("PutBucketTagging", []any{ctx, bucket, tags})
return err
}
func (p *PluginBackend) DeleteBucketTagging(ctx context.Context, bucket string) error {
_, err := p.callPluginFunc("DeleteBucketTagging", []any{ctx, bucket})
return err
}
func (p *PluginBackend) GetObjectTagging(ctx context.Context, bucket, object string) (map[string]string, error) {
results, err := p.callPluginFunc("GetObjectTagging", []any{ctx, bucket, object})
if err != nil {
return nil, err
}
return results[0].Interface().(map[string]string), convertError(results[1])
}
func (p *PluginBackend) PutObjectTagging(ctx context.Context, bucket, object string, tags map[string]string) error {
_, err := p.callPluginFunc("PutObjectTagging", []any{ctx, bucket, object, tags})
return err
}
func (p *PluginBackend) DeleteObjectTagging(ctx context.Context, bucket, object string) error {
_, err := p.callPluginFunc("DeleteObjectTagging", []any{ctx, bucket, object})
return err
}
func (p *PluginBackend) PutObjectLockConfiguration(ctx context.Context, bucket string, config []byte) error {
_, err := p.callPluginFunc("PutObjectLockConfiguration", []any{ctx, bucket, config})
return err
}
func (p *PluginBackend) GetObjectLockConfiguration(ctx context.Context, bucket string) ([]byte, error) {
results, err := p.callPluginFunc("GetObjectLockConfiguration", []any{ctx, bucket})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) PutObjectRetention(ctx context.Context, bucket, object, versionId string, bypass bool, retention []byte) error {
_, err := p.callPluginFunc("PutObjectRetention", []any{ctx, bucket, object, versionId, bypass, retention})
return err
}
func (p *PluginBackend) GetObjectRetention(ctx context.Context, bucket, object, versionId string) ([]byte, error) {
results, err := p.callPluginFunc("GetObjectRetention", []any{ctx, bucket, object, versionId})
if err != nil {
return nil, err
}
return results[0].Interface().([]byte), convertError(results[1])
}
func (p *PluginBackend) PutObjectLegalHold(ctx context.Context, bucket, object, versionId string, status bool) error {
_, err := p.callPluginFunc("PutObjectLegalHold", []any{ctx, bucket, object, versionId, status})
return err
}
func (p *PluginBackend) GetObjectLegalHold(ctx context.Context, bucket, object, versionId string) (*bool, error) {
results, err := p.callPluginFunc("GetObjectLegalHold", []any{ctx, bucket, object, versionId})
if err != nil {
return nil, err
}
val := results[0].Interface()
if val == nil {
return nil, convertError(results[1])
}
return val.(*bool), convertError(results[1])
}
func (p *PluginBackend) ChangeBucketOwner(ctx context.Context, bucket string, acl []byte) error {
_, err := p.callPluginFunc("ChangeBucketOwner", []any{ctx, bucket, acl})
return err
}
func (p *PluginBackend) ListBucketsAndOwners(ctx context.Context) ([]s3response.Bucket, error) {
results, err := p.callPluginFunc("ListBucketsAndOwners", []any{ctx})
if err != nil {
return nil, err
}
return results[0].Interface().([]s3response.Bucket), convertError(results[1])
}
func convertError(result reflect.Value) error {
if result.IsNil() {
return nil
}
err, ok := result.Interface().(error)
if !ok {
return fmt.Errorf("expected error, got %T", result.Interface())
}
return err
}
var _ backend.Backend = &PluginBackend{}

File diff suppressed because it is too large


@@ -29,7 +29,6 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"golang.org/x/sys/unix"
)
@@ -39,12 +38,12 @@ type tmpfile struct {
f *os.File
bucket string
objname string
isOTmp bool
size int64
needsChown bool
uid int
gid int
newDirPerm fs.FileMode
isOTmp bool
needsChown bool
}
var (
@@ -63,10 +62,6 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
// this is not supported.
fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
// O_TMPFILE not supported, try fallback
err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
if err != nil {


@@ -24,11 +24,9 @@ import (
"io/fs"
"os"
"path/filepath"
"syscall"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
)
type tmpfile struct {
@@ -45,17 +43,11 @@ func (p *Posix) openTmpFile(dir, bucket, obj string, size int64, acct auth.Accou
var err error
err = backend.MkdirAll(dir, uid, gid, doChown, p.newDirPerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, fmt.Errorf("create temp file: %w", err)
}

File diff suppressed because it is too large


@@ -20,12 +20,13 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"net/http"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
@@ -34,27 +35,34 @@ import (
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
type ScoutfsOpts struct {
ChownUID bool
ChownGID bool
GlacierMode bool
BucketLinks bool
NewDirPerm fs.FileMode
DisableNoArchive bool
ChownUID bool
ChownGID bool
GlacierMode bool
BucketLinks bool
NewDirPerm fs.FileMode
}
type ScoutFS struct {
// bucket/object metadata storage facility
meta meta.MetadataStorer
*posix.Posix
rootfd *os.File
rootdir string
// bucket/object metadata storage facility
meta meta.MetadataStorer
// euid/egid are the effective uid/gid of the running versitygw process
// used to determine if chowning is needed
euid int
egid int
// newDirPerm is the permissions to use when creating new directories
newDirPerm fs.FileMode
// glaciermode enables the following behavior:
// GET object: if file offline, return invalid object state
@@ -71,19 +79,6 @@ type ScoutFS struct {
// when objects are uploaded
chownuid bool
chowngid bool
// euid/egid are the effective uid/gid of the running versitygw process
// used to determine if chowning is needed
euid int
egid int
// newDirPerm is the permissions to use when creating new directories
newDirPerm fs.FileMode
// disableNoArchive is used to disable setting scoutam noarchive flag
// on multipart parts. This is enabled by default to prevent archive
// copies of temporary multipart parts.
disableNoArchive bool
}
var _ backend.Backend = &ScoutFS{}
@@ -95,13 +90,8 @@ const (
metaHdr = "X-Amz-Meta"
contentTypeHdr = "content-type"
contentEncHdr = "content-encoding"
contentLangHdr = "content-language"
contentDispHdr = "content-disposition"
cacheCtrlHdr = "cache-control"
expiresHdr = "expires"
emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e"
etagkey = "etag"
checksumsKey = "checksums"
objectRetentionKey = "object-retention"
objectLegalHoldKey = "object-legal-hold"
)
@@ -118,6 +108,8 @@ const (
onameAttr = systemPrefix + "objname"
flagskey = systemPrefix + "sam_flags"
stagecopykey = systemPrefix + "sam_stagereq"
fsBlocksize = 4096
)
const (
@@ -165,31 +157,6 @@ func (s *ScoutFS) getChownIDs(acct auth.Account) (int, int, bool) {
return uid, gid, needsChown
}
func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
out, err := s.Posix.UploadPart(ctx, input)
if err != nil {
return nil, err
}
if !s.disableNoArchive {
sum := sha256.Sum256([]byte(*input.Key))
partPath := filepath.Join(
*input.Bucket, // bucket
metaTmpMultipartDir, // temp multipart dir
fmt.Sprintf("%x", sum), // hashed objname
*input.UploadId, // upload id
fmt.Sprintf("%v", *input.PartNumber), // part number
)
err = setNoArchive(partPath)
if err != nil {
return nil, fmt.Errorf("set noarchive: %w", err)
}
}
return out, err
}
// CompleteMultipartUpload scoutfs complete upload uses the scoutfs move
// blocks ioctl to avoid reading and copying the part data to the final
// object. This saves a read and write cycle for all multipart uploads.
@@ -232,39 +199,14 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
checksums, err := s.retrieveChecksums(nil, bucket, filepath.Join(objdir, uploadID))
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return nil, fmt.Errorf("get mp checksums: %w", err)
}
// ChecksumType should be the same as specified on CreateMultipartUpload
if input.ChecksumType != "" && checksums.Type != input.ChecksumType {
checksumType := checksums.Type
if checksumType == "" {
checksumType = types.ChecksumType("null")
}
return nil, s3err.GetChecksumTypeMismatchOnMpErr(checksumType)
}
// check all parts ok
last := len(parts) - 1
partsize := int64(0)
var totalsize int64
// The initial value is the lower limit of partNumber: 0
var partNumber int32
for i, part := range parts {
if part.PartNumber == nil {
if part.PartNumber == nil || *part.PartNumber < 1 {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.PartNumber < 1 {
return nil, s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
}
if *part.PartNumber <= partNumber {
return nil, s3err.GetAPIError(s3err.ErrInvalidPartOrder)
}
partNumber = *part.PartNumber
partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
fullPartPath := filepath.Join(bucket, partObjPath)
@@ -273,11 +215,20 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if i == 0 {
partsize = fi.Size()
}
// partsize must be a multiple of the filesystem blocksize
// except for last part
if i < last && partsize%fsBlocksize != 0 {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
totalsize += fi.Size()
// all parts except the last must be at least
// the minimum allowed size (5 MiB)
if i < last && fi.Size() < backend.MinPartSize {
return nil, s3err.GetAPIError(s3err.ErrEntityTooSmall)
// all parts except the last need to be the same size
if i < last && partsize != fi.Size() {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
b, err := s.meta.RetrieveAttribute(nil, bucket, partObjPath, etagkey)
@@ -288,21 +239,6 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
if parts[i].ETag == nil || etag != *parts[i].ETag {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
partChecksum, err := s.retrieveChecksums(nil, bucket, partObjPath)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return nil, fmt.Errorf("get part checksum: %w", err)
}
// If a checksum has been provided on multipart initialization
err = validatePartChecksum(partChecksum, part)
if err != nil {
return nil, err
}
}
if input.MpuObjectSize != nil && totalsize != *input.MpuObjectSize {
return nil, s3err.GetIncorrectMpObjectSizeErr(totalsize, *input.MpuObjectSize)
}
// use totalsize=0 because we won't be writing to the file, only moving
@@ -340,11 +276,7 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
userMetaData := make(map[string]string)
upiddir := filepath.Join(objdir, uploadID)
objMeta := s.loadUserMetaData(bucket, upiddir, userMetaData)
err = s.storeObjectMetadata(f.File(), bucket, object, objMeta)
if err != nil {
return nil, err
}
cType, _ := s.loadUserMetaData(bucket, upiddir, userMetaData)
objname := filepath.Join(bucket, object)
dir := filepath.Dir(objname)
@@ -375,6 +307,14 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
}
}
// set content-type
if cType != "" {
err := s.meta.StoreAttribute(f.File(), bucket, object, contentTypeHdr, []byte(cType))
if err != nil {
return nil, fmt.Errorf("set object content type: %w", err)
}
}
// load and set legal hold
lHold, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, objectLegalHoldKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
@@ -413,10 +353,10 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
}
// cleanup tmp dirs
os.RemoveAll(filepath.Join(bucket, upiddir))
os.RemoveAll(upiddir)
// use Remove for objdir in case there are still other uploads
// for the same object name outstanding
os.Remove(filepath.Join(bucket, objdir))
os.Remove(objdir)
return &s3.CompleteMultipartUploadOutput{
Bucket: &bucket,
@@ -425,125 +365,6 @@ func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.Complet
}, nil
}
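The loop above encodes the completion rules: every part number must be present, positive, and strictly ascending, and all parts but the last must satisfy the size checks. A minimal, self-contained sketch of the ordering rule (hypothetical ETag values, not taken from this repository):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Parts must be listed with ascending, unique part numbers or the
	// complete request is rejected (ErrInvalidPartOrder above).
	parts := []types.CompletedPart{
		{PartNumber: aws.Int32(1), ETag: aws.String("etag-1")},
		{PartNumber: aws.Int32(2), ETag: aws.String("etag-2")},
	}
	var prev int32 // lower limit of part numbers is 0
	for _, p := range parts {
		if p.PartNumber == nil || *p.PartNumber < 1 {
			fmt.Println("invalid part")
			return
		}
		if *p.PartNumber <= prev {
			fmt.Println("invalid part order")
			return
		}
		prev = *p.PartNumber
	}
	fmt.Println("part order ok")
}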
func (s *ScoutFS) storeObjectMetadata(f *os.File, bucket, object string, m objectMetadata) error {
if getString(m.ContentType) != "" {
err := s.meta.StoreAttribute(f, bucket, object, contentTypeHdr, []byte(*m.ContentType))
if err != nil {
return fmt.Errorf("set content-type: %w", err)
}
}
if getString(m.ContentEncoding) != "" {
err := s.meta.StoreAttribute(f, bucket, object, contentEncHdr, []byte(*m.ContentEncoding))
if err != nil {
return fmt.Errorf("set content-encoding: %w", err)
}
}
if getString(m.ContentDisposition) != "" {
err := s.meta.StoreAttribute(f, bucket, object, contentDispHdr, []byte(*m.ContentDisposition))
if err != nil {
return fmt.Errorf("set content-disposition: %w", err)
}
}
if getString(m.ContentLanguage) != "" {
err := s.meta.StoreAttribute(f, bucket, object, contentLangHdr, []byte(*m.ContentLanguage))
if err != nil {
return fmt.Errorf("set content-language: %w", err)
}
}
if getString(m.CacheControl) != "" {
err := s.meta.StoreAttribute(f, bucket, object, cacheCtrlHdr, []byte(*m.CacheControl))
if err != nil {
return fmt.Errorf("set cache-control: %w", err)
}
}
if getString(m.Expires) != "" {
err := s.meta.StoreAttribute(f, bucket, object, expiresHdr, []byte(*m.Expires))
if err != nil {
return fmt.Errorf("set cache-control: %w", err)
}
}
return nil
}
func validatePartChecksum(checksum s3response.Checksum, part types.CompletedPart) error {
n := numberOfChecksums(part)
if n > 1 {
return s3err.GetAPIError(s3err.ErrInvalidChecksumPart)
}
if checksum.Algorithm == "" {
if n != 0 {
return s3err.GetAPIError(s3err.ErrInvalidPart)
}
return nil
}
algo := checksum.Algorithm
if n == 0 {
return s3err.APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("The upload was created using a %v checksum. The complete request must include the checksum for each part. It was missing for part %v in the request.", strings.ToLower(string(algo)), *part.PartNumber),
HTTPStatusCode: http.StatusBadRequest,
}
}
for _, cs := range []struct {
checksum *string
expectedChecksum string
algo types.ChecksumAlgorithm
}{
{part.ChecksumCRC32, getString(checksum.CRC32), types.ChecksumAlgorithmCrc32},
{part.ChecksumCRC32C, getString(checksum.CRC32C), types.ChecksumAlgorithmCrc32c},
{part.ChecksumSHA1, getString(checksum.SHA1), types.ChecksumAlgorithmSha1},
{part.ChecksumSHA256, getString(checksum.SHA256), types.ChecksumAlgorithmSha256},
{part.ChecksumCRC64NVME, getString(checksum.CRC64NVME), types.ChecksumAlgorithmCrc64nvme},
} {
if cs.checksum == nil {
continue
}
if !utils.IsValidChecksum(*cs.checksum, cs.algo) {
return s3err.GetAPIError(s3err.ErrInvalidChecksumPart)
}
if *cs.checksum != cs.expectedChecksum {
if algo == cs.algo {
return s3err.GetAPIError(s3err.ErrInvalidPart)
}
return s3err.APIError{
Code: "BadDigest",
Description: fmt.Sprintf("The %v you specified for part %v did not match what we received.", strings.ToLower(string(cs.algo)), *part.PartNumber),
HTTPStatusCode: http.StatusBadRequest,
}
}
}
return nil
}
func numberOfChecksums(part types.CompletedPart) int {
counter := 0
if getString(part.ChecksumCRC32) != "" {
counter++
}
if getString(part.ChecksumCRC32C) != "" {
counter++
}
if getString(part.ChecksumSHA1) != "" {
counter++
}
if getString(part.ChecksumSHA256) != "" {
counter++
}
if getString(part.ChecksumCRC64NVME) != "" {
counter++
}
return counter
}
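validatePartChecksum and numberOfChecksums together enforce that a completed part carries at most one checksum value. A standalone mirror of that counting rule (the helper name and sample digests are hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// countChecksums mirrors numberOfChecksums above: count the non-empty
// checksum fields on a completed part.
func countChecksums(p types.CompletedPart) int {
	n := 0
	for _, v := range []*string{
		p.ChecksumCRC32, p.ChecksumCRC32C, p.ChecksumSHA1,
		p.ChecksumSHA256, p.ChecksumCRC64NVME,
	} {
		if v != nil && *v != "" {
			n++
		}
	}
	return n
}

func main() {
	ok := types.CompletedPart{ChecksumCRC32: aws.String("AAAAAA==")}
	bad := types.CompletedPart{
		ChecksumCRC32: aws.String("AAAAAA=="),
		ChecksumSHA1:  aws.String("2jmj7l5rSw0yVb/vlWAYkK/YBwk="),
	}
	// A count above 1 is what trips ErrInvalidChecksumPart above.
	fmt.Println(countChecksums(ok), countChecksums(bad)) // 1 2
}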
func (s *ScoutFS) checkUploadIDExists(bucket, object, uploadID string) ([32]byte, error) {
sum := sha256.Sum256([]byte(object))
objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
@@ -558,21 +379,12 @@ func (s *ScoutFS) checkUploadIDExists(bucket, object, uploadID string) ([32]byte
return sum, nil
}
type objectMetadata struct {
ContentType *string
ContentEncoding *string
ContentDisposition *string
ContentLanguage *string
CacheControl *string
Expires *string
}
// fill out the user metadata map with the metadata for the object
// and return the content type and encoding
func (s *ScoutFS) loadUserMetaData(bucket, object string, m map[string]string) (string, string) {
ents, err := s.meta.ListAttributes(bucket, object)
if err != nil || len(ents) == 0 {
return objectMetadata{}
return "", ""
}
for _, e := range ents {
if !isValidMeta(e) {
@@ -589,39 +401,20 @@ func (s *ScoutFS) loadUserMetaData(bucket, object string, m map[string]string) o
m[strings.TrimPrefix(e, fmt.Sprintf("%v.", metaHdr))] = string(b)
}
var contentType, contentEncoding string
b, _ := s.meta.RetrieveAttribute(nil, bucket, object, contentTypeHdr)
contentType = string(b)
if contentType != "" {
m[contentTypeHdr] = contentType
}
b, _ = s.meta.RetrieveAttribute(nil, bucket, object, contentEncHdr)
contentEncoding = string(b)
if contentEncoding != "" {
m[contentEncHdr] = contentEncoding
}
return contentType, contentEncoding
}
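A small self-contained sketch of the naming convention this helper relies on: user metadata lives under a shared attribute prefix and is trimmed back to bare key names on read. The prefix value below is an assumption; the real metaHdr constant is defined elsewhere in this package.

package main

import (
	"fmt"
	"strings"
)

// Assumed prefix for illustration only.
const metaHdr = "X-Amz-Meta"

func main() {
	// Attribute names as they might come back from ListAttributes.
	attrs := []string{metaHdr + ".color", metaHdr + ".owner", "etag"}
	m := make(map[string]string)
	for _, e := range attrs {
		if !strings.HasPrefix(e, metaHdr+".") {
			continue // not user metadata
		}
		m[strings.TrimPrefix(e, metaHdr+".")] = "(attribute value)"
	}
	fmt.Println(m) // map[color:(attribute value) owner:(attribute value)]
}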
func isValidMeta(val string) bool {
@@ -635,17 +428,99 @@ func isValidMeta(val string) bool {
}
func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
if input.Bucket == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidBucketName)
}
if input.Key == nil {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
bucket := *input.Bucket
object := *input.Key
if input.PartNumber != nil {
uploadId, sum, err := s.retrieveUploadId(bucket, object)
if err != nil {
return nil, err
}
ents, err := os.ReadDir(filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum), uploadId))
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
return nil, fmt.Errorf("read parts: %w", err)
}
partPath := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum), uploadId, fmt.Sprintf("%v", *input.PartNumber))
part, err := os.Stat(filepath.Join(bucket, partPath))
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
if errors.Is(err, syscall.ENAMETOOLONG) {
return nil, s3err.GetAPIError(s3err.ErrKeyTooLong)
}
if err != nil {
return nil, fmt.Errorf("stat part: %w", err)
}
b, err := s.meta.RetrieveAttribute(nil, bucket, partPath, etagkey)
etag := string(b)
if err != nil {
etag = ""
}
partsCount := int32(len(ents))
size := part.Size()
return &s3.HeadObjectOutput{
LastModified: backend.GetTimePtr(part.ModTime()),
ETag: &etag,
PartsCount: &partsCount,
ContentLength: &size,
}, nil
}
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return nil, fmt.Errorf("stat bucket: %w", err)
}
objPath := filepath.Join(bucket, object)
fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if errors.Is(err, syscall.ENAMETOOLONG) {
return nil, s3err.GetAPIError(s3err.ErrKeyTooLong)
}
if err != nil {
return nil, fmt.Errorf("stat object: %w", err)
}
if strings.HasSuffix(object, "/") && !fi.IsDir() {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
userMetaData := make(map[string]string)
contentType, contentEncoding := s.loadUserMetaData(bucket, object, userMetaData)
if fi.IsDir() {
// this is the media type for directories in AWS and Nextcloud
contentType = "application/x-directory"
}
b, err := s.meta.RetrieveAttribute(nil, bucket, object, etagkey)
etag := string(b)
if err != nil {
etag = ""
}
stclass := types.StorageClassStandard
requestOngoing := ""
if s.glaciermode {
requestOngoing = stageComplete
// Check if there are any offline extents associated with this file.
@@ -672,17 +547,62 @@ func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s
requestOngoing = stageInProgress
}
}
}
contentLength := fi.Size()
var objectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
status, err := s.Posix.GetObjectLegalHold(ctx, bucket, object, *input.VersionId)
if err == nil {
if *status {
objectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOn
} else {
objectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOff
}
}
var objectLockMode types.ObjectLockMode
var objectLockRetainUntilDate *time.Time
retention, err := s.Posix.GetObjectRetention(ctx, bucket, object, *input.VersionId)
if err == nil {
var config types.ObjectLockRetention
if err := json.Unmarshal(retention, &config); err == nil {
objectLockMode = types.ObjectLockMode(config.Mode)
objectLockRetainUntilDate = config.RetainUntilDate
}
}
return &s3.HeadObjectOutput{
ContentLength: &contentLength,
ContentType: &contentType,
ContentEncoding: &contentEncoding,
ETag: &etag,
LastModified: backend.GetTimePtr(fi.ModTime()),
Metadata: userMetaData,
StorageClass: stclass,
Restore: &requestOngoing,
ObjectLockLegalHoldStatus: objectLockLegalHoldStatus,
ObjectLockMode: objectLockMode,
ObjectLockRetainUntilDate: objectLockRetainUntilDate,
}, nil
}
func (s *ScoutFS) retrieveUploadId(bucket, object string) (string, [32]byte, error) {
sum := sha256.Sum256([]byte(object))
objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
entries, err := os.ReadDir(objdir)
if err != nil || len(entries) == 0 {
return "", [32]byte{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
return entries[0].Name(), sum, nil
}
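To make the on-disk layout concrete: each upload for an object lives under a directory derived from the SHA-256 of the object name, and retrieveUploadId simply picks the first entry. A runnable sketch (the metaTmpMultipartDir value is an assumption for illustration):

package main

import (
	"crypto/sha256"
	"fmt"
	"path/filepath"
)

// Assumed value; the real constant lives elsewhere in this package.
const metaTmpMultipartDir = ".sgwtmp/multipart"

func main() {
	bucket, object, uploadID := "mybucket", "a/b/c/myobject", "01EXAMPLEULID"
	// Uploads for an object are grouped under sha256(object name).
	sum := sha256.Sum256([]byte(object))
	objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
	// Each part is a file named after its part number.
	fmt.Println(filepath.Join(objdir, uploadID, "1"))
}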
func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
bucket := *input.Bucket
object := *input.Key
acceptRange := *input.Range
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
@@ -695,7 +615,7 @@ func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.
objPath := filepath.Join(bucket, object)
fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if errors.Is(err, syscall.ENAMETOOLONG) {
@@ -709,6 +629,31 @@ func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
startOffset, length, err := backend.ParseRange(fi.Size(), acceptRange)
if err != nil {
return nil, err
}
objSize := fi.Size()
if fi.IsDir() {
// directory objects are always 0 len
objSize = 0
length = 0
}
if length == -1 {
length = fi.Size() - startOffset
}
if startOffset+length > fi.Size() {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
var contentRange string
if acceptRange != "" {
contentRange = fmt.Sprintf("bytes %v-%v/%v", startOffset, startOffset+length-1, objSize)
}
if s.glaciermode {
// Check if there are any offline extents associated with this file.
// If so, we will return the InvalidObjectState error.
@@ -724,7 +669,67 @@ func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.
}
}
f, err := os.Open(objPath)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
return nil, fmt.Errorf("open object: %w", err)
}
rdr := io.NewSectionReader(f, startOffset, length)
userMetaData := make(map[string]string)
contentType, contentEncoding := s.loadUserMetaData(bucket, object, userMetaData)
b, err := s.meta.RetrieveAttribute(nil, bucket, object, etagkey)
etag := string(b)
if err != nil {
etag = ""
}
tags, err := s.getXattrTags(bucket, object)
if err != nil {
return nil, fmt.Errorf("get object tags: %w", err)
}
tagCount := int32(len(tags))
return &s3.GetObjectOutput{
AcceptRanges: &acceptRange,
ContentLength: &length,
ContentEncoding: &contentEncoding,
ContentType: &contentType,
ETag: &etag,
LastModified: backend.GetTimePtr(fi.ModTime()),
Metadata: userMetaData,
TagCount: &tagCount,
StorageClass: types.StorageClassStandard,
ContentRange: &contentRange,
Body: &backend.FileSectionReadCloser{R: rdr, F: f},
}, nil
}
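The Range handling above relies on backend.ParseRange returning a start offset and length. A simplified, self-contained stand-in for that behavior (an assumption for illustration, not the actual backend.ParseRange), supporting only the bytes=start-end and bytes=start- forms:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseRange returns (startOffset, length, error) for a byte-range
// header against an object of the given size.
func parseRange(size int64, rng string) (int64, int64, error) {
	if rng == "" {
		return 0, size, nil
	}
	spec, ok := strings.CutPrefix(rng, "bytes=")
	if !ok {
		return 0, 0, fmt.Errorf("unsupported range %q", rng)
	}
	startStr, endStr, _ := strings.Cut(spec, "-")
	start, err := strconv.ParseInt(startStr, 10, 64)
	if err != nil || start < 0 || start >= size {
		return 0, 0, fmt.Errorf("invalid range %q", rng)
	}
	if endStr == "" {
		return start, size - start, nil // open-ended range
	}
	end, err := strconv.ParseInt(endStr, 10, 64)
	if err != nil || end < start {
		return 0, 0, fmt.Errorf("invalid range %q", rng)
	}
	if end >= size {
		end = size - 1 // clamp to the last byte
	}
	return start, end - start + 1, nil
}

func main() {
	start, length, _ := parseRange(100, "bytes=10-19")
	// Mirrors the Content-Range value built above.
	fmt.Printf("bytes %d-%d/%d\n", start, start+length-1, 100)
}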
func (s *ScoutFS) getXattrTags(bucket, object string) (map[string]string, error) {
tags := make(map[string]string)
b, err := xattr.Get(filepath.Join(bucket, object), "user."+tagHdr)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if isNoAttr(err) {
return tags, nil
}
if err != nil {
return nil, fmt.Errorf("get tags: %w", err)
}
err = json.Unmarshal(b, &tags)
if err != nil {
return nil, fmt.Errorf("unmarshal tags: %w", err)
}
return tags, nil
}
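Tags are stored as a single JSON-encoded map in one user xattr. A runnable sketch of that round trip using github.com/pkg/xattr (the attribute name below is an assumption; the real tagHdr constant lives elsewhere in this package):

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/pkg/xattr"
)

// Assumed attribute name for illustration only.
const tagAttr = "user.X-Amz-Tagging"

func main() {
	f, err := os.CreateTemp(".", "obj")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Write the tag map as JSON into a single xattr.
	b, _ := json.Marshal(map[string]string{"env": "dev"})
	if err := xattr.Set(f.Name(), tagAttr, b); err != nil {
		fmt.Println("xattrs not supported here:", err)
		return
	}
	// Read it back and decode.
	out, _ := xattr.Get(f.Name(), tagAttr)
	tags := map[string]string{}
	_ = json.Unmarshal(out, &tags)
	fmt.Println(tags["env"]) // dev
}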
func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
@@ -850,24 +855,17 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
return s3response.Object{}, fmt.Errorf("get fileinfo: %w", err)
}
size := int64(0)
key := path + "/"
mtime := fi.ModTime()
return s3response.Object{
ETag: &etag,
Key: &key,
LastModified: &mtime,
Size: &size,
StorageClass: types.ObjectStorageClassStandard,
}, nil
}
// Retrieve the object checksum algorithm
checksums, err := s.retrieveChecksums(nil, bucket, path)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return s3response.Object{}, backend.ErrSkipObj
}
// file object, get object info and fill out object data
b, err := s.meta.RetrieveAttribute(nil, bucket, path, etagkey)
if errors.Is(err, fs.ErrNotExist) {
@@ -909,27 +907,15 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
mtime := fi.ModTime()
return s3response.Object{
ETag: &etag,
Key: &path,
LastModified: &mtime,
Size: &size,
StorageClass: sc,
ChecksumAlgorithm: []types.ChecksumAlgorithm{checksums.Algorithm},
ChecksumType: checksums.Type,
}, nil
}
}
func (s *ScoutFS) retrieveChecksums(f *os.File, bucket, object string) (checksums s3response.Checksum, err error) {
checksumsAtr, err := s.meta.RetrieveAttribute(f, bucket, object, checksumsKey)
if err != nil {
return checksums, err
}
err = json.Unmarshal(checksumsAtr, &checksums)
return checksums, err
}
// RestoreObject will set stage request on file if offline and do nothing if
// file is online
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
@@ -955,11 +941,28 @@ func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput)
return nil
}
func getString(str *string) string {
if str == nil {
return ""
}
return *str
}
func setStaging(objname string) error {
b, err := xattr.Get(objname, flagskey)
if err != nil && !isNoAttr(err) {
return err
}
var oldflags uint64
if !isNoAttr(err) {
err = json.Unmarshal(b, &oldflags)
if err != nil {
return err
}
}
newflags := oldflags | Staging
if newflags == oldflags {
// no flags change, just return
return nil
}
return fSetNewGlobalFlags(objname, newflags)
}
func isStaging(objname string) (bool, error) {
@@ -979,28 +982,8 @@ func isStaging(objname string) (bool, error) {
return flags&Staging == Staging, nil
}
func setFlag(objname string, flag uint64) error {
b, err := xattr.Get(objname, flagskey)
if err != nil && !isNoAttr(err) {
return err
}
var oldflags uint64
if !isNoAttr(err) {
err = json.Unmarshal(b, &oldflags)
if err != nil {
return err
}
}
newflags := oldflags | flag
if newflags == oldflags {
// no flags change, just return
return nil
}
b, err = json.Marshal(&newflags)
if err != nil {
return err
}
return xattr.Set(objname, flagskey, b)
}
func fSetNewGlobalFlags(objname string, flags uint64) error {
b, err := json.Marshal(&flags)
if err != nil {
return err
}
return xattr.Set(objname, flagskey, b)
}
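Both setFlag and fSetNewGlobalFlags implement the same read-modify-write pattern: read the JSON-encoded uint64 flag word from an xattr, OR in the new bit, and skip the write when nothing changed. A self-contained sketch of that pattern (the attribute name and bit value are assumptions):

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/pkg/xattr"
)

// Assumed names for illustration; the real flagskey and Staging
// constants are defined elsewhere in this package.
const (
	flagsAttr = "user.scoutfs.flags"
	staging   = uint64(1 << 0)
)

// orFlag ORs a bit into the JSON-encoded flag word stored in an xattr,
// skipping the write when the bit is already set.
func orFlag(path string, flag uint64) error {
	var flags uint64
	if b, err := xattr.Get(path, flagsAttr); err == nil {
		if err := json.Unmarshal(b, &flags); err != nil {
			return err
		}
	}
	if flags|flag == flags {
		return nil // no change, avoid a redundant write
	}
	b, err := json.Marshal(flags | flag)
	if err != nil {
		return err
	}
	return xattr.Set(path, flagsAttr, b)
}

func main() {
	f, err := os.CreateTemp(".", "flags-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	fmt.Println(orFlag(f.Name(), staging)) // <nil> on xattr-capable filesystems
}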
func setNoArchive(objname string) error {
return setFlag(objname, NoArchive)
}
func isNoAttr(err error) bool {
xerr, ok := err.(*xattr.Error)
if ok && xerr.Err == xattr.ENOATTR {
return true
}
return false
}


@@ -52,15 +52,14 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
}
return &ScoutFS{
Posix: p,
rootfd: f,
rootdir: rootdir,
meta: metastore,
chownuid: opts.ChownUID,
chowngid: opts.ChownGID,
glaciermode: opts.GlacierMode,
newDirPerm: opts.NewDirPerm,
}, nil
}
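A hypothetical wiring of the constructor above, with option fields taken from those set in runScoutfs further down (the import path is assumed from this repository's layout):

package main

import (
	"log"

	"github.com/versity/versitygw/backend/scoutfs"
)

func main() {
	// Option fields mirror those set in runScoutfs below.
	be, err := scoutfs.New("/mnt/scoutfs/gwroot", scoutfs.ScoutfsOpts{
		GlacierMode: true,
		NewDirPerm:  0755,
	})
	if err != nil {
		log.Fatalf("init scoutfs: %v", err)
	}
	_ = be // hand the backend to the gateway runner
}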
@@ -71,10 +70,10 @@ type tmpfile struct {
bucket string
objname string
size int64
uid int
gid int
newDirPerm fs.FileMode
needsChown bool
}
var (


@@ -21,17 +21,16 @@ import (
"io/fs"
"sort"
"strings"
"syscall"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3response"
)
type WalkResults struct {
CommonPrefixes []types.CommonPrefix
Objects []s3response.Object
Truncated bool
NextMarker string
}
type GetObjFunc func(path string, d fs.DirEntry) (s3response.Object, error)
@@ -44,19 +43,12 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
cpmap := make(map[string]struct{})
var objects []s3response.Object
// if max is 0, it should return an empty, non-truncated result
if max == 0 {
return WalkResults{
Truncated: false,
}, nil
}
var pastMarker bool
if marker == "" {
pastMarker = true
}
pastMax := max == 0
var newMarker string
var truncated bool
@@ -83,6 +75,14 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return fs.SkipDir
}
if pastMax {
if len(objects) != 0 {
newMarker = *objects[len(objects)-1].Key
truncated = true
}
return fs.SkipAll
}
// After this point, return skipflag instead of nil
// so we can skip a directory without an early return
var skipflag error
@@ -107,7 +107,13 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
strings.Contains(strings.TrimPrefix(path+"/", prefix), delimiter) {
skipflag = fs.SkipDir
} else {
// TODO: can we do better here rather than a second readdir
// per directory?
ents, err := fs.ReadDir(fileSystem, path)
if err != nil {
return fmt.Errorf("readdir %q: %w", path, err)
}
if len(ents) == 0 && delimiter == "" {
dirobj, err := getObj(path+"/", d)
if err == ErrSkipObj {
return skipflag
}
if err != nil {
return fmt.Errorf("directory to object %q: %w", path, err)
}
if pastMax {
truncated = true
return fs.SkipAll
}
objects = append(objects, dirobj)
if (len(objects) + len(cpmap)) == int(max) {
newMarker = path
pastMax = true
}
return skipflag
}
@@ -167,15 +158,9 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
if err != nil {
return fmt.Errorf("file to object %q: %w", path, err)
}
if pastMax {
truncated = true
return fs.SkipAll
}
objects = append(objects, obj)
if max > 0 && (len(objects)+len(cpmap)) == int(max) {
newMarker = path
pastMax = true
}
@@ -213,13 +198,8 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
if err != nil {
return fmt.Errorf("file to object %q: %w", path, err)
}
if pastMax {
truncated = true
return fs.SkipAll
}
objects = append(objects, obj)
if (len(objects) + len(cpmap)) == int(max) {
newMarker = path
pastMax = true
}
return skipflag
@@ -240,21 +220,18 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
return skipflag
}
if pastMax {
truncated = true
return fs.SkipAll
}
cpmap[cpref] = struct{}{}
if (len(objects) + len(cpmap)) == int(max) {
newMarker = cpref
pastMax = true
truncated = true
return fs.SkipAll
}
return skipflag
})
if err != nil {
// suppress file not found caused by user's prefix
if errors.Is(err, fs.ErrNotExist) {
return WalkResults{}, nil
}
return WalkResults{}, err
@@ -273,10 +250,6 @@ func Walk(ctx context.Context, fileSystem fs.FS, prefix, delimiter, marker strin
})
}
if !truncated {
newMarker = ""
}
return WalkResults{
CommonPrefixes: commonPrefixes,
Objects: objects,
@@ -295,18 +268,18 @@ func contains(a string, strs []string) bool {
}
type WalkVersioningResults struct {
CommonPrefixes []types.CommonPrefix
ObjectVersions []types.ObjectVersion
DelMarkers []types.DeleteMarkerEntry
Truncated bool
NextMarker string
NextVersionIdMarker string
}
type ObjVersionFuncResult struct {
ObjectVersions []types.ObjectVersion
DelMarkers []types.DeleteMarkerEntry
NextVersionIdMarker string
Truncated bool
}
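The marker fields above define the pagination contract: a truncated result carries a NextMarker that the caller feeds back in as the marker for the next page. A self-contained sketch of that loop over a plain key list (simplified; it does not call Walk itself):

package main

import (
	"fmt"
	"sort"
)

// page returns up to max keys after the marker, plus a next marker
// and a truncation flag, mirroring the WalkResults contract.
func page(keys []string, marker string, max int) (out []string, next string, truncated bool) {
	sort.Strings(keys)
	for _, k := range keys {
		if marker != "" && k <= marker {
			continue // skip everything up to and including the marker
		}
		if len(out) == max {
			return out, out[len(out)-1], true
		}
		out = append(out, k)
	}
	return out, "", false
}

func main() {
	keys := []string{"a", "b", "c", "d", "e"}
	var marker string
	for {
		out, next, more := page(keys, marker, 2)
		fmt.Println(out, more) // [a b] true, [c d] true, [e] false
		if !more {
			break
		}
		marker = next
	}
}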


@@ -41,8 +41,8 @@ type testcase struct {
prefix string
delimiter string
marker string
expected backend.WalkResults
maxObjs int32
}
func getObj(path string, d fs.DirEntry) (s3response.Object, error) {


@@ -74,9 +74,6 @@ var (
metricsService string
statsdServers string
dogstatsServers string
ipaHost, ipaVaultName string
ipaUser, ipaPassword string
ipaInsecure, ipaDebug bool
)
var (
@@ -98,7 +95,6 @@ func main() {
scoutfsCommand(),
s3Command(),
azureCommand(),
pluginCommand(),
adminCommand(),
testCommand(),
utilsCommand(),
@@ -210,7 +206,6 @@ func initFlags() []cli.Flag {
&cli.BoolFlag{
Name: "debug",
Usage: "enable debug output",
Value: false,
EnvVars: []string{"VGW_DEBUG"},
Destination: &debug,
},
@@ -511,42 +506,6 @@ func initFlags() []cli.Flag {
Aliases: []string{"mds"},
Destination: &dogstatsServers,
},
&cli.StringFlag{
Name: "ipa-host",
Usage: "FreeIPA server url e.g. https://ipa.example.test",
EnvVars: []string{"VGW_IPA_HOST"},
Destination: &ipaHost,
},
&cli.StringFlag{
Name: "ipa-vault-name",
Usage: "A name of the user vault containing their secret",
EnvVars: []string{"VGW_IPA_VAULT_NAME"},
Destination: &ipaVaultName,
},
&cli.StringFlag{
Name: "ipa-user",
Usage: "Username used to connect to FreeIPA. Needs permissions to read user vault contents",
EnvVars: []string{"VGW_IPA_USER"},
Destination: &ipaUser,
},
&cli.StringFlag{
Name: "ipa-password",
Usage: "Password of the user used to connect to FreeIPA.",
EnvVars: []string{"VGW_IPA_PASSWORD"},
Destination: &ipaPassword,
},
&cli.BoolFlag{
Name: "ipa-insecure",
Usage: "Verify TLS certificate of FreeIPA server. Default is 'true'.",
EnvVars: []string{"VGW_IPA_INSECURE"},
Destination: &ipaInsecure,
},
&cli.BoolFlag{
Name: "ipa-debug",
Usage: "FreeIPA IAM debug output",
EnvVars: []string{"VGW_IPA_DEBUG"},
Destination: &ipaDebug,
},
}
}
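Every option here follows the same urfave/cli v2 pattern: a flag with an environment-variable fallback bound to a package-level destination. A minimal runnable sketch of that pattern:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

var debug bool

func main() {
	app := &cli.App{
		Name: "example",
		Flags: []cli.Flag{
			&cli.BoolFlag{
				Name:        "debug",
				Usage:       "enable debug output",
				EnvVars:     []string{"VGW_DEBUG"}, // env var wins when the flag is absent
				Destination: &debug,
			},
		},
		Action: func(*cli.Context) error {
			fmt.Println("debug:", debug)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}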
@@ -664,12 +623,6 @@ func runGateway(ctx context.Context, be backend.Backend) error {
CacheDisable: iamCacheDisable,
CacheTTL: iamCacheTTL,
CachePrune: iamCachePrune,
IpaHost: ipaHost,
IpaVaultName: ipaVaultName,
IpaUser: ipaUser,
IpaPassword: ipaPassword,
IpaInsecure: ipaInsecure,
IpaDebug: ipaDebug,
})
if err != nil {
return fmt.Errorf("setup iam: %w", err)


@@ -1,64 +0,0 @@
// Copyright 2025 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
"fmt"
"github.com/urfave/cli/v2"
vgwplugin "github.com/versity/versitygw/backend/plugin"
)
var (
pluginPath string
pluginConfig string
)
func pluginCommand() *cli.Command {
return &cli.Command{
Name: "plugin",
Usage: "plugin storage backend",
Description: `This tells the gateway to load the backend from a dynamic runtime plugin.`,
Action: runPlugin,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "file",
Usage: "path to plugin shared object file",
Value: "",
Required: true,
EnvVars: []string{"VGW_PLUGIN_FILE"},
Destination: &pluginPath,
Aliases: []string{"f"},
},
&cli.StringFlag{
Name: "config",
Usage: "configuration option for the plugin",
Value: "",
Required: true,
EnvVars: []string{"VGW_PLUGIN_CONFIG"},
Destination: &pluginConfig,
Aliases: []string{"c"},
},
},
}
}
func runPlugin(ctx *cli.Context) error {
be, err := vgwplugin.NewPluginBackend(pluginPath, pluginConfig)
if err != nil {
return fmt.Errorf("init plugin backend: %w", err)
}
return runGateway(ctx.Context, be)
}


@@ -29,8 +29,6 @@ var (
bucketlinks bool
versioningDir string
dirPerms uint
sidecar string
nometa bool
)
func posixCommand() *cli.Command {
@@ -81,18 +79,6 @@ will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
DefaultText: "0755",
Value: 0755,
},
&cli.StringFlag{
Name: "sidecar",
Usage: "use provided sidecar directory to store metadata",
EnvVars: []string{"VGW_META_SIDECAR"},
Destination: &sidecar,
},
&cli.BoolFlag{
Name: "nometa",
Usage: "disable metadata storage",
EnvVars: []string{"VGW_META_NONE"},
Destination: &nometa,
},
},
}
}
@@ -103,45 +89,24 @@ func runPosix(ctx *cli.Context) error {
}
gwroot := (ctx.Args().Get(0))
err := meta.XattrMeta{}.Test(gwroot)
if err != nil {
return fmt.Errorf("posix xattr check: %v", err)
}
if dirPerms > math.MaxUint32 {
return fmt.Errorf("invalid directory permissions: %d", dirPerms)
}
if nometa && sidecar != "" {
return fmt.Errorf("cannot use both nometa and sidecar metadata")
}
opts := posix.PosixOpts{
ChownUID: chownuid,
ChownGID: chowngid,
BucketLinks: bucketlinks,
VersioningDir: versioningDir,
NewDirPerm: fs.FileMode(dirPerms),
}
var ms meta.MetadataStorer
switch {
case sidecar != "":
sc, err := meta.NewSideCar(sidecar)
if err != nil {
return fmt.Errorf("failed to init sidecar metadata: %w", err)
}
ms = sc
opts.SideCarDir = sidecar
case nometa:
ms = meta.NoMeta{}
default:
ms = meta.XattrMeta{}
}
be, err := posix.New(gwroot, ms, opts)
if err != nil {
return fmt.Errorf("failed to init posix backend: %w", err)
}
}
return runGateway(ctx.Context, be)


@@ -24,8 +24,7 @@ import (
)
var (
glacier bool
disableNoArchive bool
)
func scoutfsCommand() *cli.Command {
@@ -80,12 +79,6 @@ move interfaces as well as support for tiered filesystems.`,
DefaultText: "0755",
Value: 0755,
},
&cli.BoolFlag{
Name: "disable-noarchive",
Usage: "disable setting noarchive for multipart part uploads",
EnvVars: []string{"VGW_DISABLE_NOARCHIVE"},
Destination: &disableNoArchive,
},
},
}
}
@@ -105,7 +98,6 @@ func runScoutfs(ctx *cli.Context) error {
opts.ChownGID = chowngid
opts.BucketLinks = bucketlinks
opts.NewDirPerm = fs.FileMode(dirPerms)
opts.DisableNoArchive = disableNoArchive
be, err := scoutfs.New(ctx.Args().Get(0), opts)
if err != nil {


@@ -38,7 +38,6 @@ var (
checksumDisable bool
versioningEnabled bool
azureTests bool
tlsStatus bool
)
func testCommand() *cli.Command {
@@ -80,12 +79,6 @@ func initTestFlags() []cli.Flag {
Aliases: []string{"d"},
Destination: &debug,
},
&cli.BoolFlag{
Name: "allow-insecure",
Usage: "skip tls verification",
Aliases: []string{"ai"},
Destination: &tlsStatus,
},
}
}
@@ -218,7 +211,6 @@ func initTestCommands() []*cli.Command {
integration.WithEndpoint(endpoint),
integration.WithConcurrency(concurrency),
integration.WithPartSize(partSize),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
@@ -279,7 +271,6 @@ func initTestCommands() []*cli.Command {
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithConcurrency(concurrency),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
@@ -305,7 +296,6 @@ func getAction(tf testFunc) func(*cli.Context) error {
integration.WithSecret(awsSecret),
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())
@@ -343,7 +333,6 @@ func extractIntTests() (commands []*cli.Command) {
integration.WithSecret(awsSecret),
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithTLSStatus(tlsStatus),
}
if debug {
opts = append(opts, integration.WithDebug())


@@ -358,13 +358,6 @@ ROOT_SECRET_ACCESS_KEY=
# as any parent directories automatically created with object uploads.
#VGW_DIR_PERMS=0755
# The default behavior of the gateway is to automatically set the noarchive
# flag on the multipart upload parts while the multipart upload is in progress.
# This is to prevent the parts from being archived since they are temporary
# and will be deleted after the multipart upload is completed or aborted. The
# VGW_DISABLE_NOARCHIVE option can be set to true to disable this behavior.
#VGW_DISABLE_NOARCHIVE=false
######
# s3 #
######

go.mod

@@ -1,83 +1,82 @@
module github.com/versity/versitygw
go 1.23.0
toolchain go1.24.1
go 1.21.0
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
github.com/DataDog/datadog-go/v5 v5.6.0
github.com/aws/aws-sdk-go-v2 v1.36.3
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.1
github.com/aws/smithy-go v1.22.3
github.com/go-ldap/ldap/v3 v3.4.10
github.com/gofiber/fiber/v2 v2.52.6
github.com/google/go-cmp v0.7.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1
github.com/DataDog/datadog-go/v5 v5.5.0
github.com/aws/aws-sdk-go-v2 v1.32.3
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2
github.com/aws/smithy-go v1.22.0
github.com/go-ldap/ldap/v3 v3.4.8
github.com/gofiber/fiber/v2 v2.52.5
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/hashicorp/vault-client-go v0.4.3
github.com/nats-io/nats.go v1.41.0
github.com/nats-io/nats.go v1.37.0
github.com/oklog/ulid/v2 v2.1.0
github.com/pkg/xattr v0.4.10
github.com/segmentio/kafka-go v0.4.47
github.com/smira/go-statsd v1.3.4
github.com/urfave/cli/v2 v2.27.6
github.com/valyala/fasthttp v1.60.0
github.com/urfave/cli/v2 v2.27.5
github.com/valyala/fasthttp v1.57.0
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44
golang.org/x/sync v0.13.0
golang.org/x/sys v0.32.0
golang.org/x/sync v0.8.0
golang.org/x/sys v0.26.0
)
require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.18 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/nats-io/nkeys v0.4.10 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
golang.org/x/crypto v0.37.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/text v0.19.0 // indirect
golang.org/x/time v0.7.0 // indirect
)
require (
github.com/andybalholm/brotli v1.1.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.13
github.com/aws/aws-sdk-go-v2/credentials v1.17.66
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.71
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect
github.com/aws/aws-sdk-go-v2/config v1.28.1
github.com/aws/aws-sdk-go-v2/credentials v1.17.42
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
)

go.sum

@@ -1,23 +1,23 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0 h1:Bg8m3nq/X1DeePkAbCfb6ml6F3F0IunEhE8TMh+lY48=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 h1:cf+OIKbkmMHBaC3u78AXomweqM0oxQSgBXRZf3WH4yM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1/go.mod h1:ap1dmS6vQKJxSMNiGJcq4QuUQkOynyD93gLw6MDF7ek=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw=
github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3 h1:6LyjnnaLpcOKK0fbYisI+mb8CE7iNe7i89nMNQxFxs8=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
@@ -25,48 +25,48 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
github.com/aws/aws-sdk-go-v2/config v1.29.13 h1:RgdPqWoE8nPpIekpVpDJsBckbqT4Liiaq9f35pbTh1Y=
github.com/aws/aws-sdk-go-v2/config v1.29.13/go.mod h1:NI28qs/IOUIRhsR7GQ/JdexoqRN9tDxkIrYZq0SOF44=
github.com/aws/aws-sdk-go-v2/credentials v1.17.66 h1:aKpEKaTy6n4CEJeYI1MNj97oSDLi4xro3UzQfwf5RWE=
github.com/aws/aws-sdk-go-v2/credentials v1.17.66/go.mod h1:xQ5SusDmHb/fy55wU0QqTy0yNfLqxzec59YcsRZB+rI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.71 h1:s43gLuY+zGmtpx+KybfFP4IckopmTfDOPdlf/L++N5I=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.71/go.mod h1:KH6wWmY3O3c/jVAjHk0MGzVAFDxkOSt42Eoe4ZO4ge0=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.1 h1:2Ku1xwAohSSXHR1tpAnyVDSQSxoDMA+/NZBytW+f4qg=
github.com/aws/aws-sdk-go-v2/service/s3 v1.79.1/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.18 h1:xz7WvTMfSStb9Y8NpCT82FXLNC3QasqBfuAFHY4Pk5g=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.18/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk=
github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA=
github.com/aws/aws-sdk-go-v2/config v1.28.1 h1:oxIvOUXy8x0U3fR//0eq+RdCKimWI900+SV+10xsCBw=
github.com/aws/aws-sdk-go-v2/config v1.28.1/go.mod h1:bRQcttQJiARbd5JZxw6wG0yIK3eLeSCPdg6uqmmlIiI=
github.com/aws/aws-sdk-go-v2/credentials v1.17.42 h1:sBP0RPjBU4neGpIYyx8mkU2QqLPl5u9cmdTWVzIpHkM=
github.com/aws/aws-sdk-go-v2/credentials v1.17.42/go.mod h1:FwZBfU530dJ26rv9saAbxa9Ej3eF/AK0OAY86k13n4M=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 h1:68jFVtt3NulEzojFesM/WVarlFpCaXLKaBxDpzkQ9OQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18/go.mod h1:Fjnn5jQVIo6VyedMc0/EhPpfNlPl7dHV916O6B+49aE=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 h1:ihPPdcCVSN0IvBByXwqVp28/l4VosBZ6sDulcvU2J7w=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35/go.mod h1:JkgEhs3SVF51Dj3m1Bj+yL8IznpxzkwlA3jLg3x7Kls=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 h1:Jw50LwEkVjuVzE1NzkhNKkBf9cRN7MtE1F/b2cOKTUM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22/go.mod h1:Y/SmAyPcOTmpeVaWSzSKiILfXTVJwrGmYZhcRbhWuEY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 h1:981MHwBaRZM7+9QSR6XamDzF/o7ouUGxFzr+nVSIhrs=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22/go.mod h1:1RA1+aBEfn+CAB/Mh0MB6LsdCYCnjZm7tKXtnk499ZQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 h1:yV+hCAHZZYJQcwAaszoBNwLbPItHvApxT0kVIw6jRgs=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22/go.mod h1:kbR1TL8llqB1eGnVbybcA4/wgScxdylOdyAd51yxPdw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 h1:kT6BcZsmMtNkP/iYMcRG+mIEA/IbeiUimXtGmqF39y0=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3/go.mod h1:Z8uGua2k4PPaGOYn66pK02rhMrot3Xk3tpBuUFPomZU=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 h1:qcxX0JYlgWH3hpPUnd6U0ikcl6LLA9sLkXE2w1fpMvY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3/go.mod h1:cLSNEmI45soc+Ef8K/L+8sEA3A3pYFEYf5B5UI+6bH4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 h1:ZC7Y/XgKUxwqcdhO5LE8P6oGP1eh6xlQReWNKfhvJno=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3/go.mod h1:WqfO7M9l9yUAw0HcHaikwRd/H6gzYdz7vjejCA5e2oY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 h1:p9TNFL8bFUMd+38YIpTAXpoxyz0MxC7FlbFEH4P4E1U=
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2/go.mod h1:fNjyo0Coen9QTwQLWeV6WO2Nytwiu+cCcWaTdKCAqqE=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 h1:UTpsIf0loCIWEbrqdLb+0RxnTXfWh2vhw4nQmFi4nPc=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.3/go.mod h1:FZ9j3PFHHAR+w0BSEjK955w5YD2UwB/l/H0yAK3MJvI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 h1:2YCmIXv3tmiItw0LlYf6v7gEHebLY45kBEnPezbUKyU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3/go.mod h1:u19stRyNPxGhj6dRm+Cdgu6N75qnbW7+QN0q0dsAk58=
github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 h1:wVnQ6tigGsRqSWDEEyH6lSAJ9OyFUsSnbaUWChuSGzs=
github.com/aws/aws-sdk-go-v2/service/sts v1.32.3/go.mod h1:VZa9yTFyj4o10YGsmDO4gbQJUvvhY72fhumT8W4LqsE=
github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -74,18 +74,18 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU=
github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY=
github.com/gofiber/fiber/v2 v2.52.6 h1:Rfp+ILPiYSvvVuIPvxrBns+HJp8qGLDnLJawAu27XVI=
github.com/gofiber/fiber/v2 v2.52.6/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/go-ldap/ldap/v3 v3.4.8 h1:loKJyspcRezt2Q3ZRMq2p/0v8iOurlmeXDPw6fikSvQ=
github.com/go-ldap/ldap/v3 v3.4.8/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk=
github.com/gofiber/fiber/v2 v2.52.5 h1:tWoP1MJQjGEe4GB5TUGOi7P2E0ZMMRx5ZTG4rT+yGMo=
github.com/gofiber/fiber/v2 v2.52.5/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
@@ -120,30 +120,31 @@ github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJk
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko=
github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo=
github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -151,8 +152,8 @@ github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -174,14 +175,16 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g=
github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.60.0 h1:kBRYS0lOhVJ6V+bYN8PqAHELKHtXqwq9zNMLKx1MBsw=
github.com/valyala/fasthttp v1.60.0/go.mod h1:iY4kDgV3Gc6EqhRZ8icqcmlG6bqhcDXfuHgTO4FXCvc=
github.com/valyala/fasthttp v1.57.0 h1:Xw8SjWGEP/+wAAgyy5XTvgrWlOD1+TxbbvNADYCm1Tg=
github.com/valyala/fasthttp v1.57.0/go.mod h1:h6ZBaPRlzpZ6O3H5t2gEk1Qi33+TmLvfwgLLp0t9CpE=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44 h1:Wx1o3pNrCzsHIIDyZ2MLRr6tF/1FhAr7HNDn80QqDWE=
github.com/versity/scoutfs-go v0.0.0-20240325223134-38eb2f5f7d44/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
@@ -200,19 +203,14 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -222,23 +220,17 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -250,27 +242,23 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -279,19 +267,15 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -72,9 +72,6 @@ var (
ActionPutBucketOwnershipControls = "s3_PutBucketOwnershipControls"
ActionGetBucketOwnershipControls = "s3_GetBucketOwnershipControls"
ActionDeleteBucketOwnershipControls = "s3_DeleteBucketOwnershipControls"
ActionPutBucketCors = "s3_PutBucketCors"
ActionGetBucketCors = "s3_GetBucketCors"
ActionDeleteBucketCors = "s3_DeleteBucketCors"
// Admin actions
ActionAdminCreateUser = "admin_CreateUser"
@@ -269,16 +266,4 @@ func init() {
Name: "UploadPartCopy",
Service: "s3",
}
ActionMap[ActionPutBucketCors] = Action{
Name: "PutBucketCors",
Service: "s3",
}
ActionMap[ActionGetBucketCors] = Action{
Name: "GetBucketCors",
Service: "s3",
}
ActionMap[ActionDeleteBucketCors] = Action{
Name: "DeleteBucketCors",
Service: "s3",
}
}

View File

@@ -43,13 +43,14 @@ type Tag struct {
// Manager is a manager of metrics plugins
type Manager struct {
wg sync.WaitGroup
ctx context.Context
addDataChan chan datapoint
config Config
publishers []publisher
addDataChan chan datapoint
publishers []publisher
wg sync.WaitGroup
}
type Config struct {
@@ -220,6 +221,6 @@ func (m *Manager) addForwarder(addChan <-chan datapoint) {
type datapoint struct {
key string
value int64
tags []Tag
value int64
}
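The struct field moves in this file (and the similar struct hunks later in this diff) match alignment-driven packing, where ordering fields from larger to smaller alignment removes padding holes. A minimal, self-contained illustration of the idea — the type and field names here are hypothetical, not from this codebase:
package main

import (
	"fmt"
	"unsafe"
)

// loose interleaves 1-byte and 8-byte fields, forcing padding.
type loose struct {
	flag bool  // 1 byte + 7 bytes padding
	n    int64 // 8 bytes
	ok   bool  // 1 byte + 7 bytes trailing padding
}

// tight orders fields by decreasing alignment, packing the bools together.
type tight struct {
	n    int64 // 8 bytes
	flag bool  // 1 byte
	ok   bool  // 1 byte + 6 bytes trailing padding
}

func main() {
	// on 64-bit platforms this prints 24 and 16
	fmt.Println(unsafe.Sizeof(loose{}), unsafe.Sizeof(tight{}))
}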

View File

@@ -10,14 +10,6 @@ mkdir /tmp/versioning.covdata
rm -rf /tmp/versioningdir
mkdir /tmp/versioningdir
# setup tls certificate and key
ECHO "Generating TLS certificate and key in the cert.pem and key.pem files"
openssl genpkey -algorithm RSA -out key.pem -pkeyopt rsa_keygen_bits:2048
openssl req -new -x509 -key key.pem -out cert.pem -days 365 -subj "/C=US/ST=California/L=San Francisco/O=Versity/OU=Software/CN=versity.com"
ECHO "Running the sdk test over http"
# run server in background not versioning-enabled
# port: 7070(default)
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass --iam-dir /tmp/gw posix /tmp/gw &
@@ -25,7 +17,7 @@ GW_PID=$!
sleep 1
# check if gateway process is still running
# check if versioning-enabled gateway process is still running
if ! kill -0 $GW_PID; then
echo "server no longer running"
exit 1
@@ -53,48 +45,9 @@ fi
kill $GW_PID
ECHO "Running the sdk test over https"
# run server in background with TLS certificate
# port: 7071(default)
GOCOVERDIR=/tmp/https.covdata ./versitygw --cert "$PWD/cert.pem" --key "$PWD/key.pem" -p :7071 -a user -s pass --iam-dir /tmp/gw posix /tmp/gw &
GW_HTTPS_PID=$!
sleep 1
# check if https gateway process is still running
if ! kill -0 $GW_HTTPS_PID; then
echo "server no longer running"
exit 1
fi
# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 full-flow; then
echo "full flow tests failed"
kill $GW_HTTPS_PID
exit 1
fi
# posix tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 posix; then
echo "posix tests failed"
kill $GW_HTTPS_PID
exit 1
fi
# iam tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7071 iam; then
echo "iam tests failed"
kill $GW_HTTPS_PID
exit 1
fi
kill $GW_HTTPS_PID
ECHO "Running the sdk test over http against the versioning-enabled gateway"
# run server in background versioning-enabled
# port: 7072
GOCOVERDIR=/tmp/versioning.covdata ./versitygw -p :7072 -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
# port: 7071
GOCOVERDIR=/tmp/versioning.covdata ./versitygw -p :7071 -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_VS_PID=$!
# wait a second for server to start up
@@ -108,13 +61,13 @@ fi
# run tests
# full flow tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 full-flow -vs; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7071 full-flow -vs; then
echo "versioning-enabled full-flow tests failed"
kill $GW_VS_PID
exit 1
fi
# posix tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7072 posix -vs; then
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7071 posix -vs; then
echo "versiongin-enabled posix tests failed"
kill $GW_VS_PID
exit 1
@@ -123,38 +76,6 @@ fi
# kill off server
kill $GW_VS_PID
ECHO "Running the sdk test over https against the versioning-enabled gateway"
# run server in background versioning-enabled
# port: 7073
GOCOVERDIR=/tmp/versioning.https.covdata ./versitygw --cert "$PWD/cert.pem" --key "$PWD/key.pem" -p :7073 -a user -s pass --iam-dir /tmp/gw posix --versioning-dir /tmp/versioningdir /tmp/gw &
GW_VS_HTTPS_PID=$!
# wait a second for server to start up
sleep 1
# check if versioning-enabled gateway process is still running
if ! kill -0 $GW_VS_HTTPS_PID; then
echo "versioning-enabled server no longer running"
exit 1
fi
# run tests
# full flow tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 full-flow -vs; then
echo "versioning-enabled full-flow tests failed"
kill $GW_VS_HTTPS_PID
exit 1
fi
# posix tests
if ! ./versitygw test --allow-insecure -a user -s pass -e https://127.0.0.1:7073 posix -vs; then
echo "versiongin-enabled posix tests failed"
kill $GW_VS_HTTPS_PID
exit 1
fi
# kill off server
kill $GW_VS_HTTPS_PID
exit 0
# if the above binary was built with -cover enabled (make testbin),

View File

@@ -26,11 +26,11 @@ import (
)
type S3AdminServer struct {
app *fiber.App
backend backend.Backend
app *fiber.App
router *S3AdminRouter
port string
cert *tls.Certificate
port string
}
func NewAdminServer(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, l s3log.AuditLogger, opts ...AdminOpt) *S3AdminServer {

View File

@@ -167,7 +167,7 @@ func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
Owner: owner,
Grantees: []auth.Grantee{
{
Permission: auth.PermissionFullControl,
Permission: types.PermissionFullControl,
Access: owner,
Type: types.TypeCanonicalUser,
},

View File

@@ -64,11 +64,11 @@ func TestAdminController_CreateUser(t *testing.T) {
`
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Admin-create-user-malformed-body",
@@ -149,11 +149,11 @@ func TestAdminController_UpdateUser(t *testing.T) {
`
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Admin-update-user-success",
@@ -223,11 +223,11 @@ func TestAdminController_DeleteUser(t *testing.T) {
app.Patch("/delete-user", adminController.DeleteUser)
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Admin-delete-user-success",
@@ -280,11 +280,11 @@ func TestAdminController_ListUsers(t *testing.T) {
appSucc.Patch("/list-users", adminController.ListUsers)
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Admin-list-users-iam-error",
@@ -361,11 +361,11 @@ func TestAdminController_ChangeBucketOwner(t *testing.T) {
appIamNoSuchUser.Patch("/change-bucket-owner", adminControllerIamAccDoesNotExist.ChangeBucketOwner)
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Change-bucket-owner-check-account-server-error",
@@ -424,11 +424,11 @@ func TestAdminController_ListBuckets(t *testing.T) {
app.Patch("/list-buckets", adminController.ListBuckets)
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "List-buckets-success",

View File

@@ -32,21 +32,18 @@ var _ backend.Backend = &BackendMock{}
// CompleteMultipartUploadFunc: func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
// panic("mock out the CompleteMultipartUpload method")
// },
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
// CopyObjectFunc: func(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
// panic("mock out the CopyObject method")
// },
// CreateBucketFunc: func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error {
// panic("mock out the CreateBucket method")
// },
// CreateMultipartUploadFunc: func(contextMoqParam context.Context, createMultipartUploadInput s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
// CreateMultipartUploadFunc: func(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
// panic("mock out the CreateMultipartUpload method")
// },
// DeleteBucketFunc: func(contextMoqParam context.Context, bucket string) error {
// panic("mock out the DeleteBucket method")
// },
// DeleteBucketCorsFunc: func(contextMoqParam context.Context, bucket string) error {
// panic("mock out the DeleteBucketCors method")
// },
// DeleteBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) error {
// panic("mock out the DeleteBucketOwnershipControls method")
// },
@@ -68,9 +65,6 @@ var _ backend.Backend = &BackendMock{}
// GetBucketAclFunc: func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error) {
// panic("mock out the GetBucketAcl method")
// },
// GetBucketCorsFunc: func(contextMoqParam context.Context, bucket string) ([]byte, error) {
// panic("mock out the GetBucketCors method")
// },
// GetBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error) {
// panic("mock out the GetBucketOwnershipControls method")
// },
@@ -134,9 +128,6 @@ var _ backend.Backend = &BackendMock{}
// PutBucketAclFunc: func(contextMoqParam context.Context, bucket string, data []byte) error {
// panic("mock out the PutBucketAcl method")
// },
// PutBucketCorsFunc: func(contextMoqParam context.Context, bytes []byte) error {
// panic("mock out the PutBucketCors method")
// },
// PutBucketOwnershipControlsFunc: func(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error {
// panic("mock out the PutBucketOwnershipControls method")
// },
@@ -149,7 +140,7 @@ var _ backend.Backend = &BackendMock{}
// PutBucketVersioningFunc: func(contextMoqParam context.Context, bucket string, status types.BucketVersioningStatus) error {
// panic("mock out the PutBucketVersioning method")
// },
// PutObjectFunc: func(contextMoqParam context.Context, putObjectInput s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
// PutObjectFunc: func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
// panic("mock out the PutObject method")
// },
// PutObjectAclFunc: func(contextMoqParam context.Context, putObjectAclInput *s3.PutObjectAclInput) error {
@@ -179,10 +170,10 @@ var _ backend.Backend = &BackendMock{}
// StringFunc: func() string {
// panic("mock out the String method")
// },
// UploadPartFunc: func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
// UploadPartFunc: func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error) {
// panic("mock out the UploadPart method")
// },
// UploadPartCopyFunc: func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
// UploadPartCopyFunc: func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
// panic("mock out the UploadPartCopy method")
// },
// }
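A hedged sketch of how a test might stub the updated signatures shown above, assuming the context and aws-sdk-go-v2 s3 imports already used in this file; the return values are placeholders, and treating the UploadPart string result as the part ETag is an assumption:
mock := &BackendMock{
	CopyObjectFunc: func(_ context.Context, _ *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
		return &s3.CopyObjectOutput{}, nil
	},
	UploadPartFunc: func(_ context.Context, _ *s3.UploadPartInput) (string, error) {
		return "etag-placeholder", nil // assumed to be the part ETag
	},
}
// exercise the mock and inspect recorded calls
etag, err := mock.UploadPart(context.Background(), &s3.UploadPartInput{})
_ = etag
_ = err
fmt.Println(len(mock.UploadPartCalls())) // 1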
@@ -202,20 +193,17 @@ type BackendMock struct {
CompleteMultipartUploadFunc func(contextMoqParam context.Context, completeMultipartUploadInput *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
// CopyObjectFunc mocks the CopyObject method.
CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error)
CopyObjectFunc func(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
// CreateBucketFunc mocks the CreateBucket method.
CreateBucketFunc func(contextMoqParam context.Context, createBucketInput *s3.CreateBucketInput, defaultACL []byte) error
// CreateMultipartUploadFunc mocks the CreateMultipartUpload method.
CreateMultipartUploadFunc func(contextMoqParam context.Context, createMultipartUploadInput s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
CreateMultipartUploadFunc func(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error)
// DeleteBucketFunc mocks the DeleteBucket method.
DeleteBucketFunc func(contextMoqParam context.Context, bucket string) error
// DeleteBucketCorsFunc mocks the DeleteBucketCors method.
DeleteBucketCorsFunc func(contextMoqParam context.Context, bucket string) error
// DeleteBucketOwnershipControlsFunc mocks the DeleteBucketOwnershipControls method.
DeleteBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string) error
@@ -237,9 +225,6 @@ type BackendMock struct {
// GetBucketAclFunc mocks the GetBucketAcl method.
GetBucketAclFunc func(contextMoqParam context.Context, getBucketAclInput *s3.GetBucketAclInput) ([]byte, error)
// GetBucketCorsFunc mocks the GetBucketCors method.
GetBucketCorsFunc func(contextMoqParam context.Context, bucket string) ([]byte, error)
// GetBucketOwnershipControlsFunc mocks the GetBucketOwnershipControls method.
GetBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error)
@@ -303,9 +288,6 @@ type BackendMock struct {
// PutBucketAclFunc mocks the PutBucketAcl method.
PutBucketAclFunc func(contextMoqParam context.Context, bucket string, data []byte) error
// PutBucketCorsFunc mocks the PutBucketCors method.
PutBucketCorsFunc func(contextMoqParam context.Context, bytes []byte) error
// PutBucketOwnershipControlsFunc mocks the PutBucketOwnershipControls method.
PutBucketOwnershipControlsFunc func(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error
@@ -319,7 +301,7 @@ type BackendMock struct {
PutBucketVersioningFunc func(contextMoqParam context.Context, bucket string, status types.BucketVersioningStatus) error
// PutObjectFunc mocks the PutObject method.
PutObjectFunc func(contextMoqParam context.Context, putObjectInput s3response.PutObjectInput) (s3response.PutObjectOutput, error)
PutObjectFunc func(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error)
// PutObjectAclFunc mocks the PutObjectAcl method.
PutObjectAclFunc func(contextMoqParam context.Context, putObjectAclInput *s3.PutObjectAclInput) error
@@ -349,10 +331,10 @@ type BackendMock struct {
StringFunc func() string
// UploadPartFunc mocks the UploadPart method.
UploadPartFunc func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error)
UploadPartFunc func(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error)
// UploadPartCopyFunc mocks the UploadPartCopy method.
UploadPartCopyFunc func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error)
UploadPartCopyFunc func(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)
// calls tracks calls to the methods.
calls struct {
@@ -384,7 +366,7 @@ type BackendMock struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// CopyObjectInput is the copyObjectInput argument value.
CopyObjectInput s3response.CopyObjectInput
CopyObjectInput *s3.CopyObjectInput
}
// CreateBucket holds details about calls to the CreateBucket method.
CreateBucket []struct {
@@ -400,7 +382,7 @@ type BackendMock struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// CreateMultipartUploadInput is the createMultipartUploadInput argument value.
CreateMultipartUploadInput s3response.CreateMultipartUploadInput
CreateMultipartUploadInput *s3.CreateMultipartUploadInput
}
// DeleteBucket holds details about calls to the DeleteBucket method.
DeleteBucket []struct {
@@ -409,13 +391,6 @@ type BackendMock struct {
// Bucket is the bucket argument value.
Bucket string
}
// DeleteBucketCors holds details about calls to the DeleteBucketCors method.
DeleteBucketCors []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
}
// DeleteBucketOwnershipControls holds details about calls to the DeleteBucketOwnershipControls method.
DeleteBucketOwnershipControls []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -467,13 +442,6 @@ type BackendMock struct {
// GetBucketAclInput is the getBucketAclInput argument value.
GetBucketAclInput *s3.GetBucketAclInput
}
// GetBucketCors holds details about calls to the GetBucketCors method.
GetBucketCors []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bucket is the bucket argument value.
Bucket string
}
// GetBucketOwnershipControls holds details about calls to the GetBucketOwnershipControls method.
GetBucketOwnershipControls []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -631,13 +599,6 @@ type BackendMock struct {
// Data is the data argument value.
Data []byte
}
// PutBucketCors holds details about calls to the PutBucketCors method.
PutBucketCors []struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// Bytes is the bytes argument value.
Bytes []byte
}
// PutBucketOwnershipControls holds details about calls to the PutBucketOwnershipControls method.
PutBucketOwnershipControls []struct {
// ContextMoqParam is the contextMoqParam argument value.
@@ -679,7 +640,7 @@ type BackendMock struct {
// ContextMoqParam is the contextMoqParam argument value.
ContextMoqParam context.Context
// PutObjectInput is the putObjectInput argument value.
PutObjectInput s3response.PutObjectInput
PutObjectInput *s3.PutObjectInput
}
// PutObjectAcl holds details about calls to the PutObjectAcl method.
PutObjectAcl []struct {
@@ -778,7 +739,6 @@ type BackendMock struct {
lockCreateBucket sync.RWMutex
lockCreateMultipartUpload sync.RWMutex
lockDeleteBucket sync.RWMutex
lockDeleteBucketCors sync.RWMutex
lockDeleteBucketOwnershipControls sync.RWMutex
lockDeleteBucketPolicy sync.RWMutex
lockDeleteBucketTagging sync.RWMutex
@@ -786,7 +746,6 @@ type BackendMock struct {
lockDeleteObjectTagging sync.RWMutex
lockDeleteObjects sync.RWMutex
lockGetBucketAcl sync.RWMutex
lockGetBucketCors sync.RWMutex
lockGetBucketOwnershipControls sync.RWMutex
lockGetBucketPolicy sync.RWMutex
lockGetBucketTagging sync.RWMutex
@@ -808,7 +767,6 @@ type BackendMock struct {
lockListObjectsV2 sync.RWMutex
lockListParts sync.RWMutex
lockPutBucketAcl sync.RWMutex
lockPutBucketCors sync.RWMutex
lockPutBucketOwnershipControls sync.RWMutex
lockPutBucketPolicy sync.RWMutex
lockPutBucketTagging sync.RWMutex
@@ -940,13 +898,13 @@ func (mock *BackendMock) CompleteMultipartUploadCalls() []struct {
}
// CopyObject calls CopyObjectFunc.
func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectInput *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
if mock.CopyObjectFunc == nil {
panic("BackendMock.CopyObjectFunc: method is nil but Backend.CopyObject was just called")
}
callInfo := struct {
ContextMoqParam context.Context
CopyObjectInput s3response.CopyObjectInput
CopyObjectInput *s3.CopyObjectInput
}{
ContextMoqParam: contextMoqParam,
CopyObjectInput: copyObjectInput,
@@ -963,11 +921,11 @@ func (mock *BackendMock) CopyObject(contextMoqParam context.Context, copyObjectI
// len(mockedBackend.CopyObjectCalls())
func (mock *BackendMock) CopyObjectCalls() []struct {
ContextMoqParam context.Context
CopyObjectInput s3response.CopyObjectInput
CopyObjectInput *s3.CopyObjectInput
} {
var calls []struct {
ContextMoqParam context.Context
CopyObjectInput s3response.CopyObjectInput
CopyObjectInput *s3.CopyObjectInput
}
mock.lockCopyObject.RLock()
calls = mock.calls.CopyObject
@@ -1016,13 +974,13 @@ func (mock *BackendMock) CreateBucketCalls() []struct {
}
// CreateMultipartUpload calls CreateMultipartUploadFunc.
func (mock *BackendMock) CreateMultipartUpload(contextMoqParam context.Context, createMultipartUploadInput s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
func (mock *BackendMock) CreateMultipartUpload(contextMoqParam context.Context, createMultipartUploadInput *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
if mock.CreateMultipartUploadFunc == nil {
panic("BackendMock.CreateMultipartUploadFunc: method is nil but Backend.CreateMultipartUpload was just called")
}
callInfo := struct {
ContextMoqParam context.Context
CreateMultipartUploadInput s3response.CreateMultipartUploadInput
CreateMultipartUploadInput *s3.CreateMultipartUploadInput
}{
ContextMoqParam: contextMoqParam,
CreateMultipartUploadInput: createMultipartUploadInput,
@@ -1039,11 +997,11 @@ func (mock *BackendMock) CreateMultipartUpload(contextMoqParam context.Context,
// len(mockedBackend.CreateMultipartUploadCalls())
func (mock *BackendMock) CreateMultipartUploadCalls() []struct {
ContextMoqParam context.Context
CreateMultipartUploadInput s3response.CreateMultipartUploadInput
CreateMultipartUploadInput *s3.CreateMultipartUploadInput
} {
var calls []struct {
ContextMoqParam context.Context
CreateMultipartUploadInput s3response.CreateMultipartUploadInput
CreateMultipartUploadInput *s3.CreateMultipartUploadInput
}
mock.lockCreateMultipartUpload.RLock()
calls = mock.calls.CreateMultipartUpload
@@ -1087,42 +1045,6 @@ func (mock *BackendMock) DeleteBucketCalls() []struct {
return calls
}
// DeleteBucketCors calls DeleteBucketCorsFunc.
func (mock *BackendMock) DeleteBucketCors(contextMoqParam context.Context, bucket string) error {
if mock.DeleteBucketCorsFunc == nil {
panic("BackendMock.DeleteBucketCorsFunc: method is nil but Backend.DeleteBucketCors was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
}
mock.lockDeleteBucketCors.Lock()
mock.calls.DeleteBucketCors = append(mock.calls.DeleteBucketCors, callInfo)
mock.lockDeleteBucketCors.Unlock()
return mock.DeleteBucketCorsFunc(contextMoqParam, bucket)
}
// DeleteBucketCorsCalls gets all the calls that were made to DeleteBucketCors.
// Check the length with:
//
// len(mockedBackend.DeleteBucketCorsCalls())
func (mock *BackendMock) DeleteBucketCorsCalls() []struct {
ContextMoqParam context.Context
Bucket string
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
}
mock.lockDeleteBucketCors.RLock()
calls = mock.calls.DeleteBucketCors
mock.lockDeleteBucketCors.RUnlock()
return calls
}
// DeleteBucketOwnershipControls calls DeleteBucketOwnershipControlsFunc.
func (mock *BackendMock) DeleteBucketOwnershipControls(contextMoqParam context.Context, bucket string) error {
if mock.DeleteBucketOwnershipControlsFunc == nil {
@@ -1379,42 +1301,6 @@ func (mock *BackendMock) GetBucketAclCalls() []struct {
return calls
}
// GetBucketCors calls GetBucketCorsFunc.
func (mock *BackendMock) GetBucketCors(contextMoqParam context.Context, bucket string) ([]byte, error) {
if mock.GetBucketCorsFunc == nil {
panic("BackendMock.GetBucketCorsFunc: method is nil but Backend.GetBucketCors was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bucket string
}{
ContextMoqParam: contextMoqParam,
Bucket: bucket,
}
mock.lockGetBucketCors.Lock()
mock.calls.GetBucketCors = append(mock.calls.GetBucketCors, callInfo)
mock.lockGetBucketCors.Unlock()
return mock.GetBucketCorsFunc(contextMoqParam, bucket)
}
// GetBucketCorsCalls gets all the calls that were made to GetBucketCors.
// Check the length with:
//
// len(mockedBackend.GetBucketCorsCalls())
func (mock *BackendMock) GetBucketCorsCalls() []struct {
ContextMoqParam context.Context
Bucket string
} {
var calls []struct {
ContextMoqParam context.Context
Bucket string
}
mock.lockGetBucketCors.RLock()
calls = mock.calls.GetBucketCors
mock.lockGetBucketCors.RUnlock()
return calls
}
// GetBucketOwnershipControls calls GetBucketOwnershipControlsFunc.
func (mock *BackendMock) GetBucketOwnershipControls(contextMoqParam context.Context, bucket string) (types.ObjectOwnership, error) {
if mock.GetBucketOwnershipControlsFunc == nil {
@@ -2191,42 +2077,6 @@ func (mock *BackendMock) PutBucketAclCalls() []struct {
return calls
}
// PutBucketCors calls PutBucketCorsFunc.
func (mock *BackendMock) PutBucketCors(contextMoqParam context.Context, bytes []byte) error {
if mock.PutBucketCorsFunc == nil {
panic("BackendMock.PutBucketCorsFunc: method is nil but Backend.PutBucketCors was just called")
}
callInfo := struct {
ContextMoqParam context.Context
Bytes []byte
}{
ContextMoqParam: contextMoqParam,
Bytes: bytes,
}
mock.lockPutBucketCors.Lock()
mock.calls.PutBucketCors = append(mock.calls.PutBucketCors, callInfo)
mock.lockPutBucketCors.Unlock()
return mock.PutBucketCorsFunc(contextMoqParam, bytes)
}
// PutBucketCorsCalls gets all the calls that were made to PutBucketCors.
// Check the length with:
//
// len(mockedBackend.PutBucketCorsCalls())
func (mock *BackendMock) PutBucketCorsCalls() []struct {
ContextMoqParam context.Context
Bytes []byte
} {
var calls []struct {
ContextMoqParam context.Context
Bytes []byte
}
mock.lockPutBucketCors.RLock()
calls = mock.calls.PutBucketCors
mock.lockPutBucketCors.RUnlock()
return calls
}
// PutBucketOwnershipControls calls PutBucketOwnershipControlsFunc.
func (mock *BackendMock) PutBucketOwnershipControls(contextMoqParam context.Context, bucket string, ownership types.ObjectOwnership) error {
if mock.PutBucketOwnershipControlsFunc == nil {
@@ -2388,13 +2238,13 @@ func (mock *BackendMock) PutBucketVersioningCalls() []struct {
}
// PutObject calls PutObjectFunc.
func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInput s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInput *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
if mock.PutObjectFunc == nil {
panic("BackendMock.PutObjectFunc: method is nil but Backend.PutObject was just called")
}
callInfo := struct {
ContextMoqParam context.Context
PutObjectInput s3response.PutObjectInput
PutObjectInput *s3.PutObjectInput
}{
ContextMoqParam: contextMoqParam,
PutObjectInput: putObjectInput,
@@ -2411,11 +2261,11 @@ func (mock *BackendMock) PutObject(contextMoqParam context.Context, putObjectInp
// len(mockedBackend.PutObjectCalls())
func (mock *BackendMock) PutObjectCalls() []struct {
ContextMoqParam context.Context
PutObjectInput s3response.PutObjectInput
PutObjectInput *s3.PutObjectInput
} {
var calls []struct {
ContextMoqParam context.Context
PutObjectInput s3response.PutObjectInput
PutObjectInput *s3.PutObjectInput
}
mock.lockPutObject.RLock()
calls = mock.calls.PutObject
@@ -2770,7 +2620,7 @@ func (mock *BackendMock) StringCalls() []struct {
}
// UploadPart calls UploadPartFunc.
func (mock *BackendMock) UploadPart(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
func (mock *BackendMock) UploadPart(contextMoqParam context.Context, uploadPartInput *s3.UploadPartInput) (string, error) {
if mock.UploadPartFunc == nil {
panic("BackendMock.UploadPartFunc: method is nil but Backend.UploadPart was just called")
}
@@ -2806,7 +2656,7 @@ func (mock *BackendMock) UploadPartCalls() []struct {
}
// UploadPartCopy calls UploadPartCopyFunc.
func (mock *BackendMock) UploadPartCopy(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
func (mock *BackendMock) UploadPartCopy(contextMoqParam context.Context, uploadPartCopyInput *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
if mock.UploadPartCopyFunc == nil {
panic("BackendMock.UploadPartCopyFunc: method is nil but Backend.UploadPartCopy was just called")
}

File diff suppressed because it is too large

View File

@@ -123,11 +123,11 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
appErr.Get("/", s3ApiControllerErr.ListBuckets)
tests := []struct {
name string
args args
app *fiber.App
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "List-bucket-method-not-allowed",
@@ -232,15 +232,12 @@ func TestS3ApiController_GetActions(t *testing.T) {
getObjAttrs := httptest.NewRequest(http.MethodGet, "/my-bucket/key", nil)
getObjAttrs.Header.Set("X-Amz-Object-Attributes", "hello")
invalidChecksumMode := httptest.NewRequest(http.MethodGet, "/my-bucket/key", nil)
invalidChecksumMode.Header.Set("x-amz-checksum-mode", "invalid_checksum_mode")
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Get-actions-get-tags-success",
@@ -332,15 +329,6 @@ func TestS3ApiController_GetActions(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Get-actions-get-object-invalid-checksum-mode",
app: app,
args: args{
req: invalidChecksumMode,
},
wantErr: false,
statusCode: 400,
},
{
name: "Get-actions-get-object-success",
app: app,
@@ -447,11 +435,11 @@ func TestS3ApiController_ListActions(t *testing.T) {
appError.Get("/:bucket", s3ApiControllerError.ListActions)
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Get-bucket-tagging-non-existing-bucket",
@@ -634,7 +622,8 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
</VersioningConfiguration>
`
policyBody := `{
policyBody := `
{
"Statement": [
{
"Effect": "Allow",
@@ -739,11 +728,11 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
invAclOwnershipReq.Header.Set("X-Amz-Grant-Read", "hello")
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Put-bucket-tagging-invalid-body",
@@ -952,12 +941,12 @@ func TestS3ApiController_PutActions(t *testing.T) {
</Tagging>
`
//retentionBody := `
//<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
// <Mode>GOVERNANCE</Mode>
// <RetainUntilDate>2025-01-01T00:00:00Z</RetainUntilDate>
//</Retention>
//`
retentionBody := `
<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Mode>GOVERNANCE</Mode>
<RetainUntilDate>2025-01-01T00:00:00Z</RetainUntilDate>
</Retention>
`
legalHoldBody := `
<LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
@@ -974,22 +963,22 @@ func TestS3ApiController_PutActions(t *testing.T) {
PutObjectAclFunc: func(context.Context, *s3.PutObjectAclInput) error {
return nil
},
CopyObjectFunc: func(context.Context, s3response.CopyObjectInput) (*s3.CopyObjectOutput, error) {
CopyObjectFunc: func(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{},
}, nil
},
PutObjectFunc: func(context.Context, s3response.PutObjectInput) (s3response.PutObjectOutput, error) {
PutObjectFunc: func(context.Context, *s3.PutObjectInput) (s3response.PutObjectOutput, error) {
return s3response.PutObjectOutput{}, nil
},
UploadPartFunc: func(context.Context, *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
return &s3.UploadPartOutput{}, nil
UploadPartFunc: func(context.Context, *s3.UploadPartInput) (string, error) {
return "hello", nil
},
PutObjectTaggingFunc: func(_ context.Context, bucket, object string, tags map[string]string) error {
return nil
},
UploadPartCopyFunc: func(context.Context, *s3.UploadPartCopyInput) (s3response.CopyPartResult, error) {
return s3response.CopyPartResult{}, nil
UploadPartCopyFunc: func(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
return s3response.CopyObjectResult{}, nil
},
PutObjectLegalHoldFunc: func(contextMoqParam context.Context, bucket, object, versionId string, status bool) error {
return nil
@@ -1023,11 +1012,6 @@ func TestS3ApiController_PutActions(t *testing.T) {
cpySrcReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
cpySrcReq.Header.Set("X-Amz-Copy-Source", "srcBucket/srcObject")
// CopyObject invalid checksum algorithm
cpyInvChecksumAlgo := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
cpyInvChecksumAlgo.Header.Set("X-Amz-Copy-Source", "srcBucket/srcObject")
cpyInvChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
// PutObjectAcl success
aclReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
aclReq.Header.Set("X-Amz-Acl", "private")
@@ -1049,46 +1033,12 @@ func TestS3ApiController_PutActions(t *testing.T) {
invAclBodyGrtReq := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?acl", strings.NewReader(body))
invAclBodyGrtReq.Header.Set("X-Amz-Grant-Read", "hello")
// PutObject invalid checksum algorithm
invChecksumAlgo := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
// PutObject invalid base64 checksum
invBase64Checksum := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invBase64Checksum.Header.Set("X-Amz-Checksum-Crc32", "invalid_base64")
// PutObject invalid crc32
invCrc32 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invCrc32.Header.Set("X-Amz-Checksum-Crc32", "YXNkZmFkc2Zhc2Rm")
// PutObject invalid crc32c
invCrc32c := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invCrc32c.Header.Set("X-Amz-Checksum-Crc32c", "YXNkZmFkc2Zhc2RmYXNkZg==")
// PutObject invalid sha1
invSha1 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invSha1.Header.Set("X-Amz-Checksum-Sha1", "YXNkZmFkc2Zhc2RmYXNkZnNkYWZkYXNmZGFzZg==")
// PutObject invalid sha256
invSha256 := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
invSha256.Header.Set("X-Amz-Checksum-Sha256", "YXNkZmFkc2Zhc2RmYXNkZnNkYWZkYXNmZGFzZmFkc2Zhc2Rm")
// PutObject multiple checksum headers
mulChecksumHdrs := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
mulChecksumHdrs.Header.Set("X-Amz-Checksum-Sha256", "d1SPCd/kZ2rAzbbLUC0n/bEaOSx70FNbXbIqoIxKuPY=")
mulChecksumHdrs.Header.Set("X-Amz-Checksum-Crc32c", "ww2FVQ==")
// PutObject checksum algorithm and header mismatch
checksumHdrMismatch := httptest.NewRequest(http.MethodPut, "/my-bucket/my-key", nil)
checksumHdrMismatch.Header.Set("X-Amz-Checksum-Algorithm", "SHA1")
checksumHdrMismatch.Header.Set("X-Amz-Checksum-Crc32c", "ww2FVQ==")
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Put-object-part-error-case",
@@ -1126,15 +1076,15 @@ func TestS3ApiController_PutActions(t *testing.T) {
wantErr: false,
statusCode: 400,
},
//{
// name: "put-object-retention-success",
// app: app,
// args: args{
// req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?retention", strings.NewReader(retentionBody)),
// },
// wantErr: false,
// statusCode: 200,
//},
{
name: "put-object-retention-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?retention", strings.NewReader(retentionBody)),
},
wantErr: false,
statusCode: 200,
},
{
name: "put-legal-hold-invalid-request",
app: app,
@@ -1225,15 +1175,6 @@ func TestS3ApiController_PutActions(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Copy-object-invalid-checksum-algorithm",
app: app,
args: args{
req: cpyInvChecksumAlgo,
},
wantErr: false,
statusCode: 400,
},
{
name: "Copy-object-success",
app: app,
@@ -1302,11 +1243,11 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
app.Delete("/:bucket", s3ApiController.DeleteBucket)
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Delete-bucket-success",
@@ -1393,11 +1334,11 @@ func TestS3ApiController_DeleteObjects(t *testing.T) {
request.Header.Set("Content-Type", "application/xml")
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Delete-Objects-success",
@@ -1491,11 +1432,11 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
appErr.Delete("/:bucket/:key/*", s3ApiControllerErr.DeleteActions)
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Abort-multipart-upload-success",
@@ -1600,11 +1541,11 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
appErr.Head("/:bucket", s3ApiControllerErr.HeadBucket)
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Head-bucket-success",
@@ -1701,15 +1642,12 @@ func TestS3ApiController_HeadObject(t *testing.T) {
})
appErr.Head("/:bucket/:key/*", s3ApiControllerErr.HeadObject)
invChecksumMode := httptest.NewRequest(http.MethodHead, "/my-bucket/my-key", nil)
invChecksumMode.Header.Set("X-Amz-Checksum-Mode", "invalid_checksum_mode")
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Head-object-success",
@@ -1720,15 +1658,6 @@ func TestS3ApiController_HeadObject(t *testing.T) {
wantErr: false,
statusCode: 200,
},
{
name: "Head-object-invalid-checksum-mode",
app: app,
args: args{
req: invChecksumMode,
},
wantErr: false,
statusCode: 400,
},
{
name: "Head-object-error",
app: appErr,
@@ -1768,7 +1697,7 @@ func TestS3ApiController_CreateActions(t *testing.T) {
CompleteMultipartUploadFunc: func(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return &s3.CompleteMultipartUploadOutput{}, nil
},
CreateMultipartUploadFunc: func(context.Context, s3response.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
CreateMultipartUploadFunc: func(context.Context, *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) {
return s3response.InitiateMultipartUploadResult{}, nil
},
SelectObjectContentFunc: func(context.Context, *s3.SelectObjectContentInput) func(w *bufio.Writer) {
@@ -1784,19 +1713,6 @@ func TestS3ApiController_CreateActions(t *testing.T) {
</SelectObjectContentRequest>
`
completMpBody := `
<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Part>
<ETag>etag</ETag>
<PartNumber>1</PartNumber>
</Part>
</CompleteMultipartUpload>
`
completMpEmptyBody := `
<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/"></CompleteMultipartUpload>
`
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("account", auth.Account{Access: "valid access"})
ctx.Locals("isRoot", true)
@@ -1806,15 +1722,12 @@ func TestS3ApiController_CreateActions(t *testing.T) {
})
app.Post("/:bucket/:key/*", s3ApiController.CreateActions)
invChecksumAlgo := httptest.NewRequest(http.MethodPost, "/my-bucket/my-key", nil)
invChecksumAlgo.Header.Set("X-Amz-Checksum-Algorithm", "invalid_checksum_algorithm")
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Restore-object-success",
@@ -1852,33 +1765,15 @@ func TestS3ApiController_CreateActions(t *testing.T) {
wantErr: false,
statusCode: 400,
},
{
name: "Complete-multipart-upload-empty-parts",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(completMpEmptyBody)),
},
wantErr: false,
statusCode: 400,
},
{
name: "Complete-multipart-upload-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(completMpBody)),
req: httptest.NewRequest(http.MethodPost, "/my-bucket/my-key?uploadId=23423", strings.NewReader(`<root><key>body</key></root>`)),
},
wantErr: false,
statusCode: 200,
},
{
name: "Create-multipart-upload-invalid-checksum-algorithm",
app: app,
args: args{
req: invChecksumAlgo,
},
wantErr: false,
statusCode: 400,
},
{
name: "Create-multipart-upload-success",
app: app,
@@ -1913,10 +1808,10 @@ func Test_XMLresponse(t *testing.T) {
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})
tests := []struct {
name string
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Internal-server-error",
@@ -1988,10 +1883,10 @@ func Test_response(t *testing.T) {
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})
tests := []struct {
name string
args args
wantErr bool
name string
statusCode int
wantErr bool
}{
{
name: "Internal-server-error",

View File

@@ -51,8 +51,7 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fibe
!ctx.Request().URI().QueryArgs().Has("versioning") &&
!ctx.Request().URI().QueryArgs().Has("policy") &&
!ctx.Request().URI().QueryArgs().Has("object-lock") &&
!ctx.Request().URI().QueryArgs().Has("ownershipControls") &&
!ctx.Request().URI().QueryArgs().Has("cors") {
!ctx.Request().URI().QueryArgs().Has("ownershipControls") {
if err := auth.MayCreateBucket(acct, isRoot); err != nil {
return controllers.SendXMLResponse(ctx, nil, err, &controllers.MetaOpts{Logger: logger, Action: "CreateBucket"})
}
@@ -75,11 +74,6 @@ func AclParser(be backend.Backend, logger s3log.AuditLogger, readonly bool) fibe
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
}
// if owner is not set, set default owner to root account
if parsedAcl.Owner == "" {
parsedAcl.Owner = ctx.Locals("rootAccess").(string)
}
ctx.Locals("parsedAcl", parsedAcl)
return ctx.Next()
}

View File

@@ -63,6 +63,10 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
return sendResponse(ctx, err, logger, mm)
}
if authData.Algorithm != "AWS4-HMAC-SHA256" {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported), logger, mm)
}
if authData.Region != region {
return sendResponse(ctx, s3err.APIError{
Code: "SignatureDoesNotMatch",
@@ -72,7 +76,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
}
ctx.Locals("isRoot", authData.Access == root.Access)
ctx.Locals("rootAccess", root.Access)
account, err := acct.getAccount(authData.Access)
if err == auth.ErrNoSuchUser {
@@ -105,7 +108,6 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
return sendResponse(ctx, err, logger, mm)
}
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if utils.IsBigDataAction(ctx) {
// for streaming PUT actions, authorization is deferred
// until end of stream due to need to get length and
@@ -113,24 +115,10 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
return utils.NewAuthReader(ctx, r, authData, account.Secret, debug)
})
// wrap the io.Reader with ChunkReader if x-amz-content-sha256
// provides a chunked encoding value
if utils.IsStreamingPayload(hashPayload) {
var err error
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr io.Reader
cr, err = utils.NewChunkReader(ctx, r, authData, region, account.Secret, tdate)
return cr
})
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
}
return ctx.Next()
}
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if !utils.IsSpecialPayload(hashPayload) {
// Calculate the hash of the request payload
hashedPayload := sha256.Sum256(ctx.Body())
@@ -161,8 +149,8 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.Au
}
type accounts struct {
root RootUserConfig
iam auth.IAMService
root RootUserConfig
}
func (a accounts) getAccount(access string) (auth.Account, error) {

View File

@@ -0,0 +1,62 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"io"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/metrics"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3log"
)
// ProcessChunkedBody initializes the chunked upload stream if the
// request appears to be a chunked upload
func ProcessChunkedBody(root RootUserConfig, iam auth.IAMService, logger s3log.AuditLogger, mm *metrics.Manager, region string) fiber.Handler {
return func(ctx *fiber.Ctx) error {
decodedLength := ctx.Get("X-Amz-Decoded-Content-Length")
if decodedLength == "" {
return ctx.Next()
}
// TODO: validate content length
authData, err := utils.ParseAuthorization(ctx.Get("Authorization"))
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
acct := ctx.Locals("account").(auth.Account)
amzdate := ctx.Get("X-Amz-Date")
date, _ := time.Parse(iso8601Format, amzdate)
if utils.IsBigDataAction(ctx) {
var err error
wrapBodyReader(ctx, func(r io.Reader) io.Reader {
var cr *utils.ChunkReader
cr, err = utils.NewChunkReader(ctx, r, authData, region, acct.Secret, date)
return cr
})
if err != nil {
return sendResponse(ctx, err, logger, mm)
}
return ctx.Next()
}
return ctx.Next()
}
}
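
A request only enters the chunk-decoding path when it advertises a decoded length; as a hedged, stand-alone sketch of that gate (isChunkedUpload is illustrative, not the gateway's API; it is a stricter variant, since the middleware above keys on X-Amz-Decoded-Content-Length alone):

package main

import (
    "fmt"
    "strings"
)

// isChunkedUpload treats a request as chunk-encoded only when
// X-Amz-Decoded-Content-Length is set and the payload hash header
// names a streaming type.
func isChunkedUpload(hdr map[string]string) bool {
    if hdr["X-Amz-Decoded-Content-Length"] == "" {
        return false
    }
    return strings.HasPrefix(hdr["X-Amz-Content-Sha256"], "STREAMING-")
}

func main() {
    fmt.Println(isChunkedUpload(map[string]string{
        "X-Amz-Decoded-Content-Length": "1024",
        "X-Amz-Content-Sha256":         "STREAMING-AWS4-HMAC-SHA256-PAYLOAD",
    })) // true
}
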

View File

@@ -45,7 +45,7 @@ func VerifyMD5Body(logger s3log.AuditLogger) fiber.Handler {
}
sum := md5.Sum(ctx.Body())
calculatedSum := utils.Base64SumString(sum[:])
calculatedSum := utils.Md5SumString(sum[:])
if incomingSum != calculatedSum {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest), &controllers.MetaOpts{Logger: logger})

View File

@@ -43,8 +43,6 @@ func VerifyPresignedV4Signature(root RootUserConfig, iam auth.IAMService, logger
}
ctx.Locals("isRoot", authData.Access == root.Access)
ctx.Locals("rootAccess", root.Access)
account, err := acct.getAccount(authData.Access)
if err == auth.ErrNoSuchUser {
return sendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID), logger, mm)

View File

@@ -29,9 +29,9 @@ func TestS3ApiRouter_Init(t *testing.T) {
iam auth.IAMService
}
tests := []struct {
name string
sa *S3ApiRouter
args args
sa *S3ApiRouter
name string
}{
{
name: "Initialize S3 api router",

View File

@@ -29,15 +29,15 @@ import (
)
type S3ApiServer struct {
app *fiber.App
backend backend.Backend
app *fiber.App
router *S3ApiRouter
port string
cert *tls.Certificate
port string
health string
quiet bool
debug bool
readonly bool
health string
}
func New(
@@ -81,6 +81,7 @@ func New(
// Authentication middlewares
app.Use(middlewares.VerifyPresignedV4Signature(root, iam, l, mm, region, server.debug))
app.Use(middlewares.VerifyV4Signature(root, iam, l, mm, region, server.debug))
app.Use(middlewares.ProcessChunkedBody(root, iam, l, mm, region))
app.Use(middlewares.VerifyMD5Body(l))
app.Use(middlewares.AclParser(be, l, server.readonly))

View File

@@ -39,9 +39,9 @@ func TestNew(t *testing.T) {
port := ":7070"
tests := []struct {
name string
args args
wantS3ApiServer *S3ApiServer
args args
name string
wantErr bool
}{
{
@@ -78,8 +78,8 @@ func TestNew(t *testing.T) {
func TestS3ApiServer_Serve(t *testing.T) {
tests := []struct {
name string
sa *S3ApiServer
name string
wantErr bool
}{
{

View File

@@ -41,10 +41,10 @@ const (
// the data is completely read.
type AuthReader struct {
ctx *fiber.Ctx
r *HashReader
auth AuthData
secret string
size int
r *HashReader
debug bool
}
@@ -56,7 +56,7 @@ func NewAuthReader(ctx *fiber.Ctx, r io.Reader, auth AuthData, secret string, de
var hr *HashReader
hashPayload := ctx.Get("X-Amz-Content-Sha256")
if !IsSpecialPayload(hashPayload) {
hr, _ = NewHashReader(r, "", HashTypeSha256Hex)
hr, _ = NewHashReader(r, "", HashTypeSha256)
} else {
hr, _ = NewHashReader(r, "", HashTypeNone)
}
@@ -190,10 +190,6 @@ func ParseAuthorization(authorization string) (AuthData, error) {
algo := authParts[0]
if algo != "AWS4-HMAC-SHA256" {
return a, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported)
}
kvData := authParts[1]
kvPairs := strings.Split(kvData, ",")
// we are expecting at least Credential, SignedHeaders, and Signature
@@ -264,3 +260,19 @@ func removeSpace(str string) string {
}
return b.String()
}
var (
specialValues = map[string]bool{
"UNSIGNED-PAYLOAD": true,
"STREAMING-UNSIGNED-PAYLOAD-TRAILER": true,
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD": true,
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER": true,
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD": true,
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER": true,
}
)
// IsSpecialPayload checks for streaming/unsigned authorization types
func IsSpecialPayload(str string) bool {
return specialValues[str]
}
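
For contrast with the special values above, a non-special X-Amz-Content-Sha256 carries the hex SHA-256 of the request body; a minimal sketch (the empty-body digest below is the same constant the chunk reader uses as zeroLenSig):

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

func main() {
    // hex SHA-256 of an empty body
    sum := sha256.Sum256(nil)
    fmt.Println(hex.EncodeToString(sum[:]))
    // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
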

View File

@@ -15,113 +15,260 @@
package utils
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"net/http"
"strings"
"math"
"strconv"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3err"
)
type payloadType string
// chunked uploads described in:
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
const (
payloadTypeUnsigned payloadType = "UNSIGNED-PAYLOAD"
payloadTypeStreamingUnsignedTrailer payloadType = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
payloadTypeStreamingSigned payloadType = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
payloadTypeStreamingSignedTrailer payloadType = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
payloadTypeStreamingEcdsa payloadType = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
payloadTypeStreamingEcdsaTrailer payloadType = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
chunkHdrStr = ";chunk-signature="
chunkHdrDelim = "\r\n"
zeroLenSig = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
awsV4 = "AWS4"
awsS3Service = "s3"
awsV4Request = "aws4_request"
streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
)
func getPayloadTypeNotSupportedErr(p payloadType) error {
return s3err.APIError{
HTTPStatusCode: http.StatusNotImplemented,
Code: "NotImplemented",
Description: fmt.Sprintf("The chunk encoding algorithm %v is not supported.", p),
// ChunkReader reads from the chunked upload request body and returns
// the object data stream
type ChunkReader struct {
r io.Reader
chunkHash hash.Hash
prevSig string
parsedSig string
strToSignPrefix string
signingKey []byte
stash []byte
currentChunkSize int64
chunkDataLeft int64
trailerExpected int
skipcheck bool
}
// NewChunkReader reads from request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (*ChunkReader, error) {
return &ChunkReader{
r: r,
signingKey: getSigningKey(secret, region, date),
// the authdata.Signature is validated in the auth-reader,
// so we can use that here without any other checks
prevSig: authdata.Signature,
chunkHash: sha256.New(),
strToSignPrefix: getStringToSignPrefix(date, region),
}, nil
}
// Read satisfies the io.Reader for this type
func (cr *ChunkReader) Read(p []byte) (int, error) {
n, err := cr.r.Read(p)
if err != nil && err != io.EOF {
return n, err
}
if cr.chunkDataLeft < int64(n) {
chunkSize := cr.chunkDataLeft
if chunkSize > 0 {
cr.chunkHash.Write(p[:chunkSize])
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
n += int(chunkSize)
return n, err
}
cr.chunkDataLeft -= int64(n)
cr.chunkHash.Write(p[:n])
return n, err
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// This part is the same for all chunks,
// only the previous signature and hash of current chunk changes
func getStringToSignPrefix(date time.Time, region string) string {
credentialScope := fmt.Sprintf("%s/%s/%s/%s",
date.Format("20060102"),
region,
awsS3Service,
awsV4Request)
return fmt.Sprintf("%s\n%s\n%s",
streamPayloadAlgo,
date.Format("20060102T150405Z"),
credentialScope)
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// signature For each chunk, you calculate the signature using the following
// string to sign. For the first chunk, you use the seed-signature as the
// previous signature.
func getChunkStringToSign(prefix, prevSig string, chunkHash []byte) string {
return fmt.Sprintf("%s\n%s\n%s\n%s",
prefix,
prevSig,
zeroLenSig,
hex.EncodeToString(chunkHash))
}
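
Putting the two helpers together, a runnable sketch of the string to sign for a single chunk; the date and region follow the AWS documentation example, and the previous signature is a placeholder:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "time"
)

func main() {
    date := time.Date(2013, 5, 24, 0, 0, 0, 0, time.UTC)
    prefix := fmt.Sprintf("AWS4-HMAC-SHA256-PAYLOAD\n%s\n%s/us-east-1/s3/aws4_request",
        date.Format("20060102T150405Z"), date.Format("20060102"))
    chunkHash := sha256.Sum256([]byte("chunk data"))
    emptyHash := sha256.Sum256(nil) // zeroLenSig
    fmt.Printf("%s\n%s\n%s\n%s\n",
        prefix,
        "prev-signature-placeholder",
        hex.EncodeToString(emptyHash[:]),
        hex.EncodeToString(chunkHash[:]))
}
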
// The provided p should have all of the previous chunk data and trailer
// consumed already. The expectation is that p[0] starts the
// new chunk size, with ";chunk-signature=" following. The only exception
// is if we started consuming the trailer, but hit the end of the read buffer.
// In this case, parseAndRemoveChunkInfo is called with skipcheck=true to
// finish consuming the final trailer bytes.
// This parses the chunk metadata in situ without allocating an extra buffer.
// It will just read and validate the chunk metadata and then move the
// following chunk data to overwrite the metadata in the provided buffer.
func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
n := len(p)
if !cr.skipcheck && cr.parsedSig != "" {
chunkhash := cr.chunkHash.Sum(nil)
cr.chunkHash.Reset()
sigstr := getChunkStringToSign(cr.strToSignPrefix, cr.prevSig, chunkhash)
cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))
if cr.currentChunkSize != 0 && cr.prevSig != cr.parsedSig {
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
}
if cr.trailerExpected != 0 {
if len(p) < len(chunkHdrDelim) {
// This is the special case where we need to consume the
// trailer, but instead hit the end of the buffer. The
// subsequent call will finish consuming the trailer.
cr.chunkDataLeft = 0
cr.trailerExpected -= len(p)
cr.skipcheck = true
return 0, nil
}
// move data up to remove trailer
copy(p, p[cr.trailerExpected:])
n -= cr.trailerExpected
}
cr.skipcheck = false
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n])
cr.currentChunkSize = chunkSize
cr.parsedSig = sig
if err == errskipHeader {
cr.chunkDataLeft = 0
return 0, nil
}
if err != nil {
return 0, err
}
if chunkSize == 0 {
return 0, io.EOF
}
cr.trailerExpected = len(chunkHdrDelim)
// move data up to remove chunk header
copy(p, p[bufOffset:n])
n -= bufOffset
// if remaining buffer larger than chunk data,
// parse next header in buffer
if int64(n) > chunkSize {
cr.chunkDataLeft = 0
cr.chunkHash.Write(p[:chunkSize])
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
if (chunkSize + int64(n)) > math.MaxInt {
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
return n + int(chunkSize), err
}
cr.chunkDataLeft = chunkSize - int64(n)
cr.chunkHash.Write(p[:n])
return n, nil
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
// Task 3: Calculate Signature
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html#signing-request-intro
func getSigningKey(secret, region string, date time.Time) []byte {
dateKey := hmac256([]byte(awsV4+secret), []byte(date.Format(yyyymmdd)))
dateRegionKey := hmac256(dateKey, []byte(region))
dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
return signingKey
}
func hmac256(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
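
The cascade above is the standard SigV4 key derivation; a self-contained sketch using the example secret from the AWS docs (the printed key is not asserted here):

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "time"
)

func hmacSHA256(key, data []byte) []byte {
    h := hmac.New(sha256.New, key)
    h.Write(data)
    return h.Sum(nil)
}

func main() {
    secret := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
    date := time.Date(2013, 5, 24, 0, 0, 0, 0, time.UTC)
    k := hmacSHA256([]byte("AWS4"+secret), []byte(date.Format("20060102")))
    k = hmacSHA256(k, []byte("us-east-1"))
    k = hmacSHA256(k, []byte("s3"))
    k = hmacSHA256(k, []byte("aws4_request"))
    fmt.Println("signing key:", hex.EncodeToString(k))
}
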
var (
specialValues = map[payloadType]bool{
payloadTypeUnsigned: true,
payloadTypeStreamingUnsignedTrailer: true,
payloadTypeStreamingSigned: true,
payloadTypeStreamingSignedTrailer: true,
payloadTypeStreamingEcdsa: true,
payloadTypeStreamingEcdsaTrailer: true,
}
errInvalidChunkFormat = errors.New("invalid chunk header format")
errskipHeader = errors.New("skip to next header")
)
func (pt payloadType) isValid() bool {
return pt == payloadTypeUnsigned ||
pt == payloadTypeStreamingUnsignedTrailer ||
pt == payloadTypeStreamingSigned ||
pt == payloadTypeStreamingSignedTrailer ||
pt == payloadTypeStreamingEcdsa ||
pt == payloadTypeStreamingEcdsaTrailer
}
type checksumType string
const (
checksumTypeCrc32 checksumType = "x-amz-checksum-crc32"
checksumTypeCrc32c checksumType = "x-amz-checksum-crc32c"
checksumTypeSha1 checksumType = "x-amz-checksum-sha1"
checksumTypeSha256 checksumType = "x-amz-checksum-sha256"
checksumTypeCrc64nvme checksumType = "x-amz-checksum-crc64nvme"
maxHeaderSize = 1024
)
func (c checksumType) isValid() bool {
return c == checksumTypeCrc32 ||
c == checksumTypeCrc32c ||
c == checksumTypeSha1 ||
c == checksumTypeSha256 ||
c == checksumTypeCrc64nvme
}
// IsSpecialPayload checks for special authorization types
func IsSpecialPayload(str string) bool {
return specialValues[payloadType(str)]
}
// IsStreamingPayload checks for streaming chunk-encoded payload types
func IsStreamingPayload(str string) bool {
pt := payloadType(str)
return pt == payloadTypeStreamingUnsignedTrailer ||
pt == payloadTypeStreamingSigned ||
pt == payloadTypeStreamingSignedTrailer
}
func NewChunkReader(ctx *fiber.Ctx, r io.Reader, authdata AuthData, region, secret string, date time.Time) (io.Reader, error) {
decContLength := ctx.Get("X-Amz-Decoded-Content-Length")
if decContLength == "" {
return nil, s3err.GetAPIError(s3err.ErrMissingDecodedContentLength)
}
contentSha256 := payloadType(ctx.Get("X-Amz-Content-Sha256"))
if !contentSha256.isValid() {
//TODO: Add proper APIError
return nil, fmt.Errorf("invalid x-amz-content-sha256: %v", string(contentSha256))
// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte) (int64, string, int, error) {
stashLen := len(cr.stash)
if cr.stash != nil {
tmp := make([]byte, maxHeaderSize)
copy(tmp, cr.stash)
copy(tmp[len(cr.stash):], header)
header = tmp
cr.stash = nil
}
checksumType := checksumType(strings.ToLower(ctx.Get("X-Amz-Trailer")))
if contentSha256 != payloadTypeStreamingSigned && !checksumType.isValid() {
return nil, s3err.GetAPIError(s3err.ErrTrailerHeaderNotSupported)
semicolonIndex := bytes.Index(header, []byte(chunkHdrStr))
if semicolonIndex == -1 {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
cr.trailerExpected = 0
return 0, "", 0, errskipHeader
}
switch contentSha256 {
case payloadTypeStreamingUnsignedTrailer:
return NewUnsignedChunkReader(r, checksumType)
case payloadTypeStreamingSignedTrailer:
return NewSignedChunkReader(r, authdata, region, secret, date, checksumType)
case payloadTypeStreamingSigned:
return NewSignedChunkReader(r, authdata, region, secret, date, "")
// return not supported for:
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD
// - STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER
default:
return nil, getPayloadTypeNotSupportedErr(contentSha256)
sigIndex := semicolonIndex + len(chunkHdrStr)
sigEndIndex := bytes.Index(header[sigIndex:], []byte(chunkHdrDelim))
if sigEndIndex == -1 {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
cr.trailerExpected = 0
return 0, "", 0, errskipHeader
}
chunkSizeBytes := header[:semicolonIndex]
chunkSize, err := strconv.ParseInt(string(chunkSizeBytes), 16, 64)
if err != nil {
return 0, "", 0, errInvalidChunkFormat
}
signature := string(header[sigIndex:(sigIndex + sigEndIndex)])
dataStartOffset := sigIndex + sigEndIndex + len(chunkHdrDelim)
return chunkSize, signature, dataStartOffset - stashLen, nil
}
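
For orientation, the wire format this parser consumes, sketched with shortened placeholder signatures:

package main

import "fmt"

func main() {
    // "<hex-size>;chunk-signature=<sig>\r\n<data>\r\n", terminated by a
    // zero-size chunk; real signatures are 64 hex characters.
    body := "5;chunk-signature=sig1\r\n" +
        "hello\r\n" +
        "0;chunk-signature=sig2\r\n" +
        "\r\n"
    fmt.Printf("%q\n", body)
}
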

View File

@@ -16,19 +16,13 @@ package utils
import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"hash"
"hash/crc32"
"hash/crc64"
"io"
"math/bits"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
)
@@ -37,21 +31,11 @@ type HashType string
const (
// HashTypeMd5 generates MD5 checksum for the data stream
HashTypeMd5 HashType = "md5"
// HashTypeSha256 generates SHA256 Base64-Encoded checksum for the data stream
HashTypeSha256 HashType = "sha256"
// HashTypeSha256Hex generates SHA256 hex encoded checksum for the data stream
HashTypeSha256Hex HashType = "sha256-hex"
// HashTypeSha1 generates SHA1 Base64-Encoded checksum for the data stream
HashTypeSha1 HashType = "sha1"
// HashTypeCRC32 generates CRC32 Base64-Encoded checksum for the data stream
HashTypeCRC32 HashType = "crc32"
// HashTypeCRC32C generates CRC32C Base64-Encoded checksum for the data stream
HashTypeCRC32C HashType = "crc32c"
// HashTypeCRC64NVME generates CRC64NVME Base64-Encoded checksum for the data stream
HashTypeCRC64NVME HashType = "crc64nvme"
HashTypeMd5 = "md5"
// HashTypeSha256 generates SHA256 checksum for the data stream
HashTypeSha256 = "sha256"
// HashTypeNone is a no-op checksum for the data stream
HashTypeNone HashType = "none"
HashTypeNone = "none"
)
// HashReader is an io.Reader that calculates the checksum
@@ -78,18 +62,8 @@ func NewHashReader(r io.Reader, expectedSum string, ht HashType) (*HashReader, e
switch ht {
case HashTypeMd5:
hash = md5.New()
case HashTypeSha256Hex:
hash = sha256.New()
case HashTypeSha256:
hash = sha256.New()
case HashTypeSha1:
hash = sha1.New()
case HashTypeCRC32:
hash = crc32.NewIEEE()
case HashTypeCRC32C:
hash = crc32.New(crc32.MakeTable(crc32.Castagnoli))
case HashTypeCRC64NVME:
hash = crc64.New(crc64.MakeTable(bits.Reverse64(0xad93d23594c93659)))
case HashTypeNone:
hash = noop{}
default:
@@ -114,40 +88,15 @@ func (hr *HashReader) Read(p []byte) (int, error) {
if errors.Is(readerr, io.EOF) && hr.sum != "" {
switch hr.hashType {
case HashTypeMd5:
sum := hr.Sum()
sum := base64.StdEncoding.EncodeToString(hr.hash.Sum(nil))
if sum != hr.sum {
return n, s3err.GetAPIError(s3err.ErrInvalidDigest)
}
case HashTypeSha256Hex:
sum := hr.Sum()
case HashTypeSha256:
sum := hex.EncodeToString(hr.hash.Sum(nil))
if sum != hr.sum {
return n, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch)
}
case HashTypeCRC32:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc32)
}
case HashTypeCRC32C:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc32c)
}
case HashTypeSha1:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmSha1)
}
case HashTypeSha256:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmSha256)
}
case HashTypeCRC64NVME:
sum := hr.Sum()
if sum != hr.sum {
return n, s3err.GetChecksumBadDigestErr(types.ChecksumAlgorithmCrc64nvme)
}
default:
return n, errInvalidHashType
}
@@ -155,38 +104,20 @@ func (hr *HashReader) Read(p []byte) (int, error) {
return n, readerr
}
func (hr *HashReader) SetReader(r io.Reader) {
hr.r = r
}
// Sum returns the checksum hash of the data read so far
func (hr *HashReader) Sum() string {
switch hr.hashType {
case HashTypeMd5:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeSha256Hex:
return hex.EncodeToString(hr.hash.Sum(nil))
case HashTypeCRC32:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeCRC32C:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeSha1:
return Base64SumString(hr.hash.Sum(nil))
return Md5SumString(hr.hash.Sum(nil))
case HashTypeSha256:
return Base64SumString(hr.hash.Sum(nil))
case HashTypeCRC64NVME:
return Base64SumString(hr.hash.Sum(nil))
return hex.EncodeToString(hr.hash.Sum(nil))
default:
return ""
}
}
func (hr *HashReader) Type() HashType {
return hr.hashType
}
// Md5SumString converts the hash bytes to the string checksum value
func Base64SumString(b []byte) string {
func Md5SumString(b []byte) string {
return base64.StdEncoding.EncodeToString(b)
}
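
This matches the Content-MD5 convention of base64 over the raw digest bytes; a quick stand-alone check:

package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
)

func main() {
    sum := md5.Sum([]byte("hello world"))
    fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
    // XrY7u+Ae7tCTyyK7j1rNww==
}
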
@@ -197,59 +128,3 @@ func (n noop) Sum(b []byte) []byte { return []byte{} }
func (n noop) Reset() {}
func (n noop) Size() int { return 0 }
func (n noop) BlockSize() int { return 1 }
// NewCompositeChecksumReader initializes a composite checksum
// processor, which decodes and validates the provided
// checksums and returns the final checksum based on
// the previous processings.
//
// The supported checksum types are:
// - CRC32
// - CRC32C
// - SHA1
// - SHA256
func NewCompositeChecksumReader(ht HashType) (*CompositeChecksumReader, error) {
var hasher hash.Hash
switch ht {
case HashTypeSha256:
hasher = sha256.New()
case HashTypeSha1:
hasher = sha1.New()
case HashTypeCRC32:
hasher = crc32.NewIEEE()
case HashTypeCRC32C:
hasher = crc32.New(crc32.MakeTable(crc32.Castagnoli))
case HashTypeNone:
hasher = noop{}
default:
return nil, errInvalidHashType
}
return &CompositeChecksumReader{
hasher: hasher,
}, nil
}
type CompositeChecksumReader struct {
hasher hash.Hash
}
// Decodes and writes the checksum in the hasher
func (ccr *CompositeChecksumReader) Process(checksum string) error {
data, err := base64.StdEncoding.DecodeString(checksum)
if err != nil {
return fmt.Errorf("base64 decode: %w", err)
}
_, err = ccr.hasher.Write(data)
if err != nil {
return fmt.Errorf("hash write: %w", err)
}
return nil
}
// Returns the base64 encoded composite checksum
func (ccr *CompositeChecksumReader) Sum() string {
return Base64SumString(ccr.hasher.Sum(nil))
}

View File

@@ -41,10 +41,10 @@ const (
// data requests where the data size is not known until
// the data is completely read.
type PresignedAuthReader struct {
r io.Reader
ctx *fiber.Ctx
auth AuthData
secret string
r io.Reader
debug bool
}

View File

@@ -23,13 +23,13 @@ import (
func Test_validateExpiration(t *testing.T) {
type args struct {
str string
date time.Time
str string
}
tests := []struct {
name string
args args
err error
name string
}{
{
name: "empty-expiration",

View File

@@ -1,485 +0,0 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"bufio"
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"math"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
)
// chunked uploads described in:
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
const (
chunkHdrDelim = "\r\n"
zeroLenSig = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
awsV4 = "AWS4"
awsS3Service = "s3"
awsV4Request = "aws4_request"
trailerSignatureHeader = "x-amz-trailer-signature"
streamPayloadAlgo = "AWS4-HMAC-SHA256-PAYLOAD"
streamPayloadTrailerAlgo = "AWS4-HMAC-SHA256-TRAILER"
)
// ChunkReader reads from the chunked upload request body and returns
// the object data stream
type ChunkReader struct {
r io.Reader
signingKey []byte
prevSig string
parsedSig string
chunkDataLeft int64
trailer checksumType
trailerSig string
parsedChecksum string
stash []byte
chunkHash hash.Hash
checksumHash hash.Hash
isEOF bool
isFirstHeader bool
region string
date time.Time
}
// NewSignedChunkReader reads from the request body io.Reader and parses out the
// chunk metadata in stream. The headers are validated for proper signatures.
// Reading from the chunk reader will read only the object data stream
// without the chunk headers/trailers.
func NewSignedChunkReader(r io.Reader, authdata AuthData, region, secret string, date time.Time, chType checksumType) (io.Reader, error) {
chRdr := &ChunkReader{
r: r,
signingKey: getSigningKey(secret, region, date),
// the authdata.Signature is validated in the auth-reader,
// so we can use that here without any other checks
prevSig: authdata.Signature,
chunkHash: sha256.New(),
isFirstHeader: true,
date: date,
region: region,
trailer: chType,
}
if chType != "" {
checksumHasher, err := getHasher(chType)
if err != nil {
return nil, err
}
chRdr.checksumHash = checksumHasher
}
return chRdr, nil
}
// Read satisfies the io.Reader for this type
func (cr *ChunkReader) Read(p []byte) (int, error) {
n, err := cr.r.Read(p)
if err != nil && err != io.EOF {
return 0, err
}
cr.isEOF = err == io.EOF
if cr.chunkDataLeft < int64(n) {
chunkSize := cr.chunkDataLeft
if chunkSize > 0 {
cr.chunkHash.Write(p[:chunkSize])
if cr.checksumHash != nil {
cr.checksumHash.Write(p[:chunkSize])
}
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
n += int(chunkSize)
return n, err
}
cr.chunkDataLeft -= int64(n)
cr.chunkHash.Write(p[:n])
if cr.checksumHash != nil {
cr.checksumHash.Write(p[:n])
}
return n, err
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// This part is the same for all chunks,
// only the previous signature and hash of current chunk changes
func (cr *ChunkReader) getStringToSignPrefix(algo string) string {
credentialScope := fmt.Sprintf("%s/%s/%s/%s",
cr.date.Format("20060102"),
cr.region,
awsS3Service,
awsV4Request)
return fmt.Sprintf("%s\n%s\n%s",
algo,
cr.date.Format("20060102T150405Z"),
credentialScope)
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
// signature For each chunk, you calculate the signature using the following
// string to sign. For the first chunk, you use the seed-signature as the
// previous signature.
func (cr *ChunkReader) getChunkStringToSign() string {
prefix := cr.getStringToSignPrefix(streamPayloadAlgo)
chunkHash := cr.chunkHash.Sum(nil)
return fmt.Sprintf("%s\n%s\n%s\n%s",
prefix,
cr.prevSig,
zeroLenSig,
hex.EncodeToString(chunkHash))
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html#example-signature-calculations-trailing-header
// Builds the final chunk trailing signature string to sign
func (cr *ChunkReader) getTrailerChunkStringToSign() string {
trailer := fmt.Sprintf("%v:%v\n", cr.trailer, cr.parsedChecksum)
hsh := sha256.Sum256([]byte(trailer))
sig := hex.EncodeToString(hsh[:])
prefix := cr.getStringToSignPrefix(streamPayloadTrailerAlgo)
return fmt.Sprintf("%s\n%s\n%s",
prefix,
cr.prevSig,
sig,
)
}
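
To make the trailer hashing concrete: the line that gets SHA-256'd is "<trailer-header>:<checksum>\n". A sketch with a placeholder checksum value:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

func main() {
    // placeholder CRC32C value; only the shape of the line matters here
    trailer := "x-amz-checksum-crc32c:AAAAAA==\n"
    h := sha256.Sum256([]byte(trailer))
    fmt.Println(hex.EncodeToString(h[:]))
}
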
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html#example-signature-calculations-trailing-header
// Calculates and validates the final chunk trailer signature
func (cr *ChunkReader) verifyTrailerSignature() error {
strToSign := cr.getTrailerChunkStringToSign()
sig := hex.EncodeToString(hmac256(cr.signingKey, []byte(strToSign)))
if sig != cr.trailerSig {
return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
return nil
}
// Verifies the object checksum
func (cr *ChunkReader) verifyChecksum() error {
checksumHash := cr.checksumHash.Sum(nil)
checksum := base64.StdEncoding.EncodeToString(checksumHash)
if checksum != cr.parsedChecksum {
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(cr.trailer), "x-amz-checksum-")))
return s3err.GetChecksumBadDigestErr(algo)
}
return nil
}
// Calculates and verifies the chunk signature
func (cr *ChunkReader) checkSignature() error {
sigstr := cr.getChunkStringToSign()
cr.chunkHash.Reset()
cr.prevSig = hex.EncodeToString(hmac256(cr.signingKey, []byte(sigstr)))
if cr.prevSig != cr.parsedSig {
return s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
cr.parsedSig = ""
return nil
}
// The provided p should have all of the previous chunk data and trailer
// consumed already. The expectation is that p[0] starts the
// new chunk size, with ";chunk-signature=" following. The only exception
// is if we started consuming a header or trailer but hit the end of the
// read buffer; in that case the partial bytes are stashed and the
// subsequent call finishes consuming them.
// This parses the chunk metadata in situ without allocating an extra buffer.
// It will just read and validate the chunk metadata and then move the
// following chunk data to overwrite the metadata in the provided buffer.
func (cr *ChunkReader) parseAndRemoveChunkInfo(p []byte) (int, error) {
n := len(p)
if cr.parsedSig != "" {
err := cr.checkSignature()
if err != nil {
return 0, err
}
}
chunkSize, sig, bufOffset, err := cr.parseChunkHeaderBytes(p[:n], &n)
if err == errskipHeader {
cr.chunkDataLeft = 0
return 0, nil
}
if err != nil {
return 0, err
}
cr.parsedSig = sig
// If we hit the final chunk, calculate and validate the final
// chunk signature and finish reading
if chunkSize == 0 {
cr.chunkHash.Reset()
err := cr.checkSignature()
if err != nil {
return 0, err
}
if cr.trailer != "" {
err := cr.verifyChecksum()
if err != nil {
return 0, err
}
err = cr.verifyTrailerSignature()
if err != nil {
return 0, err
}
}
return 0, io.EOF
}
// move data up to remove chunk header
copy(p, p[bufOffset:n])
n -= bufOffset
// if remaining buffer larger than chunk data,
// parse next header in buffer
if int64(n) > chunkSize {
cr.chunkDataLeft = 0
cr.chunkHash.Write(p[:chunkSize])
if cr.checksumHash != nil {
cr.checksumHash.Write(p[:chunkSize])
}
n, err := cr.parseAndRemoveChunkInfo(p[chunkSize:n])
if (chunkSize + int64(n)) > math.MaxInt {
return 0, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)
}
return n + int(chunkSize), err
}
cr.chunkDataLeft = chunkSize - int64(n)
cr.chunkHash.Write(p[:n])
if cr.checksumHash != nil {
cr.checksumHash.Write(p[:n])
}
return n, nil
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
// Task 3: Calculate Signature
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html#signing-request-intro
func getSigningKey(secret, region string, date time.Time) []byte {
dateKey := hmac256([]byte(awsV4+secret), []byte(date.Format(yyyymmdd)))
dateRegionKey := hmac256(dateKey, []byte(region))
dateRegionServiceKey := hmac256(dateRegionKey, []byte(awsS3Service))
signingKey := hmac256(dateRegionServiceKey, []byte(awsV4Request))
return signingKey
}
func hmac256(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
var (
errInvalidChunkFormat = errors.New("invalid chunk header format")
errskipHeader = errors.New("skip to next header")
)
const (
maxHeaderSize = 1024
)
// This returns the chunk payload size, signature, data start offset, and
// error if any. See the AWS documentation for the chunk header format. The
// header[0] byte is expected to be the first byte of the chunk size here.
func (cr *ChunkReader) parseChunkHeaderBytes(header []byte, l *int) (int64, string, int, error) {
stashLen := len(cr.stash)
if stashLen > maxHeaderSize {
return 0, "", 0, errInvalidChunkFormat
}
if cr.stash != nil {
tmp := make([]byte, stashLen+len(header))
copy(tmp, cr.stash)
copy(tmp[len(cr.stash):], header)
header = tmp
cr.stash = nil
}
rdr := bufio.NewReader(bytes.NewReader(header))
// After the first chunk, each subsequent chunk header is
// preceded by "\r\n"
if !cr.isFirstHeader && stashLen == 0 {
err := readAndSkip(rdr, '\r', '\n')
if err != nil {
return cr.handleRdrErr(err, header)
}
copy(header, header[2:])
*l = *l - 2
}
// read and parse the chunk size
chunkSizeStr, err := readAndTrim(rdr, ';')
if err != nil {
return cr.handleRdrErr(err, header)
}
chunkSize, err := strconv.ParseInt(chunkSizeStr, 16, 64)
if err != nil {
return 0, "", 0, errInvalidChunkFormat
}
// read the chunk signature
err = readAndSkip(rdr, 'c', 'h', 'u', 'n', 'k', '-', 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', '=')
if err != nil {
return cr.handleRdrErr(err, header)
}
sig, err := readAndTrim(rdr, '\r')
if err != nil {
return cr.handleRdrErr(err, header)
}
// read and parse the final chunk trailer and checksum
if chunkSize == 0 {
if cr.trailer != "" {
err = readAndSkip(rdr, '\n')
if err != nil {
return cr.handleRdrErr(err, header)
}
// parse and validate the trailing header
trailer, err := readAndTrim(rdr, ':')
if err != nil {
return cr.handleRdrErr(err, header)
}
if trailer != string(cr.trailer) {
return 0, "", 0, errInvalidChunkFormat
}
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(trailer, "x-amz-checksum-")))
// parse the checksum
checksum, err := readAndTrim(rdr, '\r')
if err != nil {
return cr.handleRdrErr(err, header)
}
if !IsValidChecksum(checksum, algo) {
return 0, "", 0, s3err.GetInvalidTrailingChecksumHeaderErr(trailer)
}
err = readAndSkip(rdr, '\n')
if err != nil {
return cr.handleRdrErr(err, header)
}
// parse the trailing signature
trailerSigPrefix, err := readAndTrim(rdr, ':')
if err != nil {
return cr.handleRdrErr(err, header)
}
if trailerSigPrefix != trailerSignatureHeader {
return 0, "", 0, errInvalidChunkFormat
}
trailerSig, err := readAndTrim(rdr, '\r')
if err != nil {
return cr.handleRdrErr(err, header)
}
cr.trailerSig = trailerSig
cr.parsedChecksum = checksum
}
// "\r\n\r\n" is followed after the last chunk
err = readAndSkip(rdr, '\n', '\r', '\n')
if err != nil {
return cr.handleRdrErr(err, header)
}
return 0, sig, 0, nil
}
err = readAndSkip(rdr, '\n')
if err != nil {
return cr.handleRdrErr(err, header)
}
ind := bytes.Index(header, []byte{'\r', '\n'})
cr.isFirstHeader = false
return chunkSize, sig, ind + len(chunkHdrDelim) - stashLen, nil
}
// Stashes the header in cr.stash and returns "errskipHeader"
func (cr *ChunkReader) stashAndSkipHeader(header []byte) (int64, string, int, error) {
cr.stash = make([]byte, len(header))
copy(cr.stash, header)
return 0, "", 0, errskipHeader
}
// Returns "errInvalidChunkFormat" if the passed err is "io.EOF" and cr.rdr EOF is reached
// calls "cr.stashAndSkipHeader" if the passed err is "io.EOF" and cr.isEOF is false
// Returns the error otherwise
func (cr *ChunkReader) handleRdrErr(err error, header []byte) (int64, string, int, error) {
if err == io.EOF {
if cr.isEOF {
return 0, "", 0, errInvalidChunkFormat
}
return cr.stashAndSkipHeader(header)
}
return 0, "", 0, err
}
// reads data from the "rdr" and validates the passed data bytes
func readAndSkip(rdr *bufio.Reader, data ...byte) error {
for _, d := range data {
b, err := rdr.ReadByte()
if err != nil {
return err
}
if b != d {
return errMalformedEncoding
}
}
return nil
}
// reads string by "delim" and trims the delimiter at the end
func readAndTrim(r *bufio.Reader, delim byte) (string, error) {
str, err := r.ReadString(delim)
if err != nil {
return "", err
}
return strings.TrimSuffix(str, string(delim)), nil
}

View File

@@ -1,235 +0,0 @@
// Copyright 2024 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"bufio"
"bytes"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"hash"
"hash/crc32"
"hash/crc64"
"io"
"math/bits"
"strconv"
"strings"
)
var (
trailerDelim = []byte{'\n', '\r', '\n'}
errMalformedEncoding = errors.New("malformed chunk encoding")
)
type UnsignedChunkReader struct {
reader *bufio.Reader
checksumType checksumType
expectedChecksum string
hasher hash.Hash
stash []byte
chunkCounter int
offset int
}
func NewUnsignedChunkReader(r io.Reader, ct checksumType) (*UnsignedChunkReader, error) {
hasher, err := getHasher(ct)
if err != nil {
return nil, err
}
return &UnsignedChunkReader{
reader: bufio.NewReader(r),
checksumType: ct,
stash: make([]byte, 0),
hasher: hasher,
chunkCounter: 1,
}, nil
}
func (ucr *UnsignedChunkReader) Read(p []byte) (int, error) {
// First read any stashed data
if len(ucr.stash) != 0 {
n := copy(p, ucr.stash)
ucr.offset += n
if n < len(ucr.stash) {
ucr.stash = ucr.stash[n:]
ucr.offset = 0
return n, nil
}
// stash fully drained; clear it so the next Read does not
// replay stale bytes
ucr.stash = ucr.stash[:0]
}
for {
// Read the chunk size
chunkSize, err := ucr.extractChunkSize()
if err != nil {
return 0, err
}
if chunkSize == 0 {
// Stop reading and parsing payloads once the 0-sized chunk is reached
break
}
rdr := io.TeeReader(ucr.reader, ucr.hasher)
payload := make([]byte, chunkSize)
// Read and cache the payload
_, err = io.ReadFull(rdr, payload)
if err != nil {
return 0, err
}
// Skip the trailing "\r\n"
if err := ucr.readAndSkip('\r', '\n'); err != nil {
return 0, err
}
// Copy the payload into the io.Reader buffer
n := copy(p[ucr.offset:], payload)
ucr.offset += n
ucr.chunkCounter++
if int64(n) < chunkSize {
// stash the remaining data
ucr.stash = payload[n:]
dataRead := ucr.offset
ucr.offset = 0
return dataRead, nil
}
}
// Read and validate trailers
if err := ucr.readTrailer(); err != nil {
return 0, err
}
return ucr.offset, io.EOF
}
// Reads and validates the bytes provided from the underlying io.Reader
func (ucr *UnsignedChunkReader) readAndSkip(data ...byte) error {
for _, d := range data {
b, err := ucr.reader.ReadByte()
if err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if b != d {
return errMalformedEncoding
}
}
return nil
}
// Extracts the chunk size from the payload
func (ucr *UnsignedChunkReader) extractChunkSize() (int64, error) {
line, err := ucr.reader.ReadString('\n')
if err != nil {
return 0, errMalformedEncoding
}
line = strings.TrimSpace(line)
chunkSize, err := strconv.ParseInt(line, 16, 64)
if err != nil {
return 0, errMalformedEncoding
}
return chunkSize, nil
}
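
Note the size line is hexadecimal, so "400" means 1024 bytes; a one-line parse sketch:

package main

import (
    "fmt"
    "strconv"
)

func main() {
    n, err := strconv.ParseInt("400", 16, 64)
    fmt.Println(n, err) // 1024 <nil>
}
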
// Reads and validates the trailer at the end
func (ucr *UnsignedChunkReader) readTrailer() error {
var trailerBuffer bytes.Buffer
for {
v, err := ucr.reader.ReadByte()
if err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if v != '\r' {
trailerBuffer.WriteByte(v)
continue
}
var tmp [3]byte
_, err = io.ReadFull(ucr.reader, tmp[:])
if err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if !bytes.Equal(tmp[:], trailerDelim) {
return errMalformedEncoding
}
break
}
// Parse the trailer
trailerHeader := trailerBuffer.String()
trailerHeader = strings.TrimSpace(trailerHeader)
trailerHeaderParts := strings.Split(trailerHeader, ":")
if len(trailerHeaderParts) != 2 {
return errMalformedEncoding
}
if trailerHeaderParts[0] != string(ucr.checksumType) {
//TODO: handle the error
return errMalformedEncoding
}
ucr.expectedChecksum = trailerHeaderParts[1]
// Validate checksum
return ucr.validateChecksum()
}
// Validates the trailing checksum sent at the end
func (ucr *UnsignedChunkReader) validateChecksum() error {
csum := ucr.hasher.Sum(nil)
checksum := base64.StdEncoding.EncodeToString(csum)
if checksum != ucr.expectedChecksum {
return fmt.Errorf("actual checksum: %v, expected checksum: %v", checksum, ucr.expectedChecksum)
}
return nil
}
// Returns the hash calculator based on the hash type provided
func getHasher(ct checksumType) (hash.Hash, error) {
switch ct {
case checksumTypeCrc32:
return crc32.NewIEEE(), nil
case checksumTypeCrc32c:
return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil
case checksumTypeCrc64nvme:
table := crc64.MakeTable(bits.Reverse64(0xad93d23594c93659))
return crc64.New(table), nil
case checksumTypeSha1:
return sha1.New(), nil
case checksumTypeSha256:
return sha256.New(), nil
default:
return nil, errors.New("unsupported checksum type")
}
}
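
As a usage sketch, the CRC32C branch paired with the base64 encoding used for trailing checksums (output not asserted):

package main

import (
    "encoding/base64"
    "fmt"
    "hash/crc32"
)

func main() {
    h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
    h.Write([]byte("hello world"))
    fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
}
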

View File

@@ -16,7 +16,6 @@ package utils
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
"io"
@@ -179,15 +178,9 @@ func ParseUint(str string) (int32, error) {
if str == "" {
return 1000, nil
}
num, err := strconv.ParseInt(str, 10, 32)
num, err := strconv.ParseUint(str, 10, 16)
if err != nil {
return 1000, fmt.Errorf("invalid int: %w", err)
}
if num < 0 {
return 1000, fmt.Errorf("negative uint: %v", num)
}
if num > 1000 {
num = 1000
return 1000, fmt.Errorf("invalid uint: %w", err)
}
return int32(num), nil
}
@@ -203,13 +196,6 @@ func SetResponseHeaders(ctx *fiber.Ctx, headers []CustomHeader) {
}
}
// Streams the response body by chunks
func StreamResponseBody(ctx *fiber.Ctx, rdr io.ReadCloser, bodysize int) {
// SetBodyStream will call Close() on the reader when the stream is done
// since rdr is a ReadCloser
ctx.Context().SetBodyStream(rdr, bodysize)
}
func IsValidBucketName(bucket string) bool {
if len(bucket) < 3 || len(bucket) > 63 {
return false
@@ -286,9 +272,7 @@ func FilterObjectAttributes(attrs map[s3response.ObjectAttributes]struct{}, outp
if _, ok := attrs[s3response.ObjectAttributesStorageClass]; !ok {
output.StorageClass = ""
}
if _, ok := attrs[s3response.ObjectAttributesChecksum]; !ok {
output.Checksum = nil
}
fmt.Printf("%+v\n", output)
return output
}
@@ -298,9 +282,6 @@ func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]stru
var err error
ctx.Request().Header.VisitAll(func(key, value []byte) {
if string(key) == "X-Amz-Object-Attributes" {
if len(value) == 0 {
return
}
oattrs := strings.Split(string(value), ",")
for _, a := range oattrs {
attr := s3response.ObjectAttributes(a)
@@ -313,15 +294,11 @@ func ParseObjectAttributes(ctx *fiber.Ctx) (map[s3response.ObjectAttributes]stru
}
})
if err != nil {
return nil, err
}
if len(attrs) == 0 {
return nil, s3err.GetAPIError(s3err.ErrObjectAttributesInvalidHeader)
}
return attrs, nil
return attrs, err
}
type objLockCfg struct {
@@ -356,13 +333,13 @@ func ParsObjectLockHdrs(ctx *fiber.Ctx) (*objLockCfg, error) {
if objLockMode != "" &&
objLockMode != types.ObjectLockModeCompliance &&
objLockMode != types.ObjectLockModeGovernance {
return nil, s3err.GetAPIError(s3err.ErrInvalidObjectLockMode)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
legalHold := types.ObjectLockLegalHoldStatus(legalHoldHdr)
if legalHold != "" && legalHold != types.ObjectLockLegalHoldStatusOff && legalHold != types.ObjectLockLegalHoldStatusOn {
return nil, s3err.GetAPIError(s3err.ErrInvalidLegalHoldStatus)
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
return &objLockCfg{
@@ -455,182 +432,3 @@ func shouldEscape(c byte) bool {
return true
}
func ParseChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, map[types.ChecksumAlgorithm]string, error) {
sdkAlgorithm := types.ChecksumAlgorithm(strings.ToUpper(ctx.Get("X-Amz-Sdk-Checksum-Algorithm")))
err := IsChecksumAlgorithmValid(sdkAlgorithm)
if err != nil {
return "", nil, err
}
checksums := map[types.ChecksumAlgorithm]string{}
var hdrErr error
// Parse and validate checksum headers
ctx.Request().Header.VisitAll(func(key, value []byte) {
// Skip `X-Amz-Checksum-Type` as it's a special header
if hdrErr != nil || !strings.HasPrefix(string(key), "X-Amz-Checksum-") || string(key) == "X-Amz-Checksum-Type" {
return
}
algo := types.ChecksumAlgorithm(strings.ToUpper(strings.TrimPrefix(string(key), "X-Amz-Checksum-")))
err := IsChecksumAlgorithmValid(algo)
if err != nil {
hdrErr = s3err.GetAPIError(s3err.ErrInvalidChecksumHeader)
return
}
checksums[algo] = string(value)
})
if hdrErr != nil {
return sdkAlgorithm, nil, hdrErr
}
if len(checksums) > 1 {
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
}
for al, val := range checksums {
if !IsValidChecksum(val, al) {
return sdkAlgorithm, checksums, s3err.GetInvalidChecksumHeaderErr(fmt.Sprintf("x-amz-checksum-%v", strings.ToLower(string(al))))
}
// If a checksum header names a different algorithm
// than x-amz-sdk-checksum-algorithm, treat it as
// multiple checksum headers
if sdkAlgorithm != "" && sdkAlgorithm != al {
return sdkAlgorithm, checksums, s3err.GetAPIError(s3err.ErrMultipleChecksumHeaders)
}
sdkAlgorithm = al
}
return sdkAlgorithm, checksums, nil
}
var checksumLengths = map[types.ChecksumAlgorithm]int{
types.ChecksumAlgorithmCrc32: 4,
types.ChecksumAlgorithmCrc32c: 4,
types.ChecksumAlgorithmCrc64nvme: 8,
types.ChecksumAlgorithmSha1: 20,
types.ChecksumAlgorithmSha256: 32,
}
func IsValidChecksum(checksum string, algorithm types.ChecksumAlgorithm) bool {
decoded, err := base64.StdEncoding.DecodeString(checksum)
if err != nil {
return false
}
expectedLength, exists := checksumLengths[algorithm]
if !exists {
return false
}
return len(decoded) == expectedLength
}
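
The validation is purely structural: valid base64 that decodes to the algorithm's digest length, with no recomputation of the checksum itself. A quick sketch:

package main

import (
    "encoding/base64"
    "fmt"
)

func main() {
    // a CRC32 checksum must decode to exactly 4 bytes
    b, err := base64.StdEncoding.DecodeString("ww2FVQ==")
    fmt.Println(len(b), err) // 4 <nil>
}
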
func IsChecksumAlgorithmValid(alg types.ChecksumAlgorithm) error {
alg = types.ChecksumAlgorithm(strings.ToUpper(string(alg)))
if alg != "" &&
alg != types.ChecksumAlgorithmCrc32 &&
alg != types.ChecksumAlgorithmCrc32c &&
alg != types.ChecksumAlgorithmSha1 &&
alg != types.ChecksumAlgorithmSha256 &&
alg != types.ChecksumAlgorithmCrc64nvme {
return s3err.GetAPIError(s3err.ErrInvalidChecksumAlgorithm)
}
return nil
}
// Validates the provided checksum type
func IsChecksumTypeValid(t types.ChecksumType) error {
if t != "" &&
t != types.ChecksumTypeComposite &&
t != types.ChecksumTypeFullObject {
return s3err.GetInvalidChecksumHeaderErr("x-amz-checksum-type")
}
return nil
}
type checksumTypeSchema map[types.ChecksumType]struct{}
type checksumSchema map[types.ChecksumAlgorithm]checksumTypeSchema
// A table defining the checksum algorithm/type support
var checksumMap checksumSchema = checksumSchema{
types.ChecksumAlgorithmCrc32: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
types.ChecksumTypeFullObject: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmCrc32c: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
types.ChecksumTypeFullObject: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmSha1: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmSha256: checksumTypeSchema{
types.ChecksumTypeComposite: struct{}{},
"": struct{}{},
},
types.ChecksumAlgorithmCrc64nvme: checksumTypeSchema{
types.ChecksumTypeFullObject: struct{}{},
"": struct{}{},
},
// Both could be empty
"": checksumTypeSchema{
"": struct{}{},
},
}
// Checks if checksum type and algorithm are supported together
func checkChecksumTypeAndAlgo(algo types.ChecksumAlgorithm, t types.ChecksumType) error {
typeSchema := checksumMap[algo]
_, ok := typeSchema[t]
if !ok {
return s3err.GetChecksumSchemaMismatchErr(algo, t)
}
return nil
}
// Parses and validates the x-amz-checksum-algorithm and x-amz-checksum-type headers
func ParseCreateMpChecksumHeaders(ctx *fiber.Ctx) (types.ChecksumAlgorithm, types.ChecksumType, error) {
algo := types.ChecksumAlgorithm(ctx.Get("x-amz-checksum-algorithm"))
if err := IsChecksumAlgorithmValid(algo); err != nil {
return "", "", err
}
chType := types.ChecksumType(ctx.Get("x-amz-checksum-type"))
if err := IsChecksumTypeValid(chType); err != nil {
return "", "", err
}
// Verify a checksum algorithm is provided whenever
// a checksum type is specified
if chType != "" && algo == "" {
return algo, chType, s3err.GetAPIError(s3err.ErrChecksumTypeWithAlgo)
}
// Verify that the checksum type is supported for
// the provided checksum algorithm
if err := checkChecksumTypeAndAlgo(algo, chType); err != nil {
return algo, chType, err
}
// x-amz-checksum-type defaults to COMPOSITE
// if x-amz-checksum-algorithm is set except
// for the CRC64NVME algorithm: it defaults to FULL_OBJECT
if algo != "" && chType == "" {
if algo == types.ChecksumAlgorithmCrc64nvme {
chType = types.ChecksumTypeFullObject
} else {
chType = types.ChecksumTypeComposite
}
}
return algo, chType, nil
}
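
The defaulting rule in the last branch, restated as a tiny sketch; plain strings stand in for the SDK's enum types:

package main

import "fmt"

// defaultChecksumType is a hypothetical mirror of the rule above, not
// the gateway's function.
func defaultChecksumType(algo string) string {
    switch algo {
    case "":
        return ""
    case "CRC64NVME":
        return "FULL_OBJECT"
    default:
        return "COMPOSITE"
    }
}

func main() {
    fmt.Println(defaultChecksumType("CRC32"))     // COMPOSITE
    fmt.Println(defaultChecksumType("CRC64NVME")) // FULL_OBJECT
}
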

View File

@@ -49,11 +49,11 @@ func TestCreateHttpRequestFromCtx(t *testing.T) {
request2.Header.Add("X-Amz-Mfa", "Some valid Mfa")
tests := []struct {
name string
args args
want *http.Request
wantErr bool
name string
hdrs []string
wantErr bool
}{
{
name: "Success-response",
@@ -101,9 +101,9 @@ func TestGetUserMetaData(t *testing.T) {
req := ctx.Request()
tests := []struct {
name string
args args
wantMetadata map[string]string
name string
}{
{
name: "Success-empty-response",
@@ -268,14 +268,6 @@ func TestParseUint(t *testing.T) {
want: 23,
wantErr: false,
},
{
name: "Parse-uint-greater-than-1000",
args: args{
str: "25000000",
},
want: 1000,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -535,325 +527,3 @@ func Test_escapePath(t *testing.T) {
})
}
}
func TestIsChecksumAlgorithmValid(t *testing.T) {
type args struct {
alg types.ChecksumAlgorithm
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "empty",
args: args{
alg: "",
},
wantErr: false,
},
{
name: "crc32",
args: args{
alg: types.ChecksumAlgorithmCrc32,
},
wantErr: false,
},
{
name: "crc32c",
args: args{
alg: types.ChecksumAlgorithmCrc32c,
},
wantErr: false,
},
{
name: "sha1",
args: args{
alg: types.ChecksumAlgorithmSha1,
},
wantErr: false,
},
{
name: "sha256",
args: args{
alg: types.ChecksumAlgorithmSha256,
},
wantErr: false,
},
{
name: "crc64nvme",
args: args{
alg: types.ChecksumAlgorithmCrc64nvme,
},
wantErr: false,
},
{
name: "invalid",
args: args{
alg: types.ChecksumAlgorithm("invalid_checksum_algorithm"),
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := IsChecksumAlgorithmValid(tt.args.alg); (err != nil) != tt.wantErr {
t.Errorf("IsChecksumAlgorithmValid() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestIsValidChecksum(t *testing.T) {
type args struct {
checksum string
algorithm types.ChecksumAlgorithm
}
tests := []struct {
name string
args args
want bool
}{
{
name: "invalid-base64",
args: args{
checksum: "invalid_base64_string",
algorithm: types.ChecksumAlgorithmCrc32,
},
want: false,
},
{
name: "invalid-crc32",
args: args{
checksum: "YXNkZmFzZGZhc2Rm",
algorithm: types.ChecksumAlgorithmCrc32,
},
want: false,
},
{
name: "valid-crc32",
args: args{
checksum: "ww2FVQ==",
algorithm: types.ChecksumAlgorithmCrc32,
},
want: true,
},
{
name: "invalid-crc32c",
args: args{
checksum: "Zmdoa2doZmtnZmhr",
algorithm: types.ChecksumAlgorithmCrc32c,
},
want: false,
},
{
name: "valid-crc32c",
args: args{
checksum: "DOsb4w==",
algorithm: types.ChecksumAlgorithmCrc32c,
},
want: true,
},
{
name: "invalid-sha1",
args: args{
checksum: "YXNkZmFzZGZhc2RmYXNkZnNhZGZzYWRm",
algorithm: types.ChecksumAlgorithmSha1,
},
want: false,
},
{
name: "valid-sha1",
args: args{
checksum: "L4q6V59Zcwn12wyLIytoE2c1ugk=",
algorithm: types.ChecksumAlgorithmSha1,
},
want: true,
},
{
name: "invalid-sha256",
args: args{
checksum: "Zmdoa2doZmtnZmhrYXNkZmFzZGZhc2RmZHNmYXNkZg==",
algorithm: types.ChecksumAlgorithmSha256,
},
want: false,
},
{
name: "valid-sha256",
args: args{
checksum: "d1SPCd/kZ2rAzbbLUC0n/bEaOSx70FNbXbIqoIxKuPY=",
algorithm: types.ChecksumAlgorithmSha256,
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := IsValidChecksum(tt.args.checksum, tt.args.algorithm); got != tt.want {
t.Errorf("IsValidChecksum() = %v, want %v", got, tt.want)
}
})
}
}
func TestIsChecksumTypeValid(t *testing.T) {
type args struct {
t types.ChecksumType
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "valid_FULL_OBJECT",
args: args{
t: types.ChecksumTypeFullObject,
},
wantErr: false,
},
{
name: "valid_COMPOSITE",
args: args{
t: types.ChecksumTypeComposite,
},
wantErr: false,
},
{
name: "invalid",
args: args{
t: types.ChecksumType("invalid_checksum_type"),
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := IsChecksumTypeValid(tt.args.t); (err != nil) != tt.wantErr {
t.Errorf("IsChecksumTypeValid() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_checkChecksumTypeAndAlgo(t *testing.T) {
type args struct {
algo types.ChecksumAlgorithm
t types.ChecksumType
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "full_object-crc32",
args: args{
algo: types.ChecksumAlgorithmCrc32,
t: types.ChecksumTypeFullObject,
},
wantErr: false,
},
{
name: "full_object-crc32c",
args: args{
algo: types.ChecksumAlgorithmCrc32c,
t: types.ChecksumTypeFullObject,
},
wantErr: false,
},
{
name: "full_object-sha1",
args: args{
algo: types.ChecksumAlgorithmSha1,
t: types.ChecksumTypeFullObject,
},
wantErr: true,
},
{
name: "full_object-sha256",
args: args{
algo: types.ChecksumAlgorithmSha1,
t: types.ChecksumTypeFullObject,
},
wantErr: true,
},
{
name: "full_object-crc64nvme",
args: args{
algo: types.ChecksumAlgorithmCrc64nvme,
t: types.ChecksumTypeFullObject,
},
wantErr: false,
},
{
name: "full_object-crc32",
args: args{
algo: types.ChecksumAlgorithmCrc32,
t: types.ChecksumTypeFullObject,
},
wantErr: false,
},
{
name: "composite-crc32",
args: args{
algo: types.ChecksumAlgorithmCrc32,
t: types.ChecksumTypeComposite,
},
wantErr: false,
},
{
name: "composite-crc32c",
args: args{
algo: types.ChecksumAlgorithmCrc32c,
t: types.ChecksumTypeComposite,
},
wantErr: false,
},
{
name: "composite-sha1",
args: args{
algo: types.ChecksumAlgorithmSha1,
t: types.ChecksumTypeComposite,
},
wantErr: false,
},
{
name: "composite-sha256",
args: args{
algo: types.ChecksumAlgorithmSha256,
t: types.ChecksumTypeComposite,
},
wantErr: false,
},
{
name: "composite-crc64nvme",
args: args{
algo: types.ChecksumAlgorithmCrc64nvme,
t: types.ChecksumTypeComposite,
},
wantErr: true,
},
{
name: "composite-empty",
args: args{
t: types.ChecksumTypeComposite,
},
wantErr: true,
},
{
name: "full_object-empty",
args: args{
t: types.ChecksumTypeFullObject,
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := checkChecksumTypeAndAlgo(tt.args.algo, tt.args.t); (err != nil) != tt.wantErr {
t.Errorf("checkChecksumTypeAndAlgo() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}

View File

@@ -17,11 +17,7 @@ package s3err
import (
"bytes"
"encoding/xml"
"fmt"
"net/http"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
// APIError structure
@@ -76,14 +72,10 @@ const (
ErrInvalidPartNumberMarker
ErrInvalidObjectAttributes
ErrInvalidPart
ErrEmptyParts
ErrInvalidPartNumber
ErrInvalidPartOrder
ErrInvalidCompleteMpPartNumber
ErrInternalError
ErrInvalidCopyDest
ErrInvalidCopySource
ErrInvalidCopySourceRange
ErrInvalidTag
ErrAuthHeaderEmpty
ErrSignatureVersionNotSupported
@@ -113,7 +105,6 @@ const (
ErrSignatureTerminationStr
ErrSignatureIncorrService
ErrContentSHA256Mismatch
ErrMissingDecodedContentLength
ErrInvalidAccessKeyID
ErrRequestNotReadyYet
ErrMissingDateHeader
@@ -131,8 +122,6 @@ const (
ErrObjectLocked
ErrPastObjectLockRetainDate
ErrObjectLockInvalidRetentionPeriod
ErrInvalidLegalHoldStatus
ErrInvalidObjectLockMode
ErrNoSuchBucketPolicy
ErrBucketTaggingNotFound
ErrObjectLockInvalidHeaders
@@ -146,23 +135,15 @@ const (
ErrUnexpectedContent
ErrMissingSecurityHeader
ErrInvalidMetadataDirective
ErrInvalidTaggingDirective
ErrKeyTooLong
ErrInvalidVersionId
ErrNoSuchVersion
ErrSuspendedVersioningNotAllowed
ErrMultipleChecksumHeaders
ErrInvalidChecksumAlgorithm
ErrInvalidChecksumPart
ErrChecksumTypeWithAlgo
ErrInvalidChecksumHeader
ErrTrailerHeaderNotSupported
// Non-AWS errors
ErrExistingObjectIsDirectory
ErrObjectParentIsFile
ErrDirectoryObjectContainsData
ErrDirectoryNotEmpty
ErrQuotaExceeded
ErrVersioningNotConfigured
@@ -172,7 +153,6 @@ const (
ErrAdminUserExists
ErrAdminInvalidUserRole
ErrAdminMissingUserAcess
ErrAdminMethodNotSupported
)
var errorCodeResponse = map[ErrorCode]APIError{
@@ -271,26 +251,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEmptyParts: {
Code: "InvalidRequest",
Description: "You must specify at least one part",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidPartNumber: {
Code: "InvalidArgument",
Description: "Part number must be an integer between 1 and 10000, inclusive.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidPartOrder: {
Code: "InvalidPartOrder",
Description: "The list of parts was not in ascending order. Parts must be ordered by part number.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCompleteMpPartNumber: {
Code: "InvalidArgument",
Description: "PartNumber must be >= 1",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCopyDest: {
Code: "InvalidRequest",
Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.",
@@ -301,11 +266,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCopySourceRange: {
Code: "InvalidArgument",
Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTag: {
Code: "InvalidArgument",
Description: "The Tag value you have provided is invalid",
@@ -456,11 +416,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The provided 'x-amz-content-sha256' header does not match what was computed.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingDecodedContentLength: {
Code: "MissingContentLength",
Description: "You must provide the Content-Length HTTP header.",
HTTPStatusCode: http.StatusLengthRequired,
},
ErrMissingDateHeader: {
Code: "AccessDenied",
Description: "AWS authentication requires a valid Date or x-amz-date header.",
@@ -536,16 +491,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "the retention days/years must be positive integer.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidLegalHoldStatus: {
Code: "InvalidArgument",
Description: "Legal Hold must be either of 'ON' or 'OFF'",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidObjectLockMode: {
Code: "InvalidArgument",
Description: "Unknown wormMode directive.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoSuchBucketPolicy: {
Code: "NoSuchBucketPolicy",
Description: "The bucket policy does not exist.",
@@ -611,11 +556,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Unknown metadata directive.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTaggingDirective: {
Code: "InvalidArgument",
Description: "Unknown tagging directive.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidVersionId: {
Code: "InvalidArgument",
Description: "Invalid version id specified",
@@ -636,36 +576,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "An Object Lock configuration is present on this bucket, so the versioning state cannot be changed.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMultipleChecksumHeaders: {
Code: "InvalidRequest",
Description: "Expecting a single x-amz-checksum- header. Multiple checksum Types are not allowed.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidChecksumAlgorithm: {
Code: "InvalidRequest",
Description: "Checksum algorithm provided is unsupported. Please try again with any of the valid types: [CRC32, CRC32C, SHA1, SHA256]",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidChecksumPart: {
Code: "InvalidArgument",
Description: "Invalid Base64 or multiple checksums present in request",
HTTPStatusCode: http.StatusBadRequest,
},
ErrChecksumTypeWithAlgo: {
Code: "InvalidRequest",
Description: "The x-amz-checksum-type header can only be used with the x-amz-checksum-algorithm header.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidChecksumHeader: {
Code: "InvalidRequest",
Description: "The algorithm type you specified in x-amz-checksum- header is invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrTrailerHeaderNotSupported: {
Code: "InvalidRequest",
Description: "The value specified in the x-amz-trailer header is not supported",
HTTPStatusCode: http.StatusBadRequest,
},
// non aws errors
ErrExistingObjectIsDirectory: {
@@ -683,11 +593,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Directory object contains data payload.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrDirectoryNotEmpty: {
Code: "ErrDirectoryNotEmpty",
Description: "Directory object not empty.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrQuotaExceeded: {
Code: "QuotaExceeded",
Description: "Your request was denied due to quota exceeded.",
@@ -725,11 +630,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "User access key ID is missing.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminMethodNotSupported: {
Code: "XAdminMethodNotSupported",
Description: "The method is not supported in single root user mode.",
HTTPStatusCode: http.StatusNotImplemented,
},
}
// GetAPIError provides API Error for input API error code.
@@ -760,80 +660,3 @@ func encodeResponse(response interface{}) []byte {
e.Encode(response)
return bytesBuffer.Bytes()
}
// Returns invalid checksum error with the provided header in the error description
func GetInvalidChecksumHeaderErr(header string) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("Value for %v header is invalid.", header),
HTTPStatusCode: http.StatusBadRequest,
}
}
func GetInvalidTrailingChecksumHeaderErr(header string) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("Value for %v trailing header is invalid.", header),
HTTPStatusCode: http.StatusBadRequest,
}
}
// Returns checksum type mismatch APIError
func GetChecksumTypeMismatchErr(expected, actual types.ChecksumAlgorithm) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("Checksum Type mismatch occurred, expected checksum Type: %v, actual checksum Type: %v", expected, actual),
HTTPStatusCode: http.StatusBadRequest,
}
}
// Returns incorrect checksum APIError
func GetChecksumBadDigestErr(algo types.ChecksumAlgorithm) APIError {
return APIError{
Code: "BadDigest",
Description: fmt.Sprintf("The %v you specified did not match the calculated checksum.", algo),
HTTPStatusCode: http.StatusBadRequest,
}
}
// Returns checksum type mismatch error with checksum algorithm
func GetChecksumSchemaMismatchErr(algo types.ChecksumAlgorithm, t types.ChecksumType) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("The %v checksum type cannot be used with the %v checksum algorithm.", algo, strings.ToLower(string(t))),
HTTPStatusCode: http.StatusBadRequest,
}
}
// Returns checksum type mismatch error for multipart uploads
func GetChecksumTypeMismatchOnMpErr(t types.ChecksumType) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("The upload was created using the %v checksum mode. The complete request must use the same checksum mode.", t),
HTTPStatusCode: http.StatusBadRequest,
}
}
func GetIncorrectMpObjectSizeErr(expected, actual int64) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("The provided 'x-amz-mp-object-size' header value %v does not match what was computed: %v", expected, actual),
HTTPStatusCode: http.StatusBadRequest,
}
}
func GetInvalidMpObjectSizeErr(val int64) APIError {
return APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("Value for x-amz-mp-object-size header is less than zero: '%v'", val),
HTTPStatusCode: http.StatusBadRequest,
}
}
func CreateExceedingRangeErr(objSize int64) APIError {
return APIError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Range specified is not valid for source object of size: %d", objSize),
HTTPStatusCode: http.StatusBadRequest,
}
}
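
For context on the `errorCodeResponse` changes above: the file maps typed error codes to canned `APIError` responses, and `GetAPIError` resolves a code to its response. A minimal, self-contained sketch of that lookup pattern (hypothetical subset; the fallback response and status codes are assumptions for illustration, not the gateway's actual behavior):

```go
package main

import (
	"fmt"
	"net/http"
)

type ErrorCode int

const (
	ErrNone ErrorCode = iota
	ErrQuotaExceeded
)

type APIError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

// A tiny subset of the code-to-response table.
var errorCodeResponse = map[ErrorCode]APIError{
	ErrQuotaExceeded: {
		Code:           "QuotaExceeded",
		Description:    "Your request was denied due to quota exceeded.",
		HTTPStatusCode: http.StatusForbidden, // status assumed for illustration
	},
}

// getAPIError resolves a code to its canned response, falling back to a
// generic internal error for unknown codes instead of a zero value.
func getAPIError(code ErrorCode) APIError {
	if resp, ok := errorCodeResponse[code]; ok {
		return resp
	}
	return APIError{
		Code:           "InternalError",
		Description:    "We encountered an internal error. Please try again.",
		HTTPStatusCode: http.StatusInternalServerError,
	}
}

func main() {
	fmt.Printf("%+v\n", getAPIError(ErrQuotaExceeded))
}
```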

View File

@@ -30,11 +30,11 @@ type S3EventSender interface {
}
type EventMeta struct {
ObjectETag *string
VersionId *string
BucketOwner string
EventName EventType
ObjectSize int64
ObjectETag *string
VersionId *string
}
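
This and the following struct diffs only reorder fields; the grouping matches what a field-alignment linter such as betteralign suggests to cut padding. A minimal sketch (hypothetical types) of why ordering changes struct size on 64-bit platforms:

```go
package main

import (
	"fmt"
	"unsafe"
)

// padded: each bool is followed by an 8-byte field, so the compiler
// inserts 7 bytes of padding after each bool to keep the int64 aligned.
type padded struct {
	a bool  // 1 byte + 7 padding
	b int64 // 8 bytes
	c bool  // 1 byte + 7 padding
	d int64 // 8 bytes
}

// packed: grouping fields from largest to smallest leaves only
// trailing padding.
type packed struct {
	b int64 // 8 bytes
	d int64 // 8 bytes
	a bool  // 1 byte
	c bool  // 1 byte + 6 trailing padding
}

func main() {
	fmt.Println(unsafe.Sizeof(padded{})) // 32 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(packed{})) // 24
}
```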
type EventSchema struct {
@@ -42,6 +42,8 @@ type EventSchema struct {
}
type EventRecord struct {
ResponseElements EventResponseElements `json:"responseElements"`
GlacierEventData EventGlacierData `json:"glacierEventData"`
EventVersion string `json:"eventVersion"`
EventSource string `json:"eventSource"`
AwsRegion string `json:"awsRegion"`
@@ -49,9 +51,7 @@ type EventRecord struct {
EventName EventType `json:"eventName"`
UserIdentity EventUserIdentity `json:"userIdentity"`
RequestParameters EventRequestParams `json:"requestParameters"`
ResponseElements EventResponseElements `json:"responseElements"`
S3 EventS3Data `json:"s3"`
GlacierEventData EventGlacierData `json:"glacierEventData"`
}
type EventUserIdentity struct {
@@ -99,11 +99,11 @@ type EventS3BucketData struct {
}
type EventObjectData struct {
Key string `json:"key"`
Size int64 `json:"size"`
ETag *string `json:"eTag"`
VersionId *string `json:"versionId"`
Key string `json:"key"`
Sequencer string `json:"sequencer"`
Size int64 `json:"size"`
}
type EventConfig struct {

View File

@@ -31,9 +31,9 @@ import (
var sequencer = 0
type Kafka struct {
key string
writer *kafka.Writer
filter EventFilter
key string
mu sync.Mutex
}

View File

@@ -27,10 +27,10 @@ import (
)
type NatsEventSender struct {
topic string
client *nats.Conn
mu sync.Mutex
filter EventFilter
topic string
mu sync.Mutex
}
func InitNatsEventService(url, topic string, filter EventFilter) (S3EventSender, error) {

View File

@@ -30,9 +30,9 @@ import (
)
type Webhook struct {
url string
client *http.Client
filter EventFilter
url string
mu sync.Mutex
}

View File

@@ -33,8 +33,8 @@ type AuditLogger interface {
type LogMeta struct {
BucketOwner string
ObjectSize int64
Action string
ObjectSize int64
HttpStatus int
}
@@ -45,21 +45,16 @@ type LogConfig struct {
}
type LogFields struct {
Time time.Time
BucketOwner string
Bucket string
Time time.Time
RemoteIP string
Requester string
RequestID string
Operation string
Key string
RequestURI string
HttpStatus int
ErrorCode string
BytesSent int
ObjectSize int64
TotalTime int64
TurnAroundTime int64
Referer string
UserAgent string
VersionID string
@@ -71,6 +66,11 @@ type LogFields struct {
TLSVersion string
AccessPointARN string
AclRequired string
HttpStatus int
BytesSent int
ObjectSize int64
TotalTime int64
TurnAroundTime int64
}
type AdminLogFields struct {
@@ -80,17 +80,17 @@ type AdminLogFields struct {
RequestID string
Operation string
RequestURI string
HttpStatus int
ErrorCode string
BytesSent int
TotalTime int64
TurnAroundTime int64
Referer string
UserAgent string
SignatureVersion string
CipherSuite string
AuthenticationType string
TLSVersion string
HttpStatus int
BytesSent int
TotalTime int64
TurnAroundTime int64
}
type Loggers struct {

View File

@@ -34,10 +34,10 @@ const (
// FileLogger is a local file audit log
type FileLogger struct {
logfile string
f *os.File
gotErr bool
logfile string
mu sync.Mutex
gotErr bool
}
var _ AuditLogger = &FileLogger{}

View File

@@ -33,8 +33,8 @@ import (
// WebhookLogger is a webhook URL audit log
type WebhookLogger struct {
mu sync.Mutex
url string
mu sync.Mutex
}
var _ AuditLogger = &WebhookLogger{}

View File

@@ -16,7 +16,6 @@ package s3response
import (
"encoding/xml"
"io"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
@@ -30,34 +29,23 @@ const (
)
type PutObjectOutput struct {
ETag string
VersionID string
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumCRC64NVME *string
ChecksumType types.ChecksumType
ETag string
VersionID string
}
// Part describes part metadata.
type Part struct {
PartNumber int
LastModified time.Time
ETag string
Size int64
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumCRC64NVME *string
LastModified time.Time
ETag string
PartNumber int
Size int64
}
func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias Part
aux := &struct {
LastModified string `xml:"LastModified"`
*Alias
LastModified string `xml:"LastModified"`
}{
Alias: (*Alias)(&p),
}
@@ -71,25 +59,23 @@ func (p Part) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type ListPartsResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`
Bucket string
Key string
UploadID string `xml:"UploadId"`
ChecksumAlgorithm types.ChecksumAlgorithm
ChecksumType types.ChecksumType
Initiator Initiator
Owner Owner
Bucket string
Key string
UploadID string `xml:"UploadId"`
// The class of storage used to store the object.
StorageClass types.StorageClass
// List of parts.
Parts []Part `xml:"Part"`
PartNumberMarker int
NextPartNumberMarker int
MaxParts int
IsTruncated bool
// List of parts.
Parts []Part `xml:"Part"`
}
type ObjectAttributes string
@@ -111,24 +97,23 @@ func (o ObjectAttributes) IsValid() bool {
}
type GetObjectAttributesResponse struct {
ETag *string
ObjectSize *int64
StorageClass types.StorageClass `xml:",omitempty"`
ObjectParts *ObjectParts
Checksum *types.Checksum
ETag *string
ObjectSize *int64
ObjectParts *ObjectParts
// Not included in the response body
VersionId *string
LastModified *time.Time
DeleteMarker *bool
StorageClass types.StorageClass `xml:",omitempty"`
}
type ObjectParts struct {
Parts []types.ObjectPart `xml:"Part"`
PartNumberMarker int
NextPartNumberMarker int
MaxParts int
IsTruncated bool
Parts []types.ObjectPart `xml:"Part"`
}
// ListMultipartUploadsResponse - s3 api list multipart uploads response.
@@ -143,18 +128,17 @@ type ListMultipartUploadsResult struct {
Delimiter string
Prefix string
EncodingType string `xml:"EncodingType,omitempty"`
MaxUploads int
IsTruncated bool
// List of pending uploads.
Uploads []Upload `xml:"Upload"`
// Delimed common prefixes.
CommonPrefixes []CommonPrefix
MaxUploads int
IsTruncated bool
}
type ListObjectsResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"`
Name *string
Prefix *string
Marker *string
@@ -162,13 +146,13 @@ type ListObjectsResult struct {
MaxKeys *int32
Delimiter *string
IsTruncated *bool
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"`
EncodingType types.EncodingType
Contents []Object
CommonPrefixes []types.CommonPrefix
EncodingType types.EncodingType
}
type ListObjectsV2Result struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"`
Name *string
Prefix *string
StartAfter *string
@@ -178,21 +162,20 @@ type ListObjectsV2Result struct {
MaxKeys *int32
Delimiter *string
IsTruncated *bool
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"`
EncodingType types.EncodingType
Contents []Object
CommonPrefixes []types.CommonPrefix
EncodingType types.EncodingType
}
type Object struct {
ChecksumAlgorithm []types.ChecksumAlgorithm
ChecksumType types.ChecksumType
ETag *string
Key *string
LastModified *time.Time
Owner *types.Owner
RestoreStatus *types.RestoreStatus
Size *int64
StorageClass types.ObjectStorageClass
ETag *string
Key *string
LastModified *time.Time
Owner *types.Owner
RestoreStatus *types.RestoreStatus
Size *int64
StorageClass types.ObjectStorageClass
}
func (o Object) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
@@ -214,21 +197,19 @@ func (o Object) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// Upload describes in progress multipart upload
type Upload struct {
Key string
UploadID string `xml:"UploadId"`
Initiator Initiator
Owner Owner
StorageClass types.StorageClass
Initiated time.Time
ChecksumAlgorithm types.ChecksumAlgorithm
ChecksumType types.ChecksumType
Initiated time.Time
Initiator Initiator
Owner Owner
Key string
UploadID string `xml:"UploadId"`
StorageClass types.StorageClass
}
func (u Upload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias Upload
aux := &struct {
Initiated string `xml:"Initiated"`
*Alias
Initiated string `xml:"Initiated"`
}{
Alias: (*Alias)(&u),
}
@@ -280,11 +261,11 @@ type DeleteResult struct {
}
type SelectObjectContentPayload struct {
Expression *string
ExpressionType types.ExpressionType
RequestProgress *types.RequestProgress
InputSerialization *types.InputSerialization
OutputSerialization *types.OutputSerialization
ScanRange *types.ScanRange
ExpressionType types.ExpressionType
}
type SelectObjectContentResult struct {
@@ -302,30 +283,30 @@ type Bucket struct {
type ListBucketsInput struct {
Owner string
IsAdmin bool
ContinuationToken string
Prefix string
MaxBuckets int32
IsAdmin bool
}
type ListAllMyBucketsResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult" json:"-"`
Owner CanonicalUser
Buckets ListAllMyBucketsList
ContinuationToken string `xml:"ContinuationToken,omitempty"`
Prefix string `xml:"Prefix,omitempty"`
Buckets ListAllMyBucketsList
}
type ListAllMyBucketsEntry struct {
Name string
CreationDate time.Time
Name string
}
func (r ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias ListAllMyBucketsEntry
aux := &struct {
CreationDate string `xml:"CreationDate"`
*Alias
CreationDate string `xml:"CreationDate"`
}{
Alias: (*Alias)(&r),
}
@@ -351,25 +332,11 @@ type CopyObjectResult struct {
CopySourceVersionId string `xml:"-"`
}
type CopyPartResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyPartResult" json:"-"`
LastModified time.Time
ETag *string
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumCRC64NVME *string
// not included in the body
CopySourceVersionId string `xml:"-"`
}
func (r CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type Alias CopyObjectResult
aux := &struct {
LastModified string `xml:"LastModified"`
*Alias
LastModified string `xml:"LastModified"`
}{
Alias: (*Alias)(&r),
}
@@ -437,143 +404,15 @@ type ListVersionsResult struct {
}
type GetBucketVersioningOutput struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersioningConfiguration" json:"-"`
MFADelete *types.MFADeleteStatus
Status *types.BucketVersioningStatus
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersioningConfiguration" json:"-"`
}
type PutObjectRetentionInput struct {
RetainUntilDate AmzDate
XMLName xml.Name `xml:"Retention"`
Mode types.ObjectLockRetentionMode
RetainUntilDate AmzDate
}
type PutObjectInput struct {
ContentLength *int64
ObjectLockRetainUntilDate *time.Time
Bucket *string
Key *string
ContentType *string
ContentEncoding *string
ContentDisposition *string
ContentLanguage *string
CacheControl *string
Expires *string
Tagging *string
ChecksumCRC32 *string
ChecksumCRC32C *string
ChecksumSHA1 *string
ChecksumSHA256 *string
ChecksumCRC64NVME *string
ContentMD5 *string
ExpectedBucketOwner *string
GrantFullControl *string
GrantRead *string
GrantReadACP *string
GrantWriteACP *string
IfMatch *string
IfNoneMatch *string
SSECustomerAlgorithm *string
SSECustomerKey *string
SSECustomerKeyMD5 *string
SSEKMSEncryptionContext *string
SSEKMSKeyId *string
WebsiteRedirectLocation *string
ObjectLockMode types.ObjectLockMode
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
ChecksumAlgorithm types.ChecksumAlgorithm
Metadata map[string]string
Body io.Reader
}
type CreateMultipartUploadInput struct {
Bucket *string
Key *string
ExpectedBucketOwner *string
CacheControl *string
ContentDisposition *string
ContentEncoding *string
ContentLanguage *string
ContentType *string
Expires *string
SSECustomerAlgorithm *string
SSECustomerKey *string
SSECustomerKeyMD5 *string
SSEKMSEncryptionContext *string
SSEKMSKeyId *string
GrantFullControl *string
GrantRead *string
GrantReadACP *string
GrantWriteACP *string
Tagging *string
WebsiteRedirectLocation *string
BucketKeyEnabled *bool
ObjectLockRetainUntilDate *time.Time
Metadata map[string]string
ACL types.ObjectCannedACL
ChecksumAlgorithm types.ChecksumAlgorithm
ChecksumType types.ChecksumType
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
ObjectLockMode types.ObjectLockMode
RequestPayer types.RequestPayer
ServerSideEncryption types.ServerSideEncryption
StorageClass types.StorageClass
}
type CopyObjectInput struct {
Metadata map[string]string
Bucket *string
CopySource *string
Key *string
CacheControl *string
ContentDisposition *string
ContentEncoding *string
ContentLanguage *string
ContentType *string
CopySourceIfMatch *string
CopySourceIfNoneMatch *string
CopySourceSSECustomerAlgorithm *string
CopySourceSSECustomerKey *string
CopySourceSSECustomerKeyMD5 *string
ExpectedBucketOwner *string
ExpectedSourceBucketOwner *string
Expires *string
GrantFullControl *string
GrantRead *string
GrantReadACP *string
GrantWriteACP *string
SSECustomerAlgorithm *string
SSECustomerKey *string
SSECustomerKeyMD5 *string
SSEKMSEncryptionContext *string
SSEKMSKeyId *string
Tagging *string
WebsiteRedirectLocation *string
CopySourceIfModifiedSince *time.Time
CopySourceIfUnmodifiedSince *time.Time
ObjectLockRetainUntilDate *time.Time
BucketKeyEnabled *bool
ACL types.ObjectCannedACL
ChecksumAlgorithm types.ChecksumAlgorithm
MetadataDirective types.MetadataDirective
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
ObjectLockMode types.ObjectLockMode
RequestPayer types.RequestPayer
ServerSideEncryption types.ServerSideEncryption
StorageClass types.StorageClass
TaggingDirective types.TaggingDirective
}
type GetObjectLegalHoldResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LegalHold"`
Status types.ObjectLockLegalHoldStatus
}
type AmzDate struct {
@@ -628,14 +467,3 @@ func (AmzDate) ISO8601Parse(date string) (t time.Time, err error) {
type ListBucketsResult struct {
Buckets []Bucket
}
type Checksum struct {
Algorithm types.ChecksumAlgorithm
Type types.ChecksumType
CRC32 *string
CRC32C *string
SHA1 *string
SHA256 *string
CRC64NVME *string
}
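
Several types in this file (`Part`, `Upload`, `CopyObjectResult`, `ListAllMyBucketsEntry`) override a single field's XML encoding through an embedded alias type, as the `MarshalXML` hunks above show. A self-contained sketch of that pattern with a hypothetical `Entry` type:

```go
package main

import (
	"encoding/xml"
	"os"
	"time"
)

type Entry struct {
	Name    string
	Created time.Time
}

// MarshalXML re-encodes Created as a formatted string while reusing the
// default encoding for every other field. The Alias type has Entry's
// fields but none of its methods, which avoids infinite recursion, and
// the outer Created field shadows the embedded time.Time one.
func (e Entry) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	type Alias Entry
	aux := struct {
		*Alias
		Created string `xml:"Created"`
	}{
		Alias:   (*Alias)(&e),
		Created: e.Created.UTC().Format("2006-01-02T15:04:05.000Z"),
	}
	return enc.EncodeElement(aux, start)
}

func main() {
	out, _ := xml.MarshalIndent(Entry{Name: "demo", Created: time.Now()}, "", "  ")
	os.Stdout.Write(append(out, '\n'))
}
```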

View File

@@ -204,7 +204,6 @@ func genErrorMessage(errorCode, errorMessage string) []byte {
type GetProgress func() (bytesScanned int64, bytesProcessed int64)
type MessageHandler struct {
sync.Mutex
ctx context.Context
cancel context.CancelFunc
writer *bufio.Writer
@@ -213,6 +212,7 @@ type MessageHandler struct {
stopCh chan bool
resetCh chan bool
bytesReturned int64
sync.Mutex
}
// NewMessageHandler creates a new MessageHandler instance and starts the event streaming

View File

@@ -25,5 +25,4 @@ USERNAME_TWO=HIJKLMN
PASSWORD_TWO=OPQRSTU
TEST_FILE_FOLDER=$PWD/versity-gwtest-files
RECREATE_BUCKETS=true
REMOVE_TEST_FILE_FOLDER=true
VERSIONING_DIR=/tmp/versioning
REMOVE_TEST_FILE_FOLDER=true

View File

@@ -3,9 +3,8 @@ FROM ubuntu:latest
ARG DEBIAN_FRONTEND=noninteractive
ARG SECRETS_FILE=tests/.secrets
ARG CONFIG_FILE=tests/.env.docker
ARG GO_LIBRARY=go1.21.13.linux-arm64.tar.gz
# see https://github.com/versity/versitygw/issues/1034
ARG AWS_CLI=awscli-exe-linux-aarch64-2.22.35.zip
ARG GO_LIBRARY=go1.23.1.linux-arm64.tar.gz
ARG AWS_CLI=awscli-exe-linux-aarch64.zip
ARG MC_FOLDER=linux-arm64
ENV TZ=Etc/UTC
@@ -86,7 +85,6 @@ RUN openssl genpkey -algorithm RSA -out versitygw-docker.pem -pkeyopt rsa_keygen
ENV WORKSPACE=.
ENV VERSITYGW_TEST_ENV=$CONFIG_FILE
#ENV AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED
ENTRYPOINT ["tests/run.sh"]
CMD ["s3api,s3,s3cmd,mc,rest"]

View File

@@ -1,19 +1,5 @@
# Command-Line Tests
## Table of Contents
[Instructions - Running Locally](#instructions---running-locally)<br>
[* Posix Backend](#posix-backend)<br>
[* Static Bucket Mode](#static-bucket-mode)<br>
[* S3 Backend](#s3-backend)<br>
[* Direct Mode](#direct-mode)<br>
[Instructions - Running With Docker](#instructions---running-with-docker)<br>
[Instructions - Running With Docker-Compose](#instructions---running-with-docker-compose)<br>
[Environment Parameters](#environment-parameters)<br>
[* Secret](#secret)<br>
[* Non-Secret](#non-secret)<br>
[REST Scripts](#rest-scripts)<br>
## Instructions - Running Locally
### Posix Backend
@@ -75,11 +61,10 @@ To communicate directly with s3, in order to compare the gateway results to dire
1. Copy `.secrets.default` to `.secrets` in the `tests` folder and change the parameters and add the additional s3 fields explained in the **S3 Backend** section above if running with the s3 backend.
2. By default, the dockerfile uses the **arm** architecture (usually modern Mac). If using **amd** (usually earlier Mac or Linux), you can either replace the corresponding `ARG` values directly or set them with `arg="<param>=<amd library or folder>"`. You can determine which architecture your OS uses with `uname -a`.
3. Build and run the `Dockerfile_test_bats` file. Change the `SECRETS_FILE` and `CONFIG_FILE` parameters to point to your secrets and config file, respectively, if not using the defaults. Example: `docker build -t <tag> -f Dockerfile_test_bats --build-arg="SECRETS_FILE=<file>" --build-arg="CONFIG_FILE=<file>" .`.
4. To run the entire suite, run `docker run -it <image name>`. To run an individual suite, pass in the name of the suite as defined in `tests/run.sh` (e.g. REST tests -> `docker run -it <image name> rest`). Multiple suites can also be run by separating their names with commas.
## Instructions - Running with docker-compose
A file named `docker-compose-bats.yml` is provided in the root folder. A few configurations are provided, and you can also create your own provided you have a secrets and config file:
A file named `docker-compose-bats.yml` is provided in the root folder. Four configurations are provided:
* insecure (without certificates), with creation/removal of buckets
* secure, posix backend, with static buckets
* secure, posix backend, with creation/removal of buckets
@@ -100,18 +85,8 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up
## Environment Parameters
### Secret
**AWS_PROFILE**, **AWS_ENDPOINT_URL**, **AWS_REGION**, **AWS_ACCESS_KEY_ID**, **AWS_SECRET_ACCESS_KEY**: identical to the same parameters in **s3**.
**AWS_CANONICAL_ID**: for direct mode, the canonical ID for the main user (owner)
**ACL_AWS_CANONICAL_ID**: for direct mode, the canonical ID for the user to test ACL changes and access by non-owners
**ACL_AWS_ACCESS_KEY_ID**, **ACL_AWS_ACCESS_SECRET_KEY**: for direct mode, the ID and key for the S3 user in the **ACL_AWS_CANONICAL_ID** account.
### Non-Secret
**VERSITY_EXE**: location of the versity executable relative to test folder.
**RUN_VERSITYGW**: whether to run the versitygw executable, should be set to **false** when running tests directly against **s3**.
@@ -159,21 +134,3 @@ A single instance can be run with `docker-compose -f docker-compose-bats.yml up
**VERSIONING_DIR**: where to put gateway file versioning info.
**COMMAND_LOG**: where to store list of client commands, which if using will be reported during test failures.
**TIME_LOG**: optional log to show duration of individual tests
**DIRECT_S3_ROOT_ACCOUNT_NAME**: for direct mode, S3 username
**DELETE_BUCKETS_AFTER_TEST**: whether or not to delete buckets after individual tests, useful for debugging if the post-test bucket state needs to be checked
## REST Scripts
REST scripts are included for calls to S3's REST API in the `./tests/rest_scripts/` folder. To call a script, the following parameters are needed:
* **AWS_ACCESS_KEY_ID**, **AWS_SECRET_ACCESS_KEY**, etc.
* **AWS_ENDPOINT_URL** (default: `https://localhost:7070`)
* **OUTPUT_FILE**: file where the command's response data is written
* Any other parameters specified at the top of the script file, such as payloads and variables. Sometimes, defaults are included.
Upon success, the script returns a response code and writes the response data to the **OUTPUT_FILE** location.
Example: `AWS_ACCESS_KEY_ID={id} AWS_SECRET_ACCESS_KEY={key} AWS_ENDPOINT_URL=https://s3.amazonaws.com OUTPUT_FILE=./output_file.xml ./tests/rest_scripts/list_buckets.sh`

View File

@@ -16,7 +16,7 @@
copy_object() {
if [ $# -ne 4 ]; then
log 2 "copy object command requires command type, source, bucket, key"
echo "copy object command requires command type, source, bucket, key"
return 1
fi
local exit_code=0
@@ -24,7 +24,7 @@ copy_object() {
record_command "copy-object" "client:$1"
if [[ $1 == 's3' ]]; then
error=$(send_command aws --no-verify-ssl s3 cp "$2" s3://"$3/$4" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]]; then
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
error=$(send_command aws --no-verify-ssl s3api copy-object --copy-source "$2" --bucket "$3" --key "$4" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
log 5 "s3cmd ${S3CMD_OPTS[*]} --no-check-certificate cp s3://$2 s3://$3/$4"
@@ -32,12 +32,12 @@ copy_object() {
elif [[ $1 == 'mc' ]]; then
error=$(send_command mc --insecure cp "$MC_ALIAS/$2" "$MC_ALIAS/$3/$4" 2>&1) || exit_code=$?
else
log 2 "'copy-object' not implemented for '$1'"
echo "'copy-object' not implemented for '$1'"
return 1
fi
log 5 "copy object exit code: $exit_code"
if [ $exit_code -ne 0 ]; then
log 2 "error copying object to bucket: $error"
echo "error copying object to bucket: $error"
return 1
fi
return 0

View File

@@ -31,7 +31,7 @@ create_bucket() {
log 6 "create bucket"
if [[ $1 == 's3' ]]; then
error=$(send_command aws --no-verify-ssl s3 mb s3://"$2" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]]; then
elif [[ $1 == "aws" ]] || [[ $1 == 's3api' ]]; then
error=$(send_command aws --no-verify-ssl s3api create-bucket --bucket "$2" 2>&1) || exit_code=$?
elif [[ $1 == "s3cmd" ]]; then
log 5 "s3cmd ${S3CMD_OPTS[*]} --no-check-certificate mb s3://$2"
@@ -85,8 +85,5 @@ create_bucket_object_lock_enabled() {
log 2 "error creating bucket: $error"
return 1
fi
if [ "$DIRECT" == "true" ]; then
sleep 15
fi
return 0
}

View File

@@ -54,7 +54,6 @@ create_multipart_upload_with_user() {
return 1
fi
upload_id="${upload_id//\"/}"
echo "$upload_id"
return 0
}

View File

@@ -7,7 +7,7 @@ create_presigned_url() {
fi
local presign_result=0
if [[ $1 == 's3api' ]]; then
if [[ $1 == 'aws' ]]; then
presigned_url=$(send_command aws s3 presign "s3://$2/$3" --expires-in 900) || presign_result=$?
elif [[ $1 == 's3cmd' ]]; then
presigned_url=$(send_command s3cmd --no-check-certificate "${S3CMD_OPTS[@]}" signurl "s3://$2/$3" "$(echo "$(date +%s)" + 900 | bc)") || presign_result=$?

View File

@@ -32,7 +32,7 @@ delete_bucket() {
exit_code=0
if [[ $1 == 's3' ]]; then
error=$(send_command aws --no-verify-ssl s3 rb s3://"$2") || exit_code=$?
elif [[ $1 == 's3api' ]]; then
elif [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
error=$(send_command aws --no-verify-ssl s3api delete-bucket --bucket "$2" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rb s3://"$2" 2>&1) || exit_code=$?

View File

@@ -21,7 +21,7 @@ delete_bucket_policy() {
return 1
fi
local delete_result=0
if [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]] || [[ $1 == 's3' ]]; then
error=$(send_command aws --no-verify-ssl s3api delete-bucket-policy --bucket "$2" 2>&1) || delete_result=$?
elif [[ $1 == 's3cmd' ]]; then
error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate delpolicy "s3://$2" 2>&1) || delete_result=$?

View File

@@ -21,7 +21,7 @@ delete_bucket_tagging() {
return 1
fi
local result
if [[ $1 == 's3api' ]]; then
if [[ $1 == 'aws' ]]; then
tags=$(send_command aws --no-verify-ssl s3api delete-bucket-tagging --bucket "$2" 2>&1) || result=$?
elif [[ $1 == 'mc' ]]; then
tags=$(send_command mc --insecure tag remove "$MC_ALIAS"/"$2" 2>&1) || result=$?

View File

@@ -25,7 +25,7 @@ delete_object() {
local exit_code=0
if [[ $1 == 's3' ]]; then
delete_object_error=$(send_command aws --no-verify-ssl s3 rm "s3://$2/$3" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]]; then
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
delete_object_error=$(send_command aws --no-verify-ssl s3api delete-object --bucket "$2" --key "$3" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
delete_object_error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate rm "s3://$2/$3" 2>&1) || exit_code=$?

View File

@@ -17,22 +17,22 @@
delete_object_tagging() {
record_command "delete-object-tagging" "client:$1"
if [[ $# -ne 3 ]]; then
log 2 "delete object tagging command missing command type, bucket, key"
echo "delete object tagging command missing command type, bucket, key"
return 1
fi
delete_result=0
if [[ $1 == 's3api' ]]; then
if [[ $1 == 'aws' ]]; then
error=$(send_command aws --no-verify-ssl s3api delete-object-tagging --bucket "$2" --key "$3" 2>&1) || delete_result=$?
elif [[ $1 == 'mc' ]]; then
error=$(send_command mc --insecure tag remove "$MC_ALIAS/$2/$3") || delete_result=$?
elif [ "$1" == 'rest' ]; then
delete_object_tagging_rest "$2" "$3" || delete_result=$?
else
log 2 "delete-object-tagging command not implemented for '$1'"
echo "delete-object-tagging command not implemented for '$1'"
return 1
fi
if [[ $delete_result -ne 0 ]]; then
log 2 "error deleting object tagging: $error"
echo "error deleting object tagging: $error"
return 1
fi
return 0
@@ -43,12 +43,38 @@ delete_object_tagging_rest() {
log 2 "'delete_object_tagging' requires bucket, key"
return 1
fi
if ! result=$(BUCKET_NAME="$1" OBJECT_KEY="$2" OUTPUT_FILE="$TEST_FILE_FOLDER/response.txt" ./tests/rest_scripts/delete_object_tagging.sh); then
log 2 "error sending delete object tagging REST command: $result"
generate_hash_for_payload ""
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
# shellcheck disable=SC2154
canonical_request="DELETE
/$1/$2
tagging=
host:$aws_endpoint_url_address
x-amz-content-sha256:$payload_hash
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-date
$payload_hash"
if ! generate_sts_string "$current_date_time" "$canonical_request"; then
log 2 "error generating sts string"
return 1
fi
if [ "$result" != "204" ]; then
log 2 "delete-object-tagging returned code $result (response: $(cat "$TEST_FILE_FOLDER/response.txt"))"
get_signature
# shellcheck disable=SC2154
reply=$(send_command curl -ks -w "%{http_code}" -X DELETE "$header://$aws_endpoint_url_address/$1/$2?tagging" \
-H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-H "x-amz-content-sha256: $payload_hash" \
-H "x-amz-date: $current_date_time" \
-d "$tagging" -o "$TEST_FILE_FOLDER"/delete_tagging_error.txt 2>&1)
log 5 "reply status code: $reply"
if [[ "$reply" != "204" ]]; then
log 2 "reply error: $reply"
log 2 "put object tagging command returned error: $(cat "$TEST_FILE_FOLDER"/delete_tagging_error.txt)"
return 1
fi
return 0
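
The inlined REST path above assembles a SigV4 canonical request and leans on helpers such as `generate_sts_string` and `get_signature` defined elsewhere in the suite. As a reference for what those helpers compute, a minimal Go sketch of the standard SigV4 signing-key derivation (all inputs are placeholders):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data)
	return mac.Sum(nil)
}

func main() {
	// Placeholder inputs; real values come from the request being signed.
	secret := "EXAMPLESECRETKEY"
	date, region, service := "20240101", "us-east-1", "s3"
	// In the scripts, the string to sign is built from the credential scope
	// and the SHA-256 hash of the canonical request shown above.
	stringToSign := "AWS4-HMAC-SHA256\n20240101T000000Z\n20240101/us-east-1/s3/aws4_request\n<canonical-request-hash>"

	// SigV4 signing key: HMAC chain over date, region, service, terminator.
	k := hmacSHA256([]byte("AWS4"+secret), []byte(date))
	k = hmacSHA256(k, []byte(region))
	k = hmacSHA256(k, []byte(service))
	k = hmacSHA256(k, []byte("aws4_request"))

	fmt.Println(hex.EncodeToString(hmacSHA256(k, []byte(stringToSign))))
}
```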

View File

@@ -21,7 +21,7 @@ get_bucket_acl() {
return 1
fi
local exit_code=0
if [[ $1 == 's3api' ]]; then
if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
acl=$(send_command aws --no-verify-ssl s3api get-bucket-acl --bucket "$2" 2>&1) || exit_code="$?"
elif [[ $1 == 's3cmd' ]]; then
acl=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate info "s3://$2" 2>&1) || exit_code="$?"

View File

@@ -17,18 +17,17 @@
get_bucket_location() {
record_command "get-bucket-location" "client:$1"
if [[ $# -ne 2 ]]; then
log 2 "get bucket location command requires command type, bucket name"
echo "get bucket location command requires command type, bucket name"
return 1
fi
get_result=0
if [[ $1 == 's3api' ]]; then
if [[ $1 == 'aws' ]]; then
get_bucket_location_aws "$2" || get_result=$?
elif [[ $1 == 's3cmd' ]]; then
get_bucket_location_s3cmd "$2" || get_result=$?
elif [[ $1 == 'mc' ]]; then
get_bucket_location_mc "$2" || get_result=$?
else
log 2 "command type '$1' not implemented for get_bucket_location"
echo "command type '$1' not implemented for get_bucket_location"
return 1
fi
if [[ $get_result -ne 0 ]]; then
@@ -40,7 +39,7 @@ get_bucket_location() {
get_bucket_location_aws() {
record_command "get-bucket-location" "client:s3api"
if [[ $# -ne 1 ]]; then
log 2 "get bucket location (aws) requires bucket name"
echo "get bucket location (aws) requires bucket name"
return 1
fi
location_json=$(send_command aws --no-verify-ssl s3api get-bucket-location --bucket "$1") || location_result=$?
@@ -60,7 +59,7 @@ get_bucket_location_s3cmd() {
fi
info=$(send_command s3cmd --no-check-certificate info "s3://$1") || results=$?
if [[ $results -ne 0 ]]; then
log 2 "error getting bucket location: $location"
echo "error getting s3cmd info: $info"
return 1
fi
bucket_location=$(echo "$info" | grep -o 'Location:.*' | awk '{print $2}')
@@ -70,12 +69,12 @@ get_bucket_location_s3cmd() {
get_bucket_location_mc() {
record_command "get-bucket-location" "client:mc"
if [[ $# -ne 1 ]]; then
log 2 "get bucket location (mc) requires bucket name"
echo "get bucket location (mc) requires bucket name"
return 1
fi
info=$(send_command mc --insecure stat "$MC_ALIAS/$1") || results=$?
if [[ $results -ne 0 ]]; then
log 2 "error getting s3cmd info: $info"
echo "error getting s3cmd info: $info"
return 1
fi
# shellcheck disable=SC2034

View File

@@ -26,7 +26,6 @@ get_bucket_ownership_controls() {
return 1
fi
raw_bucket_ownership_controls=""
if ! raw_bucket_ownership_controls=$(send_command aws --no-verify-ssl s3api get-bucket-ownership-controls --bucket "$1" 2>&1); then
log 2 "error getting bucket ownership controls: $raw_bucket_ownership_controls"
return 1

View File

@@ -21,7 +21,7 @@ get_bucket_policy() {
return 1
fi
local get_bucket_policy_result=0
if [[ $1 == 's3api' ]]; then
if [[ $1 == 'aws' ]] || [[ $1 == 's3api' ]]; then
get_bucket_policy_aws "$2" || get_bucket_policy_result=$?
elif [[ $1 == 's3cmd' ]]; then
get_bucket_policy_s3cmd "$2" || get_bucket_policy_result=$?
@@ -97,85 +97,41 @@ get_bucket_policy_s3cmd() {
policy_brackets=false
# NOTE: versitygw sends policies back in multiple lines here, direct in single line
while IFS= read -r line; do
if check_and_load_policy_info; then
break
if [[ $policy_brackets == false ]]; then
policy_line=$(echo "$line" | grep 'Policy: ')
if [[ $policy_line != "" ]]; then
if [[ $policy_line != *'{'* ]]; then
break
fi
if [[ $policy_line == *'}'* ]]; then
log 5 "policy on single line"
bucket_policy=${policy_line//Policy:/}
break
else
policy_brackets=true
bucket_policy+="{"
fi
fi
else
bucket_policy+=$line
if [[ $line == "" ]]; then
break
fi
fi
done <<< "$info"
log 5 "bucket policy: $bucket_policy"
return 0
}
get_bucket_policy_rest() {
if [[ $# -ne 1 ]]; then
log 2 "s3cmd 'get bucket policy' command requires bucket name"
return 1
fi
if ! get_bucket_policy_rest_expect_code "$1" "200"; then
log 2 "error getting REST bucket policy"
return 1
fi
return 0
}
get_bucket_policy_rest_expect_code() {
if [[ $# -ne 2 ]]; then
log 2 "s3cmd 'get bucket policy' command requires bucket name, expected code"
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/policy.txt" ./tests/rest_scripts/get_bucket_policy.sh); then
log 2 "error attempting to get bucket policy response: $result"
return 1
fi
if [ "$result" != "$2" ]; then
log 2 "unexpected response code, expected '$2', actual '$result' (reply: $(cat "$TEST_FILE_FOLDER/policy.txt"))"
return 1
fi
bucket_policy="$(cat "$TEST_FILE_FOLDER/policy.txt")"
}
# return 0 for no policy, single-line policy, or loading complete, 1 for still searching or loading
check_and_load_policy_info() {
if [[ $policy_brackets == false ]]; then
if search_for_first_policy_line_or_full_policy; then
return 0
fi
else
bucket_policy+=$line
if [[ $line == "}" ]]; then
return 0
fi
fi
return 1
}
# return 0 for empty or single-line policy, 1 for other cases
search_for_first_policy_line_or_full_policy() {
policy_line=$(echo "$line" | grep 'Policy: ')
if [[ $policy_line != "" ]]; then
if [[ $policy_line != *'{'* ]]; then
return 0
fi
if [[ $policy_line == *'}'* ]]; then
log 5 "policy on single line"
bucket_policy=${policy_line//Policy:/}
return 0
else
policy_brackets=true
bucket_policy+="{"
fi
fi
return 1
}
get_bucket_policy_mc() {
record_command "get-bucket-policy" "client:mc"
if [[ $# -ne 1 ]]; then
log 2 "aws 'get bucket policy' command requires bucket"
echo "aws 'get bucket policy' command requires bucket"
return 1
fi
bucket_policy=$(send_command mc --insecure anonymous get-json "$MC_ALIAS/$1") || get_result=$?
if [[ $get_result -ne 0 ]]; then
log 2 "error getting policy: $bucket_policy"
echo "error getting policy: $bucket_policy"
return 1
fi
return 0

View File

@@ -21,7 +21,7 @@ get_bucket_tagging() {
assert [ $# -eq 2 ]
record_command "get-bucket-tagging" "client:$1"
local result
if [[ $1 == 's3api' ]]; then
if [[ $1 == 'aws' ]]; then
tags=$(send_command aws --no-verify-ssl s3api get-bucket-tagging --bucket "$2" 2>&1) || result=$?
elif [[ $1 == 'mc' ]]; then
tags=$(send_command mc --insecure tag list "$MC_ALIAS"/"$2" 2>&1) || result=$?
@@ -35,7 +35,7 @@ get_bucket_tagging() {
export tags=
return 0
fi
log 2 "error getting bucket tags: $tags"
echo "error getting bucket tags: $tags"
return 1
fi
export tags

View File

@@ -24,7 +24,7 @@ get_object() {
local exit_code=0
if [[ $1 == 's3' ]]; then
get_object_error=$(send_command aws --no-verify-ssl s3 mv "s3://$2/$3" "$4" 2>&1) || exit_code=$?
elif [[ $1 == 's3api' ]]; then
elif [[ $1 == 's3api' ]] || [[ $1 == 'aws' ]]; then
get_object_error=$(send_command aws --no-verify-ssl s3api get-object --bucket "$2" --key "$3" "$4" 2>&1) || exit_code=$?
elif [[ $1 == 's3cmd' ]]; then
get_object_error=$(send_command s3cmd "${S3CMD_OPTS[@]}" --no-check-certificate get "s3://$2/$3" "$4" 2>&1) || exit_code=$?
@@ -73,8 +73,6 @@ get_object_with_user() {
elif [[ $1 == "mc" ]]; then
log 5 "save location: $4"
get_object_error=$(send_command mc --insecure get "$MC_ALIAS/$2/$3" "$4" 2>&1) || exit_code=$?
elif [[ $1 == "rest" ]]; then
get_object_rest_with_user "$5" "$6" "$2" "$3" "$4" || exit_code=$?
else
log 2 "'get_object_with_user' not implemented for client '$1'"
return 1
@@ -93,24 +91,37 @@ get_object_rest() {
log 2 "'get_object_rest' requires bucket name, object name, output file"
return 1
fi
if ! get_object_rest_with_user "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" "$1" "$2" "$3"; then
log 2 "error getting REST object with root user"
return 1
fi
return 0
}
get_object_rest_with_user() {
if [ $# -ne 5 ]; then
log 2 "'get_object_rest_with_user' requires username, password, bucket name, object name, output file"
generate_hash_for_payload ""
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
# shellcheck disable=SC2154
canonical_request="GET
/$1/$2
host:$aws_endpoint_url_address
x-amz-content-sha256:UNSIGNED-PAYLOAD
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-date
UNSIGNED-PAYLOAD"
if ! generate_sts_string "$current_date_time" "$canonical_request"; then
log 2 "error generating sts string"
return 1
fi
if ! result=$(AWS_ACCESS_KEY_ID="$1" AWS_SECRET_ACCESS_KEY="$2" COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$3" OBJECT_KEY="$4" OUTPUT_FILE="$5" ./tests/rest_scripts/get_object.sh 2>&1); then
log 2 "error getting object: $result"
return 1
fi
if [ "$result" != "200" ]; then
log 2 "expected '200', was '$result' ($(cat "$3"))"
get_signature
# shellcheck disable=SC2154
reply=$(send_command curl -w "%{http_code}" -ks "$header://$aws_endpoint_url_address/$1/$2" \
-H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-H "x-amz-content-sha256: UNSIGNED-PAYLOAD" \
-H "x-amz-date: $current_date_time" \
-o "$3" 2>&1)
log 5 "reply: $reply"
if [[ "$reply" != "200" ]]; then
log 2 "get object command returned error: $(cat "$3")"
return 1
fi
return 0

View File

@@ -42,18 +42,4 @@ get_object_legal_hold_rest() {
return 1
fi
return 0
}
get_object_legal_hold_version_id() {
if [[ $# -ne 3 ]]; then
log 2 "'get_object_legal_hold_version_id' command requires bucket, key, version id"
return 1
fi
record_command "get-object-legal-hold" "client:s3api"
if ! legal_hold=$(send_command aws --no-verify-ssl s3api get-object-legal-hold --bucket "$1" --key "$2" --version-id "$3" 2>&1); then
log 2 "error getting object legal hold w/version id: $legal_hold"
return 1
fi
echo "$legal_hold"
return 0
}
}

View File

@@ -36,12 +36,35 @@ get_object_lock_configuration_rest() {
log 2 "'get_object_lock_configuration_rest' requires bucket name"
return 1
fi
if ! result=$(COMMAND_LOG="$COMMAND_LOG" BUCKET_NAME="$1" OUTPUT_FILE="$TEST_FILE_FOLDER/object-lock-config.txt" ./tests/rest_scripts/get_object_lock_config.sh); then
log 2 "error getting lock configuration: $result"
current_date_time=$(date -u +"%Y%m%dT%H%M%SZ")
aws_endpoint_url_address=${AWS_ENDPOINT_URL#*//}
header=$(echo "$AWS_ENDPOINT_URL" | awk -F: '{print $1}')
# shellcheck disable=SC2154
canonical_request="GET
/$1
object-lock=
host:$aws_endpoint_url_address
x-amz-content-sha256:UNSIGNED-PAYLOAD
x-amz-date:$current_date_time
host;x-amz-content-sha256;x-amz-date
UNSIGNED-PAYLOAD"
if ! generate_sts_string "$current_date_time" "$canonical_request"; then
log 2 "error generating sts string"
return 1
fi
if [[ "$result" != "200" ]]; then
log 2 "expected '200', returned '$result': $(cat "$TEST_FILE_FOLDER/object-lock-config.txt")"
get_signature
# shellcheck disable=SC2154
reply=$(send_command curl -w "%{http_code}" -ks "$header://$aws_endpoint_url_address/$1?object-lock" \
-H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$ymd/$AWS_REGION/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=$signature" \
-H "x-amz-content-sha256: UNSIGNED-PAYLOAD" \
-H "x-amz-date: $current_date_time" \
-o "$TEST_FILE_FOLDER/object-lock-config.txt" 2>&1)
log 5 "reply: $reply"
if [[ "$reply" != "200" ]]; then
log 2 "get object command returned error: $(cat "$TEST_FILE_FOLDER/object-lock-config.txt")"
return 1
fi
return 0

View File

@@ -21,7 +21,7 @@ get_object_tagging() {
return 1
fi
local result
if [[ $1 == 's3api' ]]; then
if [[ "$1" == 'aws' ]] || [[ $1 == 's3api' ]]; then
tags=$(send_command aws --no-verify-ssl s3api get-object-tagging --bucket "$2" --key "$3" 2>&1) || result=$?
elif [[ "$1" == 'mc' ]]; then
tags=$(send_command mc --insecure tag list "$MC_ALIAS"/"$2"/"$3" 2>&1) || result=$?

Some files were not shown because too many files have changed in this diff.