Compare commits


83 Commits
v0.3 ... v0.5

Author SHA1 Message Date
Ben McClelland
34830954c3 Merge pull request #178 from versity/ben/deps
fix: upgrade module dependencies
2023-08-01 21:54:10 -07:00
Ben McClelland
77a4a9e3a5 fix: upgrade module dependencies 2023-08-01 21:50:17 -07:00
Ben McClelland
25b02dc8fa Merge pull request #177 from versity/select-object-content-fe
SelectObjectContent FE
2023-08-01 13:37:29 -07:00
jonaustin09
009ceee748 feat: Added FE support for SelectObjectContent action 2023-08-02 00:08:28 +04:00
Ben McClelland
af69adf080 Merge pull request #176 from versity/fix/s3response-cleanup
s3response cleanup
2023-07-31 21:45:18 -07:00
jonaustin09
97847735c8 fix: s3response action responses naming cleanup 2023-07-31 21:41:10 -07:00
Ben McClelland
ac9aa25ff1 Merge pull request #175 from versity/fix/issue-143
Issue 143
2023-07-31 21:38:10 -07:00
Jon Austin
091375fa00 Issue 151 (#174)
* fix: Fixes #151. Fixed DeleteObjects action bugs: Corrected request body serialization type, added return type
2023-07-31 21:36:33 -07:00
Ben McClelland
f1e22b0a4d Merge pull request #173 from versity/fix/issue-168
Issue 168
2023-07-31 21:35:00 -07:00
jonaustin09
3f8c218431 fix: Fixes #143. Fixed action name in bucket creation admin checker response handler 2023-07-31 20:54:16 +04:00
jonaustin09
70818de594 fix: Fixes #168. Changed PutObject existing object error from custom internal error to ErrExistingObjectIsDirectory 2023-07-31 18:17:29 +04:00
Ben McClelland
366ed21ede Merge pull request #172 from versity/fix/issue-152
Issue 152
2023-07-28 21:24:39 -07:00
Ben McClelland
b96da570a7 Merge pull request #171 from versity/fix/issue-153
Issue 153
2023-07-28 21:23:22 -07:00
jonaustin09
898c3efaa0 fix: Fixes #153. Fixed CompleteMultipartUpload invalid ETag error case, fixed UploadPart xattr.Set error 2023-07-28 18:20:07 +04:00
jonaustin09
838a7f9ef9 fix: Fixes #152. Changed CompleteMultiPartUpload invalid payload error to MalformedXML 2023-07-28 18:19:15 +04:00
Jon Austin
bf33b9f5a2 Issue 154 (#169)
* fix: Fixes #154, Changed GetObject range error to InvalidRange
2023-07-27 11:05:40 -07:00
Jon Austin
77080328c1 Issue 156 (#167)
* fix: Fixes #156, Added bucket name validation on bucket creation
2023-07-27 11:04:50 -07:00
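As an illustration of the kind of check this commit refers to (a minimal sketch, not the project's actual validation code): S3 bucket names are 3-63 characters of lowercase letters, digits, dots, and hyphens, beginning and ending with a letter or digit.
```
package main

import (
	"fmt"
	"regexp"
)

// bucketNameRE is a simplified form of the S3 bucket naming rules:
// 3-63 chars, lowercase letters, digits, dots and hyphens,
// starting and ending with a letter or digit.
var bucketNameRE = regexp.MustCompile(`^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$`)

func isValidBucketName(name string) bool {
	return bucketNameRE.MatchString(name)
}

func main() {
	fmt.Println(isValidBucketName("my-bucket")) // true
	fmt.Println(isValidBucketName("My_Bucket")) // false: uppercase and underscore
	fmt.Println(isValidBucketName("ab"))        // false: too short
}
```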
Ben McClelland
b0259ae1de Merge pull request #166 from versity/ben/context 2023-07-27 06:44:43 -07:00
Ben McClelland
884fd029c3 feat: add context to backend calls
This adds a context to the backend interface calls so that the backend
can enable request cancellation. This change doesn't actually implement
any backend handling; it just puts the pieces into place to pass the
context to the backend.
2023-07-26 21:54:12 -07:00
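A rough sketch of the pattern being introduced here (illustrative only; the listObjects helper below is hypothetical, and the real interface appears in the backend.go diff further down): each backend call receives a context.Context so a cancelled request can stop backend work early.
```
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// listObjects is a hypothetical backend call that checks the request
// context between units of work so cancellation can take effect.
func listObjects(ctx context.Context, bucket string) ([]string, error) {
	var keys []string
	for i := 0; i < 5; i++ {
		select {
		case <-ctx.Done():
			// request was cancelled or timed out upstream
			return nil, ctx.Err()
		default:
		}
		keys = append(keys, fmt.Sprintf("%s/obj-%d", bucket, i))
		time.Sleep(10 * time.Millisecond) // stand-in for real I/O
	}
	return keys, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 25*time.Millisecond)
	defer cancel()

	keys, err := listObjects(ctx, "mybucket")
	if errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("listing cancelled:", err)
		return
	}
	fmt.Println(keys)
}
```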
Ben McClelland
36eb6d795f Merge pull request #165 from versity/acl-checker-refactoring
ACL refactoring
2023-07-26 19:06:52 -07:00
Ben McClelland
7de01cc983 Merge pull request #163 from versity/ben/log_cleanup
fix: allow logging to user specified log files
2023-07-26 19:05:39 -07:00
jonaustin09
7fb2a7f9ba feat: ACL refactoring, moved ace parsing from controllers to middleware 2023-07-26 20:54:50 +04:00
Ben McClelland
5a9b744dd1 fix: allow logging to user specified log files
This also cleans up some of the error output sent to stderr.

This adds Shutdown() to the logging interface, so we can keep the
log file open and just append entries.

This adds HangUp() to the logging interface for log rotation.
2023-07-25 23:39:45 -07:00
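A minimal sketch of what such a logger could look like (only the Shutdown and HangUp names come from the commit message above; everything else here is an assumption for illustration): HangUp closes and reopens the log file so an external rotation tool can move the old file aside while the server keeps appending.
```
package main

import (
	"fmt"
	"os"
)

// AuditLogger is a hypothetical interface matching the commit description:
// Shutdown closes the log cleanly, HangUp reopens it for log rotation.
type AuditLogger interface {
	Log(entry string) error
	Shutdown() error
	HangUp() error
}

type fileLogger struct {
	path string
	f    *os.File
}

func newFileLogger(path string) (*fileLogger, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return nil, err
	}
	return &fileLogger{path: path, f: f}, nil
}

func (l *fileLogger) Log(entry string) error {
	_, err := fmt.Fprintln(l.f, entry)
	return err
}

// Shutdown closes the open log file on clean server exit.
func (l *fileLogger) Shutdown() error { return l.f.Close() }

// HangUp closes and reopens the file so a rotated file is released
// and new entries go to the fresh log file.
func (l *fileLogger) HangUp() error {
	if err := l.f.Close(); err != nil {
		return err
	}
	f, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	l.f = f
	return nil
}

func main() {
	l, err := newFileLogger("access.log")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	_ = l.Log("GET /mybucket/key 200")
	_ = l.Shutdown()
}
```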
Ben McClelland
5b31a7bafc Merge pull request #162 from versity/fix/issue-136
Issue 136
2023-07-25 10:03:28 -07:00
Ben McClelland
ee703479d0 Merge pull request #161 from versity/fix/issue-150
Issue 150
2023-07-25 10:02:57 -07:00
Ben McClelland
bedd353d72 Merge pull request #160 from versity/fix/issue-155
Issue 155
2023-07-25 10:02:16 -07:00
Ben McClelland
84fe647b81 Merge pull request #159 from versity/fix/issue-157
Issue 157
2023-07-25 10:00:35 -07:00
jonaustin09
1649c5cafd fix: Added KeyCount property in ListObjectsV2 action result, added a test case for it 2023-07-25 20:44:57 +04:00
jonaustin09
4c451a4822 feat: Added support to add object tags on object creation 2023-07-25 20:42:58 +04:00
jonaustin09
287db7a7b6 fix: Fixed ListObjects marker bug, now it takes the correct query param as marker 2023-07-25 20:31:40 +04:00
jonaustin09
c598ee5416 fix: Added accept-range, Content-range and x-amz-tagging-count headers in GetObject action response, added test cases for these 2023-07-25 20:28:40 +04:00
Ben McClelland
7c08ea44a6 Merge pull request #149 from versity/ben/backend_interface
fix: standardize Backend interface args for s3 types
2023-07-24 08:26:56 -07:00
Ben McClelland
e73d661de1 Merge pull request #148 from versity/ben/admin_cleanup
fix: cleanup unused adminRegion
2023-07-24 08:26:45 -07:00
Ben McClelland
2291c22eaa fix: standardize Backend interface args for s3 types 2023-07-22 22:45:24 -07:00
Ben McClelland
51e818b3e3 fix: cleanup unused adminRegion 2023-07-22 18:53:58 -07:00
Ben McClelland
daa4aa1510 Merge pull request #135 from versity/ben/cleanup
fix: signal.go spelling
2023-07-20 14:03:21 -07:00
Ben McClelland
8765a6c67f fix: signal.go spelling 2023-07-20 13:59:51 -07:00
Ben McClelland
c5a7b5aae1 Merge pull request #134 from versity/event-notif-nats
feat: cleanup nats for kafka similarity
2023-07-20 13:58:10 -07:00
Ben McClelland
2ae39c3ee8 feat: cleanup nats for kafka similarity 2023-07-20 13:54:55 -07:00
Ben McClelland
d0b3139640 Merge pull request #133 from versity/event-notif-nats
Bucket event notifications(nats)
2023-07-20 13:50:53 -07:00
jonaustin09
7bceaaca39 feat: Set up bucket event notifications with nats 2023-07-20 13:36:16 -07:00
Ben McClelland
6f0f527e5f Merge pull request #132 from versity/event-notifications
Bucket event notifications(kafka)
2023-07-20 13:27:40 -07:00
jonaustin09
fe547a19e9 feat: bucket event notifications
Set up the bucket event notifications interface to send AWS-compatible event messages to a configured event service.
The first integrated service is the Kafka message broker, as an option for bucket event notifications.
2023-07-20 11:37:14 -07:00
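A rough sketch of the approach described in this commit (the struct fields and EventSender interface below are illustrative assumptions, not the project's actual types): an S3-style event record is serialized to JSON and handed to whatever event service is configured, such as a Kafka producer.
```
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"
)

// s3EventRecord is a trimmed-down, AWS-compatible event shape
// (illustrative; the real message carries more fields).
type s3EventRecord struct {
	EventVersion string    `json:"eventVersion"`
	EventSource  string    `json:"eventSource"`
	EventName    string    `json:"eventName"`
	EventTime    time.Time `json:"eventTime"`
	Bucket       string    `json:"bucket"`
	Key          string    `json:"key"`
}

// EventSender abstracts the configured event service (Kafka, NATS, ...).
type EventSender interface {
	Send(ctx context.Context, payload []byte) error
}

// stdoutSender is a stand-in sender used only for this example.
type stdoutSender struct{}

func (stdoutSender) Send(_ context.Context, payload []byte) error {
	fmt.Println(string(payload))
	return nil
}

func notifyObjectCreated(ctx context.Context, s EventSender, bucket, key string) error {
	rec := s3EventRecord{
		EventVersion: "2.2",
		EventSource:  "aws:s3",
		EventName:    "s3:ObjectCreated:Put",
		EventTime:    time.Now().UTC(),
		Bucket:       bucket,
		Key:          key,
	}
	payload, err := json.Marshal(rec)
	if err != nil {
		return err
	}
	return s.Send(ctx, payload)
}

func main() {
	_ = notifyObjectCreated(context.Background(), stdoutSender{}, "mybucket", "photos/cat.jpg")
}
```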
Ben McClelland
df7f01f7e2 Merge pull request #129 from versity/audit-logging-setup
feat: Set up audit logging basic structure, set up webhook logger, bu…
2023-07-14 12:50:32 -07:00
Ben McClelland
5aeb96f138 Merge pull request #131 from versity/fix-posix-delete-object
Fix Posix Delete Objects
2023-07-14 12:46:57 -07:00
jonaustin09
ef1de682a4 fix: Error handling for posix DeleteObject function to return an error when the object doesn't exist 2023-07-14 23:41:52 +04:00
jonaustin09
87d61a1eb3 feat: Set up audit logging with webhook URL and root-level access.log file. The CLI enables either webhook or server access logs by providing the flags 2023-07-14 23:40:05 +04:00
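A minimal sketch of the webhook half of this (the entry fields and endpoint below are assumptions for illustration, not the gateway's actual log format): each access-log record is serialized and POSTed to the configured webhook URL.
```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// accessLogEntry is an illustrative access-log record; the real
// gateway likely logs more request details.
type accessLogEntry struct {
	Time   time.Time `json:"time"`
	Method string    `json:"method"`
	Path   string    `json:"path"`
	Status int       `json:"status"`
}

// postToWebhook sends one entry as JSON to the configured webhook URL.
func postToWebhook(url string, e accessLogEntry) error {
	body, err := json.Marshal(e)
	if err != nil {
		return err
	}
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("webhook returned %s", resp.Status)
	}
	return nil
}

func main() {
	entry := accessLogEntry{Time: time.Now().UTC(), Method: "GET", Path: "/mybucket/key", Status: 200}
	// Example URL only: use the URL passed to the gateway's webhook flag.
	if err := postToWebhook("http://localhost:8080/hook", entry); err != nil {
		fmt.Println("audit log webhook:", err)
	}
}
```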
Ben McClelland
18899f8029 Merge pull request #128 from versity/ben/update
update package deps
2023-07-06 20:59:52 -07:00
Ben McClelland
ca28792458 update package deps 2023-07-06 21:56:59 -06:00
Ben McClelland
8c469cbd69 Merge pull request #127 from versity/ben/issue_templates
feat: add issue templates
2023-07-06 20:43:40 -07:00
Ben McClelland
ff4bf23b6b feat: add issue templates 2023-07-06 21:40:57 -06:00
Ben McClelland
38ddbc4712 Merge pull request #126 from versity/admin-api-routing
Admin api routing
2023-07-06 14:42:22 -07:00
jonaustin09
cb193c42b4 fix: Up to date with main 2023-07-06 21:21:59 +04:00
jonaustin09
fbafc6b34c feat: Changed admin api http methods, some cleanup in admin cli commands, bug fix in delete user IAM service 2023-07-06 21:21:20 +04:00
Ben McClelland
d26b8856c1 Merge pull request #125 from versity/v4-auth-payload-support
V4 payload header support
2023-07-06 10:17:01 -07:00
Ben McClelland
23f738f37f Merge pull request #124 from versity/ben/copy_obj
feat: implement posix UploadCopyPart
2023-07-06 10:16:20 -07:00
jonaustin09
a10729b3ff fix: Fixed staticcheck error 2023-07-06 19:14:01 +04:00
jonaustin09
0330685c5c feat: Added support for unsigned, streamable and trailing payload header in sigv4 authentication 2023-07-06 19:03:19 +04:00
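For context (a hedged sketch, not the gateway's auth code): SigV4 signals these payload modes through the x-amz-content-sha256 header, and a server accepting them has to recognize the special values rather than treating them as a hex digest.
```
package main

import "fmt"

// payloadMode classifies the x-amz-content-sha256 header value used by
// SigV4 clients. Any other value is expected to be the hex SHA-256 of
// the request body.
func payloadMode(contentSHA256 string) string {
	switch contentSHA256 {
	case "UNSIGNED-PAYLOAD":
		return "unsigned payload"
	case "STREAMING-AWS4-HMAC-SHA256-PAYLOAD":
		return "signed streaming (chunked) payload"
	case "STREAMING-UNSIGNED-PAYLOAD-TRAILER":
		return "unsigned streaming payload with trailing checksum"
	case "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER":
		return "signed streaming payload with trailing checksum"
	default:
		return "signed payload (hex SHA-256 of the body)"
	}
}

func main() {
	fmt.Println(payloadMode("UNSIGNED-PAYLOAD"))
	fmt.Println(payloadMode("STREAMING-UNSIGNED-PAYLOAD-TRAILER"))
}
```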
Ben McClelland
47dea2db7c feat: implement posix UploadCopyPart 2023-07-05 19:06:19 -07:00
Ben McClelland
db484eb900 Merge pull request #123 from versity/unit-testing-cleanup
Unit testing cleanup
2023-07-03 12:41:09 -07:00
Ben McClelland
140d41de40 Merge pull request #122 from versity/fe-upload-part-copy
Upload-part-copy FE
2023-07-03 12:37:19 -07:00
jonaustin09
39803cb158 feat: Some cleanup in controller unit tests, removed backend unsupported unit tests, added test cases for admin controller functions 2023-07-03 20:35:40 +04:00
jonaustin09
9c858b0396 feat: Added UploadPartCopy action in FE 2023-07-03 18:47:32 +04:00
jonaustin09
f63545c9b7 feat: Added UploadPartCopy action in FE 2023-07-03 17:14:46 +04:00
Ben McClelland
2894d4d5f3 Merge pull request #119 from versity/unit-test-coverage
Unit testing coverage
2023-06-30 12:49:06 -07:00
jonaustin09
46097fbf70 fix: Up to date with main 2023-06-30 22:06:25 +04:00
jonaustin09
9db01362a0 feat: increased unit testing coverage in controllers, utility functions and server functions. Fixed bucket owner bug in putbucketacl. 2 more minor changes in controllers 2023-06-30 22:04:46 +04:00
Ben McClelland
fbd7bce530 Merge pull request #118 from versity/ben/copy_obj
posix: cleanup extra debug output
2023-06-29 11:58:45 -07:00
Ben McClelland
7e34078d6a posix: cleanup extra debug output 2023-06-29 11:18:00 -07:00
Jon Austin
3c69c6922a Integration test cases for HeadBucket, CopyObject, DeleteObject actions (#117)
* feat: Added integration test cases for HeadBucket, CopyObject, DeleteObjects
* feat: Added logger for debugging
2023-06-29 10:40:54 -07:00
Ben McClelland
08db927634 Merge pull request #116 from versity/ben/fix_range
fix range gets with unspecified end range
2023-06-29 09:29:06 -07:00
Ben McClelland
6d99c69953 fix range gets with unspecified end range
The AWS CLI will send range gets of an object with ranges like
the following:
bytes=0-8388607
bytes=8388608-16777215
bytes=16777216-25165823
bytes=25165824-

The last one, with the end offset unspecified, just means the rest of
the object. This fixes the case where only the start offset is given
in the range.
2023-06-28 23:09:49 -07:00
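A simplified sketch of the fix described above (illustrative only; the project's actual change is in the ParseRange diff further down): when the end offset is missing, the length is left open (-1 here) and resolved against the object size later.
```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseByteRange handles "bytes=start-end" and the open-ended
// "bytes=start-" form, returning start and length (-1 = to end of object).
func parseByteRange(header string) (start, length int64, err error) {
	spec, ok := strings.CutPrefix(header, "bytes=")
	if !ok {
		return 0, 0, fmt.Errorf("invalid range %q", header)
	}
	first, last, ok := strings.Cut(spec, "-")
	if !ok {
		return 0, 0, fmt.Errorf("invalid range %q", header)
	}
	start, err = strconv.ParseInt(first, 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid range %q", header)
	}
	if last == "" {
		// "bytes=25165824-" means the rest of the object
		return start, -1, nil
	}
	end, err := strconv.ParseInt(last, 10, 64)
	if err != nil || end < start {
		return 0, 0, fmt.Errorf("invalid range %q", header)
	}
	return start, end - start + 1, nil
}

func main() {
	fmt.Println(parseByteRange("bytes=0-8388607")) // 0 8388608 <nil>
	fmt.Println(parseByteRange("bytes=25165824-")) // 25165824 -1 <nil>
}
```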
Jon Austin
4bfb3d84d3 Acl integration test (#115)
* feat: Added an integration test case for acl actions (get, put), fixed PutBucketAcl action bugs, fixed iam bugs on getting and creating user accounts

* fix: Fixed acl unit tests

* fix: Fixed cli path in exec command in acl integration test

* fix: fixed account creation bug
2023-06-28 19:38:35 -07:00
Jon Austin
30dbd02a83 Tag actions integrations tests (#114)
* feat: Added an integration test case for tag actions (get, put, delete)
2023-06-26 14:25:24 -07:00
2023-06-26 14:25:24 -07:00
Ben McClelland
f8afeec0a0 Merge pull request #112 from versity/ben/readme
update README.md with some content clarifications
2023-06-26 12:30:35 -07:00
Jon Austin
45e3c0922d Tag actions FE (#113)
* feat: Added get-object-tagging, put-object-tagging, delete-object-tagging actions in fe
2023-06-26 12:29:56 -07:00
Ben McClelland
a3f95520a8 update README.md with some content clarifications 2023-06-26 10:18:50 -07:00
Ben McClelland
c45280b7db Merge pull request #111 from versity/ben/tests
add functional tests to github actions
2023-06-26 08:36:39 -07:00
Ben McClelland
77b0759f86 fix full flow missing TestRangeGet test 2023-06-25 11:00:54 -07:00
Ben McClelland
1da0c1ceba add coverage report for actions tests 2023-06-25 10:54:24 -07:00
Ben McClelland
1d476c6d4d add signal handler for clean shutdown 2023-06-25 10:29:14 -07:00
Ben McClelland
c4f5f958eb add functional tests to github actions 2023-06-23 18:38:19 -07:00
Jon Austin
f84cfe58e7 Bench test (#110)
* feat: test CLI command set up for client-side testing, test cases correspond to subcommands, added full-flow test case

* fix: TLS configuration removed

* feat: Added benchmark test for client side testing in the CLI

* fix: Removed unused variables

* fix: fixed staticcheck error
2023-06-23 09:55:04 -07:00
53 changed files with 4925 additions and 3443 deletions

.github/ISSUE_TEMPLATE/bug_report.md (new file, 27 lines)

@@ -0,0 +1,27 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Server Version**
output of
```
./versitygw -version
uname -a
```
**Additional context**
Describe s3 client and version if applicable.

Feature request issue template (new file, 14 lines)

@@ -0,0 +1,14 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Additional context**
Add any other context or screenshots about the feature request here.

.github/workflows/functional.yml (new file, 30 lines)

@@ -0,0 +1,30 @@
name: functional tests
on: pull_request
jobs:
build:
name: RunTests
runs-on: ubuntu-latest
steps:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: 'stable'
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Get Dependencies
run: |
go get -v -t -d ./...
- name: Build and Run
run: |
make testbin
./runtests.sh
- name: Coverage Report
run: |
go tool covdata percent -i=/tmp/covdata

Existing CI workflow (modified)

@@ -24,10 +24,10 @@ jobs:
go get -v -t -d ./...
- name: Build
run: go build -o versitygw cmd/versitygw/*.go
run: make
- name: Test
run: go test -v -timeout 30s -tags=github ./...
run: go test -coverprofile profile.txt -race -v -timeout 30s -tags=github ./...
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest

.gitignore (modified, 2 lines added)

@@ -32,3 +32,5 @@ VERSION
/versitygw.spec
*.tar
*.tar.gz
**/rand.data
/profile.txt

Makefile (modified)

@@ -34,6 +34,9 @@ build: $(BIN)
$(BIN):
$(GOBUILD) $(LDFLAGS) -o $(BIN) cmd/$(BIN)/*.go
testbin:
$(GOBUILD) $(LDFLAGS) -o $(BIN) -cover -race cmd/$(BIN)/*.go
.PHONY: test
test:
$(GOTEST) ./...

README.md (modified)

@@ -1,4 +1,4 @@
# The Versity Gateway: A High-Performance Open Source S3 to File Translation Tool
# The Versity Gateway:<br/>A High-Performance S3 to Storage System Translation Service
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/versity/versitygw/blob/assets/assets/logo-white.svg">
@@ -8,13 +8,11 @@
[![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/versity/versitygw/blob/main/LICENSE)
The Versity Gateway: A High-Performance Open Source S3 to File Translation Tool
Current status: Alpha, in development not yet suited for production use
**Current status:** Alpha, in development not yet suited for production use
See project [documentation](https://github.com/versity/versitygw/wiki) on the wiki.
Versity Gateway, a simple to use tool for seamless inline translation between AWS S3 object commands and file-based storage systems. The Versity Gateway bridges the gap between S3-reliant applications and file storage systems, enabling enhanced compatibility and integration with file based systems while offering exceptional scalability.
Versity Gateway, a simple to use tool for seamless inline translation between AWS S3 object commands and storage systems. The Versity Gateway bridges the gap between S3-reliant applications and other storage systems, enabling enhanced compatibility and integration while offering exceptional scalability.
The server translates incoming S3 API requests and transforms them into equivalent operations to the backend service. By leveraging this gateway server, applications can interact with the S3-compatible API on top of already existing storage systems. This project enables leveraging existing infrastructure investments while seamlessly integrating with S3-compatible systems, offering increased flexibility and compatibility in managing data storage.

ACL handling source (modified)

@@ -42,7 +42,11 @@ type GetBucketAclOutput struct {
}
type AccessControlList struct {
Grants []types.Grant
Grants []types.Grant `xml:"Grant"`
}
type AccessControlPolicy struct {
AccessControlList AccessControlList `xml:"AccessControlList"`
Owner types.Owner
}
func ParseACL(data []byte) (ACL, error) {
@@ -80,69 +84,88 @@ func ParseACLOutput(data []byte) (GetBucketAclOutput, error) {
}, nil
}
func UpdateACL(input *s3.PutBucketAclInput, acl ACL, iam IAMService) error {
func UpdateACL(input *s3.PutBucketAclInput, acl ACL, iam IAMService) ([]byte, error) {
if input == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if acl.Owner != *input.AccessControlPolicy.Owner.ID {
return s3err.GetAPIError(s3err.ErrAccessDenied)
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}
// if the ACL is specified, set the ACL, else replace the grantees
if input.ACL != "" {
acl.ACL = input.ACL
acl.Grantees = []Grantee{}
return nil
} else {
grantees := []Grantee{}
accs := []string{}
if input.GrantRead != nil {
fullControlList, readList, readACPList, writeList, writeACPList := []string{}, []string{}, []string{}, []string{}, []string{}
if *input.GrantFullControl != "" {
fullControlList = splitUnique(*input.GrantFullControl, ",")
fmt.Println(fullControlList)
for _, str := range fullControlList {
grantees = append(grantees, Grantee{Access: str, Permission: "FULL_CONTROL"})
}
}
if *input.GrantRead != "" {
readList = splitUnique(*input.GrantRead, ",")
for _, str := range readList {
grantees = append(grantees, Grantee{Access: str, Permission: "READ"})
}
}
if *input.GrantReadACP != "" {
readACPList = splitUnique(*input.GrantReadACP, ",")
for _, str := range readACPList {
grantees = append(grantees, Grantee{Access: str, Permission: "READ_ACP"})
}
}
if *input.GrantWrite != "" {
writeList = splitUnique(*input.GrantWrite, ",")
for _, str := range writeList {
grantees = append(grantees, Grantee{Access: str, Permission: "WRITE"})
}
}
if *input.GrantWriteACP != "" {
writeACPList = splitUnique(*input.GrantWriteACP, ",")
for _, str := range writeACPList {
grantees = append(grantees, Grantee{Access: str, Permission: "WRITE_ACP"})
}
}
accs = append(append(append(append(fullControlList, readList...), writeACPList...), readACPList...), writeList...)
} else {
cache := make(map[string]bool)
for _, grt := range input.AccessControlPolicy.Grants {
grantees = append(grantees, Grantee{Access: *grt.Grantee.ID, Permission: grt.Permission})
if _, ok := cache[*grt.Grantee.ID]; !ok {
cache[*grt.Grantee.ID] = true
accs = append(accs, *grt.Grantee.ID)
}
}
}
// Check if the specified accounts exist
accList, err := checkIfAccountsExist(accs, iam)
if err != nil {
return nil, err
}
if len(accList) > 0 {
return nil, fmt.Errorf("accounts does not exist: %s", strings.Join(accList, ", "))
}
acl.Grantees = grantees
acl.ACL = ""
}
grantees := []Grantee{}
fullControlList, readList, readACPList, writeList, writeACPList := []string{}, []string{}, []string{}, []string{}, []string{}
if *input.GrantFullControl != "" {
fullControlList = splitUnique(*input.GrantFullControl, ",")
fmt.Println(fullControlList)
for _, str := range fullControlList {
grantees = append(grantees, Grantee{Access: str, Permission: "FULL_CONTROL"})
}
}
if *input.GrantRead != "" {
readList = splitUnique(*input.GrantRead, ",")
for _, str := range readList {
grantees = append(grantees, Grantee{Access: str, Permission: "READ"})
}
}
if *input.GrantReadACP != "" {
readACPList = splitUnique(*input.GrantReadACP, ",")
for _, str := range readACPList {
grantees = append(grantees, Grantee{Access: str, Permission: "READ_ACP"})
}
}
if *input.GrantWrite != "" {
writeList = splitUnique(*input.GrantWrite, ",")
for _, str := range writeList {
grantees = append(grantees, Grantee{Access: str, Permission: "WRITE"})
}
}
if *input.GrantWriteACP != "" {
writeACPList = splitUnique(*input.GrantWriteACP, ",")
for _, str := range writeACPList {
grantees = append(grantees, Grantee{Access: str, Permission: "WRITE_ACP"})
}
}
accs := append(append(append(append(fullControlList, readList...), writeACPList...), readACPList...), writeList...)
// Check if the specified accounts exist
accList, err := checkIfAccountsExist(accs, iam)
result, err := json.Marshal(acl)
if err != nil {
return err
}
if len(accList) > 0 {
return fmt.Errorf("accounts does not exist: %s", strings.Join(accList, ", "))
return nil, err
}
acl.Grantees = grantees
acl.ACL = ""
return nil
return result, nil
}
func checkIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
@@ -153,7 +176,7 @@ func checkIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
if err != nil && err != ErrNoSuchUser {
return nil, fmt.Errorf("check user account: %w", err)
}
if err == nil {
if err == ErrNoSuchUser {
result = append(result, acc)
}
}
@@ -238,5 +261,5 @@ func IsAdmin(access string, isRoot bool) error {
if acc.Role == "admin" {
return nil
}
return fmt.Errorf("only admin users have access to this resource")
return s3err.GetAPIError(s3err.ErrAccessDenied)
}

IAM service interface source (modified)

@@ -25,6 +25,8 @@ type Account struct {
}
// IAMService is the interface for all IAM service implementations
//
//go:generate moq -out ../s3api/controllers/iam_moq_test.go -pkg controllers . IAMService
type IAMService interface {
CreateAccount(access string, account Account) error
GetUserAccount(access string) (Account, error)

Internal IAM service source (modified)

@@ -76,7 +76,7 @@ func (s *IAMServiceInternal) CreateAccount(access string, account Account) error
return nil, fmt.Errorf("failed to parse iam: %w", err)
}
} else {
conf.AccessAccounts = make(map[string]Account)
conf = IAMConfig{AccessAccounts: map[string]Account{}}
}
_, ok := conf.AccessAccounts[access]
@@ -85,10 +85,11 @@ func (s *IAMServiceInternal) CreateAccount(access string, account Account) error
}
conf.AccessAccounts[access] = account
b, err := json.Marshal(s.accts)
b, err := json.Marshal(conf)
if err != nil {
return nil, fmt.Errorf("failed to serialize iam: %w", err)
}
s.accts = conf
return b, nil
})
@@ -168,11 +169,13 @@ func (s *IAMServiceInternal) DeleteUserAccount(access string) error {
delete(conf.AccessAccounts, access)
b, err := json.Marshal(s.accts)
b, err := json.Marshal(conf)
if err != nil {
return nil, fmt.Errorf("failed to serialize iam: %w", err)
}
s.accts = conf
return b, nil
})
}

Backend interface source (modified)

@@ -15,53 +15,52 @@
package backend
import (
"context"
"fmt"
"io"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
//go:generate moq -out backend_moq_test.go . Backend
//go:generate moq -out ../s3api/controllers/backend_moq_test.go -pkg controllers . Backend
type Backend interface {
fmt.Stringer
Shutdown()
ListBuckets() (s3response.ListAllMyBucketsResult, error)
HeadBucket(bucket string) (*s3.HeadBucketOutput, error)
GetBucketAcl(bucket string) ([]byte, error)
PutBucket(bucket, owner string) error
PutBucketAcl(bucket string, data []byte) error
DeleteBucket(bucket string) error
ListBuckets(_ context.Context, owner string, isRoot bool) (s3response.ListAllMyBucketsResult, error)
HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
GetBucketAcl(context.Context, *s3.GetBucketAclInput) ([]byte, error)
CreateBucket(context.Context, *s3.CreateBucketInput) error
PutBucketAcl(_ context.Context, bucket string, data []byte) error
DeleteBucket(context.Context, *s3.DeleteBucketInput) error
CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(*s3.AbortMultipartUploadInput) error
ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error)
PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error)
UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error
ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)
ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error)
UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error)
UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)
SelectObjectContent(context.Context, *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error)
PutObject(*s3.PutObjectInput) (string, error)
HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error)
GetObjectAcl(bucket, object string) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error)
CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error)
ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error)
ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error)
DeleteObject(bucket, object string) error
DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error
PutObjectAcl(*s3.PutObjectAclInput) error
RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error
PutObject(context.Context, *s3.PutObjectInput) (string, error)
HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
GetObject(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error)
GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error)
CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
ListObjects(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
DeleteObject(context.Context, *s3.DeleteObjectInput) error
DeleteObjects(context.Context, *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error)
PutObjectAcl(context.Context, *s3.PutObjectAclInput) error
RestoreObject(context.Context, *s3.RestoreObjectInput) error
GetTags(bucket, object string) (map[string]string, error)
SetTags(bucket, object string, tags map[string]string) error
RemoveTags(bucket, object string) error
GetTags(_ context.Context, bucket, object string) (map[string]string, error)
SetTags(_ context.Context, bucket, object string, tags map[string]string) error
RemoveTags(_ context.Context, bucket, object string) error
}
type BackendUnsupported struct{}
@@ -75,93 +74,93 @@ func (BackendUnsupported) Shutdown() {}
func (BackendUnsupported) String() string {
return "Unsupported"
}
func (BackendUnsupported) ListBuckets() (s3response.ListAllMyBucketsResult, error) {
func (BackendUnsupported) ListBuckets(context.Context, string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketAcl(bucket string, data []byte) error {
func (BackendUnsupported) PutBucketAcl(_ context.Context, bucket string, data []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectAcl(*s3.PutObjectAclInput) error {
func (BackendUnsupported) PutObjectAcl(context.Context, *s3.PutObjectAclInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error {
func (BackendUnsupported) RestoreObject(context.Context, *s3.RestoreObjectInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) {
func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketAcl(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketAcl(bucket string) ([]byte, error) {
func (BackendUnsupported) HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucket(bucket, owner string) error {
func (BackendUnsupported) CreateBucket(context.Context, *s3.CreateBucketInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucket(bucket string) error {
func (BackendUnsupported) DeleteBucket(context.Context, *s3.DeleteBucketInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) SelectObjectContent(context.Context, *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error) {
return s3response.SelectObjectContentResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CreateMultipartUpload(input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
func (BackendUnsupported) CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) error {
func (BackendUnsupported) AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
return s3response.ListMultipartUploadsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
return s3response.ListMultipartUploadsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
return s3response.ListPartsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error) {
return s3response.ListPartsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error) {
func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObject(*s3.PutObjectInput) (string, error) {
func (BackendUnsupported) PutObject(context.Context, *s3.PutObjectInput) (string, error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObject(bucket, object string) error {
func (BackendUnsupported) DeleteObject(context.Context, *s3.DeleteObjectInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) DeleteObjects(context.Context, *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error) {
return s3response.DeleteObjectsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
func (BackendUnsupported) GetObject(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
func (BackendUnsupported) HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAcl(bucket, object string) (*s3.GetObjectAclOutput, error) {
func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAttributes(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error) {
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
func (BackendUnsupported) CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
func (BackendUnsupported) ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetTags(bucket, object string) (map[string]string, error) {
func (BackendUnsupported) GetTags(_ context.Context, bucket, object string) (map[string]string, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) SetTags(bucket, object string, tags map[string]string) error {
func (BackendUnsupported) SetTags(_ context.Context, bucket, object string, tags map[string]string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) RemoveTags(bucket, object string) error {
func (BackendUnsupported) RemoveTags(_ context.Context, bucket, object string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}

File diff suppressed because it is too large.

Backend mock unit tests (file deleted, 222 lines)

@@ -1,222 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package backend
import (
"context"
"testing"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
func TestBackend_ListBuckets(t *testing.T) {
type args struct {
ctx context.Context
}
type test struct {
name string
c Backend
args args
wantErr bool
}
var tests []test
tests = append(tests, test{
name: "list-Bucket",
c: &BackendMock{
ListBucketsFunc: func() (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{
Buckets: s3response.ListAllMyBucketsList{
Bucket: []s3response.ListAllMyBucketsEntry{
{
Name: "t1",
},
},
},
}, s3err.GetAPIError(0)
},
},
args: args{
ctx: context.Background(),
},
wantErr: false,
}, test{
name: "list-Bucket-error",
c: &BackendMock{
ListBucketsFunc: func() (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
args: args{
ctx: context.Background(),
},
wantErr: true,
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if _, err := tt.c.ListBuckets(); (err.(s3err.APIError).Code != "") != tt.wantErr {
t.Errorf("Backend.ListBuckets() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBackend_HeadBucket(t *testing.T) {
type args struct {
ctx context.Context
BucketName string
}
type test struct {
name string
c Backend
args args
wantErr bool
}
var tests []test
tests = append(tests, test{
name: "head-buckets-error",
c: &BackendMock{
HeadBucketFunc: func(bucket string) (*s3.HeadBucketOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
args: args{
ctx: context.Background(),
BucketName: "b1",
},
wantErr: true,
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if _, err := tt.c.HeadBucket(tt.args.BucketName); (err.(s3err.APIError).Code != "") != tt.wantErr {
t.Errorf("Backend.HeadBucket() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBackend_GetBucketAcl(t *testing.T) {
type args struct {
ctx context.Context
bucketName string
}
type test struct {
name string
c Backend
args args
wantErr bool
}
var tests []test
tests = append(tests, test{
name: "get bucket acl error",
c: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
args: args{
ctx: context.Background(),
bucketName: "b1",
},
wantErr: true,
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if _, err := tt.c.GetBucketAcl(tt.args.bucketName); (err.(s3err.APIError).Code != "") != tt.wantErr {
t.Errorf("Backend.GetBucketAcl() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBackend_PutBucket(t *testing.T) {
type args struct {
ctx context.Context
bucketName string
bucketOwner string
}
type test struct {
name string
c Backend
args args
wantErr bool
}
var tests []test
tests = append(tests, test{
name: "put bucket ",
c: &BackendMock{
PutBucketFunc: func(bucket, owner string) error {
return s3err.GetAPIError(0)
},
},
args: args{
ctx: context.Background(),
bucketName: "b1",
bucketOwner: "owner",
},
wantErr: false,
}, test{
name: "put bucket error",
c: &BackendMock{
PutBucketFunc: func(bucket, owner string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
args: args{
ctx: context.Background(),
bucketName: "b2",
bucketOwner: "owner",
},
wantErr: true,
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := tt.c.PutBucket(tt.args.bucketName, tt.args.bucketOwner); (err.(s3err.APIError).Code != "") != tt.wantErr {
t.Errorf("Backend.PutBucket() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBackend_DeleteBucket(t *testing.T) {
type args struct {
ctx context.Context
bucketName string
}
type test struct {
name string
c Backend
args args
wantErr bool
}
var tests []test
tests = append(tests, test{
name: "Delete Bucket Error",
c: &BackendMock{
DeleteBucketFunc: func(bucket string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
args: args{
ctx: context.Background(),
bucketName: "b1",
},
wantErr: true,
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := tt.c.DeleteBucket(tt.args.bucketName); (err.(s3err.APIError).Code != "") != tt.wantErr {
t.Errorf("Backend.DeleteBucket() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}

Backend helper functions source (modified)

@@ -17,7 +17,6 @@ package backend
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io/fs"
"strconv"
@@ -25,6 +24,7 @@ import (
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
@@ -55,6 +55,12 @@ func GetTimePtr(t time.Time) *time.Time {
return &t
}
var (
errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
)
// ParseRange parses input range header and returns startoffset, length, and
// error. If no endoffset specified, then length is set to -1.
func ParseRange(file fs.FileInfo, acceptRange string) (int64, int64, error) {
if acceptRange == "" {
return 0, file.Size(), nil
@@ -63,32 +69,37 @@ func ParseRange(file fs.FileInfo, acceptRange string) (int64, int64, error) {
rangeKv := strings.Split(acceptRange, "=")
if len(rangeKv) < 2 {
return 0, 0, errors.New("invalid range parameter")
return 0, 0, errInvalidRange
}
bRange := strings.Split(rangeKv[1], "-")
if len(bRange) < 2 {
return 0, 0, errors.New("invalid range parameter")
if len(bRange) < 1 || len(bRange) > 2 {
return 0, 0, errInvalidRange
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil {
return 0, 0, errors.New("invalid range parameter")
return 0, 0, errInvalidRange
}
endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
endOffset := int64(-1)
if len(bRange) == 1 || bRange[1] == "" {
return startOffset, endOffset, nil
}
endOffset, err = strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, 0, errors.New("invalid range parameter")
return 0, 0, errInvalidRange
}
if endOffset < startOffset {
return 0, 0, errors.New("invalid range parameter")
return 0, 0, errInvalidRange
}
return int64(startOffset), int64(endOffset - startOffset + 1), nil
return startOffset, endOffset - startOffset + 1, nil
}
func GetMultipartMD5(parts []types.Part) string {
func GetMultipartMD5(parts []types.CompletedPart) string {
var partsEtagBytes []byte
for _, part := range parts {
partsEtagBytes = append(partsEtagBytes, getEtagBytes(*part.ETag)...)

Posix backend source (modified)

@@ -15,6 +15,7 @@
package posix
import (
"context"
"crypto/md5"
"crypto/sha256"
"encoding/hex"
@@ -96,7 +97,7 @@ func (p *Posix) String() string {
return "Posix Gateway"
}
func (p *Posix) ListBuckets() (s3response.ListAllMyBucketsResult, error) {
func (p *Posix) ListBuckets(_ context.Context, owner string, isRoot bool) (s3response.ListAllMyBucketsResult, error) {
entries, err := os.ReadDir(".")
if err != nil {
return s3response.ListAllMyBucketsResult{},
@@ -131,8 +132,8 @@ func (p *Posix) ListBuckets() (s3response.ListAllMyBucketsResult, error) {
}, nil
}
func (p *Posix) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
_, err := os.Lstat(bucket)
func (p *Posix) HeadBucket(_ context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
_, err := os.Lstat(*input.Bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
@@ -143,7 +144,10 @@ func (p *Posix) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
return &s3.HeadBucketOutput{}, nil
}
func (p *Posix) PutBucket(bucket string, owner string) error {
func (p *Posix) CreateBucket(_ context.Context, input *s3.CreateBucketInput) error {
bucket := *input.Bucket
owner := string(input.ObjectOwnership)
err := os.Mkdir(bucket, 0777)
if err != nil && os.IsExist(err) {
return s3err.GetAPIError(s3err.ErrBucketAlreadyExists)
@@ -165,8 +169,8 @@ func (p *Posix) PutBucket(bucket string, owner string) error {
return nil
}
func (p *Posix) DeleteBucket(bucket string) error {
names, err := os.ReadDir(bucket)
func (p *Posix) DeleteBucket(_ context.Context, input *s3.DeleteBucketInput) error {
names, err := os.ReadDir(*input.Bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
@@ -177,13 +181,13 @@ func (p *Posix) DeleteBucket(bucket string) error {
if len(names) == 1 && names[0].Name() == metaTmpDir {
// if .sgwtmp is only item in directory
// then clean this up before trying to remove the bucket
err = os.RemoveAll(filepath.Join(bucket, metaTmpDir))
err = os.RemoveAll(filepath.Join(*input.Bucket, metaTmpDir))
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove temp dir: %w", err)
}
}
err = os.Remove(bucket)
err = os.Remove(*input.Bucket)
if err != nil && err.(*os.PathError).Err == syscall.ENOTEMPTY {
return s3err.GetAPIError(s3err.ErrBucketNotEmpty)
}
@@ -194,7 +198,7 @@ func (p *Posix) DeleteBucket(bucket string) error {
return nil
}
func (p *Posix) CreateMultipartUpload(mpu *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
func (p *Posix) CreateMultipartUpload(_ context.Context, mpu *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
bucket := *mpu.Bucket
object := *mpu.Key
@@ -245,7 +249,12 @@ func (p *Posix) CreateMultipartUpload(mpu *s3.CreateMultipartUploadInput) (*s3.C
}, nil
}
func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
func (p *Posix) CompleteMultipartUpload(_ context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
bucket := *input.Bucket
object := *input.Key
uploadID := *input.UploadId
parts := input.MultipartUpload.Parts
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -286,7 +295,9 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
if err != nil {
etag = ""
}
parts[i].ETag = &etag
if etag != *parts[i].ETag {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
}
f, err := openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, totalsize)
@@ -467,7 +478,7 @@ func mkdirAll(path string, perm os.FileMode, bucket, object string) error {
return nil
}
func (p *Posix) AbortMultipartUpload(mpu *s3.AbortMultipartUploadInput) error {
func (p *Posix) AbortMultipartUpload(_ context.Context, mpu *s3.AbortMultipartUploadInput) error {
bucket := *mpu.Bucket
object := *mpu.Key
uploadID := *mpu.UploadId
@@ -497,7 +508,7 @@ func (p *Posix) AbortMultipartUpload(mpu *s3.AbortMultipartUploadInput) error {
return nil
}
func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
bucket := *mpu.Bucket
var delimiter string
if mpu.Delimiter != nil {
@@ -508,7 +519,7 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
prefix = *mpu.Prefix
}
var lmu s3response.ListMultipartUploadsResponse
var lmu s3response.ListMultipartUploadsResult
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
@@ -584,7 +595,7 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
Initiated: fi.ModTime().Format(backend.RFC3339TimeFormat),
})
if len(uploads) == int(mpu.MaxUploads) {
return s3response.ListMultipartUploadsResponse{
return s3response.ListMultipartUploadsResult{
Bucket: bucket,
Delimiter: delimiter,
IsTruncated: i != len(objs) || j != len(upids),
@@ -600,7 +611,7 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
}
}
return s3response.ListMultipartUploadsResponse{
return s3response.ListMultipartUploadsResult{
Bucket: bucket,
Delimiter: delimiter,
KeyMarker: keyMarker,
@@ -611,8 +622,24 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
}, nil
}
func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
var lpr s3response.ListPartsResponse
func (p *Posix) ListParts(_ context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
bucket := *input.Bucket
object := *input.Key
uploadID := *input.UploadId
stringMarker := *input.PartNumberMarker
maxParts := int(input.MaxParts)
var lpr s3response.ListPartsResult
var partNumberMarker int
if stringMarker != "" {
var err error
partNumberMarker, err = strconv.Atoi(stringMarker)
if err != nil {
return lpr, s3err.GetAPIError(s3err.ErrInvalidPartNumberMarker)
}
}
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return lpr, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -681,7 +708,7 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
upiddir := filepath.Join(objdir, uploadID)
loadUserMetaData(upiddir, userMetaData)
return s3response.ListPartsResponse{
return s3response.ListPartsResult{
Bucket: bucket,
IsTruncated: oldLen != newLen,
Key: object,
@@ -693,11 +720,14 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
}, nil
}
// TODO: copy part
// func (p *Posix) CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error) {
// }
func (p *Posix) UploadPart(_ context.Context, input *s3.UploadPartInput) (string, error) {
bucket := *input.Bucket
object := *input.Key
uploadID := *input.UploadId
part := input.PartNumber
length := input.ContentLength
r := input.Body
func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (string, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -708,6 +738,15 @@ func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length
sum := sha256.Sum256([]byte(object))
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
_, err = os.Stat(filepath.Join(bucket, objdir, uploadID))
if errors.Is(err, fs.ErrNotExist) {
return "", s3err.GetAPIError(s3err.ErrNoSuchUpload)
}
if err != nil {
return "", fmt.Errorf("stat uploadid: %w", err)
}
partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", part))
f, err := openTmpFile(filepath.Join(bucket, objdir),
@@ -715,7 +754,6 @@ func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length
if err != nil {
return "", fmt.Errorf("open temp file: %w", err)
}
defer f.cleanup()
hash := md5.New()
tr := io.TeeReader(r, hash)
@@ -729,14 +767,123 @@ func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length
return "", fmt.Errorf("link object in namespace: %w", err)
}
f.cleanup()
dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum)
xattr.Set(partPath, etagkey, []byte(etag))
xattr.Set(filepath.Join(bucket, partPath), etagkey, []byte(etag))
return etag, nil
}
func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
func (p *Posix) UploadPartCopy(_ context.Context, upi *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
_, err := os.Stat(*upi.Bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return s3response.CopyObjectResult{}, fmt.Errorf("stat bucket: %w", err)
}
sum := sha256.Sum256([]byte(*upi.Key))
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
_, err = os.Stat(filepath.Join(*upi.Bucket, objdir, *upi.UploadId))
if errors.Is(err, fs.ErrNotExist) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchUpload)
}
if err != nil {
return s3response.CopyObjectResult{}, fmt.Errorf("stat uploadid: %w", err)
}
partPath := filepath.Join(objdir, *upi.UploadId, fmt.Sprintf("%v", upi.PartNumber))
substrs := strings.SplitN(*upi.CopySource, "/", 2)
if len(substrs) != 2 {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrInvalidCopySource)
}
srcBucket := substrs[0]
srcObject := substrs[1]
_, err = os.Stat(srcBucket)
if errors.Is(err, fs.ErrNotExist) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return s3response.CopyObjectResult{}, fmt.Errorf("stat bucket: %w", err)
}
objPath := filepath.Join(srcBucket, srcObject)
fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
return s3response.CopyObjectResult{}, fmt.Errorf("stat object: %w", err)
}
startOffset, length, err := backend.ParseRange(fi, *upi.CopySourceRange)
if err != nil {
return s3response.CopyObjectResult{}, err
}
if length == -1 {
length = fi.Size() - startOffset + 1
}
if startOffset+length > fi.Size()+1 {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
f, err := openTmpFile(filepath.Join(*upi.Bucket, objdir),
*upi.Bucket, partPath, length)
if err != nil {
return s3response.CopyObjectResult{}, fmt.Errorf("open temp file: %w", err)
}
defer f.cleanup()
srcf, err := os.Open(objPath)
if errors.Is(err, fs.ErrNotExist) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
return s3response.CopyObjectResult{}, fmt.Errorf("open object: %w", err)
}
defer srcf.Close()
rdr := io.NewSectionReader(srcf, startOffset, length)
hash := md5.New()
tr := io.TeeReader(rdr, hash)
_, err = io.Copy(f, tr)
if err != nil {
return s3response.CopyObjectResult{}, fmt.Errorf("copy part data: %w", err)
}
err = f.link()
if err != nil {
return s3response.CopyObjectResult{}, fmt.Errorf("link object in namespace: %w", err)
}
dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum)
xattr.Set(filepath.Join(*upi.Bucket, partPath), etagkey, []byte(etag))
fi, err = os.Stat(filepath.Join(*upi.Bucket, partPath))
if err != nil {
return s3response.CopyObjectResult{}, fmt.Errorf("stat part path: %w", err)
}
return s3response.CopyObjectResult{
ETag: etag,
LastModified: fi.ModTime(),
}, nil
}
func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, error) {
tagsStr := getString(po.Tagging)
tags := make(map[string]string)
_, err := os.Stat(*po.Bucket)
if errors.Is(err, fs.ErrNotExist) {
return "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -745,6 +892,17 @@ func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
return "", fmt.Errorf("stat bucket: %w", err)
}
if tagsStr != "" {
tagParts := strings.Split(tagsStr, "&")
for _, prt := range tagParts {
p := strings.Split(prt, "=")
if len(p) != 2 {
return "", s3err.GetAPIError(s3err.ErrInvalidTag)
}
tags[p[0]] = p[1]
}
}
name := filepath.Join(*po.Bucket, *po.Key)
if strings.HasSuffix(*po.Key, "/") {
@@ -788,13 +946,20 @@ func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
err = f.link()
if err != nil {
return "", fmt.Errorf("link object in namespace: %w", err)
return "", s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)
}
for k, v := range po.Metadata {
xattr.Set(name, "user."+k, []byte(v))
}
if tagsStr != "" {
err := p.SetTags(ctx, *po.Bucket, *po.Key, tags)
if err != nil {
return "", err
}
}
dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum[:])
xattr.Set(name, etagkey, []byte(etag))
@@ -802,7 +967,10 @@ func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
return etag, nil
}
func (p *Posix) DeleteObject(bucket, object string) error {
func (p *Posix) DeleteObject(_ context.Context, input *s3.DeleteObjectInput) error {
bucket := *input.Bucket
object := *input.Key
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -811,7 +979,7 @@ func (p *Posix) DeleteObject(bucket, object string) error {
return fmt.Errorf("stat bucket: %w", err)
}
os.Remove(filepath.Join(bucket, object))
err = os.Remove(filepath.Join(bucket, object))
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchKey)
}
@@ -856,19 +1024,47 @@ func (p *Posix) removeParents(bucket, object string) error {
return nil
}
func (p *Posix) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error {
func (p *Posix) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error) {
// delete object already checks bucket
for _, obj := range objects.Delete.Objects {
err := p.DeleteObject(bucket, *obj.Key)
if err != nil {
return err
delResult, errs := []types.DeletedObject{}, []types.Error{}
for _, obj := range input.Delete.Objects {
//TODO: Make the delete operation concurrent
err := p.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: input.Bucket,
Key: obj.Key,
})
if err == nil {
delResult = append(delResult, types.DeletedObject{Key: obj.Key})
} else {
serr, ok := err.(s3err.APIError)
if ok {
errs = append(errs, types.Error{
Key: obj.Key,
Code: &serr.Code,
Message: &serr.Description,
})
} else {
errs = append(errs, types.Error{
Key: obj.Key,
Code: getStringPtr("InternalError"),
Message: getStringPtr(err.Error()),
})
}
}
}
return nil
return s3response.DeleteObjectsResult{
Deleted: delResult,
Errors: errs,
}, nil
}
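With this rework, DeleteObjects no longer stops at the first failure: every key lands either in Deleted or in Errors, and per-object problems no longer surface as the method's error value. A hedged sketch of how a caller might consume that result, assuming the backend interface matches the Posix signature shown above; the helper name, bucket handling, and logging are illustrative:

// deleteAll is illustrative only: it feeds a list of keys to the reworked
// DeleteObjects and reports per-object failures instead of aborting on the
// first error. Assumes the aws-sdk-go-v2 s3 and s3/types packages plus a
// backend value implementing the interface shown above.
func deleteAll(ctx context.Context, be backend.Backend, bucket string, keys []string) {
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for i := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: &keys[i]})
	}
	res, err := be.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: &bucket,
		Delete: &types.Delete{Objects: ids},
	})
	if err != nil {
		log.Printf("delete objects: %v", err) // unexpected failure only
		return
	}
	log.Printf("deleted %d objects", len(res.Deleted))
	for _, e := range res.Errors {
		log.Printf("delete %s failed: %s", *e.Key, *e.Code)
	}
}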
func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
bucket := *input.Bucket
object := *input.Key
acceptRange := *input.Range
var contentRange string
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -891,11 +1087,18 @@ func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer)
return nil, err
}
if startOffset+length > fi.Size() {
// TODO: is ErrInvalidRequest correct here?
if length == -1 {
length = fi.Size() - startOffset + 1
}
if startOffset+length > fi.Size()+1 {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if acceptRange != "" {
contentRange = fmt.Sprintf("bytes %v-%v/%v", startOffset, startOffset+length-1, fi.Size())
}
f, err := os.Open(objPath)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
@@ -935,10 +1138,14 @@ func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer)
LastModified: backend.GetTimePtr(fi.ModTime()),
Metadata: userMetaData,
TagCount: int32(len(tags)),
ContentRange: &contentRange,
}, nil
}
func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
func (p *Posix) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
bucket := *input.Bucket
object := *input.Key
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -975,7 +1182,14 @@ func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
}, nil
}
func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
srcBucket, srcObject, ok := strings.Cut(*input.CopySource, "/")
if !ok {
return nil, s3err.GetAPIError(s3err.ErrInvalidCopySource)
}
dstBucket := *input.Bucket
dstObject := *input.Key
_, err := os.Stat(srcBucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -984,7 +1198,7 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
return nil, fmt.Errorf("stat bucket: %w", err)
}
_, err = os.Stat(DstBucket)
_, err = os.Stat(dstBucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
@@ -1002,12 +1216,17 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
}
defer f.Close()
etag, err := p.PutObject(&s3.PutObjectInput{Bucket: &DstBucket, Key: &dstObject, Body: f})
fInfo, err := f.Stat()
if err != nil {
return nil, fmt.Errorf("stat object: %w", err)
}
etag, err := p.PutObject(ctx, &s3.PutObjectInput{Bucket: &dstBucket, Key: &dstObject, Body: f, ContentLength: fInfo.Size()})
if err != nil {
return nil, err
}
fi, err := os.Stat(filepath.Join(DstBucket, dstObject))
fi, err := os.Stat(filepath.Join(dstBucket, dstObject))
if err != nil {
return nil, fmt.Errorf("stat dst object: %w", err)
}
@@ -1020,7 +1239,13 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
}, nil
}
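CopyObject now derives the source from the request's CopySource field, which S3 encodes as "source-bucket/source-key"; strings.Cut splits only at the first "/", so keys that themselves contain slashes survive intact, and a value with no "/" maps to ErrInvalidCopySource. A small illustration with made-up names:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// CopySource format: "source-bucket/source-key". Cut splits at the
	// first "/" only, so the rest of the key keeps its slashes.
	bucket, key, ok := strings.Cut("photos/2023/07/cat.jpg", "/")
	fmt.Println(ok, bucket, key) // true photos 2023/07/cat.jpg
}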
func (p *Posix) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
func (p *Posix) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
bucket := *input.Bucket
prefix := *input.Prefix
marker := *input.Marker
delim := *input.Delimiter
maxkeys := input.MaxKeys
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1042,7 +1267,7 @@ func (p *Posix) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (
Delimiter: &delim,
IsTruncated: results.Truncated,
Marker: &marker,
MaxKeys: int32(maxkeys),
MaxKeys: maxkeys,
Name: &bucket,
NextMarker: &results.NextMarker,
Prefix: &prefix,
@@ -1110,7 +1335,13 @@ func fileToObj(bucket string) backend.GetObjFunc {
}
}
func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
func (p *Posix) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
bucket := *input.Bucket
prefix := *input.Prefix
marker := *input.ContinuationToken
delim := *input.Delimiter
maxkeys := input.MaxKeys
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1120,7 +1351,7 @@ func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int)
}
fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
results, err := backend.Walk(fileSystem, prefix, delim, marker, int32(maxkeys),
fileToObj(bucket), []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
@@ -1136,10 +1367,11 @@ func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int)
Name: &bucket,
NextContinuationToken: &results.NextMarker,
Prefix: &prefix,
KeyCount: int32(len(results.Objects)),
}, nil
}
func (p *Posix) PutBucketAcl(bucket string, data []byte) error {
func (p *Posix) PutBucketAcl(_ context.Context, bucket string, data []byte) error {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1155,8 +1387,8 @@ func (p *Posix) PutBucketAcl(bucket string, data []byte) error {
return nil
}
func (p *Posix) GetBucketAcl(bucket string) ([]byte, error) {
_, err := os.Stat(bucket)
func (p *Posix) GetBucketAcl(_ context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
_, err := os.Stat(*input.Bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
@@ -1164,7 +1396,7 @@ func (p *Posix) GetBucketAcl(bucket string) ([]byte, error) {
return nil, fmt.Errorf("stat bucket: %w", err)
}
b, err := xattr.Get(bucket, aclkey)
b, err := xattr.Get(*input.Bucket, aclkey)
if isNoAttr(err) {
return []byte{}, nil
}
@@ -1174,7 +1406,7 @@ func (p *Posix) GetBucketAcl(bucket string) ([]byte, error) {
return b, nil
}
func (p *Posix) GetTags(bucket, object string) (map[string]string, error) {
func (p *Posix) GetTags(_ context.Context, bucket, object string) (map[string]string, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1207,7 +1439,7 @@ func (p *Posix) getXattrTags(bucket, object string) (map[string]string, error) {
return tags, nil
}
func (p *Posix) SetTags(bucket, object string, tags map[string]string) error {
func (p *Posix) SetTags(_ context.Context, bucket, object string, tags map[string]string) error {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1243,8 +1475,8 @@ func (p *Posix) SetTags(bucket, object string, tags map[string]string) error {
return nil
}
func (p *Posix) RemoveTags(bucket, object string) error {
return p.SetTags(bucket, object, nil)
func (p *Posix) RemoveTags(ctx context.Context, bucket, object string) error {
return p.SetTags(ctx, bucket, object, nil)
}
const (
@@ -1257,7 +1489,7 @@ func (p *Posix) InitIAM() error {
_, err := os.ReadFile(iamFile)
if errors.Is(err, fs.ErrNotExist) {
b, err := json.Marshal(auth.IAMConfig{})
b, err := json.Marshal(auth.IAMConfig{AccessAccounts: map[string]auth.Account{}})
if err != nil {
return fmt.Errorf("marshal default iam: %w", err)
}
@@ -1452,3 +1684,14 @@ func isNoAttr(err error) bool {
}
return false
}
func getString(str *string) string {
if str == nil {
return ""
}
return *str
}
func getStringPtr(str string) *string {
return &str
}


@@ -15,6 +15,7 @@
package scoutfs
import (
"context"
"crypto/sha256"
"encoding/json"
"errors"
@@ -114,7 +115,12 @@ func (*ScoutFS) String() string {
// CompleteMultipartUpload scoutfs complete upload uses scoutfs move blocks
// ioctl to not have to read and copy the part data to the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
func (s *ScoutFS) CompleteMultipartUpload(_ context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
bucket := *input.Bucket
object := *input.Key
uploadID := *input.UploadId
parts := input.MultipartUpload.Parts
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -160,7 +166,9 @@ func (s *ScoutFS) CompleteMultipartUpload(bucket, object, uploadID string, parts
if err != nil {
etag = ""
}
parts[i].ETag = &etag
if etag != *parts[i].ETag {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
}
// use totalsize=0 because we won't be writing to the file, only moving
@@ -347,7 +355,10 @@ func mkdirAll(path string, perm os.FileMode, bucket, object string) error {
return nil
}
func (s *ScoutFS) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
func (s *ScoutFS) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
bucket := *input.Bucket
object := *input.Key
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -417,7 +428,11 @@ func (s *ScoutFS) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error
}, nil
}
func (s *ScoutFS) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
bucket := *input.Bucket
object := *input.Key
acceptRange := *input.Range
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -440,8 +455,11 @@ func (s *ScoutFS) GetObject(bucket, object, acceptRange string, writer io.Writer
return nil, err
}
if length == -1 {
length = fi.Size() - startOffset + 1
}
if startOffset+length > fi.Size() {
// TODO: is ErrInvalidRequest correct here?
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
@@ -524,7 +542,13 @@ func (s *ScoutFS) getXattrTags(bucket, object string) (map[string]string, error)
return tags, nil
}
func (s *ScoutFS) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
func (s *ScoutFS) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
bucket := *input.Bucket
prefix := *input.Prefix
marker := *input.Marker
delim := *input.Delimiter
maxkeys := input.MaxKeys
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -546,14 +570,20 @@ func (s *ScoutFS) ListObjects(bucket, prefix, marker, delim string, maxkeys int)
Delimiter: &delim,
IsTruncated: results.Truncated,
Marker: &marker,
MaxKeys: int32(maxkeys),
MaxKeys: maxkeys,
Name: &bucket,
NextMarker: &results.NextMarker,
Prefix: &prefix,
}, nil
}
func (s *ScoutFS) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
func (s *ScoutFS) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
bucket := *input.Bucket
prefix := *input.Prefix
marker := *input.ContinuationToken
delim := *input.Delimiter
maxkeys := input.MaxKeys
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -563,7 +593,7 @@ func (s *ScoutFS) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys in
}
fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
results, err := backend.Walk(fileSystem, prefix, delim, marker, int32(maxkeys),
s.fileToObj(bucket), []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
@@ -660,7 +690,10 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
// RestoreObject will set stage request on file if offline and do nothing if
// file is online
func (s *ScoutFS) RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error {
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
bucket := *input.Bucket
object := *input.Key
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)


@@ -38,7 +38,7 @@ var ErrSkipObj = errors.New("skip this object")
// Walk walks the supplied fs.FS and returns results compatible with list
// objects responses
func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
cpmap := make(map[string]struct{})
var objects []types.Object
@@ -129,7 +129,7 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
}
objects = append(objects, obj)
if max > 0 && (len(objects)+len(cpmap)) == max {
if max > 0 && (len(objects)+len(cpmap)) == int(max) {
pastMax = true
}
@@ -168,7 +168,7 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
return fmt.Errorf("file to object %q: %w", path, err)
}
objects = append(objects, obj)
if (len(objects) + len(cpmap)) == max {
if (len(objects) + len(cpmap)) == int(max) {
pastMax = true
}
return nil
@@ -178,7 +178,7 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
// These are abstractly a "directory", so need to include the
// delimiter at the end.
cpmap[prefix+before+delimiter] = struct{}{}
if (len(objects) + len(cpmap)) == max {
if (len(objects) + len(cpmap)) == int(max) {
pastMax = true
}


@@ -30,7 +30,6 @@ import (
var (
adminAccess string
adminSecret string
adminRegion string
)
func adminCommand() *cli.Command {
@@ -65,12 +64,6 @@ func adminCommand() *cli.Command {
Required: true,
Aliases: []string{"r"},
},
&cli.StringFlag{
Name: "region",
Usage: "s3 region string for the user",
Value: "us-east-1",
Aliases: []string{"rg"},
},
},
},
{
@@ -90,40 +83,33 @@ func adminCommand() *cli.Command {
Flags: []cli.Flag{
// TODO: create a configuration file for this
&cli.StringFlag{
Name: "adminAccess",
Name: "access",
Usage: "admin access account",
EnvVars: []string{"ADMIN_ACCESS_KEY_ID", "ADMIN_ACCESS_KEY"},
Aliases: []string{"aa"},
Aliases: []string{"a"},
Destination: &adminAccess,
},
&cli.StringFlag{
Name: "adminSecret",
Name: "secret",
Usage: "admin secret access key",
EnvVars: []string{"ADMIN_SECRET_ACCESS_KEY", "ADMIN_SECRET_KEY"},
Aliases: []string{"as"},
Aliases: []string{"s"},
Destination: &adminSecret,
},
&cli.StringFlag{
Name: "adminRegion",
Usage: "s3 region string",
Value: "us-east-1",
Destination: &adminRegion,
Aliases: []string{"ar"},
},
},
}
}
func createUser(ctx *cli.Context) error {
access, secret, role, region := ctx.String("access"), ctx.String("secret"), ctx.String("role"), ctx.String("region")
if access == "" || secret == "" || region == "" {
access, secret, role := ctx.String("access"), ctx.String("secret"), ctx.String("role")
if access == "" || secret == "" {
return fmt.Errorf("invalid input parameters for the new user")
}
if role != "admin" && role != "user" {
return fmt.Errorf("invalid input parameter for role")
}
req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://localhost:7070/create-user?access=%v&secret=%v&role=%v&region=%v", access, secret, role, region), nil)
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("http://localhost:7070/create-user?access=%v&secret=%v&role=%v", access, secret, role), nil)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
@@ -135,7 +121,7 @@ func createUser(ctx *cli.Context) error {
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -152,7 +138,7 @@ func createUser(ctx *cli.Context) error {
return err
}
fmt.Printf("%s", body)
fmt.Printf("%s\n", body)
return nil
}
@@ -163,7 +149,7 @@ func deleteUser(ctx *cli.Context) error {
return fmt.Errorf("invalid input parameter for the new user")
}
req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("http://localhost:7070/delete-user?access=%v", access), nil)
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("http://localhost:7070/delete-user?access=%v", access), nil)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
@@ -175,7 +161,7 @@ func deleteUser(ctx *cli.Context) error {
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
@@ -192,7 +178,7 @@ func deleteUser(ctx *cli.Context) error {
return err
}
fmt.Printf("%s", body)
fmt.Printf("%s\n", body)
return nil
}


@@ -15,6 +15,7 @@
package main
import (
"context"
"crypto/tls"
"fmt"
"log"
@@ -26,15 +27,21 @@ import (
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api"
"github.com/versity/versitygw/s3api/middlewares"
"github.com/versity/versitygw/s3event"
"github.com/versity/versitygw/s3log"
)
var (
port string
rootUserAccess string
rootUserSecret string
region string
certFile, keyFile string
debug bool
port string
rootUserAccess string
rootUserSecret string
region string
certFile, keyFile string
kafkaURL, kafkaTopic, kafkaKey string
natsURL, natsTopic string
logWebhookURL string
accessLog string
debug bool
)
var (
@@ -47,6 +54,8 @@ var (
)
func main() {
setupSignalHandler()
app := initApp()
app.Commands = []*cli.Command{
@@ -56,7 +65,14 @@ func main() {
testCommand(),
}
if err := app.Run(os.Args); err != nil {
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-sigDone
fmt.Fprintf(os.Stderr, "terminating signal caught, shutting down\n")
cancel()
}()
if err := app.RunContext(ctx, os.Args); err != nil {
log.Fatal(err)
}
}
@@ -131,10 +147,52 @@ func initFlags() []cli.Flag {
Usage: "enable debug output",
Destination: &debug,
},
&cli.StringFlag{
Name: "access-log",
Usage: "enable server access logging to specified file",
EnvVars: []string{"LOGFILE"},
Destination: &accessLog,
},
&cli.StringFlag{
Name: "log-webhook-url",
Usage: "webhook url to send the audit logs",
EnvVars: []string{"WEBHOOK"},
Destination: &logWebhookURL,
},
&cli.StringFlag{
Name: "event-kafka-url",
Usage: "kafka server url to send the bucket notifications.",
Destination: &kafkaURL,
Aliases: []string{"eku"},
},
&cli.StringFlag{
Name: "event-kafka-topic",
Usage: "kafka server pub-sub topic to send the bucket notifications to",
Destination: &kafkaTopic,
Aliases: []string{"ekt"},
},
&cli.StringFlag{
Name: "event-kafka-key",
Usage: "kafka server put-sub topic key to send the bucket notifications to",
Destination: &kafkaKey,
Aliases: []string{"ekk"},
},
&cli.StringFlag{
Name: "event-nats-url",
Usage: "nats server url to send the bucket notifications",
Destination: &natsURL,
Aliases: []string{"enu"},
},
&cli.StringFlag{
Name: "event-nats-topic",
Usage: "nats server pub-sub topic to send the bucket notifications to",
Destination: &natsTopic,
Aliases: []string{"ent"},
},
}
}
func runGateway(be backend.Backend, s auth.Storer) error {
func runGateway(ctx *cli.Context, be backend.Backend, s auth.Storer) error {
app := fiber.New(fiber.Config{
AppName: "versitygw",
ServerHeader: "VERSITYGW",
@@ -172,13 +230,62 @@ func runGateway(be backend.Backend, s auth.Storer) error {
return fmt.Errorf("setup internal iam service: %w", err)
}
logger, err := s3log.InitLogger(&s3log.LogConfig{
LogFile: accessLog,
WebhookURL: logWebhookURL,
})
if err != nil {
return fmt.Errorf("setup logger: %w", err)
}
evSender, err := s3event.InitEventSender(&s3event.EventConfig{
KafkaURL: kafkaURL,
KafkaTopic: kafkaTopic,
KafkaTopicKey: kafkaKey,
NatsURL: natsURL,
NatsTopic: natsTopic,
})
if err != nil {
return fmt.Errorf("unable to connect to the message broker: %w", err)
}
srv, err := s3api.New(app, be, middlewares.RootUserConfig{
Access: rootUserAccess,
Secret: rootUserSecret,
}, port, region, iam, opts...)
}, port, region, iam, logger, evSender, opts...)
if err != nil {
return fmt.Errorf("init gateway: %v", err)
}
return srv.Serve()
c := make(chan error, 1)
go func() { c <- srv.Serve() }()
// for/select blocks until shutdown
Loop:
for {
select {
case <-ctx.Done():
err = ctx.Err()
break Loop
case err = <-c:
break Loop
case <-sigHup:
if logger != nil {
err = logger.HangUp()
if err != nil {
err = fmt.Errorf("HUP logger: %w", err)
break Loop
}
}
}
}
be.Shutdown()
if logger != nil {
lerr := logger.Shutdown()
if lerr != nil {
fmt.Fprintf(os.Stderr, "shutdown logger: %v\n", lerr)
}
}
return err
}


@@ -49,5 +49,5 @@ func runPosix(ctx *cli.Context) error {
return fmt.Errorf("init posix: %v", err)
}
return runGateway(be, be)
return runGateway(ctx, be, be)
}


@@ -69,5 +69,5 @@ func runScoutfs(ctx *cli.Context) error {
return fmt.Errorf("init scoutfs: %v", err)
}
return runGateway(be, be)
return runGateway(ctx, be, be)
}

cmd/versitygw/signal.go

@@ -0,0 +1,44 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
)
var (
sigDone = make(chan bool, 1)
sigHup = make(chan bool, 1)
)
func setupSignalHandler() {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
go func() {
for sig := range sigs {
fmt.Fprintf(os.Stderr, "caught signal %v\n", sig)
switch sig {
case syscall.SIGINT, syscall.SIGTERM:
sigDone <- true
case syscall.SIGHUP:
sigHup <- true
}
}
}()
}


@@ -8,9 +8,19 @@ import (
)
var (
awsID string
awsSecret string
endpoint string
awsID string
awsSecret string
endpoint string
prefix string
dstBucket string
partSize int64
objSize int64
concurrency int
files int
upload bool
download bool
pathStyle bool
checksumDisable bool
)
func testCommand() *cli.Command {
@@ -58,17 +68,20 @@ func initTestFlags() []cli.Flag {
func initTestCommands() []*cli.Command {
return []*cli.Command{
{
Name: "make-bucket",
Usage: "Test bucket creation.",
Name: "bucket-actions",
Usage: "Test bucket creation, checking the existence, deletes it.",
Description: `Calls s3 gateway create-bucket action to create a new bucket,
then calls delete-bucket action to delete the bucket.`,
calls head-bucket action to check that the bucket exists, then calls delete-bucket action to delete it.`,
Action: getAction(integration.TestMakeBucket),
},
{
Name: "put-get-object",
Usage: "Test put & get object.",
Name: "object-actions",
Usage: "Test put/get/delete/copy objects.",
Description: `Creates a bucket with s3 gateway action, puts an object in it,
gets the object from the bucket, deletes both the object and bucket.`,
tries to copy it into another bucket that doesn't exist, creates the destination bucket for copying,
copies the object, gets the original object to check its length and content,
gets the copied object to check its length and content, deletes all the objects inside the source bucket,
then deletes the remaining objects and both buckets.`,
Action: getAction(integration.TestPutGetObject),
},
{
@@ -147,12 +160,127 @@ func initTestCommands() []*cli.Command {
removes both the object and bucket`,
Action: getAction(integration.TestInvalidMultiParts),
},
{
Name: "object-tag-actions",
Usage: "Tests get/put/delete object tag actions.",
Description: `Creates a bucket with s3 gateway action, puts an object in it,
puts some tags on the object, gets the tags, compares the results, removes the tags,
gets the tags again, checks that they are empty, then removes both the object and bucket`,
Action: getAction(integration.TestPutGetRemoveTags),
},
{
Name: "bucket-acl-actions",
Usage: "Tests put/get bucket actions.",
Description: `Creates a bucket with s3 gateway action, puts some bucket acls
gets the acl, verifies it, then removes the bucket`,
Action: getAction(integration.TestAclActions),
},
{
Name: "full-flow",
Usage: "Tests the full flow of gateway.",
Description: `Runs all the available tests to test the full flow of the gateway.`,
Action: getAction(integration.TestFullFlow),
},
{
Name: "bench",
Usage: "Runs download/upload performance test on the gateway",
Description: `Uploads/downloads a number of files (set by flags) of a given size in bytes,
then logs the results to the console`,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "files",
Usage: "Number of objects to read/write",
Value: 1,
Destination: &files,
},
&cli.Int64Flag{
Name: "objsize",
Usage: "Uploading object size",
Value: 0,
Destination: &objSize,
},
&cli.StringFlag{
Name: "prefix",
Usage: "Object name prefix",
Destination: &prefix,
},
&cli.BoolFlag{
Name: "upload",
Usage: "Upload data to the gateway",
Value: false,
Destination: &upload,
},
&cli.BoolFlag{
Name: "download",
Usage: "Download data to the gateway",
Value: false,
Destination: &download,
},
&cli.StringFlag{
Name: "bucket",
Usage: "Destination bucket name to read/write data",
Destination: &dstBucket,
},
&cli.Int64Flag{
Name: "partSize",
Usage: "Upload/download size per thread",
Value: 64 * 1024 * 1024,
Destination: &partSize,
},
&cli.IntFlag{
Name: "concurrency",
Usage: "Upload/download threads per object",
Value: 1,
Destination: &concurrency,
},
&cli.BoolFlag{
Name: "pathStyle",
Usage: "Use Pathstyle bucket addressing",
Value: false,
Destination: &pathStyle,
},
&cli.BoolFlag{
Name: "checksumDis",
Usage: "Disable server checksum",
Value: false,
Destination: &checksumDisable,
},
},
Action: func(ctx *cli.Context) error {
if upload && download {
return fmt.Errorf("must only specify one of upload or download")
}
if !upload && !download {
return fmt.Errorf("must specify one of upload or download")
}
if dstBucket == "" {
return fmt.Errorf("must specify bucket")
}
opts := []integration.Option{
integration.WithAccess(awsID),
integration.WithSecret(awsSecret),
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithConcurrency(concurrency),
integration.WithPartSize(partSize),
}
if debug {
opts = append(opts, integration.WithDebug())
}
if pathStyle {
opts = append(opts, integration.WithPathStyle())
}
if checksumDisable {
opts = append(opts, integration.WithDisableChecksum())
}
s3conf := integration.NewS3Conf(opts...)
return integration.TestPerformance(s3conf, upload, download, files, objSize, dstBucket, prefix)
},
},
}
}
@@ -175,6 +303,9 @@ func getAction(tf testFunc) func(*cli.Context) error {
fmt.Println()
fmt.Println("RAN:", integration.RunCount, "PASS:", integration.PassCount, "FAIL:", integration.FailCount)
if integration.FailCount > 0 {
return fmt.Errorf("test failed with %v errors", integration.FailCount)
}
return nil
}
}

go.mod

@@ -3,51 +3,57 @@ module github.com/versity/versitygw
go 1.20
require (
github.com/aws/aws-sdk-go-v2 v1.18.1
github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0
github.com/aws/smithy-go v1.13.5
github.com/gofiber/fiber/v2 v2.46.0
github.com/aws/aws-sdk-go-v2 v1.20.0
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.1
github.com/aws/smithy-go v1.14.0
github.com/gofiber/fiber/v2 v2.48.0
github.com/google/uuid v1.3.0
github.com/nats-io/nats.go v1.28.0
github.com/pkg/xattr v0.4.9
github.com/urfave/cli/v2 v2.25.6
github.com/valyala/fasthttp v1.47.0
github.com/segmentio/kafka-go v0.4.42
github.com/urfave/cli/v2 v2.25.7
github.com/valyala/fasthttp v1.48.0
github.com/versity/scoutfs-go v0.0.0-20230606232754-0474b14343b9
golang.org/x/sys v0.9.0
golang.org/x/sys v0.10.0
)
require (
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.7 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.38 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.13.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.21.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/nats-io/nats-server/v2 v2.9.20 // indirect
github.com/nats-io/nkeys v0.4.4 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/stretchr/testify v1.8.1 // indirect
golang.org/x/crypto v0.11.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
)
require (
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.27
github.com/aws/aws-sdk-go-v2/credentials v1.13.26
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.11 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.32
github.com/aws/aws-sdk-go-v2/credentials v1.13.31
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.76
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.37 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.31 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.12 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.32 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.31 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/klauspost/compress v1.16.6 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 // indirect
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee // indirect
github.com/tinylib/msgp v1.1.8 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect

go.sum

@@ -1,144 +1,167 @@
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/aws/aws-sdk-go-v2 v1.18.1 h1:+tefE750oAb7ZQGzla6bLkOwfcQCEtC5y2RqoqCeqKo=
github.com/aws/aws-sdk-go-v2 v1.18.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
github.com/aws/aws-sdk-go-v2/config v1.18.27 h1:Az9uLwmssTE6OGTpsFqOnaGpLnKDqNYOJzWuC6UAYzA=
github.com/aws/aws-sdk-go-v2/config v1.18.27/go.mod h1:0My+YgmkGxeqjXZb5BYme5pc4drjTnM+x1GJ3zv42Nw=
github.com/aws/aws-sdk-go-v2/credentials v1.13.26 h1:qmU+yhKmOCyujmuPY7tf5MxR/RKyZrOPO3V4DobiTUk=
github.com/aws/aws-sdk-go-v2/credentials v1.13.26/go.mod h1:GoXt2YC8jHUBbA4jr+W3JiemnIbkXOfxSXcisUsZ3os=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 h1:LxK/bitrAr4lnh9LnIS6i7zWbCOdMsfzKFBI6LUCS0I=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4/go.mod h1:E1hLXN/BL2e6YizK1zFlYd8vsfi2GTjbjBazinMmeaM=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70 h1:4bh28MeeXoBFTjb0JjQ5sVatzlf5xA1DziV8mZed9v4=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70/go.mod h1:9yI5NXzqy2yOiMytv6QLZHvlyHLwYxO9iIq+bZIbrFg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 h1:A5UqQEmPaCFpedKouS4v+dHCTUo2sKqhoKO9U5kxyWo=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34/go.mod h1:wZpTEecJe0Btj3IYnDx/VlUzor9wm3fJHyvLpQF0VwY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 h1:srIVS45eQuewqz6fKKu6ZGXaq6FuFg5NzgQBAM6g8Y4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28/go.mod h1:7VRpKQQedkfIEXb4k52I7swUnZP0wohVajJMRn3vsUw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 h1:LWA+3kDM8ly001vJ1X1waCuLJdtTl48gwkPKWy9sosI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35/go.mod h1:0Eg1YjxE0Bhn56lx+SHJwCzhW+2JGtizsrx+lCqrfm0=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 h1:wscW+pnn3J1OYnanMnza5ZVYXLX4cKk5rAvUAl4Qu+c=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26/go.mod h1:MtYiox5gvyB+OyP0Mr0Sm/yzbEAIPL9eijj/ouHAPw0=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 h1:zZSLP3v3riMOP14H7b4XP0uyfREDQOYv2cqIrvTXDNQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29/go.mod h1:z7EjRjVwZ6pWcWdI2H64dKttvzaP99jRIj5hphW0M5U=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 h1:bkRyG4a929RCnpVSTvLM2j/T4ls015ZhhYApbmYs15s=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28/go.mod h1:jj7znCIg05jXlaGBlFMGP8+7UN3VtCkRBG2spnmRQkU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 h1:dBL3StFxHtpBzJJ/mNEsjXVgfO+7jR0dAIEwLqMapEA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3/go.mod h1:f1QyiAsvIv4B49DmCqrhlXqyaR+0IxMmyX+1P+AnzOM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.34.1 h1:rYYwwsGqbwvGgQHjBkqgDt8MynXk+I8xgS0IEj5gOT0=
github.com/aws/aws-sdk-go-v2/service/s3 v1.34.1/go.mod h1:aVbf0sko/TsLWHx30c/uVu7c62+0EAJ3vbxaJga0xCw=
github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0 h1:ya7fmrN2fE7s1P2gaPbNg5MTkERVWfsH8ToP1YC4Z9o=
github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0/go.mod h1:aVbf0sko/TsLWHx30c/uVu7c62+0EAJ3vbxaJga0xCw=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 h1:nneMBM2p79PGWBQovYO/6Xnc2ryRMw3InnDJq1FHkSY=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.12/go.mod h1:HuCOxYsF21eKrerARYO6HapNeh9GBNq7fius2AcwodY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 h1:2qTR7IFk7/0IN/adSFhYu9Xthr0zVFTgBrmPldILn80=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12/go.mod h1:E4VrHCPzmVB/KFXtqBGKb3c8zpbNBgKe3fisDNLAW5w=
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 h1:XFJ2Z6sNUUcAz9poj+245DMkrHE4h2j5I9/xD50RHfE=
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2/go.mod h1:dp0yLPsLBOi++WTxzCjA/oZqi6NPIhoR+uF7GeMU9eg=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/aws/aws-sdk-go-v2 v1.20.0 h1:INUDpYLt4oiPOJl0XwZDK2OVAVf0Rzo+MGVTv9f+gy8=
github.com/aws/aws-sdk-go-v2 v1.20.0/go.mod h1:uWOr0m0jDsiWw8nnXiqZ+YG6LdvAlGYDLLf2NmHZoy4=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.11 h1:/MS8AzqYNAhhRNalOmxUvYs8VEbNGifTnzhPFdcRQkQ=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.11/go.mod h1:va22++AdXht4ccO3kH2SHkHHYvZ2G9Utz+CXKmm2CaU=
github.com/aws/aws-sdk-go-v2/config v1.18.32 h1:tqEOvkbTxwEV7hToRcJ1xZRjcATqwDVsWbAscgRKyNI=
github.com/aws/aws-sdk-go-v2/config v1.18.32/go.mod h1:U3ZF0fQRRA4gnbn9GGvOWLoT2EzzZfAWeKwnVrm1rDc=
github.com/aws/aws-sdk-go-v2/credentials v1.13.31 h1:vJyON3lG7R8VOErpJJBclBADiWTwzcwdkQpTKx8D2sk=
github.com/aws/aws-sdk-go-v2/credentials v1.13.31/go.mod h1:T4sESjBtY2lNxLgkIASmeP57b5j7hTQqCbqG0tWnxC4=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.7 h1:X3H6+SU21x+76LRglk21dFRgMTJMa5QcpW+SqUf5BBg=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.7/go.mod h1:3we0V09SwcJBzNlnyovrR2wWJhWmVdqAsmVs4uronv8=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.76 h1:DJ1kHj0GI9BbX+XhF0kHxlzOVjcncmDUXmCvXdbfdAE=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.76/go.mod h1:/AZCdswMSgwpB2yMSFfY5H4pVeBLnCuPehdmO/r3xSM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.37 h1:zr/gxAZkMcvP71ZhQOcvdm8ReLjFgIXnIn0fw5AM7mo=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.37/go.mod h1:Pdn4j43v49Kk6+82spO3Tu5gSeQXRsxo56ePPQAvFiA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.31 h1:0HCMIkAkVY9KMgueD8tf4bRTUanzEYvhw7KkPXIMpO0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.31/go.mod h1:fTJDMe8LOFYtqiFFFeHA+SVMAwqLhoq0kcInYoLa9Js=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.38 h1:+i1DOFrW3YZ3apE45tCal9+aDKK6kNEbW6Ib7e1nFxE=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.38/go.mod h1:1/jLp0OgOaWIetycOmycW+vYTYgTZFPttJQRgsI1PoU=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.0 h1:U5yySdwt2HPo/pnQec04DImLzWORbeWML1fJiLkKruI=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.0/go.mod h1:EhC/83j8/hL/UB1WmExo3gkElaja/KlmZM/gl1rTfjM=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.12 h1:uAiiHnWihGP2rVp64fHwzLDrswGjEjsPszwRYMiYQPU=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.12/go.mod h1:fUTHpOXqRQpXvEpDPSa3zxCc2fnpW6YnBoba+eQr+Bg=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.32 h1:kvN1jPHr9UffqqG3bSgZ8tx4+1zKVHz/Ktw/BwW6hX8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.32/go.mod h1:QmMEM7es84EUkbYWcpnkx8i5EW2uERPfrTFeOch128Y=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.31 h1:auGDJ0aLZahF5SPvkJ6WcUuX7iQ7kyl2MamV7Tm8QBk=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.31/go.mod h1:3+lloe3sZuBQw1aBc5MyndvodzQlyqCZ7x1QPDHaWP4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.0 h1:Wgjft9X4W5pMeuqgPCHIQtbZ87wsgom7S5F8obreg+c=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.0/go.mod h1:FWNzS4+zcWAP05IF7TDYTY1ysZAzIvogxWaDT9p8fsA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.1 h1:mTgFVlfQT8gikc5+/HwD8UL9jnUro5MGv8n/VEYF12I=
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.1/go.mod h1:6SOWLiobcZZshbmECRTADIRYliPL0etqFSigauQEeT0=
github.com/aws/aws-sdk-go-v2/service/sso v1.13.1 h1:DSNpSbfEgFXRV+IfEcKE5kTbqxm+MeF5WgyeRlsLnHY=
github.com/aws/aws-sdk-go-v2/service/sso v1.13.1/go.mod h1:TC9BubuFMVScIU+TLKamO6VZiYTkYoEHqlSQwAe2omw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.1 h1:hd0SKLMdOL/Sl6Z0np1PX9LeH2gqNtBe0MhTedA8MGI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.1/go.mod h1:XO/VcyoQ8nKyKfFW/3DMsRQXsfh/052tHTWmg3xBXRg=
github.com/aws/aws-sdk-go-v2/service/sts v1.21.1 h1:pAOJj+80tC8sPVgSDHzMYD6KLWsaLQ1kZw31PTeORbs=
github.com/aws/aws-sdk-go-v2/service/sts v1.21.1/go.mod h1:G8SbvL0rFk4WOJroU8tKBczhsbhj2p/YY7qeJezJ3CI=
github.com/aws/smithy-go v1.14.0 h1:+X90sB94fizKjDmwb4vyl2cTTPXTE5E2G/1mjByb0io=
github.com/aws/smithy-go v1.14.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gofiber/fiber/v2 v2.46.0 h1:wkkWotblsGVlLjXj2dpgKQAYHtXumsK/HyFugQM68Ns=
github.com/gofiber/fiber/v2 v2.46.0/go.mod h1:DNl0/c37WLe0g92U6lx1VMQuxGUQY5V7EIaVoEsUffc=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gofiber/fiber/v2 v2.48.0 h1:cRVMCb9aUJDsyHxGFLwz/sGzDggdailZZyptU9F9cU0=
github.com/gofiber/fiber/v2 v2.48.0/go.mod h1:xqJgfqrc23FJuqGOW6DVgi3HyZEm2Mn9pRqUb2kHSX8=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/nats-io/jwt/v2 v2.4.1 h1:Y35W1dgbbz2SQUYDPCaclXcuqleVmpbRa7646Jf2EX4=
github.com/nats-io/nats-server/v2 v2.9.20 h1:bt1dW6xsL1hWWwv7Hovm+EJt5L6iplyqlgEFkoEUk0k=
github.com/nats-io/nats-server/v2 v2.9.20/go.mod h1:aTb/xtLCGKhfTFLxP591CMWfkdgBmcUUSkiSOe5A3gw=
github.com/nats-io/nats.go v1.28.0 h1:Th4G6zdsz2d0OqXdfzKLClo6bOfoI/b1kInhRtFIy5c=
github.com/nats-io/nats.go v1.28.0/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc=
github.com/nats-io/nkeys v0.4.4 h1:xvBJ8d69TznjcQl9t6//Q5xXuVhyYiSos6RPtvQNTwA=
github.com/nats-io/nkeys v0.4.4/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 h1:rmMl4fXJhKMNWl+K+r/fq4FbbKI+Ia2m9hYBLm2h4G4=
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94/go.mod h1:90zrgN3D/WJsDd1iXHT96alCoN2KJo6/4x1DZC3wZs8=
github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d/go.mod h1:Gy+0tqhJvgGlqnTF8CVGP0AaGRjwBtXs/a5PA0Y3+A4=
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee h1:8Iv5m6xEo1NR1AvpV+7XmhI4r39LGNzwUL4YpMuL5vk=
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee/go.mod h1:qwtSXrKuJh/zsFQ12yEE89xfCrGKK63Rr7ctU/uCo4g=
github.com/segmentio/kafka-go v0.4.42 h1:qffhBZCz4WcWyNuHEclHjIMLs2slp6mZO8px+5W5tfU=
github.com/segmentio/kafka-go v0.4.42/go.mod h1:d0g15xPMqoUookug0OU75DhGZxXwCFxSLeJ4uphwJzg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw=
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
github.com/urfave/cli/v2 v2.25.6 h1:yuSkgDSZfH3L1CjF2/5fNNg2KbM47pY2EvjBq4ESQnU=
github.com/urfave/cli/v2 v2.25.6/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.47.0 h1:y7moDoxYzMooFpT5aHgNgVOQDrS3qlkfiP9mDtGGK9c=
github.com/valyala/fasthttp v1.47.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA=
github.com/valyala/fasthttp v1.48.0 h1:oJWvHb9BIZToTQS3MuQ2R3bJZiNSa2KiNdeI8A+79Tc=
github.com/valyala/fasthttp v1.48.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/versity/scoutfs-go v0.0.0-20230606232754-0474b14343b9 h1:ZfmQR01Kk6/kQh6+zlqfBYszVY02fzf9xYrchOY4NFM=
github.com/versity/scoutfs-go v0.0.0-20230606232754-0474b14343b9/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -38,6 +38,26 @@ func (r *RReader) Sum() []byte {
return r.hash.Sum(nil)
}
type ZReader struct {
buf []byte
dataleft int
}
func NewZeroReader(totalsize, bufsize int) *ZReader {
b := make([]byte, bufsize)
return &ZReader{buf: b, dataleft: totalsize}
}
func (r *ZReader) Read(p []byte) (int, error) {
n := min(len(p), len(r.buf), r.dataleft)
r.dataleft -= n
err := error(nil)
if n == 0 {
err = io.EOF
}
return copy(p, r.buf[:n]), err
}
func min(values ...int) int {
if len(values) == 0 {
return 0
@@ -52,3 +72,13 @@ func min(values ...int) int {
return min
}
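// NW is a no-op io.WriterAt that discards everything written to it, so
// downloads can be timed without storing the data.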
type NW struct{}
func NewNullWriter() NW {
return NW{}
}
func (NW) WriteAt(p []byte, off int64) (n int, err error) {
return len(p), nil
}

View File

@@ -2,6 +2,7 @@ package integration
import (
"context"
"io"
"log"
"net/http"
"os"
@@ -10,6 +11,8 @@ import (
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/smithy-go/middleware"
)
@@ -26,10 +29,7 @@ type S3Conf struct {
}
func NewS3Conf(opts ...Option) *S3Conf {
s := &S3Conf{
PartSize: 64 * 1024 * 1024, // 64MB default chunksize
Concurrency: 1, // 1 default concurrency
}
s := &S3Conf{}
for _, opt := range opts {
opt(s)
@@ -123,3 +123,31 @@ func (c *S3Conf) Config() aws.Config {
return cfg
}
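// UploadData streams r to bucket/object through the S3 upload manager using
// the configured PartSize and Concurrency.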
func (c *S3Conf) UploadData(r io.Reader, bucket, object string) error {
uploader := manager.NewUploader(s3.NewFromConfig(c.Config()))
uploader.PartSize = c.PartSize
uploader.Concurrency = c.Concurrency
upinfo := &s3.PutObjectInput{
Body: r,
Bucket: &bucket,
Key: &object,
}
_, err := uploader.Upload(context.Background(), upinfo)
return err
}
func (c *S3Conf) DownloadData(w io.WriterAt, bucket, object string) (int64, error) {
downloader := manager.NewDownloader(s3.NewFromConfig(c.Config()))
downloader.PartSize = c.PartSize
downloader.Concurrency = c.Concurrency
downinfo := &s3.GetObjectInput{
Bucket: &bucket,
Key: &object,
}
return downloader.Download(context.Background(), w, downinfo)
}

View File

@@ -7,11 +7,12 @@ import (
"crypto/sha256"
"fmt"
"io"
"math"
"os"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
@@ -20,80 +21,42 @@ var (
shortTimeout = 10 * time.Second
)
func setup(s *S3Conf, bucket string) error {
s3client := s3.NewFromConfig(s.Config())
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.CreateBucket(ctx, &s3.CreateBucketInput{
Bucket: &bucket,
})
cancel()
return err
}
func teardown(s *S3Conf, bucket string) error {
s3client := s3.NewFromConfig(s.Config())
deleteObject := func(bucket, key, versionId *string) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: bucket,
Key: key,
VersionId: versionId,
})
cancel()
if err != nil {
return fmt.Errorf("failed to delete object %v: %v", *key, err)
}
return nil
}
in := &s3.ListObjectsV2Input{Bucket: &bucket}
for {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.ListObjectsV2(ctx, in)
cancel()
if err != nil {
return fmt.Errorf("failed to list objects: %v", err)
}
for _, item := range out.Contents {
err = deleteObject(&bucket, item.Key, nil)
if err != nil {
return err
}
}
if out.IsTruncated {
in.ContinuationToken = out.ContinuationToken
} else {
break
}
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.DeleteBucket(ctx, &s3.DeleteBucketInput{
Bucket: &bucket,
})
cancel()
return err
}
func TestMakeBucket(s *S3Conf) {
testname := "test make bucket"
testname := "test make/head/delete bucket"
runF(testname)
s3client := s3.NewFromConfig(s.Config())
invBucket := "aa"
err := setup(s, invBucket)
if err == nil {
failF("%v: expected bucket name validation error", testname)
return
}
bucket := "testbucket"
err := setup(s, bucket)
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: &bucket})
cancel()
if err == nil {
failF("%v: expected error, instead got success response", testname)
return
}
err = setup(s, bucket)
if err != nil {
failF("%v: %v", testname, err)
return
}
passF(testname)
testname = "test delete empty bucket"
runF(testname)
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: &bucket})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
err = teardown(s, bucket)
if err != nil {
@@ -104,10 +67,16 @@ func TestMakeBucket(s *S3Conf) {
}
func TestPutGetObject(s *S3Conf) {
testname := "test put/get object"
testname := "test put/get/delete/copy objects"
runF(testname)
bucket := "testbucket1"
dstBucket := "testdstbucket"
obj := "myobject"
obj2 := "myobject2"
copySource := bucket + "/" + obj
s3client := s3.NewFromConfig(s.Config())
err := setup(s, bucket)
if err != nil {
@@ -122,13 +91,22 @@ func TestPutGetObject(s *S3Conf) {
csum := sha256.Sum256(data)
r := bytes.NewReader(data)
name := "myobject"
s3client := s3.NewFromConfig(s.Config())
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.PutObject(ctx, &s3.PutObjectInput{
Bucket: &bucket,
Key: &name,
Key: &obj,
Body: r,
})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.PutObject(ctx, &s3.PutObjectInput{
Bucket: &bucket,
Key: &obj2,
Body: r,
})
cancel()
@@ -140,7 +118,7 @@ func TestPutGetObject(s *S3Conf) {
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &name,
Key: &obj,
})
defer cancel()
if err != nil {
@@ -166,11 +144,102 @@ func TestPutGetObject(s *S3Conf) {
return
}
// Expected error: destination bucket doesn't exist
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.CopyObject(ctx, &s3.CopyObjectInput{Bucket: &dstBucket, Key: &obj, CopySource: &copySource})
cancel()
if err == nil {
failF("%v: expect bucket not found error instead got success response", testname)
return
}
err = setup(s, dstBucket)
if err != nil {
failF("%v: %v", testname, err)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.CopyObject(ctx, &s3.CopyObjectInput{Bucket: &dstBucket, Key: &obj, CopySource: &copySource})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
copyObjOut, err := s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &dstBucket,
Key: &obj,
})
defer cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
defer copyObjOut.Body.Close()
if copyObjOut.ContentLength != int64(datalen) {
failF("%v: content length got %v expected %v", testname, copyObjOut.ContentLength, datalen)
return
}
b, err = io.ReadAll(copyObjOut.Body)
if err != nil {
failF("%v: read body %v", testname, err)
return
}
copysum := sha256.Sum256(b)
if csum != copysum {
failF("%v: copied object checksum got %x expected %x", testname, copysum, csum)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.DeleteObjects(ctx, &s3.DeleteObjectsInput{Bucket: &bucket, Delete: &types.Delete{Objects: []types.ObjectIdentifier{{Key: &obj}, {Key: &obj2}}}})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
objCount := 0
in := &s3.ListObjectsV2Input{Bucket: &bucket}
for {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.ListObjectsV2(ctx, in)
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
objCount += len(out.Contents)
if out.IsTruncated {
in.ContinuationToken = out.ContinuationToken
} else {
break
}
}
if objCount != 0 {
failF("%v: expected object count %v instead got %v", testname, 2, objCount)
return
}
err = teardown(s, bucket)
if err != nil {
failF("%v: %v", testname, err)
return
}
err = teardown(s, dstBucket)
if err != nil {
failF("%v: %v", testname, err)
return
}
passF(testname)
}
@@ -193,7 +262,7 @@ func TestPutGetMPObject(s *S3Conf) {
dr := NewDataReader(datalen, 5*1024*1024)
WithPartSize(5 * 1024 * 1024)
s.PartSize = 5 * 1024 * 1024
err = uploadData(s, dr, bucket, name)
err = s.UploadData(dr, bucket, name)
if err != nil {
failF("%v: %v", testname, err)
return
@@ -244,35 +313,6 @@ func TestPutGetMPObject(s *S3Conf) {
passF(testname)
}
func isEqual(a, b []byte) bool {
if len(a) != len(b) {
return false
}
for i, d := range a {
if d != b[i] {
return false
}
}
return true
}
func uploadData(s *S3Conf, r io.Reader, bucket, object string) error {
uploader := manager.NewUploader(s3.NewFromConfig(s.Config()))
uploader.PartSize = s.PartSize
uploader.Concurrency = s.Concurrency
upinfo := &s3.PutObjectInput{
Body: r,
Bucket: &bucket,
Key: &object,
}
_, err := uploader.Upload(context.Background(), upinfo)
return err
}
func TestPutDirObject(s *S3Conf) {
testname := "test put directory object"
runF(testname)
@@ -394,6 +434,10 @@ func TestListObject(s *S3Conf) {
failF("object %v not found", obj2)
return
}
if out.KeyCount != 2 {
failF("%v: expected key count: %v, instead got: %v", testname, 2, out.KeyCount)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.DeleteObject(ctx, &s3.DeleteObjectInput{
@@ -448,16 +492,6 @@ func TestListObject(s *S3Conf) {
passF(testname)
}
func contains(name string, list []types.Object) bool {
for _, item := range list {
fmt.Println(*item.Key)
if strings.EqualFold(name, *item.Key) {
return true
}
}
return false
}
func TestListAbortMultiPartObject(s *S3Conf) {
testname := "list/abort multipart objects"
runF(testname)
@@ -542,15 +576,6 @@ func TestListAbortMultiPartObject(s *S3Conf) {
passF(testname)
}
func containsUID(name, id string, list []types.MultipartUpload) bool {
for _, item := range list {
if strings.EqualFold(name, *item.Key) && strings.EqualFold(id, *item.UploadId) {
return true
}
}
return false
}
func TestListMultiParts(s *S3Conf) {
testname := "list multipart parts"
runF(testname)
@@ -753,8 +778,45 @@ func TestIncorrectMultiParts(s *S3Conf) {
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
Bucket: &bucket,
Key: &obj,
UploadId: mpu.UploadId,
MultipartUpload: &types.CompletedMultipartUpload{
Parts: []types.CompletedPart{
{
ETag: mp2.ETag,
PartNumber: 96,
},
{
ETag: mp1.ETag,
PartNumber: 99,
},
},
},
})
cancel()
if err == nil {
failF("%v: complete multipart expected err", testname)
return
}
badEtag := "bogusEtagValue"
// Empty multipart upload
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
Bucket: &bucket,
Key: &obj,
UploadId: mpu.UploadId,
})
cancel()
if err == nil {
failF("%v: complete multipart expected err", testname)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
Bucket: &bucket,
@@ -768,7 +830,7 @@ func TestIncorrectMultiParts(s *S3Conf) {
},
{
ETag: &badEtag,
PartNumber: 99,
PartNumber: 42,
},
},
},
@@ -921,15 +983,6 @@ func TestIncompleteMultiParts(s *S3Conf) {
passF(testname)
}
func containsPart(part int32, list []types.Part) bool {
for _, item := range list {
if item.PartNumber == part {
return true
}
}
return false
}
func TestIncompletePutObject(s *S3Conf) {
testname := "test incomplete put object"
runF(testname)
@@ -1016,6 +1069,20 @@ func TestRangeGet(s *S3Conf) {
return
}
// Invalid range
invRange := "bytes=100-asd"
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &name,
Range: &invRange,
})
defer cancel()
if err == nil {
failF("%v: expected range error", testname)
return
}
rangeString := "bytes=100-200"
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
@@ -1031,6 +1098,14 @@ func TestRangeGet(s *S3Conf) {
}
defer out.Body.Close()
if getString(out.ContentRange) != fmt.Sprintf("bytes 100-200/%v", datalen) {
failF("%v: expected content range: %v, instead got: %v", testname, fmt.Sprintf("bytes 100-200/%v", datalen), getString(out.ContentRange))
return
}
if getString(out.AcceptRanges) != rangeString {
failF("%v: expected accept range: %v, instead got: %v", testname, rangeString, getString(out.AcceptRanges))
}
b, err := io.ReadAll(out.Body)
if err != nil {
failF("%v: read body %v", testname, err)
@@ -1038,7 +1113,34 @@ func TestRangeGet(s *S3Conf) {
}
// bytes range is inclusive, go range for second value is not
if !isSame(b, data[100:201]) {
if !isEqual(b, data[100:201]) {
failF("%v: data mismatch of range", testname)
return
}
rangeString = "bytes=100-"
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
out, err = s3client.GetObject(ctx, &s3.GetObjectInput{
Bucket: &bucket,
Key: &name,
Range: &rangeString,
})
defer cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
defer out.Body.Close()
b, err = io.ReadAll(out.Body)
if err != nil {
failF("%v: read body %v", testname, err)
return
}
// bytes range is inclusive, go range for second value is not
if !isEqual(b, data[100:]) {
failF("%v: data mismatch of range", testname)
return
}
@@ -1051,18 +1153,6 @@ func TestRangeGet(s *S3Conf) {
passF(testname)
}
func isSame(a, b []byte) bool {
if len(a) != len(b) {
return false
}
for i, x := range a {
if x != b[i] {
return false
}
}
return true
}
func TestInvalidMultiParts(s *S3Conf) {
testname := "invalid multipart parts"
runF(testname)
@@ -1148,6 +1238,288 @@ func TestInvalidMultiParts(s *S3Conf) {
passF(testname)
}
type prefResult struct {
elapsed time.Duration
size int64
err error
}
func TestPerformance(s *S3Conf, upload, download bool, files int, objectSize int64, bucket, prefix string) error {
var sg sync.WaitGroup
results := make([]prefResult, files)
start := time.Now()
if upload {
if objectSize == 0 {
return fmt.Errorf("must specify object size for upload")
}
if objectSize > (int64(10000) * s.PartSize) {
return fmt.Errorf("object size can not exceed 10000 * chunksize")
}
runF("performance test: upload/download objects")
for i := 0; i < files; i++ {
sg.Add(1)
go func(i int) {
var r io.Reader = NewDataReader(int(objectSize), int(s.PartSize))
start := time.Now()
err := s.UploadData(r, bucket, fmt.Sprintf("%v%v", prefix, i))
results[i].elapsed = time.Since(start)
results[i].err = err
results[i].size = objectSize
sg.Done()
}(i)
}
}
if download {
for i := 0; i < files; i++ {
sg.Add(1)
go func(i int) {
nw := NewNullWriter()
start := time.Now()
n, err := s.DownloadData(nw, bucket, fmt.Sprintf("%v%v", prefix, i))
results[i].elapsed = time.Since(start)
results[i].err = err
results[i].size = n
sg.Done()
}(i)
}
}
sg.Wait()
elapsed := time.Since(start)
var tot int64
for i, res := range results {
if res.err != nil {
failF("%v: %v\n", i, res.err)
break
}
tot += res.size
fmt.Printf("%v: %v in %v (%v MB/s)\n",
i, res.size, res.elapsed,
int(math.Ceil(float64(res.size)/res.elapsed.Seconds())/1048576))
}
fmt.Println()
passF("run perf: %v in %v (%v MB/s)\n",
tot, elapsed, int(math.Ceil(float64(tot)/elapsed.Seconds())/1048576))
return nil
}
func TestPutGetRemoveTags(s *S3Conf) {
testname := "test put/get/remove object tags"
runF(testname)
bucket := "testbucket13"
err := setup(s, bucket)
if err != nil {
failF("%v: %v", testname, err)
return
}
obj := "myobject"
s3client := s3.NewFromConfig(s.Config())
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.PutObject(ctx, &s3.PutObjectInput{
Bucket: &bucket,
Key: &obj,
})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
key1 := "hello1"
key2 := "hello2"
val1 := "world1"
val2 := "world2"
tagging := types.Tagging{TagSet: []types.Tag{{Key: &key1, Value: &val1}, {Key: &key2, Value: &val2}}}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
Bucket: &bucket,
Key: &obj,
Tagging: &tagging,
})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
Key: &obj,
Bucket: &bucket,
})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
ok := areTagsSame(tagging.TagSet, out.TagSet)
if !ok {
failF("%v: expected %v instead got %v", testname, tagging.TagSet, out.TagSet)
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
Key: &obj,
Bucket: &bucket,
})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
out, err = s3client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
Key: &obj,
Bucket: &bucket,
})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
if len(out.TagSet) > 0 {
failF("%v: expected empty tag set instead got %v", testname, out.TagSet)
}
err = teardown(s, bucket)
if err != nil {
failF("%v: %v", testname, err)
return
}
passF(testname)
}
func TestAclActions(s *S3Conf) {
testname := "test put/get acl"
runF(testname)
bucket := "testbucket14"
err := setup(s, bucket)
if err != nil {
failF("%v: %v", testname, err)
return
}
s3client := s3.NewFromConfig(s.Config())
rootAccess := s.awsID
rootSecret := s.awsSecret
s.awsID = "grt1"
s.awsSecret = "grt1secret"
userS3Client := s3.NewFromConfig(s.Config())
s.awsID = rootAccess
s.awsSecret = rootSecret
grt1 := "grt1"
grants := []types.Grant{
{
Permission: "READ",
Grantee: &types.Grantee{
ID: &grt1,
Type: "CanonicalUser",
},
},
}
succUsrCrt := "The user has been created successfully"
failUsrCrt := "failed to create a user: update iam data: account already exists"
out, err := execCommand("admin", "-a", s.awsID, "-s", s.awsSecret, "create-user", "-a", grt1, "-s", "grt1secret", "-r", "user")
if err != nil {
failF("%v: %v", err)
return
}
if !strings.Contains(string(out), succUsrCrt) && !strings.Contains(string(out), failUsrCrt) {
failF("%v: failed to create user accounts", testname)
return
}
// Validation error case
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
Bucket: &bucket,
AccessControlPolicy: &types.AccessControlPolicy{
Grants: grants,
},
ACL: "private",
})
cancel()
if err == nil {
failF("%v: expected validation error", testname)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = s3client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
Bucket: &bucket,
AccessControlPolicy: &types.AccessControlPolicy{
Grants: grants,
Owner: &types.Owner{ID: &s.awsID},
},
})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
acl, err := s3client.GetBucketAcl(ctx, &s3.GetBucketAclInput{
Bucket: &bucket,
})
cancel()
if err != nil {
failF("%v: %v", testname, err)
return
}
if *acl.Owner.ID != s.awsID {
failF("%v: expected bucket owner: %v, instead got: %v", testname, s.awsID, *acl.Owner.ID)
return
}
if !checkGrants(acl.Grants, grants) {
failF("%v: expected %v, instead got %v", testname, grants, acl.Grants)
return
}
ctx, cancel = context.WithTimeout(context.Background(), shortTimeout)
_, err = userS3Client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
Bucket: &bucket,
})
cancel()
if err == nil {
failF("%v: expected acl access denied error", testname)
return
}
err = teardown(s, bucket)
if err != nil {
failF("%v: %v", testname, err)
return
}
passF(testname)
}
// Full flow test
func TestFullFlow(s *S3Conf) {
// TODO: add more test cases to get 100% coverage
@@ -1161,6 +1533,8 @@ func TestFullFlow(s *S3Conf) {
TestIncompleteMultiParts(s)
TestIncorrectMultiParts(s)
TestListAbortMultiPartObject(s)
TestRangeGet(s)
TestInvalidMultiParts(s)
TestPutGetRemoveTags(s)
TestAclActions(s)
}

integration/utils.go (new file, 163 lines)

@@ -0,0 +1,163 @@
package integration
import (
"context"
"fmt"
"os/exec"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
func setup(s *S3Conf, bucket string) error {
s3client := s3.NewFromConfig(s.Config())
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.CreateBucket(ctx, &s3.CreateBucketInput{
Bucket: &bucket,
})
cancel()
return err
}
func teardown(s *S3Conf, bucket string) error {
s3client := s3.NewFromConfig(s.Config())
deleteObject := func(bucket, key, versionId *string) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: bucket,
Key: key,
VersionId: versionId,
})
cancel()
if err != nil {
return fmt.Errorf("failed to delete object %v: %v", *key, err)
}
return nil
}
in := &s3.ListObjectsV2Input{Bucket: &bucket}
for {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.ListObjectsV2(ctx, in)
cancel()
if err != nil {
return fmt.Errorf("failed to list objects: %v", err)
}
for _, item := range out.Contents {
err = deleteObject(&bucket, item.Key, nil)
if err != nil {
return err
}
}
if out.IsTruncated {
in.ContinuationToken = out.ContinuationToken
} else {
break
}
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.DeleteBucket(ctx, &s3.DeleteBucketInput{
Bucket: &bucket,
})
cancel()
return err
}
func isEqual(a, b []byte) bool {
if len(a) != len(b) {
return false
}
for i, d := range a {
if d != b[i] {
return false
}
}
return true
}
func contains(name string, list []types.Object) bool {
for _, item := range list {
fmt.Println(*item.Key)
if strings.EqualFold(name, *item.Key) {
return true
}
}
return false
}
func containsUID(name, id string, list []types.MultipartUpload) bool {
for _, item := range list {
if strings.EqualFold(name, *item.Key) && strings.EqualFold(id, *item.UploadId) {
return true
}
}
return false
}
func containsPart(part int32, list []types.Part) bool {
for _, item := range list {
if item.PartNumber == part {
return true
}
}
return false
}
func areTagsSame(tags1, tags2 []types.Tag) bool {
if len(tags1) != len(tags2) {
return false
}
for _, tag := range tags1 {
if !containsTag(tag, tags2) {
return false
}
}
return true
}
func containsTag(tag types.Tag, list []types.Tag) bool {
for _, item := range list {
if *item.Key == *tag.Key && *item.Value == *tag.Value {
return true
}
}
return false
}
func checkGrants(grts1, grts2 []types.Grant) bool {
if len(grts1) != len(grts2) {
return false
}
for i, grt := range grts1 {
if grt.Permission != grts2[i].Permission {
return false
}
if *grt.Grantee.ID != *grts2[i].Grantee.ID {
return false
}
}
return true
}
func execCommand(args ...string) ([]byte, error) {
cmd := exec.Command("./versitygw", args...)
return cmd.CombinedOutput()
}
func getString(str *string) string {
if str == nil {
return ""
}
return *str
}

runtests.sh (new executable file, 37 lines)

@@ -0,0 +1,37 @@
#!/bin/bash
# make temp dirs
mkdir -p /tmp/gw
rm -rf /tmp/covdata
mkdir /tmp/covdata
# run server in background
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass posix /tmp/gw &
GW_PID=$!
# wait a second for server to start up
sleep 1
# check if server is still running
if ! kill -0 $GW_PID; then
echo "server no longer running"
exit 1
fi
# run tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow; then
echo "tests failed"
kill $GW_PID
exit 1
fi
# kill off server
kill $GW_PID
exit 0
# if the above binary was built with -cover enabled (make testbin),
# then the following can be used for code coverage reports:
# go tool covdata percent -i=/tmp/covdata
# go tool covdata textfmt -i=/tmp/covdata -o profile.txt
# go tool cover -html=profile.txt

View File

@@ -27,11 +27,14 @@ type AdminController struct {
func (c AdminController) CreateUser(ctx *fiber.Ctx) error {
access, secret, role := ctx.Query("access"), ctx.Query("secret"), ctx.Query("role")
requesterRole := ctx.Locals("role")
requesterRole := ctx.Locals("role").(string)
if requesterRole != "admin" {
return fmt.Errorf("access denied: only admin users have access to this resource")
}
if role != "user" && role != "admin" {
return fmt.Errorf("invalid parameters: user role have to be one of the following: 'user', 'admin'")
}
user := auth.Account{Secret: secret, Role: role}
@@ -40,13 +43,12 @@ func (c AdminController) CreateUser(ctx *fiber.Ctx) error {
return fmt.Errorf("failed to create a user: %w", err)
}
ctx.SendString("The user has been created successfully")
return nil
return ctx.SendString("The user has been created successfully")
}
func (c AdminController) DeleteUser(ctx *fiber.Ctx) error {
access := ctx.Query("access")
requesterRole := ctx.Locals("role")
requesterRole := ctx.Locals("role").(string)
if requesterRole != "admin" {
return fmt.Errorf("access denied: only admin users have access to this resource")
}
@@ -56,6 +58,5 @@ func (c AdminController) DeleteUser(ctx *fiber.Ctx) error {
return err
}
ctx.SendString("The user has been created successfully")
return nil
return ctx.SendString("The user has been deleted successfully")
}

View File

@@ -0,0 +1,173 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package controllers
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
)
func TestAdminController_CreateUser(t *testing.T) {
type args struct {
req *http.Request
}
adminController := AdminController{
IAMService: &IAMServiceMock{
CreateAccountFunc: func(access string, account auth.Account) error {
return nil
},
},
}
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("role", "admin")
return ctx.Next()
})
app.Patch("/create-user", adminController.CreateUser)
appErr := fiber.New()
appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("role", "user")
return ctx.Next()
})
appErr.Patch("/create-user", adminController.CreateUser)
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
statusCode int
}{
{
name: "Admin-create-user-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user?access=test&secret=test&role=user", nil),
},
wantErr: false,
statusCode: 200,
},
{
name: "Admin-create-user-invalid-user-role",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user?access=test&secret=test&role=invalid", nil),
},
wantErr: false,
statusCode: 500,
},
{
name: "Admin-create-user-invalid-requester-role",
app: appErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/create-user?access=test&secret=test&role=admin", nil),
},
wantErr: false,
statusCode: 500,
},
}
for _, tt := range tests {
resp, err := tt.app.Test(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("AdminController.CreateUser() error = %v, wantErr %v", err, tt.wantErr)
}
if resp.StatusCode != tt.statusCode {
t.Errorf("AdminController.CreateUser() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
}
}
}
func TestAdminController_DeleteUser(t *testing.T) {
type args struct {
req *http.Request
}
adminController := AdminController{
IAMService: &IAMServiceMock{
DeleteUserAccountFunc: func(access string) error {
return nil
},
},
}
app := fiber.New()
app.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("role", "admin")
return ctx.Next()
})
app.Patch("/delete-user", adminController.DeleteUser)
appErr := fiber.New()
appErr.Use(func(ctx *fiber.Ctx) error {
ctx.Locals("role", "user")
return ctx.Next()
})
appErr.Patch("/delete-user", adminController.DeleteUser)
tests := []struct {
name string
app *fiber.App
args args
wantErr bool
statusCode int
}{
{
name: "Admin-delete-user-success",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/delete-user?access=test", nil),
},
wantErr: false,
statusCode: 200,
},
{
name: "Admin-delete-user-invalid-requester-role",
app: appErr,
args: args{
req: httptest.NewRequest(http.MethodPatch, "/delete-user?access=test", nil),
},
wantErr: false,
statusCode: 500,
},
}
for _, tt := range tests {
resp, err := tt.app.Test(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("AdminController.DeleteUser() error = %v, wantErr %v", err, tt.wantErr)
}
if resp.StatusCode != tt.statusCode {
t.Errorf("AdminController.DeleteUser() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
}
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,169 @@
// Code generated by moq; DO NOT EDIT.
// github.com/matryer/moq
package controllers
import (
"github.com/versity/versitygw/auth"
"sync"
)
// Ensure, that IAMServiceMock does implement auth.IAMService.
// If this is not the case, regenerate this file with moq.
var _ auth.IAMService = &IAMServiceMock{}
// IAMServiceMock is a mock implementation of auth.IAMService.
//
// func TestSomethingThatUsesIAMService(t *testing.T) {
//
// // make and configure a mocked auth.IAMService
// mockedIAMService := &IAMServiceMock{
// CreateAccountFunc: func(access string, account auth.Account) error {
// panic("mock out the CreateAccount method")
// },
// DeleteUserAccountFunc: func(access string) error {
// panic("mock out the DeleteUserAccount method")
// },
// GetUserAccountFunc: func(access string) (auth.Account, error) {
// panic("mock out the GetUserAccount method")
// },
// }
//
// // use mockedIAMService in code that requires auth.IAMService
// // and then make assertions.
//
// }
type IAMServiceMock struct {
// CreateAccountFunc mocks the CreateAccount method.
CreateAccountFunc func(access string, account auth.Account) error
// DeleteUserAccountFunc mocks the DeleteUserAccount method.
DeleteUserAccountFunc func(access string) error
// GetUserAccountFunc mocks the GetUserAccount method.
GetUserAccountFunc func(access string) (auth.Account, error)
// calls tracks calls to the methods.
calls struct {
// CreateAccount holds details about calls to the CreateAccount method.
CreateAccount []struct {
// Access is the access argument value.
Access string
// Account is the account argument value.
Account auth.Account
}
// DeleteUserAccount holds details about calls to the DeleteUserAccount method.
DeleteUserAccount []struct {
// Access is the access argument value.
Access string
}
// GetUserAccount holds details about calls to the GetUserAccount method.
GetUserAccount []struct {
// Access is the access argument value.
Access string
}
}
lockCreateAccount sync.RWMutex
lockDeleteUserAccount sync.RWMutex
lockGetUserAccount sync.RWMutex
}
// CreateAccount calls CreateAccountFunc.
func (mock *IAMServiceMock) CreateAccount(access string, account auth.Account) error {
if mock.CreateAccountFunc == nil {
panic("IAMServiceMock.CreateAccountFunc: method is nil but IAMService.CreateAccount was just called")
}
callInfo := struct {
Access string
Account auth.Account
}{
Access: access,
Account: account,
}
mock.lockCreateAccount.Lock()
mock.calls.CreateAccount = append(mock.calls.CreateAccount, callInfo)
mock.lockCreateAccount.Unlock()
return mock.CreateAccountFunc(access, account)
}
// CreateAccountCalls gets all the calls that were made to CreateAccount.
// Check the length with:
//
// len(mockedIAMService.CreateAccountCalls())
func (mock *IAMServiceMock) CreateAccountCalls() []struct {
Access string
Account auth.Account
} {
var calls []struct {
Access string
Account auth.Account
}
mock.lockCreateAccount.RLock()
calls = mock.calls.CreateAccount
mock.lockCreateAccount.RUnlock()
return calls
}
// DeleteUserAccount calls DeleteUserAccountFunc.
func (mock *IAMServiceMock) DeleteUserAccount(access string) error {
if mock.DeleteUserAccountFunc == nil {
panic("IAMServiceMock.DeleteUserAccountFunc: method is nil but IAMService.DeleteUserAccount was just called")
}
callInfo := struct {
Access string
}{
Access: access,
}
mock.lockDeleteUserAccount.Lock()
mock.calls.DeleteUserAccount = append(mock.calls.DeleteUserAccount, callInfo)
mock.lockDeleteUserAccount.Unlock()
return mock.DeleteUserAccountFunc(access)
}
// DeleteUserAccountCalls gets all the calls that were made to DeleteUserAccount.
// Check the length with:
//
// len(mockedIAMService.DeleteUserAccountCalls())
func (mock *IAMServiceMock) DeleteUserAccountCalls() []struct {
Access string
} {
var calls []struct {
Access string
}
mock.lockDeleteUserAccount.RLock()
calls = mock.calls.DeleteUserAccount
mock.lockDeleteUserAccount.RUnlock()
return calls
}
// GetUserAccount calls GetUserAccountFunc.
func (mock *IAMServiceMock) GetUserAccount(access string) (auth.Account, error) {
if mock.GetUserAccountFunc == nil {
panic("IAMServiceMock.GetUserAccountFunc: method is nil but IAMService.GetUserAccount was just called")
}
callInfo := struct {
Access string
}{
Access: access,
}
mock.lockGetUserAccount.Lock()
mock.calls.GetUserAccount = append(mock.calls.GetUserAccount, callInfo)
mock.lockGetUserAccount.Unlock()
return mock.GetUserAccountFunc(access)
}
// GetUserAccountCalls gets all the calls that were made to GetUserAccount.
// Check the length with:
//
// len(mockedIAMService.GetUserAccountCalls())
func (mock *IAMServiceMock) GetUserAccountCalls() []struct {
Access string
} {
var calls []struct {
Access string
}
mock.lockGetUserAccount.RLock()
calls = mock.calls.GetUserAccount
mock.lockGetUserAccount.RUnlock()
return calls
}

View File

@@ -0,0 +1,61 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"net/http"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3log"
)
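// AclParser fetches the target bucket ACL, parses it, and stores the result in
// ctx.Locals("parsedAcl") for later permission checks. ListBuckets, admin (PATCH)
// routes, and bucket creation (which only needs an admin check) are passed through.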
func AclParser(be backend.Backend, logger s3log.AuditLogger) fiber.Handler {
return func(ctx *fiber.Ctx) error {
isRoot, access := ctx.Locals("isRoot").(bool), ctx.Locals("access").(string)
path := ctx.Path()
pathParts := strings.Split(path, "/")
bucket := pathParts[1]
if path == "/" && ctx.Method() == http.MethodGet {
return ctx.Next()
}
if ctx.Method() == http.MethodPatch {
return ctx.Next()
}
if len(pathParts) == 2 && pathParts[1] != "" && ctx.Method() == http.MethodPut && !ctx.Request().URI().QueryArgs().Has("acl") {
if err := auth.IsAdmin(access, isRoot); err != nil {
return controllers.SendXMLResponse(ctx, nil, err, &controllers.MetaOpts{Logger: logger, Action: "CreateBucket"})
}
return ctx.Next()
}
//TODO: provide correct action names for the logger, after implementing DetectAction middleware
data, err := be.GetBucketAcl(ctx.Context(), &s3.GetBucketAclInput{Bucket: &bucket})
if err != nil {
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
}
parsedAcl, err := auth.ParseACL(data)
if err != nil {
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
}
ctx.Locals("parsedAcl", parsedAcl)
return ctx.Next()
}
}

View File

@@ -29,6 +29,7 @@ import (
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
const (
@@ -40,74 +41,90 @@ type RootUserConfig struct {
Secret string
}
func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string, debug bool) fiber.Handler {
func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.AuditLogger, region string, debug bool) fiber.Handler {
acct := accounts{root: root, iam: iam}
return func(ctx *fiber.Ctx) error {
ctx.Locals("region", region)
ctx.Locals("startTime", time.Now())
authorization := ctx.Get("Authorization")
if authorization == "" {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty), &controllers.MetaOpts{Logger: logger})
}
// Check the signature version
authParts := strings.Split(authorization, " ")
if len(authParts) < 4 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields))
}
if authParts[0] != "AWS4-HMAC-SHA256" {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported))
authParts := strings.Split(authorization, ",")
for i, el := range authParts {
authParts[i] = strings.TrimSpace(el)
}
credKv := strings.Split(authParts[1], "=")
if len(authParts) != 3 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields), &controllers.MetaOpts{Logger: logger})
}
startParts := strings.Split(authParts[0], " ")
if startParts[0] != "AWS4-HMAC-SHA256" {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported), &controllers.MetaOpts{Logger: logger})
}
credKv := strings.Split(startParts[1], "=")
if len(credKv) != 2 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed), &controllers.MetaOpts{Logger: logger})
}
creds := strings.Split(credKv[1], "/")
if len(creds) < 4 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed), &controllers.MetaOpts{Logger: logger})
}
signHdrKv := strings.Split(authParts[2][:len(authParts[2])-1], "=")
ctx.Locals("access", creds[0])
ctx.Locals("isRoot", creds[0] == root.Access)
signHdrKv := strings.Split(authParts[1], "=")
if len(signHdrKv) != 2 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed), &controllers.MetaOpts{Logger: logger})
}
signedHdrs := strings.Split(signHdrKv[1], ";")
account, err := acct.getAccount(creds[0])
if err == auth.ErrNoSuchUser {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID), &controllers.MetaOpts{Logger: logger})
}
if err != nil {
return controllers.SendResponse(ctx, err)
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
}
ctx.Locals("role", account.Role)
// Check X-Amz-Date header
date := ctx.Get("X-Amz-Date")
if date == "" {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingDateHeader))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingDateHeader), &controllers.MetaOpts{Logger: logger})
}
// Parse the date and check the date validity
tdate, err := time.Parse(iso8601Format, date)
if err != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedDate))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedDate), &controllers.MetaOpts{Logger: logger})
}
// Calculate the hash of the request payload
hashedPayload := sha256.Sum256(ctx.Body())
hexPayload := hex.EncodeToString(hashedPayload[:])
hashPayloadHeader := ctx.Get("X-Amz-Content-Sha256")
ok := isSpecialPayload(hashPayloadHeader)
// Compare the calculated hash with the hash provided
if hashPayloadHeader != hexPayload {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch))
if !ok {
// Calculate the hash of the request payload
hashedPayload := sha256.Sum256(ctx.Body())
hexPayload := hex.EncodeToString(hashedPayload[:])
// Compare the calculated hash with the hash provided
if hashPayloadHeader != hexPayload {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch), &controllers.MetaOpts{Logger: logger})
}
}
// Create a new http request instance from fasthttp request
req, err := utils.CreateHttpRequestFromCtx(ctx, signedHdrs)
if err != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError), &controllers.MetaOpts{Logger: logger})
}
signer := v4.NewSigner()
@@ -115,31 +132,27 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string,
signErr := signer.SignHTTP(req.Context(), aws.Credentials{
AccessKeyID: creds[0],
SecretAccessKey: account.Secret,
}, req, hexPayload, creds[3], region, tdate, func(options *v4.SignerOptions) {
}, req, hashPayloadHeader, creds[3], region, tdate, func(options *v4.SignerOptions) {
if debug {
options.LogSigning = true
options.Logger = logging.NewStandardLogger(os.Stderr)
}
})
if signErr != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError), &controllers.MetaOpts{Logger: logger})
}
parts := strings.Split(req.Header.Get("Authorization"), " ")
if len(parts) < 4 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields), &controllers.MetaOpts{Logger: logger})
}
calculatedSign := strings.Split(parts[3], "=")[1]
expectedSign := strings.Split(authParts[3], "=")[1]
expectedSign := strings.Split(authParts[2], "=")[1]
if expectedSign != calculatedSign {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch), &controllers.MetaOpts{Logger: logger})
}
ctx.Locals("role", account.Role)
ctx.Locals("access", creds[0])
ctx.Locals("isRoot", creds[0] == root.Access)
return ctx.Next()
}
}
@@ -159,3 +172,16 @@ func (a accounts) getAccount(access string) (auth.Account, error) {
return a.iam.GetUserAccount(access)
}
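// isSpecialPayload reports whether the X-Amz-Content-Sha256 header carries one of
// the special unsigned/streaming payload markers, in which case the body hash
// comparison is skipped.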
func isSpecialPayload(str string) bool {
specialValues := map[string]bool{
"UNSIGNED-PAYLOAD": true,
"STREAMING-UNSIGNED-PAYLOAD-TRAILER": true,
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD": true,
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER": true,
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD": true,
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER": true,
}
return specialValues[str]
}

View File

@@ -21,9 +21,10 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3log"
)
func VerifyMD5Body() fiber.Handler {
func VerifyMD5Body(logger s3log.AuditLogger) fiber.Handler {
return func(ctx *fiber.Ctx) error {
incomingSum := ctx.Get("Content-Md5")
if incomingSum == "" {
@@ -34,10 +35,9 @@ func VerifyMD5Body() fiber.Handler {
calculatedSum := base64.StdEncoding.EncodeToString(sum[:])
if incomingSum != calculatedSum {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest))
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest), &controllers.MetaOpts{Logger: logger})
}
return ctx.Next()
}
}

View File

@@ -19,19 +19,20 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3event"
"github.com/versity/versitygw/s3log"
)
type S3ApiRouter struct{}
func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService) {
s3ApiController := controllers.New(be)
func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender) {
s3ApiController := controllers.New(be, iam, logger, evs)
adminController := controllers.AdminController{IAMService: iam}
// TODO: think of better routing system
app.Post("/create-user", adminController.CreateUser)
app.Patch("/create-user", adminController.CreateUser)
// Admin Delete api
app.Delete("/delete-user", adminController.DeleteUser)
app.Patch("/delete-user", adminController.DeleteUser)
// ListBuckets action
app.Get("/", s3ApiController.ListBuckets)

View File

@@ -45,7 +45,7 @@ func TestS3ApiRouter_Init(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam)
tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam, nil, nil)
})
}
}

View File

@@ -22,6 +22,8 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/middlewares"
"github.com/versity/versitygw/s3event"
"github.com/versity/versitygw/s3log"
)
type S3ApiServer struct {
@@ -33,7 +35,7 @@ type S3ApiServer struct {
debug bool
}
func New(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, opts ...Option) (*S3ApiServer, error) {
func New(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, l s3log.AuditLogger, evs s3event.S3EventSender, opts ...Option) (*S3ApiServer, error) {
server := &S3ApiServer{
app: app,
backend: be,
@@ -50,10 +52,11 @@ func New(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, po
app.Use(middlewares.RequestLogger(server.debug))
// Authentication middlewares
app.Use(middlewares.VerifyV4Signature(root, iam, region, server.debug))
app.Use(middlewares.VerifyMD5Body())
app.Use(middlewares.VerifyV4Signature(root, iam, l, region, server.debug))
app.Use(middlewares.VerifyMD5Body(l))
app.Use(middlewares.AclParser(be, l))
server.router.Init(app, be, iam)
server.router.Init(app, be, iam, l, evs)
return server, nil
}

View File

@@ -15,6 +15,7 @@
package s3api
import (
"crypto/tls"
"reflect"
"testing"
@@ -63,7 +64,7 @@ func TestNew(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotS3ApiServer, err := New(tt.args.app, tt.args.be, tt.args.root,
tt.args.port, "us-east-1", &auth.IAMServiceInternal{})
tt.args.port, "us-east-1", &auth.IAMServiceInternal{}, nil, nil)
if (err != nil) != tt.wantErr {
t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -82,15 +83,26 @@ func TestS3ApiServer_Serve(t *testing.T) {
wantErr bool
}{
{
name: "Return error when serving S3 api server with invalid address",
name: "Serve-invalid-address",
wantErr: true,
sa: &S3ApiServer{
app: fiber.New(),
backend: backend.BackendUnsupported{},
port: "Wrong address",
port: "Invalid address",
router: &S3ApiRouter{},
},
},
{
name: "Serve-invalid-address-with-certificate",
wantErr: true,
sa: &S3ApiServer{
app: fiber.New(),
backend: backend.BackendUnsupported{},
port: "Invalid address",
router: &S3ApiRouter{},
cert: &tls.Certificate{},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View File

@@ -19,12 +19,18 @@ import (
"errors"
"fmt"
"net/http"
"regexp"
"strings"
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
)
var (
bucketNameRegexp = regexp.MustCompile(`^[a-z0-9][a-z0-9.-]+[a-z0-9]$`)
bucketNameIpRegexp = regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`)
)
func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
metadata = make(map[string]string)
headers.VisitAll(func(key, value []byte) {
@@ -83,6 +89,22 @@ func SetResponseHeaders(ctx *fiber.Ctx, headers []CustomHeader) {
}
}
func IsValidBucketName(bucket string) bool {
if len(bucket) < 3 || len(bucket) > 63 {
return false
}
// The name may contain only digits, lowercase letters, dots, and hyphens,
// and must start and end with a digit or lowercase letter.
if !bucketNameRegexp.MatchString(bucket) {
return false
}
// The name must not be formatted like an IP address.
if bucketNameIpRegexp.MatchString(bucket) {
return false
}
return true
}
func includeHeader(hdr string, signedHdrs []string) bool {
for _, shdr := range signedHdrs {
if strings.EqualFold(hdr, shdr) {

View File

@@ -117,3 +117,107 @@ func TestGetUserMetaData(t *testing.T) {
})
}
}
func Test_includeHeader(t *testing.T) {
type args struct {
hdr string
signedHdrs []string
}
tests := []struct {
name string
args args
want bool
}{
{
name: "include-header-falsy-case",
args: args{
hdr: "Content-Type",
signedHdrs: []string{"X-Amz-Acl", "Content-Encoding"},
},
want: false,
},
{
name: "include-header-falsy-case",
args: args{
hdr: "Content-Type",
signedHdrs: []string{"X-Amz-Acl", "Content-Type"},
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := includeHeader(tt.args.hdr, tt.args.signedHdrs); got != tt.want {
t.Errorf("includeHeader() = %v, want %v", got, tt.want)
}
})
}
}
func TestIsValidBucketName(t *testing.T) {
type args struct {
bucket string
}
tests := []struct {
name string
args args
want bool
}{
{
name: "IsValidBucketName-short-name",
args: args{
bucket: "a",
},
want: false,
},
{
name: "IsValidBucketName-start-with-hyphen",
args: args{
bucket: "-bucket",
},
want: false,
},
{
name: "IsValidBucketName-start-with-dot",
args: args{
bucket: ".bucket",
},
want: false,
},
{
name: "IsValidBucketName-contain-invalid-character",
args: args{
bucket: "my@bucket",
},
want: false,
},
{
name: "IsValidBucketName-end-with-hyphen",
args: args{
bucket: "bucket-",
},
want: false,
},
{
name: "IsValidBucketName-end-with-dot",
args: args{
bucket: "bucket.",
},
want: false,
},
{
name: "IsValidBucketName-valid-bucket-name",
args: args{
bucket: "my-bucket",
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := IsValidBucketName(tt.args.bucket); got != tt.want {
t.Errorf("IsValidBucketName() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -106,7 +106,9 @@ const (
ErrNotImplemented
ErrPreconditionFailed
ErrInvalidObjectState
ErrInvalidRange
// Non-AWS errors
ErrExistingObjectIsDirectory
ErrObjectParentIsFile
)
@@ -374,6 +376,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The operation is not valid for the current state of the object",
HTTPStatusCode: http.StatusForbidden,
},
ErrInvalidRange: {
Code: "InvalidRange",
Description: "The requested range is not valid for the request. Try another range.",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrExistingObjectIsDirectory: {
Code: "ExistingObjectIsDirectory",
Description: "Existing Object is a directory.",

s3event/event.go (new file, 130 lines)

@@ -0,0 +1,130 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3event
import (
"fmt"
"github.com/gofiber/fiber/v2"
)
type S3EventSender interface {
SendEvent(ctx *fiber.Ctx, meta EventMeta)
}
type EventMeta struct {
BucketOwner string
EventName EventType
ObjectSize int64
ObjectETag *string
VersionId *string
}
type EventFields struct {
Records []EventSchema
}
type EventType string
const (
EventObjectPut EventType = "s3:ObjectCreated:Put"
EventObjectCopy EventType = "s3:ObjectCreated:Copy"
EventCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
EventObjectDelete EventType = "s3:ObjectRemoved:Delete"
EventObjectRestoreCompleted EventType = "s3:ObjectRestore:Completed"
EventObjectTaggingPut EventType = "s3:ObjectTagging:Put"
EventObjectTaggingDelete EventType = "s3:ObjectTagging:Delete"
EventObjectAclPut EventType = "s3:ObjectAcl:Put"
// Not supported
// EventObjectRestorePost EventType = "s3:ObjectRestore:Post"
// EventObjectRestoreDelete EventType = "s3:ObjectRestore:Delete"
)
type EventSchema struct {
EventVersion string `json:"eventVersion"`
EventSource string `json:"eventSource"`
AwsRegion string `json:"awsRegion"`
EventTime string `json:"eventTime"`
EventName EventType `json:"eventName"`
UserIdentity EventUserIdentity `json:"userIdentity"`
RequestParameters EventRequestParams `json:"requestParameters"`
ResponseElements EventResponseElements `json:"responseElements"`
S3 EventS3Data `json:"s3"`
GlacierEventData EventGlacierData `json:"glacierEventData"`
}
type EventUserIdentity struct {
PrincipalId string `json:"PrincipalId"`
}
type EventRequestParams struct {
SourceIPAddress string `json:"sourceIPAddress"`
}
type EventResponseElements struct {
RequestId string `json:"x-amz-request-id"`
HostId string `json:"x-amz-id-2"`
}
type EventS3Data struct {
S3SchemaVersion string `json:"s3SchemaVersion"`
ConfigurationId string `json:"configurationId"`
Bucket EventS3BucketData `json:"bucket"`
Object EventObjectData `json:"object"`
}
type EventGlacierData struct {
RestoreEventData EventRestoreData `json:"restoreEventData"`
}
type EventRestoreData struct {
LifecycleRestorationExpiryTime string `json:"lifecycleRestorationExpiryTime"`
LifecycleRestoreStorageClass string `json:"lifecycleRestoreStorageClass"`
}
type EventS3BucketData struct {
Name string `json:"name"`
OwnerIdentity EventUserIdentity `json:"ownerIdentity"`
Arn string `json:"arn"`
}
type EventObjectData struct {
Key string `json:"key"`
Size int64 `json:"size"`
ETag *string `json:"eTag"`
VersionId *string `json:"versionId"`
Sequencer string `json:"sequencer"`
}
type EventConfig struct {
KafkaURL string
KafkaTopic string
KafkaTopicKey string
NatsURL string
NatsTopic string
}
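// InitEventSender returns the configured event sender: NATS if NatsURL is set,
// Kafka if KafkaURL is set, or nil when event notifications are disabled.
// Configuring both is rejected.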
func InitEventSender(cfg *EventConfig) (S3EventSender, error) {
if cfg.KafkaURL != "" && cfg.NatsURL != "" {
return nil, fmt.Errorf("there should be specified one of the following: kafka, nats")
}
if cfg.NatsURL != "" {
return InitNatsEventService(cfg.NatsURL, cfg.NatsTopic)
}
if cfg.KafkaURL != "" {
return InitKafkaEventService(cfg.KafkaURL, cfg.KafkaTopic, cfg.KafkaTopicKey)
}
return nil, nil
}

s3event/kafka.go (new file, 153 lines)

@@ -0,0 +1,153 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3event
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"sync"
"time"
"github.com/gofiber/fiber/v2"
"github.com/segmentio/kafka-go"
)
var sequencer = 0
type Kafka struct {
key string
writer *kafka.Writer
mu sync.Mutex
}
func InitKafkaEventService(url, topic, key string) (S3EventSender, error) {
if topic == "" {
return nil, fmt.Errorf("kafka message topic should be specified")
}
w := kafka.NewWriter(kafka.WriterConfig{
Brokers: []string{url},
Topic: topic,
Balancer: &kafka.LeastBytes{},
BatchTimeout: 5 * time.Millisecond,
})
msg := map[string]string{
"Service": "S3",
"Event": "s3:TestEvent",
"Time": time.Now().Format(time.RFC3339),
"Bucket": "Test-Bucket",
}
msgJSON, err := json.Marshal(msg)
if err != nil {
return nil, err
}
message := kafka.Message{
Key: []byte(key),
Value: msgJSON,
}
ctx := context.Background()
err = w.WriteMessages(ctx, message)
if err != nil {
return nil, err
}
return &Kafka{
key: key,
writer: w,
}, nil
}
func (ks *Kafka) SendEvent(ctx *fiber.Ctx, meta EventMeta) {
ks.mu.Lock()
defer ks.mu.Unlock()
path := strings.Split(ctx.Path(), "/")
bucket, object := path[1], strings.Join(path[2:], "/")
schema := EventSchema{
EventVersion: "2.2",
EventSource: "aws:s3",
AwsRegion: ctx.Locals("region").(string),
EventTime: time.Now().Format(time.RFC3339),
EventName: meta.EventName,
UserIdentity: EventUserIdentity{
PrincipalId: ctx.Locals("access").(string),
},
RequestParameters: EventRequestParams{
SourceIPAddress: ctx.IP(),
},
ResponseElements: EventResponseElements{
RequestId: ctx.Get("X-Amz-Request-Id"),
HostId: ctx.Get("X-Amz-Id-2"),
},
S3: EventS3Data{
S3SchemaVersion: "1.0",
// This field will come up after implementing per bucket notifications
ConfigurationId: "kafka-global",
Bucket: EventS3BucketData{
Name: bucket,
OwnerIdentity: EventUserIdentity{
PrincipalId: ctx.Locals("access").(string),
},
Arn: fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/")),
},
Object: EventObjectData{
Key: object,
Size: meta.ObjectSize,
ETag: meta.ObjectETag,
VersionId: meta.VersionId,
Sequencer: genSequencer(),
},
},
GlacierEventData: EventGlacierData{
// Not supported
RestoreEventData: EventRestoreData{},
},
}
ks.send([]EventSchema{schema})
}
func (ks *Kafka) send(evnt []EventSchema) {
msg, err := json.Marshal(evnt)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse the event data: %v\n", err.Error())
return
}
message := kafka.Message{
Key: []byte(ks.key),
Value: msg,
}
ctx := context.Background()
err = ks.writer.WriteMessages(ctx, message)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to send kafka event: %v\n", err.Error())
}
}
// genSequencer returns a hex-encoded counter that increases for each event
// sent by this process; it is not persistent across restarts.
func genSequencer() string {
sequencer++
return fmt.Sprintf("%X", sequencer)
}
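A minimal consumer sketch, using the same segmentio/kafka-go library, for inspecting what gets published; the broker address, topic, and group ID below are placeholders:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/segmentio/kafka-go"
)

func main() {
    // Placeholder broker/topic; use the values the gateway was started with.
    r := kafka.NewReader(kafka.ReaderConfig{
        Brokers: []string{"localhost:9092"},
        Topic:   "s3-events",
        GroupID: "event-inspector",
    })
    defer r.Close()

    for {
        m, err := r.ReadMessage(context.Background())
        if err != nil {
            log.Fatalf("read: %v", err)
        }
        // Each message value is a JSON array of EventSchema records.
        fmt.Printf("key=%s value=%s\n", m.Key, m.Value)
    }
}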

s3event/nats.go (new file)

@@ -0,0 +1,112 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3event
import (
"encoding/json"
"fmt"
"os"
"strings"
"sync"
"time"
"github.com/gofiber/fiber/v2"
"github.com/nats-io/nats.go"
)
type NatsEventSender struct {
topic string
client *nats.Conn
mu sync.Mutex
}
func InitNatsEventService(url, topic string) (S3EventSender, error) {
if topic == "" {
return nil, fmt.Errorf("nats message topic should be specified")
}
client, err := nats.Connect(url)
if err != nil {
return nil, err
}
return &NatsEventSender{
topic: topic,
client: client,
}, nil
}
func (ns *NatsEventSender) SendEvent(ctx *fiber.Ctx, meta EventMeta) {
ns.mu.Lock()
defer ns.mu.Unlock()
path := strings.Split(ctx.Path(), "/")
bucket, object := path[1], strings.Join(path[2:], "/")
schema := EventSchema{
EventVersion: "2.2",
EventSource: "aws:s3",
AwsRegion: ctx.Locals("region").(string),
EventTime: time.Now().Format(time.RFC3339),
EventName: meta.EventName,
UserIdentity: EventUserIdentity{
PrincipalId: ctx.Locals("access").(string),
},
RequestParameters: EventRequestParams{
SourceIPAddress: ctx.IP(),
},
ResponseElements: EventResponseElements{
RequestId: ctx.Get("X-Amz-Request-Id"),
HostId: ctx.Get("X-Amz-Id-2"),
},
S3: EventS3Data{
S3SchemaVersion: "1.0",
// This will be populated once per-bucket notification configuration is implemented
ConfigurationId: "nats-global",
Bucket: EventS3BucketData{
Name: bucket,
OwnerIdentity: EventUserIdentity{
PrincipalId: ctx.Locals("access").(string),
},
Arn: fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/")),
},
Object: EventObjectData{
Key: object,
Size: meta.ObjectSize,
ETag: meta.ObjectETag,
VersionId: meta.VersionId,
Sequencer: genSequencer(),
},
},
GlacierEventData: EventGlacierData{
// Not supported
RestoreEventData: EventRestoreData{},
},
}
ns.send([]EventSchema{schema})
}
func (ns *NatsEventSender) send(evnt []EventSchema) {
msg, err := json.Marshal(evnt)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse the event data: %v\n", err.Error())
}
err = ns.client.Publish(ns.topic, msg)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to send nats event: %v\n", err.Error())
}
}
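The NATS side can be inspected with a short subscriber sketch using the same nats.go client; the server URL and subject are placeholders and should match what was passed to InitNatsEventService:

package main

import (
    "fmt"
    "log"

    "github.com/nats-io/nats.go"
)

func main() {
    // Placeholder URL and subject.
    nc, err := nats.Connect("nats://localhost:4222")
    if err != nil {
        log.Fatalf("connect: %v", err)
    }
    defer nc.Drain()

    if _, err := nc.Subscribe("s3-events", func(m *nats.Msg) {
        // Each message carries a JSON array of EventSchema records.
        fmt.Printf("received: %s\n", m.Data)
    }); err != nil {
        log.Fatalf("subscribe: %v", err)
    }
    select {} // keep the subscriber running
}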

s3log/audit-logger.go (new file)

@@ -0,0 +1,112 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3log
import (
"crypto/tls"
"encoding/hex"
"fmt"
"math/rand"
"strings"
"time"
"github.com/gofiber/fiber/v2"
)
type AuditLogger interface {
Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta)
HangUp() error
Shutdown() error
}
type LogMeta struct {
BucketOwner string
ObjectSize int64
Action string
}
type LogConfig struct {
LogFile string
WebhookURL string
}
type LogFields struct {
BucketOwner string
Bucket string
Time time.Time
RemoteIP string
Requester string
RequestID string
Operation string
Key string
RequestURI string
HttpStatus int
ErrorCode string
BytesSent int
ObjectSize int64
TotalTime int64
TurnAroundTime int64
Referer string
UserAgent string
VersionID string
HostID string
SignatureVersion string
CipherSuite string
AuthenticationType string
HostHeader string
TLSVersion string
AccessPointARN string
AclRequired string
}
func InitLogger(cfg *LogConfig) (AuditLogger, error) {
if cfg.WebhookURL != "" && cfg.LogFile != "" {
return nil, fmt.Errorf("only one audit log target may be configured: file or webhook")
}
if cfg.WebhookURL != "" {
return InitWebhookLogger(cfg.WebhookURL)
}
if cfg.LogFile != "" {
return InitFileLogger(cfg.LogFile)
}
return nil, nil
}
func genID() string {
src := rand.New(rand.NewSource(time.Now().UnixNano()))
b := make([]byte, 8)
if _, err := src.Read(b); err != nil {
panic(err)
}
return strings.ToUpper(hex.EncodeToString(b))
}
func getTLSVersionName(version uint16) string {
switch version {
case tls.VersionTLS10:
return "TLSv1.0"
case tls.VersionTLS11:
return "TLSv1.1"
case tls.VersionTLS12:
return "TLSv1.2"
case tls.VersionTLS13:
return "TLSv1.3"
default:
return ""
}
}
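A minimal sketch of choosing an audit log target through InitLogger; the file path is a placeholder, and the config intentionally allows at most one of LogFile or WebhookURL:

package main

import (
    "log"

    "github.com/versity/versitygw/s3log"
)

func main() {
    // Placeholder path; set WebhookURL instead to post entries to an HTTP endpoint.
    logger, err := s3log.InitLogger(&s3log.LogConfig{
        LogFile: "/tmp/versitygw-access.log",
    })
    if err != nil {
        log.Fatalf("init audit logger: %v", err)
    }
    if logger == nil {
        log.Println("no audit log target configured; audit logging disabled")
        return
    }
    defer logger.Shutdown()
    log.Println("audit logger ready")
}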

s3log/file.go (new file)

@@ -0,0 +1,230 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3log
import (
"crypto/tls"
"fmt"
"os"
"strings"
"sync"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3err"
)
const (
logFileMode = 0600
timeFormat = "02/January/2006:15:04:05 -0700"
)
// FileLogger is a local file audit log
type FileLogger struct {
logfile string
f *os.File
gotErr bool
mu sync.Mutex
}
var _ AuditLogger = &FileLogger{}
// InitFileLogger initializes audit logs to local file
func InitFileLogger(logname string) (AuditLogger, error) {
f, err := os.OpenFile(logname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, fmt.Errorf("open log: %w", err)
}
f.WriteString(fmt.Sprintf("log starts %v\n", time.Now()))
return &FileLogger{logfile: logname, f: f}, nil
}
// Log sends log message to file logger
func (f *FileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
f.mu.Lock()
defer f.mu.Unlock()
if f.gotErr {
return
}
lf := LogFields{}
access := "-"
reqURI := ctx.Request().URI().String()
path := strings.Split(ctx.Path(), "/")
bucket, object := path[1], strings.Join(path[2:], "/")
errorCode := ""
httpStatus := 200
startTime := ctx.Locals("startTime").(time.Time)
tlsConnState := ctx.Context().TLSConnectionState()
if tlsConnState != nil {
lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)
lf.TLSVersion = getTLSVersionName(tlsConnState.Version)
}
if err != nil {
serr, ok := err.(s3err.APIError)
if ok {
errorCode = serr.Code
httpStatus = serr.HTTPStatusCode
} else {
errorCode = err.Error()
httpStatus = 500
}
}
switch ctx.Locals("access").(type) {
case string:
access = ctx.Locals("access").(string)
}
lf.BucketOwner = meta.BucketOwner
lf.Bucket = bucket
lf.Time = time.Now()
lf.RemoteIP = ctx.IP()
lf.Requester = access
lf.RequestID = genID()
lf.Operation = meta.Action
lf.Key = object
lf.RequestURI = reqURI
lf.HttpStatus = httpStatus
lf.ErrorCode = errorCode
lf.BytesSent = len(body)
lf.ObjectSize = meta.ObjectSize
lf.TotalTime = time.Since(startTime).Milliseconds()
lf.TurnAroundTime = time.Since(startTime).Milliseconds()
lf.Referer = ctx.Get("Referer")
lf.UserAgent = ctx.Get("User-Agent")
lf.VersionID = ctx.Query("versionId")
lf.HostID = ctx.Get("X-Amz-Id-2")
lf.SignatureVersion = "SigV4"
lf.AuthenticationType = "AuthHeader"
lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", ctx.Locals("region").(string))
lf.AccessPointARN = fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/"))
lf.AclRequired = "Yes"
f.writeLog(lf)
}
func (f *FileLogger) writeLog(lf LogFields) {
if lf.BucketOwner == "" {
lf.BucketOwner = "-"
}
if lf.Bucket == "" {
lf.Bucket = "-"
}
if lf.RemoteIP == "" {
lf.RemoteIP = "-"
}
if lf.Requester == "" {
lf.Requester = "-"
}
if lf.Operation == "" {
lf.Operation = "-"
}
if lf.Key == "" {
lf.Key = "-"
}
if lf.RequestURI == "" {
lf.RequestURI = "-"
}
if lf.ErrorCode == "" {
lf.ErrorCode = "-"
}
if lf.Referer == "" {
lf.Referer = "-"
}
if lf.UserAgent == "" {
lf.UserAgent = "-"
}
if lf.VersionID == "" {
lf.VersionID = "-"
}
if lf.HostID == "" {
lf.HostID = "-"
}
if lf.CipherSuite == "" {
lf.CipherSuite = "-"
}
if lf.HostHeader == "" {
lf.HostHeader = "-"
}
if lf.TLSVersion == "" {
lf.TLSVersion = "-"
}
log := fmt.Sprintf("%v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v\n",
lf.BucketOwner,
lf.Bucket,
fmt.Sprintf("[%v]", lf.Time.Format(timeFormat)),
lf.RemoteIP,
lf.Requester,
lf.RequestID,
lf.Operation,
lf.Key,
lf.RequestURI,
lf.HttpStatus,
lf.ErrorCode,
lf.BytesSent,
lf.ObjectSize,
lf.TotalTime,
lf.TurnAroundTime,
lf.Referer,
lf.UserAgent,
lf.VersionID,
lf.HostID,
lf.SignatureVersion,
lf.CipherSuite,
lf.AuthenticationType,
lf.HostHeader,
lf.TLSVersion,
lf.AccessPointARN,
lf.AclRequired,
)
_, err := f.f.WriteString(log)
if err != nil {
fmt.Fprintf(os.Stderr, "error writing to log file: %v\n", err)
// TODO: do we need to terminate on log error?
// set err for now so that we don't spew errors
f.gotErr = true
}
}
// HangUp closes current logfile handle and opens a new one
// typically needed for log rotations
func (f *FileLogger) HangUp() error {
err := f.f.Close()
if err != nil {
return fmt.Errorf("close log: %w", err)
}
f.f, err = os.OpenFile(f.logfile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return fmt.Errorf("open log: %w", err)
}
f.f.WriteString(fmt.Sprintf("log starts %v\n", time.Now()))
return nil
}
// Shutdown closes logfile handle
func (f *FileLogger) Shutdown() error {
return f.f.Close()
}
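One way a caller might drive HangUp for rotation, assuming an external tool such as logrotate renames the file and then sends SIGHUP (a sketch, not the gateway's actual signal handling):

package main

import (
    "log"
    "os"
    "os/signal"
    "syscall"

    "github.com/versity/versitygw/s3log"
)

func main() {
    logger, err := s3log.InitFileLogger("gateway.log") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    defer logger.Shutdown()

    // In the real gateway, request serving would run concurrently; here we
    // only wait for SIGHUP and reopen the log file each time it arrives.
    hup := make(chan os.Signal, 1)
    signal.Notify(hup, syscall.SIGHUP)
    for range hup {
        if err := logger.HangUp(); err != nil {
            log.Printf("log rotation: %v", err)
        }
    }
}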

s3log/webhook.go (new file)

@@ -0,0 +1,156 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3log
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"net"
"net/http"
"os"
"strings"
"sync"
"time"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3err"
)
// WebhookLogger is a webhook URL audit log
type WebhookLogger struct {
mu sync.Mutex
url string
}
var _ AuditLogger = &WebhookLogger{}
// InitWebhookLogger initializes audit logs to webhook URL
func InitWebhookLogger(url string) (AuditLogger, error) {
client := &http.Client{
Timeout: 3 * time.Second,
}
_, err := client.Post(url, "application/json", nil)
if err != nil {
if err, ok := err.(net.Error); ok && !err.Timeout() {
return nil, fmt.Errorf("unreachable webhook url: %w", err)
}
}
return &WebhookLogger{
url: url,
}, nil
}
// Log sends log message to webhook
func (wl *WebhookLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
wl.mu.Lock()
defer wl.mu.Unlock()
lf := LogFields{}
access := "-"
reqURI := ctx.Request().URI().String()
path := strings.Split(ctx.Path(), "/")
bucket, object := path[1], strings.Join(path[2:], "/")
errorCode := ""
httpStatus := 200
startTime := ctx.Locals("startTime").(time.Time)
tlsConnState := ctx.Context().TLSConnectionState()
if tlsConnState != nil {
lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)
lf.TLSVersion = getTLSVersionName(tlsConnState.Version)
}
if err != nil {
serr, ok := err.(s3err.APIError)
if ok {
errorCode = serr.Code
httpStatus = serr.HTTPStatusCode
} else {
errorCode = err.Error()
httpStatus = 500
}
}
switch ctx.Locals("access").(type) {
case string:
access = ctx.Locals("access").(string)
}
lf.BucketOwner = meta.BucketOwner
lf.Bucket = bucket
lf.Time = time.Now()
lf.RemoteIP = ctx.IP()
lf.Requester = access
lf.RequestID = genID()
lf.Operation = meta.Action
lf.Key = object
lf.RequestURI = reqURI
lf.HttpStatus = httpStatus
lf.ErrorCode = errorCode
lf.BytesSent = len(body)
lf.ObjectSize = meta.ObjectSize
lf.TotalTime = time.Since(startTime).Milliseconds()
lf.TurnAroundTime = time.Since(startTime).Milliseconds()
lf.Referer = ctx.Get("Referer")
lf.UserAgent = ctx.Get("User-Agent")
lf.VersionID = ctx.Query("versionId")
lf.HostID = ctx.Get("X-Amz-Id-2")
lf.SignatureVersion = "SigV4"
lf.AuthenticationType = "AuthHeader"
lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", ctx.Locals("region").(string))
lf.AccessPointARN = fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/"))
lf.AclRequired = "Yes"
wl.sendLog(lf)
}
func (wl *WebhookLogger) sendLog(lf LogFields) {
jsonLog, err := json.Marshal(lf)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse the log data: %v\n", err.Error())
}
req, err := http.NewRequest(http.MethodPost, wl.url, bytes.NewReader(jsonLog))
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
req.Header.Set("Content-Type", "application/json; charset=utf-8")
go makeRequest(req)
}
func makeRequest(req *http.Request) {
client := &http.Client{
Timeout: 1 * time.Second,
}
_, err := client.Do(req)
if err != nil {
if err, ok := err.(net.Error); ok && !err.Timeout() {
fmt.Fprintf(os.Stderr, "error sending webhook log: %v\n", err)
}
}
}
// HangUp does nothing for webhooks
func (wl *WebhookLogger) HangUp() error {
return nil
}
// Shutdown does nothing for webhooks
func (wl *WebhookLogger) Shutdown() error {
return nil
}
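On the receiving side, a webhook endpoint only needs to accept one JSON-encoded LogFields object per request; a minimal receiver sketch (listen address and path are placeholders, and only a few fields are decoded):

package main

import (
    "encoding/json"
    "log"
    "net/http"
)

// entry picks out a few LogFields members; the names match the JSON keys
// produced by the gateway (Go field names, since LogFields has no json tags).
type entry struct {
    Bucket     string
    Operation  string
    HttpStatus int
    ErrorCode  string
}

func main() {
    http.HandleFunc("/audit", func(w http.ResponseWriter, r *http.Request) {
        var e entry
        if err := json.NewDecoder(r.Body).Decode(&e); err != nil {
            // The initial reachability probe posts an empty body and lands here.
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        log.Printf("%s %s -> %d %s", e.Operation, e.Bucket, e.HttpStatus, e.ErrorCode)
    })
    log.Fatal(http.ListenAndServe(":8080", nil))
}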

s3response (modified file)

@@ -16,6 +16,8 @@ package s3response
import (
"encoding/xml"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
// Part describes part metadata.
@@ -27,7 +29,7 @@ type Part struct {
}
// ListPartsResponse - s3 api list parts response.
-type ListPartsResponse struct {
+type ListPartsResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`
Bucket string
@@ -50,7 +52,7 @@ type ListPartsResponse struct {
}
// ListMultipartUploadsResponse - s3 api list multipart uploads response.
-type ListMultipartUploadsResponse struct {
+type ListMultipartUploadsResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"`
Bucket string
@@ -94,3 +96,41 @@ type Owner struct {
ID string
DisplayName string
}
type Tag struct {
Key string `xml:"Key"`
Value string `xml:"Value"`
}
type TagSet struct {
Tags []Tag `xml:"Tag"`
}
type Tagging struct {
TagSet TagSet `xml:"TagSet"`
}
type DeleteObjects struct {
Objects []types.ObjectIdentifier `xml:"Object"`
}
type DeleteObjectsResult struct {
Deleted []types.DeletedObject
Errors []types.Error
}
type SelectObjectContentPayload struct {
Expression *string
ExpressionType types.ExpressionType
RequestProgress *types.RequestProgress
InputSerialization *types.InputSerialization
OutputSerialization *types.OutputSerialization
ScanRange *types.ScanRange
}
type SelectObjectContentResult struct {
Records *types.RecordsEvent
Stats *types.StatsEvent
Progress *types.ProgressEvent
Cont *string
End *string
}
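A quick check of the XML shape the new tagging types marshal to (the key/value pair below is arbitrary):

package main

import (
    "encoding/xml"
    "fmt"
    "log"

    "github.com/versity/versitygw/s3response"
)

func main() {
    t := s3response.Tagging{
        TagSet: s3response.TagSet{
            Tags: []s3response.Tag{{Key: "env", Value: "prod"}},
        },
    }
    out, err := xml.MarshalIndent(t, "", "  ")
    if err != nil {
        log.Fatal(err)
    }
    // Expected to print roughly:
    // <Tagging>
    //   <TagSet>
    //     <Tag>
    //       <Key>env</Key>
    //       <Value>prod</Value>
    //     </Tag>
    //   </TagSet>
    // </Tagging>
    fmt.Println(string(out))
}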