Mirror of https://github.com/versity/versitygw.git, synced 2026-01-25 20:42:02 +00:00.

Compare commits (133 commits)
The comparison includes 133 commits, identified here by abbreviated SHA1:

2ef5578baf, 473ff0f4d5, 08c0118839, 6ab4090216, 3360466b5e, 8d2e2a4106, d320c953d3,
ef92f57e7d, 17651fc139, 7620651a49, 4c7584c99f, fc4780020b, df81ead6bc, d7148105be,
2bcfa0e01b, d80580380d, 4d50d970ea, cb2f6a87aa, 3d129789e0, 07e0372531, 49e70f9385,
53cf4f342f, a58ce0c238, 3573a31ae6, 9dafc0e73b, d058dcb898, 07a8efe4d3, e1f8cbc346,
05d6e618b2, e8b06a72f9, c389e1b28c, a2439264b2, 56f452f1a2, a05179b14f, 22227c875a,
da99990225, cb9a7853f9, da3ad55483, 2cc0c7203c, a325dd6834, 7814979efa, 059507deae,
7d8a795e95, 1d662e93c5, cc0316aa99, cc28535618, bc131d5f99, 13ce76ba21, 67fc857cdd,
dde13ddc9a, 34830954c3, 77a4a9e3a5, 25b02dc8fa, 009ceee748, af69adf080, 97847735c8,
ac9aa25ff1, 091375fa00, f1e22b0a4d, 3f8c218431, 70818de594, 366ed21ede, b96da570a7,
898c3efaa0, 838a7f9ef9, bf33b9f5a2, 77080328c1, b0259ae1de, 884fd029c3, 36eb6d795f,
7de01cc983, 7fb2a7f9ba, 5a9b744dd1, 5b31a7bafc, ee703479d0, bedd353d72, 84fe647b81,
1649c5cafd, 4c451a4822, 287db7a7b6, c598ee5416, 7c08ea44a6, e73d661de1, 2291c22eaa,
51e818b3e3, daa4aa1510, 8765a6c67f, c5a7b5aae1, 2ae39c3ee8, d0b3139640, 7bceaaca39,
6f0f527e5f, fe547a19e9, df7f01f7e2, 5aeb96f138, ef1de682a4, 87d61a1eb3, 18899f8029,
ca28792458, 8c469cbd69, ff4bf23b6b, 38ddbc4712, cb193c42b4, fbafc6b34c, d26b8856c1,
23f738f37f, a10729b3ff, 0330685c5c, 47dea2db7c, db484eb900, 140d41de40, 39803cb158,
9c858b0396, f63545c9b7, 2894d4d5f3, 46097fbf70, 9db01362a0, fbd7bce530, 7e34078d6a,
3c69c6922a, 08db927634, 6d99c69953, 4bfb3d84d3, 30dbd02a83, f8afeec0a0, 45e3c0922d,
a3f95520a8, c45280b7db, 77b0759f86, 1da0c1ceba, 1d476c6d4d, c4f5f958eb, f84cfe58e7
.github/ISSUE_TEMPLATE/bug_report.md (new file, 27 lines)
@@ -0,0 +1,27 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior.

**Expected behavior**
A clear and concise description of what you expected to happen.

**Server Version**
output of
```
./versitygw -version
uname -a
```

**Additional context**
Describe s3 client and version if applicable.
.github/ISSUE_TEMPLATE/feature_request.md (new file, 14 lines)
@@ -0,0 +1,14 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Additional context**
Add any other context or screenshots about the feature request here.
.github/workflows/functional.yml (new file, 30 lines)
@@ -0,0 +1,30 @@
name: functional tests
on: pull_request
jobs:

  build:
    name: RunTests
    runs-on: ubuntu-latest
    steps:

      - name: Check out code into the Go module directory
        uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: 'stable'
        id: go

      - name: Get Dependencies
        run: |
          go get -v -t -d ./...

      - name: Build and Run
        run: |
          make testbin
          ./runtests.sh

      - name: Coverage Report
        run: |
          go tool covdata percent -i=/tmp/covdata
.github/workflows/go.yml (10 lines changed)
@@ -7,15 +7,15 @@ jobs:
    runs-on: ubuntu-latest
    steps:

    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v4
      with:
        go-version: 'stable'
      id: go

    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Verify all files pass gofmt formatting
      run: if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then gofmt -s -d .; exit 1; fi

@@ -24,10 +24,10 @@ jobs:
        go get -v -t -d ./...

    - name: Build
      run: go build -o versitygw cmd/versitygw/*.go
      run: make

    - name: Test
      run: go test -v -timeout 30s -tags=github ./...
      run: go test -coverprofile profile.txt -race -v -timeout 30s -tags=github ./...

    - name: Install govulncheck
      run: go install golang.org/x/vuln/cmd/govulncheck@latest
.github/workflows/goreleaser.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
name: goreleaser

on:
  push:
    # run only against tags
    tags:
      - '*'

permissions:
  contents: write
  # packages: write
  # issues: write

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - run: git fetch --force --tags
      - uses: actions/setup-go@v4
        with:
          go-version: stable
      - uses: goreleaser/goreleaser-action@v4
        with:
          distribution: goreleaser
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.TOKEN }}
.github/workflows/static.yml (31 lines changed)
@@ -1,23 +1,22 @@
name: staticcheck
on: pull_request
jobs:

  build:
    name: Check
    runs-on: ubuntu-latest
    steps:
    -
      name: Set up Go
      uses: actions/setup-go@v2
      with:
        go-version: "1.20"
      id: go
    -
      name: "Set up repo"
      uses: actions/checkout@v1
      with:
        fetch-depth: 1
    -
      name: "staticcheck"
      uses: dominikh/staticcheck-action@v1.3.0
      with:
        install-go: false

    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v4
      with:
        go-version: 'stable'
      id: go

    - name: "staticcheck"
      uses: dominikh/staticcheck-action@v1.3.0
      with:
        install-go: false
.gitignore (4 lines changed)
@@ -32,3 +32,7 @@ VERSION
/versitygw.spec
*.tar
*.tar.gz
**/rand.data
/profile.txt

dist/
.goreleaser.yaml (new file, 50 lines)
@@ -0,0 +1,50 @@
before:
  hooks:
    - go mod tidy

builds:
  - goos:
      - linux
      - darwin
      # windows is untested, we can start doing windows releases
      # if someone is interested in taking on testing
      # - windows
    main: ./cmd/versitygw
    binary: ./cmd/versitygw
    id: versitygw
    goarch:
      - amd64
      - arm64
    ldflags:
      - -X=main.Build={{.Commit}} -X=main.BuildTime={{.Date}} -X=main.Version={{.Version}}

archives:
  - format: tar.gz
    # this name template makes the OS and Arch compatible with the results of uname.
    name_template: >-
      {{ .ProjectName }}_
      {{- title .Os }}_
      {{- if eq .Arch "amd64" }}x86_64
      {{- else if eq .Arch "386" }}i386
      {{- else }}{{ .Arch }}{{ end }}
      {{- if .Arm }}v{{ .Arm }}{{ end }}
    # use zip for windows archives
    format_overrides:
      - goos: windows
        format: zip

checksum:
  name_template: 'checksums.txt'

snapshot:
  name_template: "{{ incpatch .Version }}-next"

changelog:
  sort: asc
  filters:
    exclude:
      - '^docs:'
      - '^test:'

# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
Makefile (3 lines changed)
@@ -34,6 +34,9 @@ build: $(BIN)
$(BIN):
	$(GOBUILD) $(LDFLAGS) -o $(BIN) cmd/$(BIN)/*.go

testbin:
	$(GOBUILD) $(LDFLAGS) -o $(BIN) -cover -race cmd/$(BIN)/*.go

.PHONY: test
test:
	$(GOTEST) ./...
README.md (12 lines changed)
@@ -1,4 +1,4 @@
# The Versity Gateway: A High-Performance Open Source S3 to File Translation Tool
# The Versity S3 Gateway:<br/>A High-Performance S3 Translation Service

<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/versity/versitygw/blob/assets/assets/logo-white.svg">
@@ -8,13 +8,15 @@

[](https://github.com/versity/versitygw/blob/main/LICENSE)

The Versity Gateway: A High-Performance Open Source S3 to File Translation Tool

Current status: Alpha, in development not yet suited for production use
**Current status:** Beta: Most clients functional, work in progress for more test coverage. Issue reports welcome.

See project [documentation](https://github.com/versity/versitygw/wiki) on the wiki.

Versity Gateway, a simple to use tool for seamless inline translation between AWS S3 object commands and file-based storage systems. The Versity Gateway bridges the gap between S3-reliant applications and file storage systems, enabling enhanced compatibility and integration with file based systems while offering exceptional scalability.
* Share filesystem directory via S3 protocol
* Simple to deploy S3 server with a single command
* Protocol compatibility allows common access to files via posix or S3

Versity Gateway, a simple to use tool for seamless inline translation between AWS S3 object commands and storage systems. The Versity Gateway bridges the gap between S3-reliant applications and other storage systems, enabling enhanced compatibility and integration while offering exceptional scalability.

The server translates incoming S3 API requests and transforms them into equivalent operations to the backend service. By leveraging this gateway server, applications can interact with the S3-compatible API on top of already existing storage systems. This project enables leveraging existing infrastructure investments while seamlessly integrating with S3-compatible systems, offering increased flexibility and compatibility in managing data storage.
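Because the gateway presents a standard S3 endpoint, any stock S3 SDK can be pointed at it. A minimal sketch only, not taken from this changeset: the listen address, port, and credentials below are placeholders for whatever the gateway was actually started with, and s3.Options.BaseEndpoint requires a reasonably current aws-sdk-go-v2.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Placeholder root credentials; substitute the account the gateway knows about.
	creds := credentials.NewStaticCredentialsProvider("rootaccess", "rootsecret", "")

	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-east-1"),
		config.WithCredentialsProvider(creds),
	)
	if err != nil {
		log.Fatal(err)
	}

	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("http://localhost:7070") // the gateway, not AWS
		o.UsePathStyle = true                                // avoid virtual-host bucket addressing
	})

	out, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(*b.Name)
	}
}
```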
auth/acl.go (133 lines changed)
@@ -42,7 +42,11 @@ type GetBucketAclOutput struct {
}

type AccessControlList struct {
Grants []types.Grant
Grants []types.Grant `xml:"Grant"`
}
type AccessControlPolicy struct {
AccessControlList AccessControlList `xml:"AccessControlList"`
Owner types.Owner
}

func ParseACL(data []byte) (ACL, error) {
@@ -80,72 +84,91 @@ func ParseACLOutput(data []byte) (GetBucketAclOutput, error) {
}, nil
}

func UpdateACL(input *s3.PutBucketAclInput, acl ACL, iam IAMService) error {
func UpdateACL(input *s3.PutBucketAclInput, acl ACL, iam IAMService) ([]byte, error) {
if input == nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
if acl.Owner != *input.AccessControlPolicy.Owner.ID {
return s3err.GetAPIError(s3err.ErrAccessDenied)
return nil, s3err.GetAPIError(s3err.ErrAccessDenied)
}

// if the ACL is specified, set the ACL, else replace the grantees
if input.ACL != "" {
acl.ACL = input.ACL
acl.Grantees = []Grantee{}
return nil
} else {
grantees := []Grantee{}
accs := []string{}

if input.GrantRead != nil {
fullControlList, readList, readACPList, writeList, writeACPList := []string{}, []string{}, []string{}, []string{}, []string{}

if *input.GrantFullControl != "" {
fullControlList = splitUnique(*input.GrantFullControl, ",")
fmt.Println(fullControlList)
for _, str := range fullControlList {
grantees = append(grantees, Grantee{Access: str, Permission: "FULL_CONTROL"})
}
}
if *input.GrantRead != "" {
readList = splitUnique(*input.GrantRead, ",")
for _, str := range readList {
grantees = append(grantees, Grantee{Access: str, Permission: "READ"})
}
}
if *input.GrantReadACP != "" {
readACPList = splitUnique(*input.GrantReadACP, ",")
for _, str := range readACPList {
grantees = append(grantees, Grantee{Access: str, Permission: "READ_ACP"})
}
}
if *input.GrantWrite != "" {
writeList = splitUnique(*input.GrantWrite, ",")
for _, str := range writeList {
grantees = append(grantees, Grantee{Access: str, Permission: "WRITE"})
}
}
if *input.GrantWriteACP != "" {
writeACPList = splitUnique(*input.GrantWriteACP, ",")
for _, str := range writeACPList {
grantees = append(grantees, Grantee{Access: str, Permission: "WRITE_ACP"})
}
}

accs = append(append(append(append(fullControlList, readList...), writeACPList...), readACPList...), writeList...)
} else {
cache := make(map[string]bool)
for _, grt := range input.AccessControlPolicy.Grants {
grantees = append(grantees, Grantee{Access: *grt.Grantee.ID, Permission: grt.Permission})
if _, ok := cache[*grt.Grantee.ID]; !ok {
cache[*grt.Grantee.ID] = true
accs = append(accs, *grt.Grantee.ID)
}
}
}

// Check if the specified accounts exist
accList, err := CheckIfAccountsExist(accs, iam)
if err != nil {
return nil, err
}
if len(accList) > 0 {
return nil, fmt.Errorf("accounts does not exist: %s", strings.Join(accList, ", "))
}

acl.Grantees = grantees
acl.ACL = ""
}

grantees := []Grantee{}

fullControlList, readList, readACPList, writeList, writeACPList := []string{}, []string{}, []string{}, []string{}, []string{}

if *input.GrantFullControl != "" {
fullControlList = splitUnique(*input.GrantFullControl, ",")
fmt.Println(fullControlList)
for _, str := range fullControlList {
grantees = append(grantees, Grantee{Access: str, Permission: "FULL_CONTROL"})
}
}
if *input.GrantRead != "" {
readList = splitUnique(*input.GrantRead, ",")
for _, str := range readList {
grantees = append(grantees, Grantee{Access: str, Permission: "READ"})
}
}
if *input.GrantReadACP != "" {
readACPList = splitUnique(*input.GrantReadACP, ",")
for _, str := range readACPList {
grantees = append(grantees, Grantee{Access: str, Permission: "READ_ACP"})
}
}
if *input.GrantWrite != "" {
writeList = splitUnique(*input.GrantWrite, ",")
for _, str := range writeList {
grantees = append(grantees, Grantee{Access: str, Permission: "WRITE"})
}
}
if *input.GrantWriteACP != "" {
writeACPList = splitUnique(*input.GrantWriteACP, ",")
for _, str := range writeACPList {
grantees = append(grantees, Grantee{Access: str, Permission: "WRITE_ACP"})
}
}

accs := append(append(append(append(fullControlList, readList...), writeACPList...), readACPList...), writeList...)

// Check if the specified accounts exist
accList, err := checkIfAccountsExist(accs, iam)
result, err := json.Marshal(acl)
if err != nil {
return err
}
if len(accList) > 0 {
return fmt.Errorf("accounts does not exist: %s", strings.Join(accList, ", "))
return nil, err
}

acl.Grantees = grantees
acl.ACL = ""

return nil
return result, nil
}

func checkIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
func CheckIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
result := []string{}

for _, acc := range accs {
@@ -153,7 +176,7 @@ func checkIfAccountsExist(accs []string, iam IAMService) ([]string, error) {
if err != nil && err != ErrNoSuchUser {
return nil, fmt.Errorf("check user account: %w", err)
}
if err == nil {
if err == ErrNoSuchUser {
result = append(result, acc)
}
}
@@ -238,5 +261,5 @@ func IsAdmin(access string, isRoot bool) error {
if acc.Role == "admin" {
return nil
}
return fmt.Errorf("only admin users have access to this resource")
return s3err.GetAPIError(s3err.ErrAccessDenied)
}
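With this change, UpdateACL validates every requested grantee against the IAM service and returns the serialized ACL instead of only mutating its argument. A caller-side sketch, assuming these exported names live in the auth package as the file path suggests and that some IAMService implementation is available; none of the literal values below come from the changeset.

```go
package aclsketch

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/auth"
)

// updateBucketACL sketches the call shape only; iam is any auth.IAMService.
func updateBucketACL(iam auth.IAMService) {
	input := &s3.PutBucketAclInput{
		Bucket: aws.String("mybucket"),
		AccessControlPolicy: &types.AccessControlPolicy{
			Owner: &types.Owner{ID: aws.String("rootaccess")},
		},
		// the grant-header branch dereferences every Grant* field,
		// so all of them are set here even when empty
		GrantFullControl: aws.String(""),
		GrantRead:        aws.String("alice,bob"), // becomes two READ grantees
		GrantReadACP:     aws.String(""),
		GrantWrite:       aws.String(""),
		GrantWriteACP:    aws.String(""),
	}

	acl := auth.ACL{Owner: "rootaccess"} // must match the owner in the request

	data, err := auth.UpdateACL(input, acl, iam)
	if err != nil {
		// reported when a listed account is unknown to the IAM service,
		// among other failures
		log.Fatal(err)
	}
	fmt.Printf("serialized ACL: %s\n", data) // JSON bytes to hand to the backend's PutBucketAcl
}
```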
@@ -20,15 +20,19 @@ import (

// Account is a gateway IAM account
type Account struct {
Access string `json:"access"`
Secret string `json:"secret"`
Role string `json:"role"`
}

// IAMService is the interface for all IAM service implementations
//
//go:generate moq -out ../s3api/controllers/iam_moq_test.go -pkg controllers . IAMService
type IAMService interface {
CreateAccount(access string, account Account) error
GetUserAccount(access string) (Account, error)
DeleteUserAccount(access string) error
ListUserAccounts() ([]Account, error)
}

var ErrNoSuchUser = errors.New("user not found")

@@ -76,7 +76,7 @@ func (s *IAMServiceInternal) CreateAccount(access string, account Account) error
return nil, fmt.Errorf("failed to parse iam: %w", err)
}
} else {
conf.AccessAccounts = make(map[string]Account)
conf = IAMConfig{AccessAccounts: map[string]Account{}}
}

_, ok := conf.AccessAccounts[access]
@@ -85,10 +85,11 @@ func (s *IAMServiceInternal) CreateAccount(access string, account Account) error
}
conf.AccessAccounts[access] = account

b, err := json.Marshal(s.accts)
b, err := json.Marshal(conf)
if err != nil {
return nil, fmt.Errorf("failed to serialize iam: %w", err)
}
s.accts = conf

return b, nil
})
@@ -168,11 +169,44 @@ func (s *IAMServiceInternal) DeleteUserAccount(access string) error {

delete(conf.AccessAccounts, access)

b, err := json.Marshal(s.accts)
b, err := json.Marshal(conf)
if err != nil {
return nil, fmt.Errorf("failed to serialize iam: %w", err)
}

s.accts = conf

return b, nil
})
}

// ListUserAccounts lists all the user accounts stored.
func (s *IAMServiceInternal) ListUserAccounts() (accs []Account, err error) {
s.mu.RLock()
defer s.mu.RUnlock()

data, err := s.storer.GetIAM()
if err != nil {
return []Account{}, fmt.Errorf("get iam data: %w", err)
}

serial := crc32.ChecksumIEEE(data)
if serial != s.serial {
s.mu.RUnlock()
err := s.updateCache()
s.mu.RLock()
if err != nil {
return []Account{}, fmt.Errorf("refresh iam cache: %w", err)
}
}

for access, usr := range s.accts.AccessAccounts {
accs = append(accs, Account{
Access: access,
Secret: usr.Secret,
Role: usr.Role,
})
}

return accs, nil
}
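The IAMService interface above is small enough that test or special-purpose implementations are easy to write. A minimal in-memory sketch, not part of the changeset; package and type names are invented here.

```go
package iamsketch

import (
	"sync"

	"github.com/versity/versitygw/auth"
)

// MemIAM is a map-backed account store guarded by a mutex.
type MemIAM struct {
	mu    sync.RWMutex
	accts map[string]auth.Account
}

func NewMemIAM() *MemIAM {
	return &MemIAM{accts: make(map[string]auth.Account)}
}

func (m *MemIAM) CreateAccount(access string, account auth.Account) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.accts[access] = account
	return nil
}

func (m *MemIAM) GetUserAccount(access string) (auth.Account, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	acct, ok := m.accts[access]
	if !ok {
		// the auth package expects this sentinel for unknown users
		return auth.Account{}, auth.ErrNoSuchUser
	}
	return acct, nil
}

func (m *MemIAM) DeleteUserAccount(access string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.accts, access)
	return nil
}

func (m *MemIAM) ListUserAccounts() ([]auth.Account, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	accs := make([]auth.Account, 0, len(m.accts))
	for access, a := range m.accts {
		a.Access = access
		accs = append(accs, a)
	}
	return accs, nil
}
```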
@@ -15,53 +15,61 @@
package backend

import (
"context"
"fmt"
"io"

"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)

//go:generate moq -out backend_moq_test.go . Backend
//go:generate moq -out ../s3api/controllers/backend_moq_test.go -pkg controllers . Backend
type Backend interface {
fmt.Stringer
Shutdown()

ListBuckets() (s3response.ListAllMyBucketsResult, error)
HeadBucket(bucket string) (*s3.HeadBucketOutput, error)
GetBucketAcl(bucket string) ([]byte, error)
PutBucket(bucket, owner string) error
PutBucketAcl(bucket string, data []byte) error
DeleteBucket(bucket string) error
// bucket operations
ListBuckets(_ context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error)
HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
GetBucketAcl(context.Context, *s3.GetBucketAclInput) ([]byte, error)
CreateBucket(context.Context, *s3.CreateBucketInput) error
PutBucketAcl(_ context.Context, bucket string, data []byte) error
DeleteBucket(context.Context, *s3.DeleteBucketInput) error

CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(*s3.AbortMultipartUploadInput) error
ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error)
PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error)
UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
// multipart operations
CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error
ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error)
ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error)
UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error)
UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)

PutObject(*s3.PutObjectInput) (string, error)
HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error)
GetObjectAcl(bucket, object string) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error)
CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error)
ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error)
ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error)
DeleteObject(bucket, object string) error
DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error
PutObjectAcl(*s3.PutObjectAclInput) error
RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error
// standard object operations
PutObject(context.Context, *s3.PutObjectInput) (string, error)
HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
GetObject(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error)
GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error)
CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
ListObjects(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
DeleteObject(context.Context, *s3.DeleteObjectInput) error
DeleteObjects(context.Context, *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error)
PutObjectAcl(context.Context, *s3.PutObjectAclInput) error

GetTags(bucket, object string) (map[string]string, error)
SetTags(bucket, object string, tags map[string]string) error
RemoveTags(bucket, object string) error
// special case object operations
RestoreObject(context.Context, *s3.RestoreObjectInput) error
SelectObjectContent(context.Context, *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error)

// object tags operations
GetTags(_ context.Context, bucket, object string) (map[string]string, error)
SetTags(_ context.Context, bucket, object string, tags map[string]string) error
RemoveTags(_ context.Context, bucket, object string) error

// non AWS actions
ChangeBucketOwner(_ context.Context, bucket, newOwner string) error
}

type BackendUnsupported struct{}
@@ -75,93 +83,98 @@ func (BackendUnsupported) Shutdown() {}

func (BackendUnsupported) String() string {
return "Unsupported"
}
func (BackendUnsupported) ListBuckets() (s3response.ListAllMyBucketsResult, error) {
func (BackendUnsupported) ListBuckets(context.Context, string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketAcl(bucket string, data []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectAcl(*s3.PutObjectAclInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) {
func (BackendUnsupported) HeadBucket(context.Context, *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketAcl(bucket string) ([]byte, error) {
func (BackendUnsupported) GetBucketAcl(context.Context, *s3.GetBucketAclInput) ([]byte, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucket(bucket, owner string) error {
func (BackendUnsupported) CreateBucket(context.Context, *s3.CreateBucketInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucket(bucket string) error {
func (BackendUnsupported) PutBucketAcl(_ context.Context, bucket string, data []byte) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucket(context.Context, *s3.DeleteBucketInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) CreateMultipartUpload(input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
func (BackendUnsupported) CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
func (BackendUnsupported) CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) error {
func (BackendUnsupported) AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
return s3response.ListMultipartUploadsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) ListMultipartUploads(context.Context, *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
return s3response.ListMultipartUploadsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
return s3response.ListPartsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) ListParts(context.Context, *s3.ListPartsInput) (s3response.ListPartsResult, error) {
return s3response.ListPartsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error) {
func (BackendUnsupported) UploadPart(context.Context, *s3.UploadPartInput) (etag string, err error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPartCopy(context.Context, *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) PutObject(*s3.PutObjectInput) (string, error) {
func (BackendUnsupported) PutObject(context.Context, *s3.PutObjectInput) (string, error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObject(bucket, object string) error {
func (BackendUnsupported) HeadObject(context.Context, *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObject(context.Context, *s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAcl(context.Context, *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAttributes(context.Context, *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(context.Context, *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(context.Context, *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectsV2(context.Context, *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObject(context.Context, *s3.DeleteObjectInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error {
func (BackendUnsupported) DeleteObjects(context.Context, *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error) {
return s3response.DeleteObjectsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectAcl(context.Context, *s3.PutObjectAclInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAcl(bucket, object string) (*s3.GetObjectAclOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAttributes(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) GetTags(bucket, object string) (map[string]string, error) {
func (BackendUnsupported) RestoreObject(context.Context, *s3.RestoreObjectInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) SelectObjectContent(context.Context, *s3.SelectObjectContentInput) (s3response.SelectObjectContentResult, error) {
return s3response.SelectObjectContentResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) GetTags(_ context.Context, bucket, object string) (map[string]string, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) SetTags(bucket, object string, tags map[string]string) error {
func (BackendUnsupported) SetTags(_ context.Context, bucket, object string, tags map[string]string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) RemoveTags(bucket, object string) error {
func (BackendUnsupported) RemoveTags(_ context.Context, bucket, object string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) ChangeBucketOwner(_ context.Context, bucket, newOwner string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
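Because BackendUnsupported answers every call with ErrNotImplemented, a new backend can embed it and override only the operations it actually supports. A small sketch under that assumption; the backend name and bucket data below are invented, not part of the changeset.

```go
package mybackend

import (
	"context"

	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3response"
)

// ListOnly supports just ListBuckets; every other S3 action falls through
// to the embedded BackendUnsupported and returns ErrNotImplemented.
type ListOnly struct {
	backend.BackendUnsupported
}

func (ListOnly) String() string { return "ListOnly Gateway" }

func (ListOnly) ListBuckets(_ context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
	return s3response.ListAllMyBucketsResult{
		Buckets: s3response.ListAllMyBucketsList{
			Bucket: []s3response.ListAllMyBucketsEntry{{Name: "example"}},
		},
		Owner: s3response.CanonicalUser{ID: owner},
	}, nil
}

// compile-time check that ListOnly still satisfies the full Backend interface
var _ backend.Backend = ListOnly{}
```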
(File diff suppressed because it is too large.)
@@ -1,222 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package backend

import (
"context"
"testing"

"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)

func TestBackend_ListBuckets(t *testing.T) {
type args struct {
ctx context.Context
}
type test struct {
name string
c Backend
args args
wantErr bool
}
var tests []test
tests = append(tests, test{
name: "list-Bucket",
c: &BackendMock{
ListBucketsFunc: func() (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{
Buckets: s3response.ListAllMyBucketsList{
Bucket: []s3response.ListAllMyBucketsEntry{
{
Name: "t1",
},
},
},
}, s3err.GetAPIError(0)
},
},
args: args{
ctx: context.Background(),
},
wantErr: false,
}, test{
name: "list-Bucket-error",
c: &BackendMock{
ListBucketsFunc: func() (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
args: args{
ctx: context.Background(),
},
wantErr: true,
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if _, err := tt.c.ListBuckets(); (err.(s3err.APIError).Code != "") != tt.wantErr {
t.Errorf("Backend.ListBuckets() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBackend_HeadBucket(t *testing.T) {
type args struct {
ctx context.Context
BucketName string
}
type test struct {
name string
c Backend
args args
wantErr bool
}
var tests []test
tests = append(tests, test{
name: "head-buckets-error",
c: &BackendMock{
HeadBucketFunc: func(bucket string) (*s3.HeadBucketOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
args: args{
ctx: context.Background(),
BucketName: "b1",
},
wantErr: true,
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if _, err := tt.c.HeadBucket(tt.args.BucketName); (err.(s3err.APIError).Code != "") != tt.wantErr {
t.Errorf("Backend.HeadBucket() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBackend_GetBucketAcl(t *testing.T) {
type args struct {
ctx context.Context
bucketName string
}
type test struct {
name string
c Backend
args args
wantErr bool
}
var tests []test
tests = append(tests, test{
name: "get bucket acl error",
c: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
args: args{
ctx: context.Background(),
bucketName: "b1",
},
wantErr: true,
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if _, err := tt.c.GetBucketAcl(tt.args.bucketName); (err.(s3err.APIError).Code != "") != tt.wantErr {
t.Errorf("Backend.GetBucketAcl() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBackend_PutBucket(t *testing.T) {
type args struct {
ctx context.Context
bucketName string
bucketOwner string
}
type test struct {
name string
c Backend
args args
wantErr bool
}
var tests []test
tests = append(tests, test{
name: "put bucket ",
c: &BackendMock{
PutBucketFunc: func(bucket, owner string) error {
return s3err.GetAPIError(0)
},
},
args: args{
ctx: context.Background(),
bucketName: "b1",
bucketOwner: "owner",
},
wantErr: false,
}, test{
name: "put bucket error",
c: &BackendMock{
PutBucketFunc: func(bucket, owner string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
args: args{
ctx: context.Background(),
bucketName: "b2",
bucketOwner: "owner",
},
wantErr: true,
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := tt.c.PutBucket(tt.args.bucketName, tt.args.bucketOwner); (err.(s3err.APIError).Code != "") != tt.wantErr {
t.Errorf("Backend.PutBucket() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBackend_DeleteBucket(t *testing.T) {
type args struct {
ctx context.Context
bucketName string
}
type test struct {
name string
c Backend
args args
wantErr bool
}
var tests []test
tests = append(tests, test{
name: "Delete Bucket Error",
c: &BackendMock{
DeleteBucketFunc: func(bucket string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
args: args{
ctx: context.Background(),
bucketName: "b1",
},
wantErr: true,
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := tt.c.DeleteBucket(tt.args.bucketName); (err.(s3err.APIError).Code != "") != tt.wantErr {
t.Errorf("Backend.DeleteBucket() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
@@ -17,7 +17,6 @@ package backend
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io/fs"
"strconv"
@@ -25,6 +24,7 @@ import (
"time"

"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)

@@ -55,6 +55,12 @@ func GetTimePtr(t time.Time) *time.Time {
return &t
}

var (
errInvalidRange = s3err.GetAPIError(s3err.ErrInvalidRange)
)

// ParseRange parses input range header and returns startoffset, length, and
// error. If no endoffset specified, then length is set to -1.
func ParseRange(file fs.FileInfo, acceptRange string) (int64, int64, error) {
if acceptRange == "" {
return 0, file.Size(), nil
@@ -63,32 +69,37 @@ func ParseRange(file fs.FileInfo, acceptRange string) (int64, int64, error) {
rangeKv := strings.Split(acceptRange, "=")

if len(rangeKv) < 2 {
return 0, 0, errors.New("invalid range parameter")
return 0, 0, errInvalidRange
}

bRange := strings.Split(rangeKv[1], "-")
if len(bRange) < 2 {
return 0, 0, errors.New("invalid range parameter")
if len(bRange) < 1 || len(bRange) > 2 {
return 0, 0, errInvalidRange
}

startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil {
return 0, 0, errors.New("invalid range parameter")
return 0, 0, errInvalidRange
}

endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
endOffset := int64(-1)
if len(bRange) == 1 || bRange[1] == "" {
return startOffset, endOffset, nil
}

endOffset, err = strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, 0, errors.New("invalid range parameter")
return 0, 0, errInvalidRange
}

if endOffset < startOffset {
return 0, 0, errors.New("invalid range parameter")
return 0, 0, errInvalidRange
}

return int64(startOffset), int64(endOffset - startOffset + 1), nil
return startOffset, endOffset - startOffset + 1, nil
}

func GetMultipartMD5(parts []types.Part) string {
func GetMultipartMD5(parts []types.CompletedPart) string {
var partsEtagBytes []byte
for _, part := range parts {
partsEtagBytes = append(partsEtagBytes, getEtagBytes(*part.ETag)...)
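A small standalone sketch (not from the changeset) that exercises the revised ParseRange behavior, assuming the helper lives in the backend package as the hunk headers indicate: an open-ended range now yields a length of -1, and a malformed or reversed range maps to the InvalidRange API error instead of a plain errors.New value.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/versity/versitygw/backend"
)

func main() {
	// Illustrative only: create a 1000-byte file so there is a real
	// fs.FileInfo to pass to ParseRange.
	tmp, err := os.CreateTemp("", "range-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(tmp.Name())
	if _, err := tmp.Write(make([]byte, 1000)); err != nil {
		log.Fatal(err)
	}
	fi, err := tmp.Stat()
	if err != nil {
		log.Fatal(err)
	}

	for _, hdr := range []string{"bytes=0-499", "bytes=100-", "bytes=500-100"} {
		off, length, err := backend.ParseRange(fi, hdr)
		// Expected with the new behavior: (0, 500, nil), (100, -1, nil),
		// and an InvalidRange API error for the reversed range.
		fmt.Println(hdr, off, length, err)
	}
}
```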
@@ -15,6 +15,7 @@
|
||||
package posix
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
@@ -65,6 +66,7 @@ const (
|
||||
metaTmpMultipartDir = metaTmpDir + "/multipart"
|
||||
onameAttr = "user.objname"
|
||||
tagHdr = "X-Amz-Tagging"
|
||||
metaHdr = "X-Amz-Meta"
|
||||
contentTypeHdr = "content-type"
|
||||
contentEncHdr = "content-encoding"
|
||||
emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e"
|
||||
@@ -96,7 +98,7 @@ func (p *Posix) String() string {
|
||||
return "Posix Gateway"
|
||||
}
|
||||
|
||||
func (p *Posix) ListBuckets() (s3response.ListAllMyBucketsResult, error) {
|
||||
func (p *Posix) ListBuckets(_ context.Context, owner string, isAdmin bool) (s3response.ListAllMyBucketsResult, error) {
|
||||
entries, err := os.ReadDir(".")
|
||||
if err != nil {
|
||||
return s3response.ListAllMyBucketsResult{},
|
||||
@@ -116,10 +118,32 @@ func (p *Posix) ListBuckets() (s3response.ListAllMyBucketsResult, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
|
||||
Name: entry.Name(),
|
||||
CreationDate: fi.ModTime(),
|
||||
})
|
||||
// return all the buckets for admin users
|
||||
if isAdmin {
|
||||
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
|
||||
Name: entry.Name(),
|
||||
CreationDate: fi.ModTime(),
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
aclTag, err := xattr.Get(entry.Name(), aclkey)
|
||||
if err != nil {
|
||||
return s3response.ListAllMyBucketsResult{}, fmt.Errorf("get acl tag: %w", err)
|
||||
}
|
||||
|
||||
var acl auth.ACL
|
||||
err = json.Unmarshal(aclTag, &acl)
|
||||
if err != nil {
|
||||
return s3response.ListAllMyBucketsResult{}, fmt.Errorf("parse acl tag: %w", err)
|
||||
}
|
||||
|
||||
if acl.Owner == owner {
|
||||
buckets = append(buckets, s3response.ListAllMyBucketsEntry{
|
||||
Name: entry.Name(),
|
||||
CreationDate: fi.ModTime(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(backend.ByBucketName(buckets))
|
||||
@@ -128,11 +152,14 @@ func (p *Posix) ListBuckets() (s3response.ListAllMyBucketsResult, error) {
|
||||
Buckets: s3response.ListAllMyBucketsList{
|
||||
Bucket: buckets,
|
||||
},
|
||||
Owner: s3response.CanonicalUser{
|
||||
ID: owner,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
|
||||
_, err := os.Lstat(bucket)
|
||||
func (p *Posix) HeadBucket(_ context.Context, input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
|
||||
_, err := os.Lstat(*input.Bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
@@ -143,7 +170,10 @@ func (p *Posix) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
|
||||
return &s3.HeadBucketOutput{}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) PutBucket(bucket string, owner string) error {
|
||||
func (p *Posix) CreateBucket(_ context.Context, input *s3.CreateBucketInput) error {
|
||||
bucket := *input.Bucket
|
||||
owner := string(input.ObjectOwnership)
|
||||
|
||||
err := os.Mkdir(bucket, 0777)
|
||||
if err != nil && os.IsExist(err) {
|
||||
return s3err.GetAPIError(s3err.ErrBucketAlreadyExists)
|
||||
@@ -165,8 +195,8 @@ func (p *Posix) PutBucket(bucket string, owner string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) DeleteBucket(bucket string) error {
|
||||
names, err := os.ReadDir(bucket)
|
||||
func (p *Posix) DeleteBucket(_ context.Context, input *s3.DeleteBucketInput) error {
|
||||
names, err := os.ReadDir(*input.Bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
@@ -177,13 +207,13 @@ func (p *Posix) DeleteBucket(bucket string) error {
|
||||
if len(names) == 1 && names[0].Name() == metaTmpDir {
|
||||
// if .sgwtmp is only item in directory
|
||||
// then clean this up before trying to remove the bucket
|
||||
err = os.RemoveAll(filepath.Join(bucket, metaTmpDir))
|
||||
err = os.RemoveAll(filepath.Join(*input.Bucket, metaTmpDir))
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("remove temp dir: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = os.Remove(bucket)
|
||||
err = os.Remove(*input.Bucket)
|
||||
if err != nil && err.(*os.PathError).Err == syscall.ENOTEMPTY {
|
||||
return s3err.GetAPIError(s3err.ErrBucketNotEmpty)
|
||||
}
|
||||
@@ -194,7 +224,7 @@ func (p *Posix) DeleteBucket(bucket string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) CreateMultipartUpload(mpu *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
|
||||
func (p *Posix) CreateMultipartUpload(_ context.Context, mpu *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
|
||||
bucket := *mpu.Bucket
|
||||
object := *mpu.Key
|
||||
|
||||
@@ -245,7 +275,12 @@ func (p *Posix) CreateMultipartUpload(mpu *s3.CreateMultipartUploadInput) (*s3.C
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
|
||||
func (p *Posix) CompleteMultipartUpload(_ context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
uploadID := *input.UploadId
|
||||
parts := input.MultipartUpload.Parts
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -286,7 +321,9 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
|
||||
if err != nil {
|
||||
etag = ""
|
||||
}
|
||||
parts[i].ETag = &etag
|
||||
if etag != *parts[i].ETag {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
|
||||
}
|
||||
}
|
||||
|
||||
f, err := openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, totalsize)
|
||||
@@ -376,19 +413,21 @@ func loadUserMetaData(path string, m map[string]string) (contentType, contentEnc
|
||||
if err != nil || len(ents) == 0 {
|
||||
return
|
||||
}
|
||||
fmt.Println(ents)
|
||||
for _, e := range ents {
|
||||
if !isValidMeta(e) {
|
||||
continue
|
||||
}
|
||||
b, err := xattr.Get(path, e)
|
||||
if err == syscall.ENODATA {
|
||||
m[strings.TrimPrefix(e, "user.")] = ""
|
||||
m[strings.TrimPrefix(e, fmt.Sprintf("user.%v.", metaHdr))] = ""
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
m[strings.TrimPrefix(e, "user.")] = string(b)
|
||||
fmt.Println(b)
|
||||
m[strings.TrimPrefix(e, fmt.Sprintf("user.%v.", metaHdr))] = string(b)
|
||||
}
|
||||
|
||||
b, err := xattr.Get(path, "user."+contentTypeHdr)
|
||||
@@ -413,7 +452,7 @@ func loadUserMetaData(path string, m map[string]string) (contentType, contentEnc
|
||||
}
|
||||
|
||||
func isValidMeta(val string) bool {
|
||||
if strings.HasPrefix(val, "user.X-Amz-Meta") {
|
||||
if strings.HasPrefix(val, "user."+metaHdr) {
|
||||
return true
|
||||
}
|
||||
if strings.EqualFold(val, "user.Expires") {
|
||||
@@ -467,7 +506,7 @@ func mkdirAll(path string, perm os.FileMode, bucket, object string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) AbortMultipartUpload(mpu *s3.AbortMultipartUploadInput) error {
|
||||
func (p *Posix) AbortMultipartUpload(_ context.Context, mpu *s3.AbortMultipartUploadInput) error {
|
||||
bucket := *mpu.Bucket
|
||||
object := *mpu.Key
|
||||
uploadID := *mpu.UploadId
|
||||
@@ -497,7 +536,7 @@ func (p *Posix) AbortMultipartUpload(mpu *s3.AbortMultipartUploadInput) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
|
||||
func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) {
|
||||
bucket := *mpu.Bucket
|
||||
var delimiter string
|
||||
if mpu.Delimiter != nil {
|
||||
@@ -508,7 +547,7 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
|
||||
prefix = *mpu.Prefix
|
||||
}
|
||||
|
||||
var lmu s3response.ListMultipartUploadsResponse
|
||||
var lmu s3response.ListMultipartUploadsResult
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
@@ -522,6 +561,7 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
|
||||
objs, _ := os.ReadDir(filepath.Join(bucket, metaTmpMultipartDir))
|
||||
|
||||
var uploads []s3response.Upload
|
||||
var resultUpds []s3response.Upload
|
||||
|
||||
var keyMarker string
|
||||
if mpu.KeyMarker != nil {
|
||||
@@ -531,12 +571,9 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
|
||||
if mpu.UploadIdMarker != nil {
|
||||
uploadIDMarker = *mpu.UploadIdMarker
|
||||
}
|
||||
var pastMarker bool
|
||||
if keyMarker == "" && uploadIDMarker == "" {
|
||||
pastMarker = true
|
||||
}
|
||||
keyMarkerInd, uploadIdMarkerFound := -1, false
|
||||
|
||||
for i, obj := range objs {
|
||||
for _, obj := range objs {
|
||||
if !obj.IsDir() {
|
||||
continue
|
||||
}
|
||||
@@ -555,22 +592,14 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
|
||||
continue
|
||||
}
|
||||
|
||||
for j, upid := range upids {
|
||||
for _, upid := range upids {
|
||||
if !upid.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
if objectName == keyMarker || upid.Name() == uploadIDMarker {
|
||||
pastMarker = true
|
||||
continue
|
||||
}
|
||||
if keyMarker != "" && uploadIDMarker != "" && !pastMarker {
|
||||
continue
|
||||
}
|
||||
|
||||
userMetaData := make(map[string]string)
|
||||
upiddir := filepath.Join(bucket, metaTmpMultipartDir, obj.Name(), upid.Name())
|
||||
loadUserMetaData(upiddir, userMetaData)
|
||||
// userMetaData := make(map[string]string)
|
||||
// upiddir := filepath.Join(bucket, metaTmpMultipartDir, obj.Name(), upid.Name())
|
||||
// loadUserMetaData(upiddir, userMetaData)
|
||||
|
||||
fi, err := upid.Info()
|
||||
if err != nil {
|
||||
@@ -578,41 +607,90 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
|
||||
}
|
||||
|
||||
uploadID := upid.Name()
|
||||
if !uploadIdMarkerFound && uploadIDMarker == uploadID {
|
||||
uploadIdMarkerFound = true
|
||||
}
|
||||
if keyMarkerInd == -1 && objectName == keyMarker {
|
||||
keyMarkerInd = len(uploads)
|
||||
}
|
||||
uploads = append(uploads, s3response.Upload{
|
||||
Key: objectName,
|
||||
UploadID: uploadID,
|
||||
Initiated: fi.ModTime().Format(backend.RFC3339TimeFormat),
|
||||
})
|
||||
if len(uploads) == int(mpu.MaxUploads) {
|
||||
return s3response.ListMultipartUploadsResponse{
|
||||
Bucket: bucket,
|
||||
Delimiter: delimiter,
|
||||
IsTruncated: i != len(objs) || j != len(upids),
|
||||
KeyMarker: keyMarker,
|
||||
MaxUploads: int(mpu.MaxUploads),
|
||||
NextKeyMarker: objectName,
|
||||
NextUploadIDMarker: uploadID,
|
||||
Prefix: prefix,
|
||||
UploadIDMarker: uploadIDMarker,
|
||||
Uploads: uploads,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return s3response.ListMultipartUploadsResponse{
|
||||
if (uploadIDMarker != "" && !uploadIdMarkerFound) || (keyMarker != "" && keyMarkerInd == -1) {
|
||||
return s3response.ListMultipartUploadsResult{
|
||||
Bucket: bucket,
|
||||
Delimiter: delimiter,
|
||||
KeyMarker: keyMarker,
|
||||
MaxUploads: int(mpu.MaxUploads),
|
||||
Prefix: prefix,
|
||||
UploadIDMarker: uploadIDMarker,
|
||||
Uploads: []s3response.Upload{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
sort.SliceStable(uploads, func(i, j int) bool {
|
||||
return uploads[i].Key < uploads[j].Key
|
||||
})
|
||||
|
||||
for i := keyMarkerInd + 1; i < len(uploads); i++ {
|
||||
if mpu.MaxUploads == 0 {
|
||||
break
|
||||
}
|
||||
if keyMarker != "" && uploadIDMarker != "" && uploads[i].UploadID < uploadIDMarker {
|
||||
continue
|
||||
}
|
||||
if i != len(uploads)-1 && len(resultUpds) == int(mpu.MaxUploads) {
|
||||
return s3response.ListMultipartUploadsResult{
|
||||
Bucket: bucket,
|
||||
Delimiter: delimiter,
|
||||
KeyMarker: keyMarker,
|
||||
MaxUploads: int(mpu.MaxUploads),
|
||||
NextKeyMarker: resultUpds[i-1].Key,
|
||||
NextUploadIDMarker: resultUpds[i-1].UploadID,
|
||||
IsTruncated: true,
|
||||
Prefix: prefix,
|
||||
UploadIDMarker: uploadIDMarker,
|
||||
Uploads: resultUpds,
|
||||
}, nil
|
||||
}
|
||||
|
||||
resultUpds = append(resultUpds, uploads[i])
|
||||
}
|
||||
|
||||
return s3response.ListMultipartUploadsResult{
|
||||
Bucket: bucket,
|
||||
Delimiter: delimiter,
|
||||
KeyMarker: keyMarker,
|
||||
MaxUploads: int(mpu.MaxUploads),
|
||||
Prefix: prefix,
|
||||
UploadIDMarker: uploadIDMarker,
|
||||
Uploads: uploads,
|
||||
Uploads: resultUpds,
|
||||
}, nil
|
||||
}
|
||||
|
||||
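The marker bookkeeping above mirrors how S3 clients page through in-progress multipart uploads: a truncated listing returns NextKeyMarker and NextUploadIDMarker, which the client feeds back as KeyMarker and UploadIdMarker on the next call. A minimal client-side sketch against the gateway using aws-sdk-go-v2 (bucket name and client configuration are placeholders; with the SDK versions pinned in go.mod, MaxUploads and IsTruncated are plain values rather than pointers):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	var keyMarker, uploadIDMarker *string
	for {
		out, err := client.ListMultipartUploads(ctx, &s3.ListMultipartUploadsInput{
			Bucket:         aws.String("mybucket"), // placeholder bucket
			MaxUploads:     100,
			KeyMarker:      keyMarker,
			UploadIdMarker: uploadIDMarker,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, u := range out.Uploads {
			fmt.Println(*u.Key, *u.UploadId)
		}
		if !out.IsTruncated {
			break
		}
		// continue from the markers returned by the gateway
		keyMarker, uploadIDMarker = out.NextKeyMarker, out.NextUploadIdMarker
	}
}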
func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
|
||||
var lpr s3response.ListPartsResponse
|
||||
func (p *Posix) ListParts(_ context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
uploadID := *input.UploadId
|
||||
stringMarker := *input.PartNumberMarker
|
||||
maxParts := int(input.MaxParts)
|
||||
|
||||
var lpr s3response.ListPartsResult
|
||||
|
||||
var partNumberMarker int
|
||||
if stringMarker != "" {
|
||||
var err error
|
||||
partNumberMarker, err = strconv.Atoi(stringMarker)
|
||||
if err != nil {
|
||||
return lpr, s3err.GetAPIError(s3err.ErrInvalidPartNumberMarker)
|
||||
}
|
||||
}
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return lpr, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -681,7 +759,7 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
|
||||
upiddir := filepath.Join(objdir, uploadID)
|
||||
loadUserMetaData(upiddir, userMetaData)
|
||||
|
||||
return s3response.ListPartsResponse{
|
||||
return s3response.ListPartsResult{
|
||||
Bucket: bucket,
|
||||
IsTruncated: oldLen != newLen,
|
||||
Key: object,
|
||||
@@ -693,11 +771,14 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TODO: copy part
|
||||
// func (p *Posix) CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error) {
|
||||
// }
|
||||
func (p *Posix) UploadPart(_ context.Context, input *s3.UploadPartInput) (string, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
uploadID := *input.UploadId
|
||||
part := input.PartNumber
|
||||
length := input.ContentLength
|
||||
r := input.Body
|
||||
|
||||
func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (string, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -708,6 +789,15 @@ func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length
|
||||
|
||||
sum := sha256.Sum256([]byte(object))
|
||||
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
|
||||
|
||||
_, err = os.Stat(filepath.Join(bucket, objdir, uploadID))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return "", s3err.GetAPIError(s3err.ErrNoSuchUpload)
|
||||
}
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("stat uploadid: %w", err)
|
||||
}
|
||||
|
||||
partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", part))
|
||||
|
||||
f, err := openTmpFile(filepath.Join(bucket, objdir),
|
||||
@@ -715,7 +805,6 @@ func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("open temp file: %w", err)
|
||||
}
|
||||
defer f.cleanup()
|
||||
|
||||
hash := md5.New()
|
||||
tr := io.TeeReader(r, hash)
|
||||
@@ -729,14 +818,123 @@ func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length
|
||||
return "", fmt.Errorf("link object in namespace: %w", err)
|
||||
}
|
||||
|
||||
f.cleanup()
|
||||
|
||||
dataSum := hash.Sum(nil)
|
||||
etag := hex.EncodeToString(dataSum)
|
||||
xattr.Set(partPath, etagkey, []byte(etag))
|
||||
xattr.Set(filepath.Join(bucket, partPath), etagkey, []byte(etag))
|
||||
|
||||
return etag, nil
|
||||
}
|
||||
|
||||
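For reference, the part ETag computed above is simply the hex-encoded MD5 of the bytes written, so a client can verify a part it uploaded without another round trip. A small stdlib-only illustration:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	// the ETag for a part whose body is "hello"
	sum := md5.Sum([]byte("hello"))
	fmt.Println(hex.EncodeToString(sum[:])) // 5d41402abc4b2a76b9719d911017c592
}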
func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
|
||||
func (p *Posix) UploadPartCopy(_ context.Context, upi *s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
|
||||
_, err := os.Stat(*upi.Bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
sum := sha256.Sum256([]byte(*upi.Key))
|
||||
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
|
||||
|
||||
_, err = os.Stat(filepath.Join(*upi.Bucket, objdir, *upi.UploadId))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchUpload)
|
||||
}
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, fmt.Errorf("stat uploadid: %w", err)
|
||||
}
|
||||
|
||||
partPath := filepath.Join(objdir, *upi.UploadId, fmt.Sprintf("%v", upi.PartNumber))
|
||||
|
||||
substrs := strings.SplitN(*upi.CopySource, "/", 2)
|
||||
if len(substrs) != 2 {
|
||||
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrInvalidCopySource)
|
||||
}
|
||||
|
||||
srcBucket := substrs[0]
|
||||
srcObject := substrs[1]
|
||||
|
||||
_, err = os.Stat(srcBucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
objPath := filepath.Join(srcBucket, srcObject)
|
||||
fi, err := os.Stat(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, fmt.Errorf("stat object: %w", err)
|
||||
}
|
||||
|
||||
startOffset, length, err := backend.ParseRange(fi, *upi.CopySourceRange)
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, err
|
||||
}
|
||||
|
||||
if length == -1 {
|
||||
length = fi.Size() - startOffset + 1
|
||||
}
|
||||
|
||||
if startOffset+length > fi.Size()+1 {
|
||||
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
}
|
||||
|
||||
f, err := openTmpFile(filepath.Join(*upi.Bucket, objdir),
|
||||
*upi.Bucket, partPath, length)
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, fmt.Errorf("open temp file: %w", err)
|
||||
}
|
||||
defer f.cleanup()
|
||||
|
||||
srcf, err := os.Open(objPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, fmt.Errorf("open object: %w", err)
|
||||
}
|
||||
defer srcf.Close()
|
||||
|
||||
rdr := io.NewSectionReader(srcf, startOffset, length)
|
||||
hash := md5.New()
|
||||
tr := io.TeeReader(rdr, hash)
|
||||
|
||||
_, err = io.Copy(f, tr)
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, fmt.Errorf("copy part data: %w", err)
|
||||
}
|
||||
|
||||
err = f.link()
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, fmt.Errorf("link object in namespace: %w", err)
|
||||
}
|
||||
|
||||
dataSum := hash.Sum(nil)
|
||||
etag := hex.EncodeToString(dataSum)
|
||||
xattr.Set(filepath.Join(*upi.Bucket, partPath), etagkey, []byte(etag))
|
||||
|
||||
fi, err = os.Stat(filepath.Join(*upi.Bucket, partPath))
|
||||
if err != nil {
|
||||
return s3response.CopyObjectResult{}, fmt.Errorf("stat part path: %w", err)
|
||||
}
|
||||
|
||||
return s3response.CopyObjectResult{
|
||||
ETag: etag,
|
||||
LastModified: fi.ModTime(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) PutObject(ctx context.Context, po *s3.PutObjectInput) (string, error) {
|
||||
tagsStr := getString(po.Tagging)
|
||||
tags := make(map[string]string)
|
||||
_, err := os.Stat(*po.Bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -745,6 +943,17 @@ func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
|
||||
return "", fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
if tagsStr != "" {
|
||||
tagParts := strings.Split(tagsStr, "&")
|
||||
for _, prt := range tagParts {
|
||||
p := strings.Split(prt, "=")
|
||||
if len(p) != 2 {
|
||||
return "", s3err.GetAPIError(s3err.ErrInvalidTag)
|
||||
}
|
||||
tags[p[0]] = p[1]
|
||||
}
|
||||
}
|
||||
|
||||
name := filepath.Join(*po.Bucket, *po.Key)
|
||||
|
||||
if strings.HasSuffix(*po.Key, "/") {
|
||||
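The Tagging value parsed above is a URL-query style string, so "key1=val1&key2=val2" yields a two-entry map and any pair without exactly one "=" is rejected as ErrInvalidTag. A standalone illustration of the same split logic (the sample tag string is made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	tags := map[string]string{}
	for _, prt := range strings.Split("key1=val1&key2=val2", "&") {
		p := strings.Split(prt, "=")
		if len(p) != 2 {
			fmt.Println("invalid tag:", prt)
			continue
		}
		tags[p[0]] = p[1]
	}
	fmt.Println(tags) // map[key1:val1 key2:val2]
}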
@@ -755,7 +964,7 @@ func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
|
||||
}
|
||||
|
||||
for k, v := range po.Metadata {
|
||||
xattr.Set(name, "user."+k, []byte(v))
|
||||
xattr.Set(name, fmt.Sprintf("user.%v.%v", metaHdr, k), []byte(v))
|
||||
}
|
||||
|
||||
// set etag attribute to signify this dir was specifically put
|
||||
@@ -788,11 +997,18 @@ func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
|
||||
|
||||
err = f.link()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("link object in namespace: %w", err)
|
||||
return "", s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)
|
||||
}
|
||||
|
||||
for k, v := range po.Metadata {
|
||||
xattr.Set(name, "user."+k, []byte(v))
|
||||
xattr.Set(name, fmt.Sprintf("user.%v.%v", metaHdr, k), []byte(v))
|
||||
}
|
||||
|
||||
if tagsStr != "" {
|
||||
err := p.SetTags(ctx, *po.Bucket, *po.Key, tags)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
dataSum := hash.Sum(nil)
|
||||
@@ -802,7 +1018,10 @@ func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
|
||||
return etag, nil
|
||||
}
|
||||
|
||||
func (p *Posix) DeleteObject(bucket, object string) error {
|
||||
func (p *Posix) DeleteObject(_ context.Context, input *s3.DeleteObjectInput) error {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -811,7 +1030,7 @@ func (p *Posix) DeleteObject(bucket, object string) error {
|
||||
return fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
os.Remove(filepath.Join(bucket, object))
|
||||
err = os.Remove(filepath.Join(bucket, object))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
@@ -856,19 +1075,47 @@ func (p *Posix) removeParents(bucket, object string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error {
|
||||
func (p *Posix) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput) (s3response.DeleteObjectsResult, error) {
|
||||
// delete object already checks bucket
|
||||
for _, obj := range objects.Delete.Objects {
|
||||
err := p.DeleteObject(bucket, *obj.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
delResult, errs := []types.DeletedObject{}, []types.Error{}
|
||||
for _, obj := range input.Delete.Objects {
|
||||
//TODO: Make the delete operation concurrent
|
||||
err := p.DeleteObject(ctx, &s3.DeleteObjectInput{
|
||||
Bucket: input.Bucket,
|
||||
Key: obj.Key,
|
||||
})
|
||||
if err == nil {
|
||||
delResult = append(delResult, types.DeletedObject{Key: obj.Key})
|
||||
} else {
|
||||
serr, ok := err.(s3err.APIError)
|
||||
if ok {
|
||||
errs = append(errs, types.Error{
|
||||
Key: obj.Key,
|
||||
Code: &serr.Code,
|
||||
Message: &serr.Description,
|
||||
})
|
||||
} else {
|
||||
errs = append(errs, types.Error{
|
||||
Key: obj.Key,
|
||||
Code: getStringPtr("InternalError"),
|
||||
Message: getStringPtr(err.Error()),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return s3response.DeleteObjectsResult{
|
||||
Deleted: delResult,
|
||||
Error: errs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
|
||||
func (p *Posix) GetObject(_ context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
acceptRange := *input.Range
|
||||
var contentRange string
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -891,9 +1138,16 @@ func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if startOffset+length > fi.Size() {
|
||||
// TODO: is ErrInvalidRequest correct here?
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
if length == -1 {
|
||||
length = fi.Size() - startOffset + 1
|
||||
}
|
||||
|
||||
if startOffset+length > fi.Size()+1 {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidRange)
|
||||
}
|
||||
|
||||
if acceptRange != "" {
|
||||
contentRange = fmt.Sprintf("bytes %v-%v/%v", startOffset, startOffset+length-1, fi.Size())
|
||||
}
|
||||
|
||||
f, err := os.Open(objPath)
|
||||
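The Content-Range header built above follows the HTTP form "bytes first-last/total". Assuming backend.ParseRange follows standard HTTP range semantics, a request with Range "bytes=0-99" on a 1000-byte object resolves to startOffset 0 and length 100, producing:

package main

import "fmt"

func main() {
	startOffset, length, size := int64(0), int64(100), int64(1000)
	fmt.Printf("bytes %v-%v/%v\n", startOffset, startOffset+length-1, size) // bytes 0-99/1000
}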
@@ -914,6 +1168,7 @@ func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer)
|
||||
userMetaData := make(map[string]string)
|
||||
|
||||
contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
|
||||
fmt.Println(userMetaData)
|
||||
|
||||
b, err := xattr.Get(objPath, etagkey)
|
||||
etag := string(b)
|
||||
@@ -935,10 +1190,14 @@ func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer)
|
||||
LastModified: backend.GetTimePtr(fi.ModTime()),
|
||||
Metadata: userMetaData,
|
||||
TagCount: int32(len(tags)),
|
||||
ContentRange: &contentRange,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
|
||||
func (p *Posix) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -975,7 +1234,14 @@ func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
|
||||
func (p *Posix) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
|
||||
srcBucket, srcObject, ok := strings.Cut(*input.CopySource, "/")
|
||||
if !ok {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidCopySource)
|
||||
}
|
||||
dstBucket := *input.Bucket
|
||||
dstObject := *input.Key
|
||||
|
||||
_, err := os.Stat(srcBucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
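CopySource above is expected as "srcBucket/srcObject", and strings.Cut splits on the first "/" only, so object keys that themselves contain slashes survive intact. A tiny illustration (the bucket and key are made-up values):

package main

import (
	"fmt"
	"strings"
)

func main() {
	srcBucket, srcObject, ok := strings.Cut("photos/2023/cat.jpg", "/")
	fmt.Println(srcBucket, srcObject, ok) // photos 2023/cat.jpg true
}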
@@ -984,7 +1250,7 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(DstBucket)
|
||||
_, err = os.Stat(dstBucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
@@ -1002,12 +1268,17 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
etag, err := p.PutObject(&s3.PutObjectInput{Bucket: &DstBucket, Key: &dstObject, Body: f})
|
||||
fInfo, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat object: %w", err)
|
||||
}
|
||||
|
||||
etag, err := p.PutObject(ctx, &s3.PutObjectInput{Bucket: &dstBucket, Key: &dstObject, Body: f, ContentLength: fInfo.Size()})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fi, err := os.Stat(filepath.Join(DstBucket, dstObject))
|
||||
fi, err := os.Stat(filepath.Join(dstBucket, dstObject))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat dst object: %w", err)
|
||||
}
|
||||
@@ -1020,7 +1291,13 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
|
||||
func (p *Posix) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
prefix := *input.Prefix
|
||||
marker := *input.Marker
|
||||
delim := *input.Delimiter
|
||||
maxkeys := input.MaxKeys
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -1042,7 +1319,7 @@ func (p *Posix) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (
|
||||
Delimiter: &delim,
|
||||
IsTruncated: results.Truncated,
|
||||
Marker: &marker,
|
||||
MaxKeys: int32(maxkeys),
|
||||
MaxKeys: maxkeys,
|
||||
Name: &bucket,
|
||||
NextMarker: &results.NextMarker,
|
||||
Prefix: &prefix,
|
||||
@@ -1110,7 +1387,13 @@ func fileToObj(bucket string) backend.GetObjFunc {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
|
||||
func (p *Posix) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
|
||||
bucket := *input.Bucket
|
||||
prefix := *input.Prefix
|
||||
marker := *input.ContinuationToken
|
||||
delim := *input.Delimiter
|
||||
maxkeys := input.MaxKeys
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -1120,7 +1403,7 @@ func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int)
|
||||
}
|
||||
|
||||
fileSystem := os.DirFS(bucket)
|
||||
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
|
||||
results, err := backend.Walk(fileSystem, prefix, delim, marker, int32(maxkeys),
|
||||
fileToObj(bucket), []string{metaTmpDir})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("walk %v: %w", bucket, err)
|
||||
@@ -1136,10 +1419,11 @@ func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int)
|
||||
Name: &bucket,
|
||||
NextContinuationToken: &results.NextMarker,
|
||||
Prefix: &prefix,
|
||||
KeyCount: int32(len(results.Objects)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) PutBucketAcl(bucket string, data []byte) error {
|
||||
func (p *Posix) PutBucketAcl(_ context.Context, bucket string, data []byte) error {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -1155,8 +1439,8 @@ func (p *Posix) PutBucketAcl(bucket string, data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) GetBucketAcl(bucket string) ([]byte, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
func (p *Posix) GetBucketAcl(_ context.Context, input *s3.GetBucketAclInput) ([]byte, error) {
|
||||
_, err := os.Stat(*input.Bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
@@ -1164,7 +1448,7 @@ func (p *Posix) GetBucketAcl(bucket string) ([]byte, error) {
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
b, err := xattr.Get(bucket, aclkey)
|
||||
b, err := xattr.Get(*input.Bucket, aclkey)
|
||||
if isNoAttr(err) {
|
||||
return []byte{}, nil
|
||||
}
|
||||
@@ -1174,7 +1458,7 @@ func (p *Posix) GetBucketAcl(bucket string) ([]byte, error) {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (p *Posix) GetTags(bucket, object string) (map[string]string, error) {
|
||||
func (p *Posix) GetTags(_ context.Context, bucket, object string) (map[string]string, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -1207,7 +1491,7 @@ func (p *Posix) getXattrTags(bucket, object string) (map[string]string, error) {
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
func (p *Posix) SetTags(bucket, object string, tags map[string]string) error {
|
||||
func (p *Posix) SetTags(_ context.Context, bucket, object string, tags map[string]string) error {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -1243,8 +1527,43 @@ func (p *Posix) SetTags(bucket, object string, tags map[string]string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) RemoveTags(bucket, object string) error {
|
||||
return p.SetTags(bucket, object, nil)
|
||||
func (p *Posix) RemoveTags(ctx context.Context, bucket, object string) error {
|
||||
return p.SetTags(ctx, bucket, object, nil)
|
||||
}
|
||||
|
||||
func (p *Posix) ChangeBucketOwner(ctx context.Context, bucket, newOwner string) error {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
aclTag, err := xattr.Get(bucket, aclkey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get acl: %w", err)
|
||||
}
|
||||
|
||||
var acl auth.ACL
|
||||
err = json.Unmarshal(aclTag, &acl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unmarshal acl: %w", err)
|
||||
}
|
||||
|
||||
acl.Owner = newOwner
|
||||
|
||||
newAcl, err := json.Marshal(acl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal acl: %w", err)
|
||||
}
|
||||
|
||||
err = xattr.Set(bucket, aclkey, newAcl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("set acl: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -1257,7 +1576,7 @@ func (p *Posix) InitIAM() error {
|
||||
|
||||
_, err := os.ReadFile(iamFile)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
b, err := json.Marshal(auth.IAMConfig{})
|
||||
b, err := json.Marshal(auth.IAMConfig{AccessAccounts: map[string]auth.Account{}})
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal default iam: %w", err)
|
||||
}
|
||||
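The change above seeds the default IAM config with an empty AccessAccounts map instead of the zero value. The difference matters because encoding/json serializes a nil map as null but an empty map as {}, so the on-disk file always starts with a usable object:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilMap map[string]string
	b1, _ := json.Marshal(nilMap)              // nil map
	b2, _ := json.Marshal(map[string]string{}) // empty map
	fmt.Println(string(b1), string(b2))        // null {}
}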
@@ -1452,3 +1771,14 @@ func isNoAttr(err error) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getString(str *string) string {
|
||||
if str == nil {
|
||||
return ""
|
||||
}
|
||||
return *str
|
||||
}
|
||||
|
||||
func getStringPtr(str string) *string {
|
||||
return &str
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
package scoutfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
@@ -114,7 +115,12 @@ func (*ScoutFS) String() string {
|
||||
// CompleteMultipartUpload scoutfs complete upload uses scoutfs move blocks
// ioctl to not have to read and copy the part data to the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
|
||||
func (s *ScoutFS) CompleteMultipartUpload(_ context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
uploadID := *input.UploadId
|
||||
parts := input.MultipartUpload.Parts
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -160,7 +166,9 @@ func (s *ScoutFS) CompleteMultipartUpload(bucket, object, uploadID string, parts
|
||||
if err != nil {
|
||||
etag = ""
|
||||
}
|
||||
parts[i].ETag = &etag
|
||||
if etag != *parts[i].ETag {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
|
||||
}
|
||||
}
|
||||
|
||||
// use totalsize=0 because we won't be writing to the file, only moving
|
||||
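The diff above changes CompleteMultipartUpload to verify each client-supplied part ETag against the ETag recorded at upload time instead of overwriting it, so a stale or mistyped ETag is rejected with ErrInvalidPart before any blocks are moved. A reduced sketch of that check, not the gateway's actual helper (the xattr lookup is replaced by a plain map, and the import paths are assumed from the module name):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/s3err"
)

// validateParts rejects the complete request if any supplied part ETag does
// not match the ETag stored when the part was uploaded.
func validateParts(parts []types.Part, stored map[int32]string) error {
	for i := range parts {
		if parts[i].ETag == nil || stored[parts[i].PartNumber] != *parts[i].ETag {
			return s3err.GetAPIError(s3err.ErrInvalidPart)
		}
	}
	return nil
}

func main() {
	etag := "5d41402abc4b2a76b9719d911017c592"
	err := validateParts(
		[]types.Part{{PartNumber: 1, ETag: &etag}},
		map[int32]string{1: etag},
	)
	fmt.Println(err) // <nil>
}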
@@ -347,7 +355,10 @@ func mkdirAll(path string, perm os.FileMode, bucket, object string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ScoutFS) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
|
||||
func (s *ScoutFS) HeadObject(_ context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -417,7 +428,11 @@ func (s *ScoutFS) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *ScoutFS) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
|
||||
func (s *ScoutFS) GetObject(_ context.Context, input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
acceptRange := *input.Range
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -440,8 +455,11 @@ func (s *ScoutFS) GetObject(bucket, object, acceptRange string, writer io.Writer
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if length == -1 {
|
||||
length = fi.Size() - startOffset + 1
|
||||
}
|
||||
|
||||
if startOffset+length > fi.Size() {
|
||||
// TODO: is ErrInvalidRequest correct here?
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
}
|
||||
|
||||
@@ -524,7 +542,13 @@ func (s *ScoutFS) getXattrTags(bucket, object string) (map[string]string, error)
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
func (s *ScoutFS) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
|
||||
func (s *ScoutFS) ListObjects(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
|
||||
bucket := *input.Bucket
|
||||
prefix := *input.Prefix
|
||||
marker := *input.Marker
|
||||
delim := *input.Delimiter
|
||||
maxkeys := input.MaxKeys
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -546,14 +570,20 @@ func (s *ScoutFS) ListObjects(bucket, prefix, marker, delim string, maxkeys int)
|
||||
Delimiter: &delim,
|
||||
IsTruncated: results.Truncated,
|
||||
Marker: &marker,
|
||||
MaxKeys: int32(maxkeys),
|
||||
MaxKeys: maxkeys,
|
||||
Name: &bucket,
|
||||
NextMarker: &results.NextMarker,
|
||||
Prefix: &prefix,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *ScoutFS) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
|
||||
func (s *ScoutFS) ListObjectsV2(_ context.Context, input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
|
||||
bucket := *input.Bucket
|
||||
prefix := *input.Prefix
|
||||
marker := *input.ContinuationToken
|
||||
delim := *input.Delimiter
|
||||
maxkeys := input.MaxKeys
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
@@ -563,7 +593,7 @@ func (s *ScoutFS) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys in
|
||||
}
|
||||
|
||||
fileSystem := os.DirFS(bucket)
|
||||
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
|
||||
results, err := backend.Walk(fileSystem, prefix, delim, marker, int32(maxkeys),
|
||||
s.fileToObj(bucket), []string{metaTmpDir})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("walk %v: %w", bucket, err)
|
||||
@@ -660,7 +690,10 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
|
||||
|
||||
// RestoreObject will set stage request on file if offline and do nothing if
|
||||
// file is online
|
||||
func (s *ScoutFS) RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error {
|
||||
func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
|
||||
bucket := *input.Bucket
|
||||
object := *input.Key
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
|
||||
@@ -38,7 +38,7 @@ var ErrSkipObj = errors.New("skip this object")
|
||||
|
||||
// Walk walks the supplied fs.FS and returns results compatible with list
|
||||
// objects responses
|
||||
func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
|
||||
func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
|
||||
cpmap := make(map[string]struct{})
|
||||
var objects []types.Object
|
||||
|
||||
@@ -47,7 +47,7 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
|
||||
pastMarker = true
|
||||
}
|
||||
|
||||
var pastMax bool
|
||||
pastMax := max == 0
|
||||
var newMarker string
|
||||
var truncated bool
|
||||
|
||||
@@ -55,6 +55,13 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Ignore the root directory
|
||||
if path == "." {
|
||||
return nil
|
||||
}
|
||||
if contains(d.Name(), skipdirs) {
|
||||
return fs.SkipDir
|
||||
}
|
||||
|
||||
if pastMax {
|
||||
newMarker = path
|
||||
@@ -63,15 +70,6 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
|
||||
}
|
||||
|
||||
if d.IsDir() {
|
||||
// Ignore the root directory
|
||||
if path == "." {
|
||||
return nil
|
||||
}
|
||||
|
||||
if contains(d.Name(), skipdirs) {
|
||||
return fs.SkipDir
|
||||
}
|
||||
|
||||
// If prefix is defined and the directory does not match prefix,
|
||||
// do not descend into the directory because nothing will
|
||||
// match this prefix. Make sure to append the / at the end of
|
||||
@@ -129,7 +127,7 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
|
||||
}
|
||||
objects = append(objects, obj)
|
||||
|
||||
if max > 0 && (len(objects)+len(cpmap)) == max {
|
||||
if max > 0 && (len(objects)+len(cpmap)) == int(max) {
|
||||
pastMax = true
|
||||
}
|
||||
|
||||
@@ -168,7 +166,7 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
|
||||
return fmt.Errorf("file to object %q: %w", path, err)
|
||||
}
|
||||
objects = append(objects, obj)
|
||||
if (len(objects) + len(cpmap)) == max {
|
||||
if (len(objects) + len(cpmap)) == int(max) {
|
||||
pastMax = true
|
||||
}
|
||||
return nil
|
||||
@@ -178,7 +176,7 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
|
||||
// These are abstractly a "directory", so need to include the
|
||||
// delimiter at the end.
|
||||
cpmap[prefix+before+delimiter] = struct{}{}
|
||||
if (len(objects) + len(cpmap)) == max {
|
||||
if (len(objects) + len(cpmap)) == int(max) {
|
||||
pastMax = true
|
||||
}
|
||||
|
||||
|
||||
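The cpmap bookkeeping above implements the S3 delimiter contract: keys sharing the prefix up to the next delimiter are reported once as a common prefix rather than as individual objects. A standalone illustration of that collapse using only the standard library (the key names are made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	keys := []string{"photos/2021/a.jpg", "photos/2021/b.jpg", "photos/2022/c.jpg", "readme.txt"}
	prefix, delimiter := "photos/", "/"

	common := map[string]struct{}{}
	for _, k := range keys {
		if !strings.HasPrefix(k, prefix) {
			continue
		}
		rest := strings.TrimPrefix(k, prefix)
		if before, _, ok := strings.Cut(rest, delimiter); ok {
			// everything below "photos/<year>/" collapses into one prefix entry
			common[prefix+before+delimiter] = struct{}{}
		}
	}
	fmt.Println(common) // map[photos/2021/:{} photos/2022/:{}]
}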
@@ -17,30 +17,31 @@ package main
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
)
|
||||
|
||||
var (
|
||||
adminAccess string
|
||||
adminSecret string
|
||||
adminRegion string
|
||||
adminAccess string
|
||||
adminSecret string
|
||||
adminEndpoint string
|
||||
)
|
||||
|
||||
func adminCommand() *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "admin",
|
||||
Usage: "admin CLI tool",
|
||||
Description: `admin CLI tool for interacting with admin api.
|
||||
Here is the available api list:
|
||||
create-user
|
||||
`,
|
||||
Name: "admin",
|
||||
Usage: "admin CLI tool",
|
||||
Description: `Admin CLI tool for interacting with admin APIs.`,
|
||||
Subcommands: []*cli.Command{
|
||||
{
|
||||
Name: "create-user",
|
||||
@@ -49,13 +50,13 @@ func adminCommand() *cli.Command {
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "access",
|
||||
Usage: "access value for the new user",
|
||||
Usage: "access key id for the new user",
|
||||
Required: true,
|
||||
Aliases: []string{"a"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "secret",
|
||||
Usage: "secret value for the new user",
|
||||
Usage: "secret access key for the new user",
|
||||
Required: true,
|
||||
Aliases: []string{"s"},
|
||||
},
|
||||
@@ -65,12 +66,6 @@ func adminCommand() *cli.Command {
|
||||
Required: true,
|
||||
Aliases: []string{"r"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "region",
|
||||
Usage: "s3 region string for the user",
|
||||
Value: "us-east-1",
|
||||
Aliases: []string{"rg"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -80,50 +75,77 @@ func adminCommand() *cli.Command {
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "access",
|
||||
Usage: "access value for the user to be deleted",
|
||||
Usage: "access key id of the user to be deleted",
|
||||
Required: true,
|
||||
Aliases: []string{"a"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "list-users",
|
||||
Usage: "List all the gateway users",
|
||||
Action: listUsers,
|
||||
},
|
||||
{
|
||||
Name: "change-bucket-owner",
|
||||
Usage: "Changes the bucket owner",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "bucket",
|
||||
Usage: "the bucket name to change the owner",
|
||||
Required: true,
|
||||
Aliases: []string{"b"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "owner",
|
||||
Usage: "the user access key id, who should be the bucket owner",
|
||||
Required: true,
|
||||
Aliases: []string{"o"},
|
||||
},
|
||||
},
|
||||
Action: changeBucketOwner,
|
||||
},
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
// TODO: create a configuration file for this
|
||||
&cli.StringFlag{
|
||||
Name: "adminAccess",
|
||||
Usage: "admin access account",
|
||||
Name: "access",
|
||||
Usage: "admin access key id",
|
||||
EnvVars: []string{"ADMIN_ACCESS_KEY_ID", "ADMIN_ACCESS_KEY"},
|
||||
Aliases: []string{"aa"},
|
||||
Aliases: []string{"a"},
|
||||
Required: true,
|
||||
Destination: &adminAccess,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "adminSecret",
|
||||
Name: "secret",
|
||||
Usage: "admin secret access key",
|
||||
EnvVars: []string{"ADMIN_SECRET_ACCESS_KEY", "ADMIN_SECRET_KEY"},
|
||||
Aliases: []string{"as"},
|
||||
Aliases: []string{"s"},
|
||||
Required: true,
|
||||
Destination: &adminSecret,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "adminRegion",
|
||||
Usage: "s3 region string",
|
||||
Value: "us-east-1",
|
||||
Destination: &adminRegion,
|
||||
Aliases: []string{"ar"},
|
||||
Name: "endpoint-url",
|
||||
Usage: "admin apis endpoint url",
|
||||
EnvVars: []string{"ADMIN_ENDPOINT_URL"},
|
||||
Aliases: []string{"er"},
|
||||
Required: true,
|
||||
Destination: &adminEndpoint,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createUser(ctx *cli.Context) error {
|
||||
access, secret, role, region := ctx.String("access"), ctx.String("secret"), ctx.String("role"), ctx.String("region")
|
||||
if access == "" || secret == "" || region == "" {
|
||||
access, secret, role := ctx.String("access"), ctx.String("secret"), ctx.String("role")
|
||||
if access == "" || secret == "" {
|
||||
return fmt.Errorf("invalid input parameters for the new user")
|
||||
}
|
||||
if role != "admin" && role != "user" {
|
||||
return fmt.Errorf("invalid input parameter for role")
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://localhost:7070/create-user?access=%v&secret=%v&role=%v®ion=%v", access, secret, role, region), nil)
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/create-user?access=%v&secret=%v&role=%v", adminEndpoint, access, secret, role), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
@@ -135,7 +157,7 @@ func createUser(ctx *cli.Context) error {
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
|
||||
if signErr != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
@@ -151,8 +173,9 @@ func createUser(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
fmt.Printf("%s", body)
|
||||
fmt.Printf("%s\n", body)
|
||||
|
||||
return nil
|
||||
}
|
||||
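Each admin request above is signed over an empty body, so the X-Amz-Content-Sha256 header is always the SHA-256 of zero bytes. A quick check of that constant:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte{})
	fmt.Println(hex.EncodeToString(sum[:]))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}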
@@ -163,7 +186,7 @@ func deleteUser(ctx *cli.Context) error {
|
||||
return fmt.Errorf("invalid input parameter for the new user")
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("http://localhost:7070/delete-user?access=%v", access), nil)
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/delete-user?access=%v", adminEndpoint, access), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
@@ -175,7 +198,7 @@ func deleteUser(ctx *cli.Context) error {
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
|
||||
if signErr != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
@@ -191,8 +214,108 @@ func deleteUser(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
fmt.Printf("%s", body)
|
||||
fmt.Printf("%s\n", body)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func listUsers(ctx *cli.Context) error {
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/list-users", adminEndpoint), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
|
||||
hashedPayload := sha256.Sum256([]byte{})
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
|
||||
if signErr != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
|
||||
client := http.Client{}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var accs []auth.Account
|
||||
if err := json.Unmarshal(body, &accs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printAcctTable(accs)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
// account table formatting
|
||||
minwidth int = 2 // minimal cell width including any padding
|
||||
tabwidth int = 0 // width of tab characters (equivalent number of spaces)
|
||||
padding int = 2 // padding added to a cell before computing its width
|
||||
padchar byte = ' ' // ASCII char used for padding
|
||||
flags uint = 0 // formatting control flags
|
||||
)
|
||||
|
||||
func printAcctTable(accs []auth.Account) {
|
||||
w := new(tabwriter.Writer)
|
||||
w.Init(os.Stdout, minwidth, tabwidth, padding, padchar, flags)
|
||||
fmt.Fprintln(w, "Account\tRole")
|
||||
fmt.Fprintln(w, "-------\t----")
|
||||
for _, acc := range accs {
|
||||
fmt.Fprintf(w, "%v\t%v\n", acc.Access, acc.Role)
|
||||
}
|
||||
fmt.Fprintln(w)
|
||||
w.Flush()
|
||||
}
|
||||
|
||||
func changeBucketOwner(ctx *cli.Context) error {
|
||||
bucket, owner := ctx.String("bucket"), ctx.String("owner")
|
||||
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%v/change-bucket-owner/?bucket=%v&owner=%v", adminEndpoint, bucket, owner), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
|
||||
hashedPayload := sha256.Sum256([]byte{})
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", region, time.Now())
|
||||
if signErr != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
|
||||
client := http.Client{}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
fmt.Println(string(body))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
@@ -26,15 +27,21 @@ import (
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3api"
|
||||
"github.com/versity/versitygw/s3api/middlewares"
|
||||
"github.com/versity/versitygw/s3event"
|
||||
"github.com/versity/versitygw/s3log"
|
||||
)
|
||||
|
||||
var (
|
||||
port string
|
||||
rootUserAccess string
|
||||
rootUserSecret string
|
||||
region string
|
||||
certFile, keyFile string
|
||||
debug bool
|
||||
port string
|
||||
rootUserAccess string
|
||||
rootUserSecret string
|
||||
region string
|
||||
certFile, keyFile string
|
||||
kafkaURL, kafkaTopic, kafkaKey string
|
||||
natsURL, natsTopic string
|
||||
logWebhookURL string
|
||||
accessLog string
|
||||
debug bool
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -47,6 +54,8 @@ var (
|
||||
)
|
||||
|
||||
func main() {
|
||||
setupSignalHandler()
|
||||
|
||||
app := initApp()
|
||||
|
||||
app.Commands = []*cli.Command{
|
||||
@@ -56,7 +65,14 @@ func main() {
|
||||
testCommand(),
|
||||
}
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go func() {
|
||||
<-sigDone
|
||||
fmt.Fprintf(os.Stderr, "terminating signal caught, shutting down\n")
|
||||
cancel()
|
||||
}()
|
||||
|
||||
if err := app.RunContext(ctx, os.Args); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -131,10 +147,52 @@ func initFlags() []cli.Flag {
|
||||
Usage: "enable debug output",
|
||||
Destination: &debug,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "access-log",
|
||||
Usage: "enable server access logging to specified file",
|
||||
EnvVars: []string{"LOGFILE"},
|
||||
Destination: &accessLog,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "log-webhook-url",
|
||||
Usage: "webhook url to send the audit logs",
|
||||
EnvVars: []string{"WEBHOOK"},
|
||||
Destination: &logWebhookURL,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "event-kafka-url",
|
||||
Usage: "kafka server url to send the bucket notifications.",
|
||||
Destination: &kafkaURL,
|
||||
Aliases: []string{"eku"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "event-kafka-topic",
|
||||
Usage: "kafka server pub-sub topic to send the bucket notifications to",
|
||||
Destination: &kafkaTopic,
|
||||
Aliases: []string{"ekt"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "event-kafka-key",
|
||||
Usage: "kafka server put-sub topic key to send the bucket notifications to",
|
||||
Destination: &kafkaKey,
|
||||
Aliases: []string{"ekk"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "event-nats-url",
|
||||
Usage: "nats server url to send the bucket notifications",
|
||||
Destination: &natsURL,
|
||||
Aliases: []string{"enu"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "event-nats-topic",
|
||||
Usage: "nats server pub-sub topic to send the bucket notifications to",
|
||||
Destination: &natsTopic,
|
||||
Aliases: []string{"ent"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func runGateway(be backend.Backend, s auth.Storer) error {
|
||||
func runGateway(ctx *cli.Context, be backend.Backend, s auth.Storer) error {
|
||||
app := fiber.New(fiber.Config{
|
||||
AppName: "versitygw",
|
||||
ServerHeader: "VERSITYGW",
|
||||
@@ -172,13 +230,62 @@ func runGateway(be backend.Backend, s auth.Storer) error {
|
||||
return fmt.Errorf("setup internal iam service: %w", err)
|
||||
}
|
||||
|
||||
logger, err := s3log.InitLogger(&s3log.LogConfig{
|
||||
LogFile: accessLog,
|
||||
WebhookURL: logWebhookURL,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("setup logger: %w", err)
|
||||
}
|
||||
|
||||
evSender, err := s3event.InitEventSender(&s3event.EventConfig{
|
||||
KafkaURL: kafkaURL,
|
||||
KafkaTopic: kafkaTopic,
|
||||
KafkaTopicKey: kafkaKey,
|
||||
NatsURL: natsURL,
|
||||
NatsTopic: natsTopic,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to connect to the message broker: %w", err)
|
||||
}
|
||||
|
||||
srv, err := s3api.New(app, be, middlewares.RootUserConfig{
|
||||
Access: rootUserAccess,
|
||||
Secret: rootUserSecret,
|
||||
}, port, region, iam, opts...)
|
||||
}, port, region, iam, logger, evSender, opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init gateway: %v", err)
|
||||
}
|
||||
|
||||
return srv.Serve()
|
||||
c := make(chan error, 1)
|
||||
go func() { c <- srv.Serve() }()
|
||||
|
||||
// for/select blocks until shutdown
|
||||
Loop:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
break Loop
|
||||
case err = <-c:
|
||||
break Loop
|
||||
case <-sigHup:
|
||||
if logger != nil {
|
||||
err = logger.HangUp()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("HUP logger: %w", err)
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
be.Shutdown()
|
||||
if logger != nil {
|
||||
lerr := logger.Shutdown()
|
||||
if lerr != nil {
|
||||
fmt.Fprintf(os.Stderr, "shutdown logger: %v\n", lerr)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -49,5 +49,5 @@ func runPosix(ctx *cli.Context) error {
|
||||
return fmt.Errorf("init posix: %v", err)
|
||||
}
|
||||
|
||||
return runGateway(be, be)
|
||||
return runGateway(ctx, be, be)
|
||||
}
|
||||
|
||||
@@ -69,5 +69,5 @@ func runScoutfs(ctx *cli.Context) error {
|
||||
return fmt.Errorf("init scoutfs: %v", err)
|
||||
}
|
||||
|
||||
return runGateway(be, be)
|
||||
return runGateway(ctx, be, be)
|
||||
}
|
||||
|
||||
cmd/versitygw/signal.go (new file)
@@ -0,0 +1,44 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

var (
	sigDone = make(chan bool, 1)
	sigHup  = make(chan bool, 1)
)

func setupSignalHandler() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	go func() {
		for sig := range sigs {
			fmt.Fprintf(os.Stderr, "caught signal %v\n", sig)
			switch sig {
			case syscall.SIGINT, syscall.SIGTERM:
				sigDone <- true
			case syscall.SIGHUP:
				sigHup <- true
			}
		}
	}()
}
@@ -8,9 +8,19 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
awsID string
|
||||
awsSecret string
|
||||
endpoint string
|
||||
awsID string
|
||||
awsSecret string
|
||||
endpoint string
|
||||
prefix string
|
||||
dstBucket string
|
||||
partSize int64
|
||||
objSize int64
|
||||
concurrency int
|
||||
files int
|
||||
upload bool
|
||||
download bool
|
||||
pathStyle bool
|
||||
checksumDisable bool
|
||||
)
|
||||
|
||||
func testCommand() *cli.Command {
|
||||
@@ -57,102 +67,112 @@ func initTestFlags() []cli.Flag {
|
||||
|
||||
func initTestCommands() []*cli.Command {
|
||||
return []*cli.Command{
|
||||
{
|
||||
Name: "make-bucket",
|
||||
Usage: "Test bucket creation.",
|
||||
Description: `Calls s3 gateway create-bucket action to create a new bucket,
|
||||
then calls delete-bucket action to delete the bucket.`,
|
||||
Action: getAction(integration.TestMakeBucket),
|
||||
},
|
||||
{
|
||||
Name: "put-get-object",
|
||||
Usage: "Test put & get object.",
|
||||
Description: `Creates a bucket with s3 gateway action, puts an object in it,
|
||||
gets the object from the bucket, deletes both the object and bucket.`,
|
||||
Action: getAction(integration.TestPutGetObject),
|
||||
},
|
||||
{
|
||||
Name: "put-get-mp-object",
|
||||
Usage: "Test put & get multipart object.",
|
||||
Description: `Creates a bucket with s3 gateway action, puts an object in it with multipart upload,
|
||||
gets the object from the bucket, deletes both the object and bucket.`,
|
||||
Action: getAction(integration.TestPutGetMPObject),
|
||||
},
|
||||
{
|
||||
Name: "put-dir-object",
|
||||
Usage: "Test put directory object.",
|
||||
Description: `Creates a bucket with s3 gateway action, puts a directory object in it,
|
||||
lists the bucket's objects, deletes both the objects and bucket.`,
|
||||
Action: getAction(integration.TestPutDirObject),
|
||||
},
|
||||
{
|
||||
Name: "list-objects",
|
||||
Usage: "Test list-objects action.",
|
||||
Description: `Creates a bucket with s3 gateway action, puts 2 directory objects in it,
|
||||
lists the bucket's objects, deletes both the objects and bucket.`,
|
||||
Action: getAction(integration.TestListObject),
|
||||
},
|
||||
{
|
||||
Name: "abort-mp",
|
||||
Usage: "Tests abort-multipart-upload action.",
|
||||
Description: `Creates a bucket with s3 gateway action, creates a multipart upload,
|
||||
lists the multipart upload, aborts the multipart upload, lists the multipart upload again,
|
||||
deletes both the objects and bucket.`,
|
||||
Action: getAction(integration.TestListAbortMultiPartObject),
|
||||
},
|
||||
{
|
||||
Name: "list-parts",
|
||||
Usage: "Tests list-parts action.",
|
||||
Description: `Creates a bucket with s3 gateway action, creates a multipart upload,
|
||||
lists the upload parts, deletes both the objects and bucket.`,
|
||||
Action: getAction(integration.TestListMultiParts),
|
||||
},
|
||||
{
|
||||
Name: "incorrect-mp",
|
||||
Usage: "Tests incorrect multipart case.",
|
||||
Description: `Creates a bucket with s3 gateway action, creates a multipart upload,
|
||||
uploads different parts, completes the multipart upload with incorrect part numbers,
|
||||
calls the head-object action, compares the content length, removes both the object and bucket`,
|
||||
Action: getAction(integration.TestIncorrectMultiParts),
|
||||
},
|
||||
{
|
||||
Name: "incomplete-mp",
|
||||
Usage: "Tests incomplete multi parts.",
|
||||
Description: `Creates a bucket with s3 gateway action, creates a multipart upload,
|
||||
uploads a part, lists the parts, checks if the uploaded part is in the list,
|
||||
removes both the object and the bucket`,
|
||||
Action: getAction(integration.TestIncompleteMultiParts),
|
||||
},
|
||||
{
|
||||
Name: "incomplete-put-object",
|
||||
Usage: "Tests incomplete put objects case.",
|
||||
Description: `Creates a bucket with s3 gateway action, puts an object in it,
|
||||
gets the object with head-object action, expects the object to be returned,
|
||||
removes both the object and bucket`,
|
||||
Action: getAction(integration.TestIncompletePutObject),
|
||||
},
|
||||
{
|
||||
Name: "get-range",
|
||||
Usage: "Tests get object by range.",
|
||||
Description: `Creates a bucket with s3 gateway action, puts an object in it,
|
||||
gets the object by specifying the object range, compares the range with the original one,
|
||||
removes both the object and the bucket`,
|
||||
Action: getAction(integration.TestRangeGet),
|
||||
},
|
||||
{
|
||||
Name: "invalid-mp",
|
||||
Usage: "Tests invalid multi part case.",
|
||||
Description: `Creates a bucket with s3 gateway action, creates a multi part upload,
|
||||
uploads an invalid part, gets the object with head-object action, expects to get error,
|
||||
removes both the object and bucket`,
|
||||
Action: getAction(integration.TestInvalidMultiParts),
|
||||
},
|
||||
{
|
||||
Name: "full-flow",
|
||||
Usage: "Tests the full flow of gateway.",
|
||||
Description: `Runs all the available tests to test the full flow of the gateway.`,
|
||||
Action: getAction(integration.TestFullFlow),
|
||||
},
|
||||
{
|
||||
Name: "bench",
|
||||
Usage: "Runs download/upload performance test on the gateway",
|
||||
Description: `Uploads/downloads a number of files (specified by flags) of a given size (bytes).
|
||||
Logs the results to the console`,
|
||||
Flags: []cli.Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "files",
|
||||
Usage: "Number of objects to read/write",
|
||||
Value: 1,
|
||||
Destination: &files,
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: "objsize",
|
||||
Usage: "Uploading object size",
|
||||
Value: 0,
|
||||
Destination: &objSize,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "prefix",
|
||||
Usage: "Object name prefix",
|
||||
Destination: &prefix,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "upload",
|
||||
Usage: "Upload data to the gateway",
|
||||
Value: false,
|
||||
Destination: &upload,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "download",
|
||||
Usage: "Download data to the gateway",
|
||||
Value: false,
Destination: &download,
},
&cli.StringFlag{
Name: "bucket",
Usage: "Destination bucket name to read/write data",
Destination: &dstBucket,
},
&cli.Int64Flag{
Name: "partSize",
Usage: "Upload/download size per thread",
Value: 64 * 1024 * 1024,
Destination: &partSize,
},
&cli.IntFlag{
Name: "concurrency",
Usage: "Upload/download threads per object",
Value: 1,
Destination: &concurrency,
},
&cli.BoolFlag{
Name: "pathStyle",
Usage: "Use Pathstyle bucket addressing",
Value: false,
Destination: &pathStyle,
},
&cli.BoolFlag{
Name: "checksumDis",
Usage: "Disable server checksum",
Value: false,
Destination: &checksumDisable,
},
},
Action: func(ctx *cli.Context) error {
if upload && download {
return fmt.Errorf("must only specify one of upload or download")
}
if !upload && !download {
return fmt.Errorf("must specify one of upload or download")
}

if dstBucket == "" {
return fmt.Errorf("must specify bucket")
}

opts := []integration.Option{
integration.WithAccess(awsID),
integration.WithSecret(awsSecret),
integration.WithRegion(region),
integration.WithEndpoint(endpoint),
integration.WithConcurrency(concurrency),
integration.WithPartSize(partSize),
}
if debug {
opts = append(opts, integration.WithDebug())
}
if pathStyle {
opts = append(opts, integration.WithPathStyle())
}
if checksumDisable {
opts = append(opts, integration.WithDisableChecksum())
}

s3conf := integration.NewS3Conf(opts...)

return integration.TestPerformance(s3conf, upload, download, files, objSize, dstBucket, prefix)
},
},
}
}

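For reference, a new integration subcommand follows the same pattern as the entries above. A minimal sketch (the `put-object-tagging` name is illustrative; `integration.TestPutObjectTagging` is one of the test groups added in this change):

```
{
	Name:  "put-object-tagging",
	Usage: "Tests put object tagging.",
	Description: `Creates a bucket with s3 gateway action, puts tags on an object,
checks the error and success cases, removes both the object and the bucket`,
	Action: getAction(integration.TestPutObjectTagging),
},
```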
@@ -175,6 +195,9 @@ func getAction(tf testFunc) func(*cli.Context) error {

fmt.Println()
fmt.Println("RAN:", integration.RunCount, "PASS:", integration.PassCount, "FAIL:", integration.FailCount)
if integration.FailCount > 0 {
return fmt.Errorf("test failed with %v errors", integration.FailCount)
}
return nil
}
}

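The RAN/PASS/FAIL summary above reads counters kept by the integration package. Their definitions are not part of this diff; a rough sketch of the assumed shape, for context only:

```
// Assumed shape only: RunCount/PassCount/FailCount feed the summary line above,
// and runF/passF/failF are the helpers called by the integration test utilities.
var (
	RunCount  int
	PassCount int
	FailCount int
)

func runF(format string, a ...any) {
	RunCount++
	fmt.Printf(format+"\n", a...)
}

func passF(format string, a ...any) {
	PassCount++
	fmt.Printf("PASS: "+format+"\n", a...)
}

func failF(format string, a ...any) {
	FailCount++
	fmt.Printf("FAIL: "+format+"\n", a...)
}
```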
go.mod
@@ -3,51 +3,57 @@ module github.com/versity/versitygw
go 1.20

require (
github.com/aws/aws-sdk-go-v2 v1.18.1
github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0
github.com/aws/smithy-go v1.13.5
github.com/gofiber/fiber/v2 v2.46.0
github.com/aws/aws-sdk-go-v2 v1.20.0
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.1
github.com/aws/smithy-go v1.14.0
github.com/gofiber/fiber/v2 v2.48.0
github.com/google/uuid v1.3.0
github.com/nats-io/nats.go v1.28.0
github.com/pkg/xattr v0.4.9
github.com/urfave/cli/v2 v2.25.6
github.com/valyala/fasthttp v1.47.0
github.com/segmentio/kafka-go v0.4.42
github.com/urfave/cli/v2 v2.25.7
github.com/valyala/fasthttp v1.48.0
github.com/versity/scoutfs-go v0.0.0-20230606232754-0474b14343b9
golang.org/x/sys v0.9.0
golang.org/x/sys v0.10.0
)

require (
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.7 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.38 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.13.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.21.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/nats-io/nats-server/v2 v2.9.20 // indirect
github.com/nats-io/nkeys v0.4.4 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/stretchr/testify v1.8.1 // indirect
golang.org/x/crypto v0.11.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
)

require (
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.27
github.com/aws/aws-sdk-go-v2/credentials v1.13.26
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.11 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.32
github.com/aws/aws-sdk-go-v2/credentials v1.13.31
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.76
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.37 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.31 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.12 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.32 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.31 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/klauspost/compress v1.16.6 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 // indirect
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee // indirect
github.com/tinylib/msgp v1.1.8 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect

go.sum
@@ -1,144 +1,167 @@
|
||||
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
|
||||
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/aws/aws-sdk-go-v2 v1.18.1 h1:+tefE750oAb7ZQGzla6bLkOwfcQCEtC5y2RqoqCeqKo=
|
||||
github.com/aws/aws-sdk-go-v2 v1.18.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.27 h1:Az9uLwmssTE6OGTpsFqOnaGpLnKDqNYOJzWuC6UAYzA=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.27/go.mod h1:0My+YgmkGxeqjXZb5BYme5pc4drjTnM+x1GJ3zv42Nw=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.26 h1:qmU+yhKmOCyujmuPY7tf5MxR/RKyZrOPO3V4DobiTUk=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.26/go.mod h1:GoXt2YC8jHUBbA4jr+W3JiemnIbkXOfxSXcisUsZ3os=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 h1:LxK/bitrAr4lnh9LnIS6i7zWbCOdMsfzKFBI6LUCS0I=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4/go.mod h1:E1hLXN/BL2e6YizK1zFlYd8vsfi2GTjbjBazinMmeaM=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70 h1:4bh28MeeXoBFTjb0JjQ5sVatzlf5xA1DziV8mZed9v4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70/go.mod h1:9yI5NXzqy2yOiMytv6QLZHvlyHLwYxO9iIq+bZIbrFg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 h1:A5UqQEmPaCFpedKouS4v+dHCTUo2sKqhoKO9U5kxyWo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34/go.mod h1:wZpTEecJe0Btj3IYnDx/VlUzor9wm3fJHyvLpQF0VwY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 h1:srIVS45eQuewqz6fKKu6ZGXaq6FuFg5NzgQBAM6g8Y4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28/go.mod h1:7VRpKQQedkfIEXb4k52I7swUnZP0wohVajJMRn3vsUw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 h1:LWA+3kDM8ly001vJ1X1waCuLJdtTl48gwkPKWy9sosI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35/go.mod h1:0Eg1YjxE0Bhn56lx+SHJwCzhW+2JGtizsrx+lCqrfm0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 h1:wscW+pnn3J1OYnanMnza5ZVYXLX4cKk5rAvUAl4Qu+c=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26/go.mod h1:MtYiox5gvyB+OyP0Mr0Sm/yzbEAIPL9eijj/ouHAPw0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 h1:zZSLP3v3riMOP14H7b4XP0uyfREDQOYv2cqIrvTXDNQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29/go.mod h1:z7EjRjVwZ6pWcWdI2H64dKttvzaP99jRIj5hphW0M5U=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 h1:bkRyG4a929RCnpVSTvLM2j/T4ls015ZhhYApbmYs15s=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28/go.mod h1:jj7znCIg05jXlaGBlFMGP8+7UN3VtCkRBG2spnmRQkU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 h1:dBL3StFxHtpBzJJ/mNEsjXVgfO+7jR0dAIEwLqMapEA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3/go.mod h1:f1QyiAsvIv4B49DmCqrhlXqyaR+0IxMmyX+1P+AnzOM=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.34.1 h1:rYYwwsGqbwvGgQHjBkqgDt8MynXk+I8xgS0IEj5gOT0=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.34.1/go.mod h1:aVbf0sko/TsLWHx30c/uVu7c62+0EAJ3vbxaJga0xCw=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0 h1:ya7fmrN2fE7s1P2gaPbNg5MTkERVWfsH8ToP1YC4Z9o=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0/go.mod h1:aVbf0sko/TsLWHx30c/uVu7c62+0EAJ3vbxaJga0xCw=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 h1:nneMBM2p79PGWBQovYO/6Xnc2ryRMw3InnDJq1FHkSY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.12/go.mod h1:HuCOxYsF21eKrerARYO6HapNeh9GBNq7fius2AcwodY=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 h1:2qTR7IFk7/0IN/adSFhYu9Xthr0zVFTgBrmPldILn80=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12/go.mod h1:E4VrHCPzmVB/KFXtqBGKb3c8zpbNBgKe3fisDNLAW5w=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 h1:XFJ2Z6sNUUcAz9poj+245DMkrHE4h2j5I9/xD50RHfE=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2/go.mod h1:dp0yLPsLBOi++WTxzCjA/oZqi6NPIhoR+uF7GeMU9eg=
|
||||
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
|
||||
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.20.0 h1:INUDpYLt4oiPOJl0XwZDK2OVAVf0Rzo+MGVTv9f+gy8=
|
||||
github.com/aws/aws-sdk-go-v2 v1.20.0/go.mod h1:uWOr0m0jDsiWw8nnXiqZ+YG6LdvAlGYDLLf2NmHZoy4=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.11 h1:/MS8AzqYNAhhRNalOmxUvYs8VEbNGifTnzhPFdcRQkQ=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.11/go.mod h1:va22++AdXht4ccO3kH2SHkHHYvZ2G9Utz+CXKmm2CaU=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.32 h1:tqEOvkbTxwEV7hToRcJ1xZRjcATqwDVsWbAscgRKyNI=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.32/go.mod h1:U3ZF0fQRRA4gnbn9GGvOWLoT2EzzZfAWeKwnVrm1rDc=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.31 h1:vJyON3lG7R8VOErpJJBclBADiWTwzcwdkQpTKx8D2sk=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.31/go.mod h1:T4sESjBtY2lNxLgkIASmeP57b5j7hTQqCbqG0tWnxC4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.7 h1:X3H6+SU21x+76LRglk21dFRgMTJMa5QcpW+SqUf5BBg=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.7/go.mod h1:3we0V09SwcJBzNlnyovrR2wWJhWmVdqAsmVs4uronv8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.76 h1:DJ1kHj0GI9BbX+XhF0kHxlzOVjcncmDUXmCvXdbfdAE=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.76/go.mod h1:/AZCdswMSgwpB2yMSFfY5H4pVeBLnCuPehdmO/r3xSM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.37 h1:zr/gxAZkMcvP71ZhQOcvdm8ReLjFgIXnIn0fw5AM7mo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.37/go.mod h1:Pdn4j43v49Kk6+82spO3Tu5gSeQXRsxo56ePPQAvFiA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.31 h1:0HCMIkAkVY9KMgueD8tf4bRTUanzEYvhw7KkPXIMpO0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.31/go.mod h1:fTJDMe8LOFYtqiFFFeHA+SVMAwqLhoq0kcInYoLa9Js=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.38 h1:+i1DOFrW3YZ3apE45tCal9+aDKK6kNEbW6Ib7e1nFxE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.38/go.mod h1:1/jLp0OgOaWIetycOmycW+vYTYgTZFPttJQRgsI1PoU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.0 h1:U5yySdwt2HPo/pnQec04DImLzWORbeWML1fJiLkKruI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.0/go.mod h1:EhC/83j8/hL/UB1WmExo3gkElaja/KlmZM/gl1rTfjM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.12 h1:uAiiHnWihGP2rVp64fHwzLDrswGjEjsPszwRYMiYQPU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.12/go.mod h1:fUTHpOXqRQpXvEpDPSa3zxCc2fnpW6YnBoba+eQr+Bg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.32 h1:kvN1jPHr9UffqqG3bSgZ8tx4+1zKVHz/Ktw/BwW6hX8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.32/go.mod h1:QmMEM7es84EUkbYWcpnkx8i5EW2uERPfrTFeOch128Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.31 h1:auGDJ0aLZahF5SPvkJ6WcUuX7iQ7kyl2MamV7Tm8QBk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.31/go.mod h1:3+lloe3sZuBQw1aBc5MyndvodzQlyqCZ7x1QPDHaWP4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.0 h1:Wgjft9X4W5pMeuqgPCHIQtbZ87wsgom7S5F8obreg+c=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.0/go.mod h1:FWNzS4+zcWAP05IF7TDYTY1ysZAzIvogxWaDT9p8fsA=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.1 h1:mTgFVlfQT8gikc5+/HwD8UL9jnUro5MGv8n/VEYF12I=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.1/go.mod h1:6SOWLiobcZZshbmECRTADIRYliPL0etqFSigauQEeT0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.13.1 h1:DSNpSbfEgFXRV+IfEcKE5kTbqxm+MeF5WgyeRlsLnHY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.13.1/go.mod h1:TC9BubuFMVScIU+TLKamO6VZiYTkYoEHqlSQwAe2omw=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.1 h1:hd0SKLMdOL/Sl6Z0np1PX9LeH2gqNtBe0MhTedA8MGI=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.1/go.mod h1:XO/VcyoQ8nKyKfFW/3DMsRQXsfh/052tHTWmg3xBXRg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.21.1 h1:pAOJj+80tC8sPVgSDHzMYD6KLWsaLQ1kZw31PTeORbs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.21.1/go.mod h1:G8SbvL0rFk4WOJroU8tKBczhsbhj2p/YY7qeJezJ3CI=
|
||||
github.com/aws/smithy-go v1.14.0 h1:+X90sB94fizKjDmwb4vyl2cTTPXTE5E2G/1mjByb0io=
|
||||
github.com/aws/smithy-go v1.14.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/gofiber/fiber/v2 v2.46.0 h1:wkkWotblsGVlLjXj2dpgKQAYHtXumsK/HyFugQM68Ns=
|
||||
github.com/gofiber/fiber/v2 v2.46.0/go.mod h1:DNl0/c37WLe0g92U6lx1VMQuxGUQY5V7EIaVoEsUffc=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/gofiber/fiber/v2 v2.48.0 h1:cRVMCb9aUJDsyHxGFLwz/sGzDggdailZZyptU9F9cU0=
|
||||
github.com/gofiber/fiber/v2 v2.48.0/go.mod h1:xqJgfqrc23FJuqGOW6DVgi3HyZEm2Mn9pRqUb2kHSX8=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
|
||||
github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
|
||||
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
|
||||
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
|
||||
github.com/nats-io/jwt/v2 v2.4.1 h1:Y35W1dgbbz2SQUYDPCaclXcuqleVmpbRa7646Jf2EX4=
|
||||
github.com/nats-io/nats-server/v2 v2.9.20 h1:bt1dW6xsL1hWWwv7Hovm+EJt5L6iplyqlgEFkoEUk0k=
|
||||
github.com/nats-io/nats-server/v2 v2.9.20/go.mod h1:aTb/xtLCGKhfTFLxP591CMWfkdgBmcUUSkiSOe5A3gw=
|
||||
github.com/nats-io/nats.go v1.28.0 h1:Th4G6zdsz2d0OqXdfzKLClo6bOfoI/b1kInhRtFIy5c=
|
||||
github.com/nats-io/nats.go v1.28.0/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc=
|
||||
github.com/nats-io/nkeys v0.4.4 h1:xvBJ8d69TznjcQl9t6//Q5xXuVhyYiSos6RPtvQNTwA=
|
||||
github.com/nats-io/nkeys v0.4.4/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
|
||||
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 h1:rmMl4fXJhKMNWl+K+r/fq4FbbKI+Ia2m9hYBLm2h4G4=
|
||||
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94/go.mod h1:90zrgN3D/WJsDd1iXHT96alCoN2KJo6/4x1DZC3wZs8=
|
||||
github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d/go.mod h1:Gy+0tqhJvgGlqnTF8CVGP0AaGRjwBtXs/a5PA0Y3+A4=
|
||||
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee h1:8Iv5m6xEo1NR1AvpV+7XmhI4r39LGNzwUL4YpMuL5vk=
|
||||
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee/go.mod h1:qwtSXrKuJh/zsFQ12yEE89xfCrGKK63Rr7ctU/uCo4g=
|
||||
github.com/segmentio/kafka-go v0.4.42 h1:qffhBZCz4WcWyNuHEclHjIMLs2slp6mZO8px+5W5tfU=
|
||||
github.com/segmentio/kafka-go v0.4.42/go.mod h1:d0g15xPMqoUookug0OU75DhGZxXwCFxSLeJ4uphwJzg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw=
|
||||
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
|
||||
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
|
||||
github.com/urfave/cli/v2 v2.25.6 h1:yuSkgDSZfH3L1CjF2/5fNNg2KbM47pY2EvjBq4ESQnU=
|
||||
github.com/urfave/cli/v2 v2.25.6/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.47.0 h1:y7moDoxYzMooFpT5aHgNgVOQDrS3qlkfiP9mDtGGK9c=
|
||||
github.com/valyala/fasthttp v1.47.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA=
|
||||
github.com/valyala/fasthttp v1.48.0 h1:oJWvHb9BIZToTQS3MuQ2R3bJZiNSa2KiNdeI8A+79Tc=
|
||||
github.com/valyala/fasthttp v1.48.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA=
|
||||
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/versity/scoutfs-go v0.0.0-20230606232754-0474b14343b9 h1:ZfmQR01Kk6/kQh6+zlqfBYszVY02fzf9xYrchOY4NFM=
|
||||
github.com/versity/scoutfs-go v0.0.0-20230606232754-0474b14343b9/go.mod h1:gJsq73k+4685y+rbDIpPY8i/5GbsiwP6JFoFyUDB1fQ=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
|
||||
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
|
||||
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
|
||||
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
|
||||
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
integration/action-tests.go (new file)
@@ -0,0 +1,205 @@
package integration

func TestAuthentication(s *S3Conf) {
Authentication_empty_auth_header(s)
Authentication_invalid_auth_header(s)
Authentication_unsupported_signature_version(s)
Authentication_malformed_credentials(s)
Authentication_malformed_credentials_invalid_parts(s)
Authentication_credentials_terminated_string(s)
Authentication_credentials_incorrect_service(s)
Authentication_credentials_incorrect_region(s)
Authentication_credentials_invalid_date(s)
Authentication_credentials_future_date(s)
Authentication_credentials_past_date(s)
Authentication_credentials_non_existing_access_key(s)
Authentication_invalid_signed_headers(s)
Authentication_missing_date_header(s)
Authentication_invalid_date_header(s)
Authentication_date_mismatch(s)
Authentication_incorrect_payload_hash(s)
Authentication_incorrect_md5(s)
Authentication_signature_error_incorrect_secret_key(s)
}

func TestCreateBucket(s *S3Conf) {
CreateBucket_invalid_bucket_name(s)
CreateBucket_existing_bucket(s)
CreateDeleteBucket_success(s)
}

func TestHeadBucket(s *S3Conf) {
HeadBucket_non_existing_bucket(s)
HeadBucket_success(s)
}

func TestListBuckets(s *S3Conf) {
ListBuckets_as_user(s)
ListBuckets_as_admin(s)
ListBuckets_success(s)
}

func TestDeleteBucket(s *S3Conf) {
DeleteBucket_non_existing_bucket(s)
DeleteBucket_non_empty_bucket(s)
}

func TestPutObject(s *S3Conf) {
PutObject_non_existing_bucket(s)
PutObject_special_chars(s)
PutObject_existing_dir_obj(s)
PutObject_obj_parent_is_file(s)
PutObject_success(s)
}

func TestHeadObject(s *S3Conf) {
HeadObject_non_existing_object(s)
HeadObject_success(s)
}

func TestGetObject(s *S3Conf) {
GetObject_non_existing_key(s)
GetObject_invalid_ranges(s)
GetObject_with_meta(s)
GetObject_success(s)
GetObject_by_range_success(s)
}

func TestListObjects(s *S3Conf) {
ListObjects_non_existing_bucket(s)
ListObjects_with_prefix(s)
ListObject_truncated(s)
ListObjects_invalid_max_keys(s)
ListObjects_max_keys_0(s)
}

func TestDeleteObject(s *S3Conf) {
DeleteObject_non_existing_object(s)
DeleteObject_success(s)
}

func TestDeleteObjects(s *S3Conf) {
DeleteObjects_empty_input(s)
DeleteObjects_non_existing_objects(s)
DeleteObjects_success(s)
}

func TestCopyObject(s *S3Conf) {
CopyObject_non_existing_dst_bucket(s)
CopyObject_success(s)
}

func TestPutObjectTagging(s *S3Conf) {
PutObjectTagging_non_existing_object(s)
PutObjectTagging_success(s)
}

func TestGetObjectTagging(s *S3Conf) {
GetObjectTagging_non_existing_object(s)
GetObjectTagging_success(s)
}

func TestDeleteObjectTagging(s *S3Conf) {
DeleteObjectTagging_non_existing_object(s)
DeleteObjectTagging_success(s)
}

func TestCreateMultipartUpload(s *S3Conf) {
CreateMultipartUpload_non_existing_bucket(s)
CreateMultipartUpload_success(s)
}

func TestUploadPart(s *S3Conf) {
UploadPart_non_existing_bucket(s)
UploadPart_invalid_part_number(s)
UploadPart_non_existing_key(s)
UploadPart_non_existing_mp_upload(s)
UploadPart_success(s)
}

func TestUploadPartCopy(s *S3Conf) {
UploadPartCopy_non_existing_bucket(s)
UploadPartCopy_incorrect_uploadId(s)
UploadPartCopy_incorrect_object_key(s)
UploadPartCopy_invalid_part_number(s)
UploadPartCopy_invalid_copy_source(s)
UploadPartCopy_non_existing_source_bucket(s)
UploadPartCopy_non_existing_source_object_key(s)
UploadPartCopy_success(s)
UploadPartCopy_by_range_invalid_range(s)
UploadPartCopy_by_range_success(s)
}

func TestListParts(s *S3Conf) {
ListParts_incorrect_uploadId(s)
ListParts_incorrect_object_key(s)
ListParts_success(s)
}

func TestListMultipartUploads(s *S3Conf) {
ListMultipartUploads_non_existing_bucket(s)
ListMultipartUploads_empty_result(s)
ListMultipartUploads_invalid_max_uploads(s)
ListMultipartUploads_max_uploads(s)
ListMultipartUploads_incorrect_next_key_marker(s)
ListMultipartUploads_ignore_upload_id_marker(s)
ListMultipartUploads_success(s)
}

func TestAbortMultipartUpload(s *S3Conf) {
AbortMultipartUpload_non_existing_bucket(s)
AbortMultipartUpload_incorrect_uploadId(s)
AbortMultipartUpload_incorrect_object_key(s)
AbortMultipartUpload_success(s)
}

func TestCompleteMultipartUpload(s *S3Conf) {
CompletedMultipartUpload_non_existing_bucket(s)
CompleteMultipartUpload_invalid_part_number(s)
CompleteMultipartUpload_invalid_ETag(s)
CompleteMultipartUpload_success(s)
}

func TestPutBucketAcl(s *S3Conf) {
PutBucketAcl_non_existing_bucket(s)
PutBucketAcl_invalid_acl_canned_and_acp(s)
PutBucketAcl_invalid_acl_canned_and_grants(s)
PutBucketAcl_invalid_acl_acp_and_grants(s)
PutBucketAcl_invalid_owner(s)
PutBucketAcl_success_access_denied(s)
PutBucketAcl_success_grants(s)
PutBucketAcl_success_canned_acl(s)
PutBucketAcl_success_acp(s)
}

func TestGetBucketAcl(s *S3Conf) {
GetBucketAcl_non_existing_bucket(s)
GetBucketAcl_access_denied(s)
GetBucketAcl_success(s)
}

func TestFullFlow(s *S3Conf) {
TestAuthentication(s)
TestCreateBucket(s)
TestHeadBucket(s)
TestListBuckets(s)
TestDeleteBucket(s)
TestPutObject(s)
TestHeadObject(s)
TestGetObject(s)
TestListObjects(s)
TestDeleteObject(s)
TestDeleteObjects(s)
TestCopyObject(s)
TestPutObjectTagging(s)
TestDeleteObjectTagging(s)
TestCreateMultipartUpload(s)
TestUploadPart(s)
TestUploadPartCopy(s)
TestListParts(s)
TestListMultipartUploads(s)
TestAbortMultipartUpload(s)
TestCompleteMultipartUpload(s)
TestPutBucketAcl(s)
TestGetBucketAcl(s)
}

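These grouped entry points can also be driven straight from Go rather than through the CLI. A minimal sketch using the option helpers shown elsewhere in this change (the credentials and endpoint below are placeholders for a locally running gateway):

```
package main

import "github.com/versity/versitygw/integration"

func main() {
	// Placeholder credentials/endpoint; match them to the gateway under test.
	s := integration.NewS3Conf(
		integration.WithAccess("user"),
		integration.WithSecret("pass"),
		integration.WithRegion("us-east-1"),
		integration.WithEndpoint("http://127.0.0.1:7070"),
	)
	integration.TestFullFlow(s)
}
```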
@@ -38,6 +38,26 @@ func (r *RReader) Sum() []byte {
return r.hash.Sum(nil)
}

type ZReader struct {
buf []byte
dataleft int
}

func NewZeroReader(totalsize, bufsize int) *ZReader {
b := make([]byte, bufsize)
return &ZReader{buf: b, dataleft: totalsize}
}

func (r *ZReader) Read(p []byte) (int, error) {
n := min(len(p), len(r.buf), r.dataleft)
r.dataleft -= n
err := error(nil)
if n == 0 {
err = io.EOF
}
return copy(p, r.buf[:n]), err
}

func min(values ...int) int {
if len(values) == 0 {
return 0
@@ -52,3 +72,13 @@ func min(values ...int) int {

return min
}

type NW struct{}

func NewNullWriter() NW {
return NW{}
}

func (NW) WriteAt(p []byte, off int64) (n int, err error) {
return len(p), nil
}

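A small usage sketch of the two helpers added here: the zero-filled reader produces a fixed amount of data from a reusable buffer, and the null writer accepts and discards whatever it is given, which is useful when only transfer speed matters. The function below is illustrative only:

```
// Sketch only: exercises ZReader and NW from this package.
func exampleZeroReaderNullWriter() error {
	r := NewZeroReader(8*1024*1024, 64*1024) // 8 MiB of zeroes served from a 64 KiB buffer
	n, err := io.Copy(io.Discard, r)         // reads until the reader reports io.EOF
	if err != nil {
		return err
	}

	w := NewNullWriter()
	_, err = w.WriteAt(make([]byte, 4096), n) // accepted and dropped at any offset
	return err
}
```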
@@ -2,6 +2,7 @@ package integration

import (
"context"
"io"
"log"
"net/http"
"os"
@@ -10,6 +11,8 @@ import (
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/smithy-go/middleware"
)

@@ -26,10 +29,7 @@ type S3Conf struct {
}

func NewS3Conf(opts ...Option) *S3Conf {
s := &S3Conf{
PartSize: 64 * 1024 * 1024, // 64B default chunksize
Concurrency: 1, // 1 default concurrency
}
s := &S3Conf{}

for _, opt := range opts {
opt(s)
@@ -123,3 +123,31 @@ func (c *S3Conf) Config() aws.Config {

return cfg
}

func (c *S3Conf) UploadData(r io.Reader, bucket, object string) error {
uploader := manager.NewUploader(s3.NewFromConfig(c.Config()))
uploader.PartSize = c.PartSize
uploader.Concurrency = c.Concurrency

upinfo := &s3.PutObjectInput{
Body: r,
Bucket: &bucket,
Key: &object,
}

_, err := uploader.Upload(context.Background(), upinfo)
return err
}

func (c *S3Conf) DownloadData(w io.WriterAt, bucket, object string) (int64, error) {
downloader := manager.NewDownloader(s3.NewFromConfig(c.Config()))
downloader.PartSize = c.PartSize
downloader.Concurrency = c.Concurrency

downinfo := &s3.GetObjectInput{
Bucket: &bucket,
Key: &object,
}

return downloader.Download(context.Background(), w, downinfo)
}

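Taken together with the zero reader and null writer above, the upload/download path the bench command presumably follows looks roughly like this. This is a sketch, not the actual TestPerformance implementation:

```
// Sketch: push zero-filled data through the gateway and read it back into a
// null writer, so only gateway/network throughput is measured.
func benchOnce(s *S3Conf, bucket, object string, size int) error {
	if err := s.UploadData(NewZeroReader(size, 64*1024), bucket, object); err != nil {
		return err
	}
	_, err := s.DownloadData(NewNullWriter(), bucket, object)
	return err
}
```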
integration/tests.go (diff suppressed because it is too large)

integration/utils.go (new file)
@@ -0,0 +1,508 @@
package integration

import (
"bytes"
"context"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"strings"
"time"

"github.com/aws/aws-sdk-go-v2/aws"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)

var (
bcktCount = 0
succUsrCrt = "The user has been created successfully"
failUsrCrt = "failed to create a user: update iam data: account already exists"
)

func getBucketName() string {
bcktCount++
return fmt.Sprintf("test-bucket-%v", bcktCount)
}

func setup(s *S3Conf, bucket string) error {
s3client := s3.NewFromConfig(s.Config())

ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.CreateBucket(ctx, &s3.CreateBucketInput{
Bucket: &bucket,
})
cancel()
return err
}

func teardown(s *S3Conf, bucket string) error {
s3client := s3.NewFromConfig(s.Config())

deleteObject := func(bucket, key, versionId *string) error {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: bucket,
Key: key,
VersionId: versionId,
})
cancel()
if err != nil {
return fmt.Errorf("failed to delete object %v: %v", *key, err)
}
return nil
}

in := &s3.ListObjectsV2Input{Bucket: &bucket}
for {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.ListObjectsV2(ctx, in)
cancel()
if err != nil {
return fmt.Errorf("failed to list objects: %v", err)
}

for _, item := range out.Contents {
err = deleteObject(&bucket, item.Key, nil)
if err != nil {
return err
}
}

if out.IsTruncated {
in.ContinuationToken = out.ContinuationToken
} else {
break
}
}

ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := s3client.DeleteBucket(ctx, &s3.DeleteBucketInput{
Bucket: &bucket,
})
cancel()
return err
}

func actionHandler(s *S3Conf, testName string, handler func(s3client *s3.Client, bucket string) error) {
runF(testName)
bucketName := getBucketName()
err := setup(s, bucketName)
if err != nil {
failF("%v: failed to create a bucket: %v", testName, err.Error())
return
}
client := s3.NewFromConfig(s.Config())
handlerErr := handler(client, bucketName)
if handlerErr != nil {
failF("%v: %v", testName, handlerErr.Error())
}

err = teardown(s, bucketName)
if err != nil {
if handlerErr == nil {
failF("%v: failed to delete the bucket: %v", testName, err.Error())
} else {
fmt.Printf(colorRed+"%v: failed to delete the bucket: %v", testName, err.Error())
}
}
if handlerErr == nil {
passF(testName)
}
}

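Individual test cases (the *_success and *_non_existing_* functions listed in action-tests.go) are built on top of actionHandler; an illustrative case might look like the following. The test body is hypothetical, only the helpers it calls come from this change:

```
func PutObject_example(s *S3Conf) {
	testName := "PutObject_example"
	actionHandler(s, testName, func(s3client *s3.Client, bucket string) error {
		ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
		defer cancel()
		// The handler only exercises the API under test and returns an error
		// on failure; bucket setup/teardown and pass/fail reporting are
		// handled by actionHandler.
		_, err := s3client.PutObject(ctx, &s3.PutObjectInput{
			Bucket: &bucket,
			Key:    getPtr("example-obj"),
		})
		return err
	})
}
```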
type authConfig struct {
testName string
path string
method string
body []byte
service string
date time.Time
}

func authHandler(s *S3Conf, cfg *authConfig, handler func(req *http.Request) error) {
runF(cfg.testName)
req, err := http.NewRequest(cfg.method, fmt.Sprintf("%v/%v", s.endpoint, cfg.path), bytes.NewReader(cfg.body))
if err != nil {
failF("%v: failed to send the request: %v", cfg.testName, err.Error())
return
}

signer := v4.NewSigner()

hashedPayload := sha256.Sum256([]byte{})
hexPayload := hex.EncodeToString(hashedPayload[:])

req.Header.Set("X-Amz-Content-Sha256", hexPayload)

signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: s.awsID, SecretAccessKey: s.awsSecret}, req, hexPayload, cfg.service, s.awsRegion, cfg.date)
if signErr != nil {
failF("%v: failed to sign the request: %v", cfg.testName, signErr.Error())
return
}

err = handler(req)
if err != nil {
failF("%v: %v", cfg.testName, err.Error())
return
}
passF(cfg.testName)
}

func checkAuthErr(resp *http.Response, apiErr s3err.APIError) error {
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}

var errResp s3err.APIErrorResponse
err = xml.Unmarshal(body, &errResp)
if err != nil {
return err
}

if resp.StatusCode != apiErr.HTTPStatusCode {
return fmt.Errorf("expected response status code to be %v, instead got %v", apiErr.HTTPStatusCode, resp.StatusCode)
}
if errResp.Code != apiErr.Code {
return fmt.Errorf("expected error code to be %v, instead got %v", apiErr.Code, errResp.Code)
}
if errResp.Message != apiErr.Description {
return fmt.Errorf("expected error message to be %v, instead got %v", apiErr.Description, errResp.Message)
}

return nil
}

func checkApiErr(err error, apiErr s3err.APIError) error {
if err == nil {
return fmt.Errorf("expected %v, instead got nil", apiErr.Code)
}
var ae smithy.APIError
if errors.As(err, &ae) {
if ae.ErrorCode() == apiErr.Code && ae.ErrorMessage() == apiErr.Description {
return nil
}

return fmt.Errorf("expected %v, instead got %v", apiErr.Code, ae.ErrorCode())
} else {
return fmt.Errorf("expected aws api error, instead got: %v", err.Error())
}
}

func checkSdkApiErr(err error, code string) error {
var ae smithy.APIError
if errors.As(err, &ae) {
if ae.ErrorCode() != code {
return fmt.Errorf("expected %v, instead got %v", code, ae.ErrorCode())
}
return nil
}
return err
}

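Inside a test handler, checkApiErr is the usual way to assert that a call failed with a specific S3 error. A fragment for illustration; the error literal below is constructed inline and its exact description text is an assumption, not something this diff defines:

```
// Expect PutObject to a missing bucket to fail with NoSuchBucket.
_, err := s3client.PutObject(ctx, &s3.PutObjectInput{
	Bucket: getPtr("no-such-bucket"),
	Key:    getPtr("obj"),
})
return checkApiErr(err, s3err.APIError{
	Code:           "NoSuchBucket",
	Description:    "The specified bucket does not exist",
	HTTPStatusCode: http.StatusNotFound,
})
```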
func putObjects(client *s3.Client, objs []string, bucket string) error {
for _, key := range objs {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err := client.PutObject(ctx, &s3.PutObjectInput{
Key: &key,
Bucket: &bucket,
})
cancel()
if err != nil {
return err
}
}
return nil
}

func putObjectWithData(lgth int64, input *s3.PutObjectInput, client *s3.Client) (csum [32]byte, data []byte, err error) {
data = make([]byte, lgth)
rand.Read(data)
csum = sha256.Sum256(data)
r := bytes.NewReader(data)
input.Body = r

ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
_, err = client.PutObject(ctx, input)
cancel()

return
}

func CreateMp(s3client *s3.Client, bucket, key string) (*s3.CreateMultipartUploadOutput, error) {
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := s3client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
Bucket: &bucket,
Key: &key,
})
cancel()
return out, err
}

func isEqual(a, b []byte) bool {
if len(a) != len(b) {
return false
}

for i, d := range a {
if d != b[i] {
return false
}
}

return true
}

func compareMultipartUploads(list1, list2 []types.MultipartUpload) bool {
if len(list1) != len(list2) {
return false
}
for i, item := range list1 {
if *item.Key != *list2[i].Key || *item.UploadId != *list2[i].UploadId {
return false
}
}

return true
}

func compareParts(parts1, parts2 []types.Part) bool {
if len(parts1) != len(parts2) {
return false
}

for i, prt := range parts1 {
if prt.PartNumber != parts2[i].PartNumber {
return false
}
if *prt.ETag != *parts2[i].ETag {
return false
}
}
return true
}

func areTagsSame(tags1, tags2 []types.Tag) bool {
if len(tags1) != len(tags2) {
return false
}

for _, tag := range tags1 {
if !containsTag(tag, tags2) {
return false
}
}
return true
}

func containsTag(tag types.Tag, list []types.Tag) bool {
for _, item := range list {
if *item.Key == *tag.Key && *item.Value == *tag.Value {
return true
}
}
return false
}

func compareGrants(grts1, grts2 []types.Grant) bool {
if len(grts1) != len(grts2) {
return false
}

for i, grt := range grts1 {
if grt.Permission != grts2[i].Permission {
return false
}
if *grt.Grantee.ID != *grts2[i].Grantee.ID {
return false
}
}
return true
}

func execCommand(args ...string) ([]byte, error) {
cmd := exec.Command("./versitygw", args...)

return cmd.CombinedOutput()
}

func getString(str *string) string {
if str == nil {
return ""
}
return *str
}

func getPtr(str string) *string {
return &str
}

func areMapsSame(mp1, mp2 map[string]string) bool {
if len(mp1) != len(mp2) {
return false
}
for key, val := range mp1 {
if mp2[key] != val {
return false
}
}
return true
}

func compareBuckets(list1 []types.Bucket, list2 []s3response.ListAllMyBucketsEntry) bool {
if len(list1) != len(list2) {
return false
}

elementMap := make(map[string]bool)

for _, elem := range list1 {
elementMap[*elem.Name] = true
}

for _, elem := range list2 {
if _, found := elementMap[elem.Name]; !found {
return false
}
}

return true
}

func compareObjects(list1 []string, list2 []types.Object) bool {
if len(list1) != len(list2) {
return false
}

elementMap := make(map[string]bool)

for _, elem := range list1 {
elementMap[elem] = true
}

for _, elem := range list2 {
if _, found := elementMap[*elem.Key]; !found {
return false
}
}

return true
}

func compareDelObjects(list1 []string, list2 []types.DeletedObject) bool {
if len(list1) != len(list2) {
return false
}

elementMap := make(map[string]bool)

for _, elem := range list1 {
elementMap[elem] = true
}

for _, elem := range list2 {
if _, found := elementMap[*elem.Key]; !found {
return false
}
}

return true
}

func uploadParts(client *s3.Client, size, partCount int, bucket, key, uploadId string) (parts []types.Part, err error) {
dr := NewDataReader(size, size)
datafile := "rand.data"
w, err := os.Create(datafile)
if err != nil {
return parts, err
}
defer w.Close()

_, err = io.Copy(w, dr)
if err != nil {
return parts, err
}

fileInfo, err := w.Stat()
if err != nil {
return parts, err
}

partSize := fileInfo.Size() / int64(partCount)
var offset int64

for partNumber := int64(1); partNumber <= int64(partCount); partNumber++ {
partStart := (partNumber - 1) * partSize
partEnd := partStart + partSize - 1
if partEnd > fileInfo.Size()-1 {
partEnd = fileInfo.Size() - 1
}
partBuffer := make([]byte, partEnd-partStart+1)
_, err := w.ReadAt(partBuffer, partStart)
if err != nil {
return parts, err
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
out, err := client.UploadPart(ctx, &s3.UploadPartInput{
Bucket: &bucket,
Key: &key,
UploadId: &uploadId,
Body: bytes.NewReader(partBuffer),
PartNumber: int32(partNumber),
})
cancel()
if err != nil {
return parts, err
} else {
parts = append(parts, types.Part{ETag: out.ETag, PartNumber: int32(partNumber)})
offset += partSize
}
}

return parts, err
}

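uploadParts only uploads and collects the parts; a test that needs a fully assembled object still has to complete the upload. Roughly, inside a test handler (a sketch, sizes chosen only for illustration):

```
mp, err := CreateMp(s3client, bucket, key)
if err != nil {
	return err
}
parts, err := uploadParts(s3client, 25*1024*1024, 5, bucket, key, *mp.UploadId)
if err != nil {
	return err
}
compParts := []types.CompletedPart{}
for _, p := range parts {
	compParts = append(compParts, types.CompletedPart{ETag: p.ETag, PartNumber: p.PartNumber})
}
ctx, cancel := context.WithTimeout(context.Background(), shortTimeout)
defer cancel()
_, err = s3client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
	Bucket:          &bucket,
	Key:             &key,
	UploadId:        mp.UploadId,
	MultipartUpload: &types.CompletedMultipartUpload{Parts: compParts},
})
return err
```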
type user struct {
access string
secret string
role string
}

func createUsers(s *S3Conf, users []user) error {
for _, usr := range users {
out, err := execCommand("admin", "-a", s.awsID, "-s", s.awsSecret, "-er", s.endpoint, "create-user", "-a", usr.access, "-s", usr.secret, "-r", usr.role)
if err != nil {
return err
}
if !strings.Contains(string(out), succUsrCrt) && !strings.Contains(string(out), failUsrCrt) {
return fmt.Errorf("failed to create a user account")
}
}
return nil
}

func changeBucketsOwner(s *S3Conf, buckets []string, owner string) error {
for _, bucket := range buckets {
out, err := execCommand("admin", "-a", s.awsID, "-s", s.awsSecret, "-er", s.endpoint, "change-bucket-owner", "-b", bucket, "-o", owner)
if err != nil {
return err
}
if !strings.Contains(string(out), "Bucket owner has been updated successfully") {
return fmt.Errorf(string(out))
}
}

return nil
}

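Both helpers shell out to the ./versitygw admin CLI. A multi-user test would typically combine them as below; the account names are placeholders:

```
err := createUsers(s, []user{
	{access: "testuser1", secret: "testuser1secret", role: "user"},
})
if err != nil {
	return err
}
return changeBucketsOwner(s, []string{bucket}, "testuser1")
```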
runtests.sh (new executable file)
@@ -0,0 +1,37 @@
#!/bin/bash

# make temp dirs
mkdir /tmp/gw
rm -rf /tmp/covdata
mkdir /tmp/covdata

# run server in background
GOCOVERDIR=/tmp/covdata ./versitygw -a user -s pass posix /tmp/gw &
GW_PID=$!

# wait a second for server to start up
sleep 1

# check if server is still running
if ! kill -0 $GW_PID; then
echo "server no longer running"
exit 1
fi

# run tests
if ! ./versitygw test -a user -s pass -e http://127.0.0.1:7070 full-flow; then
echo "tests failed"
kill $GW_PID
exit 1
fi

# kill off server
kill $GW_PID
exit 0

# if the above binary was built with -cover enabled (make testbin),
# then the following can be used for code coverage reports:
# go tool covdata percent -i=/tmp/covdata
# go tool covdata textfmt -i=/tmp/covdata -o profile.txt
# go tool cover -html=profile.txt

@@ -19,43 +19,87 @@ import (
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
)
|
||||
|
||||
type AdminController struct {
|
||||
IAMService auth.IAMService
|
||||
iam auth.IAMService
|
||||
be backend.Backend
|
||||
}
|
||||
|
||||
func NewAdminController(iam auth.IAMService, be backend.Backend) AdminController {
|
||||
return AdminController{iam: iam, be: be}
|
||||
}
|
||||
|
||||
func (c AdminController) CreateUser(ctx *fiber.Ctx) error {
|
||||
access, secret, role := ctx.Query("access"), ctx.Query("secret"), ctx.Query("role")
|
||||
requesterRole := ctx.Locals("role")
|
||||
requesterRole := ctx.Locals("role").(string)
|
||||
|
||||
if requesterRole != "admin" {
|
||||
return fmt.Errorf("access denied: only admin users have access to this resource")
|
||||
}
|
||||
if role != "user" && role != "admin" {
|
||||
return fmt.Errorf("invalid parameters: user role have to be one of the following: 'user', 'admin'")
|
||||
}
|
||||
|
||||
user := auth.Account{Secret: secret, Role: role}
|
||||
|
||||
err := c.IAMService.CreateAccount(access, user)
|
||||
err := c.iam.CreateAccount(access, user)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create a user: %w", err)
|
||||
}
|
||||
|
||||
ctx.SendString("The user has been created successfully")
|
||||
return nil
|
||||
return ctx.SendString("The user has been created successfully")
|
||||
}
|
||||
|
||||
func (c AdminController) DeleteUser(ctx *fiber.Ctx) error {
|
||||
access := ctx.Query("access")
|
||||
requesterRole := ctx.Locals("role")
|
||||
requesterRole := ctx.Locals("role").(string)
|
||||
if requesterRole != "admin" {
|
||||
return fmt.Errorf("access denied: only admin users have access to this resource")
|
||||
}
|
||||
|
||||
err := c.IAMService.DeleteUserAccount(access)
|
||||
err := c.iam.DeleteUserAccount(access)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx.SendString("The user has been created successfully")
|
||||
return nil
|
||||
return ctx.SendString("The user has been deleted successfully")
|
||||
}
|
||||
|
||||
func (c AdminController) ListUsers(ctx *fiber.Ctx) error {
|
||||
role := ctx.Locals("role").(string)
|
||||
if role != "admin" {
|
||||
return fmt.Errorf("access denied: only admin users have access to this resource")
|
||||
}
|
||||
accs, err := c.iam.ListUserAccounts()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctx.JSON(accs)
|
||||
}
|
||||
|
||||
func (c AdminController) ChangeBucketOwner(ctx *fiber.Ctx) error {
|
||||
role := ctx.Locals("role").(string)
|
||||
if role != "admin" {
|
||||
return fmt.Errorf("access denied: only admin users have access to this resource")
|
||||
}
|
||||
owner := ctx.Query("owner")
|
||||
bucket := ctx.Query("bucket")
|
||||
|
||||
accs, err := auth.CheckIfAccountsExist([]string{owner}, c.iam)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(accs) > 0 {
|
||||
return fmt.Errorf("user specified as the new bucket owner does not exist")
|
||||
}
|
||||
|
||||
err = c.be.ChangeBucketOwner(ctx.Context(), bucket, owner)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctx.Status(201).SendString("Bucket owner has been updated successfully")
|
||||
}
|
||||
|
||||
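Each admin handler above repeats the same role check against ctx.Locals("role"). A possible shared guard is sketched below purely for illustration; it is not part of this change set.

func requireAdmin(ctx *fiber.Ctx) error {
	// the role local is set by the signature-verification middleware
	role, _ := ctx.Locals("role").(string)
	if role != "admin" {
		return fmt.Errorf("access denied: only admin users have access to this resource")
	}
	return nil
}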
397
s3api/controllers/admin_test.go
Normal file
@@ -0,0 +1,397 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
)
|
||||
|
||||
func TestAdminController_CreateUser(t *testing.T) {
|
||||
type args struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
adminController := AdminController{
|
||||
iam: &IAMServiceMock{
|
||||
CreateAccountFunc: func(access string, account auth.Account) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
|
||||
app.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
app.Patch("/create-user", adminController.CreateUser)
|
||||
|
||||
appErr := fiber.New()
|
||||
|
||||
appErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "user")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
appErr.Patch("/create-user", adminController.CreateUser)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
app *fiber.App
|
||||
args args
|
||||
wantErr bool
|
||||
statusCode int
|
||||
}{
|
||||
{
|
||||
name: "Admin-create-user-success",
|
||||
app: app,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/create-user?access=test&secret=test&role=user", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 200,
|
||||
},
|
||||
{
|
||||
name: "Admin-create-user-invalid-user-role",
|
||||
app: app,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/create-user?access=test&secret=test&role=invalid", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
{
|
||||
name: "Admin-create-user-invalid-requester-role",
|
||||
app: appErr,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/create-user?access=test&secret=test&role=admin", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
resp, err := tt.app.Test(tt.args.req)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("AdminController.CreateUser() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
if resp.StatusCode != tt.statusCode {
|
||||
t.Errorf("AdminController.CreateUser() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdminController_DeleteUser(t *testing.T) {
|
||||
type args struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
adminController := AdminController{
|
||||
iam: &IAMServiceMock{
|
||||
DeleteUserAccountFunc: func(access string) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
|
||||
app.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
app.Patch("/delete-user", adminController.DeleteUser)
|
||||
|
||||
appErr := fiber.New()
|
||||
|
||||
appErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "user")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
appErr.Patch("/delete-user", adminController.DeleteUser)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
app *fiber.App
|
||||
args args
|
||||
wantErr bool
|
||||
statusCode int
|
||||
}{
|
||||
{
|
||||
name: "Admin-delete-user-success",
|
||||
app: app,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/delete-user?access=test", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 200,
|
||||
},
|
||||
{
|
||||
name: "Admin-delete-user-invalid-requester-role",
|
||||
app: appErr,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/delete-user?access=test", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
resp, err := tt.app.Test(tt.args.req)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("AdminController.DeleteUser() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
if resp.StatusCode != tt.statusCode {
|
||||
t.Errorf("AdminController.DeleteUser() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdminController_ListUsers(t *testing.T) {
|
||||
type args struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
adminController := AdminController{
|
||||
iam: &IAMServiceMock{
|
||||
ListUserAccountsFunc: func() ([]auth.Account, error) {
|
||||
return []auth.Account{}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
adminControllerErr := AdminController{
|
||||
iam: &IAMServiceMock{
|
||||
ListUserAccountsFunc: func() ([]auth.Account, error) {
|
||||
return []auth.Account{}, fmt.Errorf("server error")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
appErr := fiber.New()
|
||||
|
||||
appErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
appErr.Patch("/list-users", adminControllerErr.ListUsers)
|
||||
|
||||
appRoleErr := fiber.New()
|
||||
|
||||
appRoleErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "user")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
appRoleErr.Patch("/list-users", adminController.ListUsers)
|
||||
|
||||
appSucc := fiber.New()
|
||||
|
||||
appSucc.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
appSucc.Patch("/list-users", adminController.ListUsers)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
app *fiber.App
|
||||
args args
|
||||
wantErr bool
|
||||
statusCode int
|
||||
}{
|
||||
{
|
||||
name: "Admin-list-users-access-denied",
|
||||
app: appRoleErr,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/list-users", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
{
|
||||
name: "Admin-list-users-iam-error",
|
||||
app: appErr,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/list-users", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
{
|
||||
name: "Admin-list-users-success",
|
||||
app: appSucc,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/list-users", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 200,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
resp, err := tt.app.Test(tt.args.req)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("AdminController.ListUsers() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
if resp.StatusCode != tt.statusCode {
|
||||
t.Errorf("AdminController.ListUsers() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdminController_ChangeBucketOwner(t *testing.T) {
|
||||
type args struct {
|
||||
req *http.Request
|
||||
}
|
||||
adminController := AdminController{
|
||||
be: &BackendMock{
|
||||
ChangeBucketOwnerFunc: func(contextMoqParam context.Context, bucket, newOwner string) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
iam: &IAMServiceMock{
|
||||
GetUserAccountFunc: func(access string) (auth.Account, error) {
|
||||
return auth.Account{}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
adminControllerIamErr := AdminController{
|
||||
iam: &IAMServiceMock{
|
||||
GetUserAccountFunc: func(access string) (auth.Account, error) {
|
||||
return auth.Account{}, fmt.Errorf("unknown server error")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
adminControllerIamAccDoesNotExist := AdminController{
|
||||
iam: &IAMServiceMock{
|
||||
GetUserAccountFunc: func(access string) (auth.Account, error) {
|
||||
return auth.Account{}, auth.ErrNoSuchUser
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
|
||||
app.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
app.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)
|
||||
|
||||
appRoleErr := fiber.New()
|
||||
|
||||
appRoleErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "user")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
appRoleErr.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)
|
||||
|
||||
appIamErr := fiber.New()
|
||||
|
||||
appIamErr.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
appIamErr.Patch("/change-bucket-owner", adminControllerIamErr.ChangeBucketOwner)
|
||||
|
||||
appIamNoSuchUser := fiber.New()
|
||||
|
||||
appIamNoSuchUser.Use(func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("role", "admin")
|
||||
return ctx.Next()
|
||||
})
|
||||
|
||||
appIamNoSuchUser.Patch("/change-bucket-owner", adminControllerIamAccDoesNotExist.ChangeBucketOwner)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
app *fiber.App
|
||||
args args
|
||||
wantErr bool
|
||||
statusCode int
|
||||
}{
|
||||
{
|
||||
name: "Change-bucket-owner-access-denied",
|
||||
app: appRoleErr,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/change-bucket-owner", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
{
|
||||
name: "Change-bucket-owner-check-account-server-error",
|
||||
app: appIamErr,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/change-bucket-owner", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
{
|
||||
name: "Change-bucket-owner-acc-does-not-exist",
|
||||
app: appIamNoSuchUser,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/change-bucket-owner", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
{
|
||||
name: "Change-bucket-owner-success",
|
||||
app: app,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPatch, "/change-bucket-owner?bucket=bucket&owner=owner", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 201,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
resp, err := tt.app.Test(tt.args.req)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("AdminController.ChangeBucketOwner() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
if resp.StatusCode != tt.statusCode {
|
||||
t.Errorf("AdminController.ChangeBucketOwner() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
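The failure cases in these tests expect HTTP 500 because the handlers return plain errors and Fiber's default error handler maps any error that is not a *fiber.Error to a 500 response. If a specific status were wanted instead, a custom handler could be installed when building the app; the snippet below is only a sketch of that option.

func newAppWithErrorHandler() *fiber.App {
	return fiber.New(fiber.Config{
		// return 403 for handler errors instead of Fiber's default 500
		ErrorHandler: func(ctx *fiber.Ctx, err error) error {
			return ctx.Status(fiber.StatusForbidden).SendString(err.Error())
		},
	})
}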
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
206
s3api/controllers/iam_moq_test.go
Normal file
@@ -0,0 +1,206 @@
|
||||
// Code generated by moq; DO NOT EDIT.
|
||||
// github.com/matryer/moq
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"github.com/versity/versitygw/auth"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Ensure, that IAMServiceMock does implement auth.IAMService.
|
||||
// If this is not the case, regenerate this file with moq.
|
||||
var _ auth.IAMService = &IAMServiceMock{}
|
||||
|
||||
// IAMServiceMock is a mock implementation of auth.IAMService.
|
||||
//
|
||||
// func TestSomethingThatUsesIAMService(t *testing.T) {
|
||||
//
|
||||
// // make and configure a mocked auth.IAMService
|
||||
// mockedIAMService := &IAMServiceMock{
|
||||
// CreateAccountFunc: func(access string, account auth.Account) error {
|
||||
// panic("mock out the CreateAccount method")
|
||||
// },
|
||||
// DeleteUserAccountFunc: func(access string) error {
|
||||
// panic("mock out the DeleteUserAccount method")
|
||||
// },
|
||||
// GetUserAccountFunc: func(access string) (auth.Account, error) {
|
||||
// panic("mock out the GetUserAccount method")
|
||||
// },
|
||||
// ListUserAccountsFunc: func() ([]auth.Account, error) {
|
||||
// panic("mock out the ListUserAccounts method")
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// // use mockedIAMService in code that requires auth.IAMService
|
||||
// // and then make assertions.
|
||||
//
|
||||
// }
|
||||
type IAMServiceMock struct {
|
||||
// CreateAccountFunc mocks the CreateAccount method.
|
||||
CreateAccountFunc func(access string, account auth.Account) error
|
||||
|
||||
// DeleteUserAccountFunc mocks the DeleteUserAccount method.
|
||||
DeleteUserAccountFunc func(access string) error
|
||||
|
||||
// GetUserAccountFunc mocks the GetUserAccount method.
|
||||
GetUserAccountFunc func(access string) (auth.Account, error)
|
||||
|
||||
// ListUserAccountsFunc mocks the ListUserAccounts method.
|
||||
ListUserAccountsFunc func() ([]auth.Account, error)
|
||||
|
||||
// calls tracks calls to the methods.
|
||||
calls struct {
|
||||
// CreateAccount holds details about calls to the CreateAccount method.
|
||||
CreateAccount []struct {
|
||||
// Access is the access argument value.
|
||||
Access string
|
||||
// Account is the account argument value.
|
||||
Account auth.Account
|
||||
}
|
||||
// DeleteUserAccount holds details about calls to the DeleteUserAccount method.
|
||||
DeleteUserAccount []struct {
|
||||
// Access is the access argument value.
|
||||
Access string
|
||||
}
|
||||
// GetUserAccount holds details about calls to the GetUserAccount method.
|
||||
GetUserAccount []struct {
|
||||
// Access is the access argument value.
|
||||
Access string
|
||||
}
|
||||
// ListUserAccounts holds details about calls to the ListUserAccounts method.
|
||||
ListUserAccounts []struct {
|
||||
}
|
||||
}
|
||||
lockCreateAccount sync.RWMutex
|
||||
lockDeleteUserAccount sync.RWMutex
|
||||
lockGetUserAccount sync.RWMutex
|
||||
lockListUserAccounts sync.RWMutex
|
||||
}
|
||||
|
||||
// CreateAccount calls CreateAccountFunc.
|
||||
func (mock *IAMServiceMock) CreateAccount(access string, account auth.Account) error {
|
||||
if mock.CreateAccountFunc == nil {
|
||||
panic("IAMServiceMock.CreateAccountFunc: method is nil but IAMService.CreateAccount was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
Access string
|
||||
Account auth.Account
|
||||
}{
|
||||
Access: access,
|
||||
Account: account,
|
||||
}
|
||||
mock.lockCreateAccount.Lock()
|
||||
mock.calls.CreateAccount = append(mock.calls.CreateAccount, callInfo)
|
||||
mock.lockCreateAccount.Unlock()
|
||||
return mock.CreateAccountFunc(access, account)
|
||||
}
|
||||
|
||||
// CreateAccountCalls gets all the calls that were made to CreateAccount.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedIAMService.CreateAccountCalls())
|
||||
func (mock *IAMServiceMock) CreateAccountCalls() []struct {
|
||||
Access string
|
||||
Account auth.Account
|
||||
} {
|
||||
var calls []struct {
|
||||
Access string
|
||||
Account auth.Account
|
||||
}
|
||||
mock.lockCreateAccount.RLock()
|
||||
calls = mock.calls.CreateAccount
|
||||
mock.lockCreateAccount.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
// DeleteUserAccount calls DeleteUserAccountFunc.
|
||||
func (mock *IAMServiceMock) DeleteUserAccount(access string) error {
|
||||
if mock.DeleteUserAccountFunc == nil {
|
||||
panic("IAMServiceMock.DeleteUserAccountFunc: method is nil but IAMService.DeleteUserAccount was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
Access string
|
||||
}{
|
||||
Access: access,
|
||||
}
|
||||
mock.lockDeleteUserAccount.Lock()
|
||||
mock.calls.DeleteUserAccount = append(mock.calls.DeleteUserAccount, callInfo)
|
||||
mock.lockDeleteUserAccount.Unlock()
|
||||
return mock.DeleteUserAccountFunc(access)
|
||||
}
|
||||
|
||||
// DeleteUserAccountCalls gets all the calls that were made to DeleteUserAccount.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedIAMService.DeleteUserAccountCalls())
|
||||
func (mock *IAMServiceMock) DeleteUserAccountCalls() []struct {
|
||||
Access string
|
||||
} {
|
||||
var calls []struct {
|
||||
Access string
|
||||
}
|
||||
mock.lockDeleteUserAccount.RLock()
|
||||
calls = mock.calls.DeleteUserAccount
|
||||
mock.lockDeleteUserAccount.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
// GetUserAccount calls GetUserAccountFunc.
|
||||
func (mock *IAMServiceMock) GetUserAccount(access string) (auth.Account, error) {
|
||||
if mock.GetUserAccountFunc == nil {
|
||||
panic("IAMServiceMock.GetUserAccountFunc: method is nil but IAMService.GetUserAccount was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
Access string
|
||||
}{
|
||||
Access: access,
|
||||
}
|
||||
mock.lockGetUserAccount.Lock()
|
||||
mock.calls.GetUserAccount = append(mock.calls.GetUserAccount, callInfo)
|
||||
mock.lockGetUserAccount.Unlock()
|
||||
return mock.GetUserAccountFunc(access)
|
||||
}
|
||||
|
||||
// GetUserAccountCalls gets all the calls that were made to GetUserAccount.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedIAMService.GetUserAccountCalls())
|
||||
func (mock *IAMServiceMock) GetUserAccountCalls() []struct {
|
||||
Access string
|
||||
} {
|
||||
var calls []struct {
|
||||
Access string
|
||||
}
|
||||
mock.lockGetUserAccount.RLock()
|
||||
calls = mock.calls.GetUserAccount
|
||||
mock.lockGetUserAccount.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
// ListUserAccounts calls ListUserAccountsFunc.
|
||||
func (mock *IAMServiceMock) ListUserAccounts() ([]auth.Account, error) {
|
||||
if mock.ListUserAccountsFunc == nil {
|
||||
panic("IAMServiceMock.ListUserAccountsFunc: method is nil but IAMService.ListUserAccounts was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
}{}
|
||||
mock.lockListUserAccounts.Lock()
|
||||
mock.calls.ListUserAccounts = append(mock.calls.ListUserAccounts, callInfo)
|
||||
mock.lockListUserAccounts.Unlock()
|
||||
return mock.ListUserAccountsFunc()
|
||||
}
|
||||
|
||||
// ListUserAccountsCalls gets all the calls that were made to ListUserAccounts.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedIAMService.ListUserAccountsCalls())
|
||||
func (mock *IAMServiceMock) ListUserAccountsCalls() []struct {
|
||||
} {
|
||||
var calls []struct {
|
||||
}
|
||||
mock.lockListUserAccounts.RLock()
|
||||
calls = mock.calls.ListUserAccounts
|
||||
mock.lockListUserAccounts.RUnlock()
|
||||
return calls
|
||||
}
|
||||
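As the header notes, this file is generated by moq rather than written by hand. A go:generate directive along the lines of the one below would keep it in sync with auth.IAMService; the exact flags and paths are an assumption, not taken from the repository.

//go:generate moq -pkg controllers -out iam_moq_test.go ../../auth IAMService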
61
s3api/middlewares/acl-parser.go
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3api/controllers"
|
||||
"github.com/versity/versitygw/s3log"
|
||||
)
|
||||
|
||||
func AclParser(be backend.Backend, logger s3log.AuditLogger) fiber.Handler {
|
||||
return func(ctx *fiber.Ctx) error {
|
||||
isRoot, access := ctx.Locals("isRoot").(bool), ctx.Locals("access").(string)
|
||||
path := ctx.Path()
|
||||
pathParts := strings.Split(path, "/")
|
||||
bucket := pathParts[1]
|
||||
if path == "/" && ctx.Method() == http.MethodGet {
|
||||
return ctx.Next()
|
||||
}
|
||||
if ctx.Method() == http.MethodPatch {
|
||||
return ctx.Next()
|
||||
}
|
||||
if len(pathParts) == 2 && pathParts[1] != "" && ctx.Method() == http.MethodPut && !ctx.Request().URI().QueryArgs().Has("acl") {
|
||||
if err := auth.IsAdmin(access, isRoot); err != nil {
|
||||
return controllers.SendXMLResponse(ctx, nil, err, &controllers.MetaOpts{Logger: logger, Action: "CreateBucket"})
|
||||
}
|
||||
return ctx.Next()
|
||||
}
|
||||
//TODO: provide correct action names for the logger, after implementing DetectAction middleware
|
||||
data, err := be.GetBucketAcl(ctx.Context(), &s3.GetBucketAclInput{Bucket: &bucket})
|
||||
if err != nil {
|
||||
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
parsedAcl, err := auth.ParseACL(data)
|
||||
if err != nil {
|
||||
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
ctx.Locals("parsedAcl", parsedAcl)
|
||||
return ctx.Next()
|
||||
}
|
||||
}
|
||||
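Handlers further down the chain are expected to read the parsed ACL back out of the request locals. The sketch below is illustrative only and assumes auth.ACL is the struct type returned by auth.ParseACL.

func requireParsedAcl(ctx *fiber.Ctx) (auth.ACL, error) {
	acl, ok := ctx.Locals("parsedAcl").(auth.ACL)
	if !ok {
		return auth.ACL{}, fmt.Errorf("bucket ACL has not been parsed for this request")
	}
	// access checks against the grants in the ACL would happen here
	return acl, nil
}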
@@ -17,6 +17,8 @@ package middlewares
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -29,10 +31,12 @@ import (
|
||||
"github.com/versity/versitygw/s3api/controllers"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3log"
|
||||
)
|
||||
|
||||
const (
|
||||
iso8601Format = "20060102T150405Z"
|
||||
YYYYMMDD = "20060102"
|
||||
)
|
||||
|
||||
type RootUserConfig struct {
|
||||
@@ -40,74 +44,114 @@ type RootUserConfig struct {
|
||||
Secret string
|
||||
}
|
||||
|
||||
func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string, debug bool) fiber.Handler {
|
||||
func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, logger s3log.AuditLogger, region string, debug bool) fiber.Handler {
|
||||
acct := accounts{root: root, iam: iam}
|
||||
|
||||
return func(ctx *fiber.Ctx) error {
|
||||
ctx.Locals("region", region)
|
||||
ctx.Locals("startTime", time.Now())
|
||||
authorization := ctx.Get("Authorization")
|
||||
if authorization == "" {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
// Check the signature version
|
||||
authParts := strings.Split(authorization, " ")
|
||||
if len(authParts) < 4 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields))
|
||||
}
|
||||
if authParts[0] != "AWS4-HMAC-SHA256" {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported))
|
||||
authParts := strings.Split(authorization, ",")
|
||||
for i, el := range authParts {
|
||||
authParts[i] = strings.TrimSpace(el)
|
||||
}
|
||||
|
||||
credKv := strings.Split(authParts[1], "=")
|
||||
if len(authParts) != 3 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
startParts := strings.Split(authParts[0], " ")
|
||||
|
||||
if startParts[0] != "AWS4-HMAC-SHA256" {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
credKv := strings.Split(startParts[1], "=")
|
||||
if len(credKv) != 2 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
// Credential variables validation
|
||||
creds := strings.Split(credKv[1], "/")
|
||||
if len(creds) < 4 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
|
||||
if len(creds) != 5 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
if creds[4] != "aws4_request" {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureTerminationStr), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
if creds[3] != "s3" {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureIncorrService), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
if creds[2] != region {
|
||||
return controllers.SendResponse(ctx, s3err.APIError{
|
||||
Code: "SignatureDoesNotMatch",
|
||||
Description: fmt.Sprintf("Credential should be scoped to a valid Region, not %v", creds[2]),
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
}, &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
signHdrKv := strings.Split(authParts[2][:len(authParts[2])-1], "=")
|
||||
// Validate the dates difference
|
||||
err := validateDate(creds[1])
|
||||
if err != nil {
|
||||
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
ctx.Locals("access", creds[0])
|
||||
ctx.Locals("isRoot", creds[0] == root.Access)
|
||||
|
||||
signHdrKv := strings.Split(authParts[1], "=")
|
||||
if len(signHdrKv) != 2 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
signedHdrs := strings.Split(signHdrKv[1], ";")
|
||||
|
||||
account, err := acct.getAccount(creds[0])
|
||||
if err == auth.ErrNoSuchUser {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
if err != nil {
|
||||
return controllers.SendResponse(ctx, err)
|
||||
return controllers.SendResponse(ctx, err, &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
ctx.Locals("role", account.Role)
|
||||
|
||||
// Check X-Amz-Date header
|
||||
date := ctx.Get("X-Amz-Date")
|
||||
if date == "" {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingDateHeader))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingDateHeader), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
// Parse the date and check the date validity
|
||||
tdate, err := time.Parse(iso8601Format, date)
|
||||
if err != nil {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedDate))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedDate), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
// Calculate the hash of the request payload
|
||||
hashedPayload := sha256.Sum256(ctx.Body())
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
if date[:8] != creds[1] {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
hashPayloadHeader := ctx.Get("X-Amz-Content-Sha256")
|
||||
ok := isSpecialPayload(hashPayloadHeader)
|
||||
|
||||
// Compare the calculated hash with the hash provided
|
||||
if hashPayloadHeader != hexPayload {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch))
|
||||
if !ok {
|
||||
// Calculate the hash of the request payload
|
||||
hashedPayload := sha256.Sum256(ctx.Body())
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
// Compare the calculated hash with the hash provided
|
||||
if hashPayloadHeader != hexPayload {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
}
|
||||
|
||||
// Create a new http request instance from fasthttp request
|
||||
req, err := utils.CreateHttpRequestFromCtx(ctx, signedHdrs)
|
||||
if err != nil {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
@@ -115,31 +159,28 @@ func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, region string,
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{
|
||||
AccessKeyID: creds[0],
|
||||
SecretAccessKey: account.Secret,
|
||||
}, req, hexPayload, creds[3], region, tdate, func(options *v4.SignerOptions) {
|
||||
}, req, hashPayloadHeader, creds[3], region, tdate, func(options *v4.SignerOptions) {
|
||||
options.DisableURIPathEscaping = true
|
||||
if debug {
|
||||
options.LogSigning = true
|
||||
options.Logger = logging.NewStandardLogger(os.Stderr)
|
||||
}
|
||||
})
|
||||
if signErr != nil {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
parts := strings.Split(req.Header.Get("Authorization"), " ")
|
||||
if len(parts) < 4 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
calculatedSign := strings.Split(parts[3], "=")[1]
|
||||
expectedSign := strings.Split(authParts[3], "=")[1]
|
||||
expectedSign := strings.Split(authParts[2], "=")[1]
|
||||
|
||||
if expectedSign != calculatedSign {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
ctx.Locals("role", account.Role)
|
||||
ctx.Locals("access", creds[0])
|
||||
ctx.Locals("isRoot", creds[0] == root.Access)
|
||||
|
||||
return ctx.Next()
|
||||
}
|
||||
}
|
||||
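To make the new comma-based parsing concrete, here is how a typical SigV4 Authorization value decomposes under it; the values are made up, and the snippet assumes the fmt and strings imports.

func exampleAuthorizationSplit() {
	authorization := "AWS4-HMAC-SHA256 Credential=AKIAEXAMPLE/20230801/us-east-1/s3/aws4_request, " +
		"SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=deadbeef"

	authParts := strings.Split(authorization, ",") // three sections: Credential, SignedHeaders, Signature
	startParts := strings.Split(strings.TrimSpace(authParts[0]), " ")
	// startParts[0] == "AWS4-HMAC-SHA256"
	// startParts[1] == "Credential=AKIAEXAMPLE/20230801/us-east-1/s3/aws4_request"

	creds := strings.Split(strings.Split(startParts[1], "=")[1], "/")
	// creds == [access key, YYYYMMDD date, region, service ("s3"), terminator ("aws4_request")]
	fmt.Println(creds[0], creds[1], creds[2], creds[3], creds[4])
}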
@@ -159,3 +200,41 @@ func (a accounts) getAccount(access string) (auth.Account, error) {
|
||||
|
||||
return a.iam.GetUserAccount(access)
|
||||
}
|
||||
|
||||
func isSpecialPayload(str string) bool {
|
||||
specialValues := map[string]bool{
|
||||
"UNSIGNED-PAYLOAD": true,
|
||||
"STREAMING-UNSIGNED-PAYLOAD-TRAILER": true,
|
||||
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD": true,
|
||||
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER": true,
|
||||
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD": true,
|
||||
"STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER": true,
|
||||
}
|
||||
|
||||
return specialValues[str]
|
||||
}
|
||||
|
||||
func validateDate(date string) error {
|
||||
credDate, err := time.Parse(YYYYMMDD, date)
|
||||
if err != nil {
|
||||
return s3err.GetAPIError(s3err.ErrSignatureDateDoesNotMatch)
|
||||
}
|
||||
|
||||
today := time.Now()
|
||||
if credDate.Year() > today.Year() || (credDate.Year() == today.Year() && credDate.YearDay() > today.YearDay()) {
|
||||
return s3err.APIError{
|
||||
Code: "SignatureDoesNotMatch",
|
||||
Description: fmt.Sprintf("Signature not yet current: %s is still later than %s", credDate.Format(YYYYMMDD), today.Format(YYYYMMDD)),
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
}
|
||||
}
|
||||
if credDate.Year() < today.Year() || (credDate.Year() == today.Year() && credDate.YearDay() < today.YearDay()) {
|
||||
return s3err.APIError{
|
||||
Code: "SignatureDoesNotMatch",
|
||||
Description: fmt.Sprintf("Signature expired: %s is now earlier than %s", credDate.Format(YYYYMMDD), today.Format(YYYYMMDD)),
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
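An illustrative check of validateDate's behaviour, not part of the change set and assuming the usual testing and time imports: today's date should pass, while yesterday and tomorrow should be rejected as expired and not-yet-current respectively.

func TestValidateDateSketch(t *testing.T) {
	if err := validateDate(time.Now().Format(YYYYMMDD)); err != nil {
		t.Errorf("expected today's credential date to validate, got %v", err)
	}
	if err := validateDate(time.Now().AddDate(0, 0, -1).Format(YYYYMMDD)); err == nil {
		t.Error("expected an expired-signature error for yesterday's date")
	}
	if err := validateDate(time.Now().AddDate(0, 0, 1).Format(YYYYMMDD)); err == nil {
		t.Error("expected a not-yet-current error for tomorrow's date")
	}
}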
|
||||
@@ -21,9 +21,10 @@ import (
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/s3api/controllers"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3log"
|
||||
)
|
||||
|
||||
func VerifyMD5Body() fiber.Handler {
|
||||
func VerifyMD5Body(logger s3log.AuditLogger) fiber.Handler {
|
||||
return func(ctx *fiber.Ctx) error {
|
||||
incomingSum := ctx.Get("Content-Md5")
|
||||
if incomingSum == "" {
|
||||
@@ -34,10 +35,9 @@ func VerifyMD5Body() fiber.Handler {
|
||||
calculatedSum := base64.StdEncoding.EncodeToString(sum[:])
|
||||
|
||||
if incomingSum != calculatedSum {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest))
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
|
||||
return ctx.Next()
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
41
s3api/middlewares/url-decoder.go
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/s3api/controllers"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3log"
|
||||
)
|
||||
|
||||
func DecodeURL(logger s3log.AuditLogger) fiber.Handler {
|
||||
return func(ctx *fiber.Ctx) error {
|
||||
reqURL := ctx.Request().URI().String()
|
||||
decoded, err := url.Parse(reqURL)
|
||||
if err != nil {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidURI), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
ctx.Path(decoded.Path)
|
||||
decodedURL, err := url.QueryUnescape(reqURL)
|
||||
if err != nil {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidURI), &controllers.MetaOpts{Logger: logger})
|
||||
}
|
||||
ctx.Request().SetRequestURI(decodedURL)
|
||||
return ctx.Next()
|
||||
}
|
||||
}
|
||||
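The effect of the two decoding steps on a typical object path, shown with the same net/url calls the middleware uses (illustrative only, assuming the fmt import):

func exampleDecodeURL() {
	raw := "/my-bucket/img%201.png"

	parsed, _ := url.Parse(raw)
	fmt.Println(parsed.Path) // "/my-bucket/img 1.png", the path Fiber routes on

	decodedURL, _ := url.QueryUnescape(raw)
	fmt.Println(decodedURL) // "/my-bucket/img 1.png", becomes the rewritten request URI
}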
@@ -19,19 +19,28 @@ import (
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3api/controllers"
|
||||
"github.com/versity/versitygw/s3event"
|
||||
"github.com/versity/versitygw/s3log"
|
||||
)
|
||||
|
||||
type S3ApiRouter struct{}
|
||||
|
||||
func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService) {
|
||||
s3ApiController := controllers.New(be)
|
||||
adminController := controllers.AdminController{IAMService: iam}
|
||||
func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender) {
|
||||
s3ApiController := controllers.New(be, iam, logger, evs)
|
||||
adminController := controllers.NewAdminController(iam, be)
|
||||
|
||||
// TODO: think of better routing system
|
||||
app.Post("/create-user", adminController.CreateUser)
|
||||
// CreateUser admin api
|
||||
app.Patch("/create-user", adminController.CreateUser)
|
||||
|
||||
// DeleteUsers admin api
|
||||
app.Patch("/delete-user", adminController.DeleteUser)
|
||||
|
||||
// ListUsers admin api
|
||||
app.Patch("/list-users", adminController.ListUsers)
|
||||
|
||||
// ChangeBucketOwner
|
||||
app.Patch("/change-bucket-owner", adminController.ChangeBucketOwner)
|
||||
|
||||
// Admin Delete api
|
||||
app.Delete("/delete-user", adminController.DeleteUser)
|
||||
// ListBuckets action
|
||||
app.Get("/", s3ApiController.ListBuckets)
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ func TestS3ApiRouter_Init(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam)
|
||||
tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam, nil, nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,6 +22,8 @@ import (
|
||||
"github.com/versity/versitygw/auth"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3api/middlewares"
|
||||
"github.com/versity/versitygw/s3event"
|
||||
"github.com/versity/versitygw/s3log"
|
||||
)
|
||||
|
||||
type S3ApiServer struct {
|
||||
@@ -33,7 +35,7 @@ type S3ApiServer struct {
|
||||
debug bool
|
||||
}
|
||||
|
||||
func New(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, opts ...Option) (*S3ApiServer, error) {
|
||||
func New(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port, region string, iam auth.IAMService, l s3log.AuditLogger, evs s3event.S3EventSender, opts ...Option) (*S3ApiServer, error) {
|
||||
server := &S3ApiServer{
|
||||
app: app,
|
||||
backend: be,
|
||||
@@ -47,13 +49,15 @@ func New(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, po
|
||||
|
||||
// Logging middlewares
|
||||
app.Use(logger.New())
|
||||
app.Use(middlewares.DecodeURL(l))
|
||||
app.Use(middlewares.RequestLogger(server.debug))
|
||||
|
||||
// Authentication middlewares
|
||||
app.Use(middlewares.VerifyV4Signature(root, iam, region, server.debug))
|
||||
app.Use(middlewares.VerifyMD5Body())
|
||||
app.Use(middlewares.VerifyV4Signature(root, iam, l, region, server.debug))
|
||||
app.Use(middlewares.VerifyMD5Body(l))
|
||||
app.Use(middlewares.AclParser(be, l))
|
||||
|
||||
server.router.Init(app, be, iam)
|
||||
server.router.Init(app, be, iam, l, evs)
|
||||
|
||||
return server, nil
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
@@ -63,7 +64,7 @@ func TestNew(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotS3ApiServer, err := New(tt.args.app, tt.args.be, tt.args.root,
|
||||
tt.args.port, "us-east-1", &auth.IAMServiceInternal{})
|
||||
tt.args.port, "us-east-1", &auth.IAMServiceInternal{}, nil, nil)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
@@ -82,15 +83,26 @@ func TestS3ApiServer_Serve(t *testing.T) {
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Return error when serving S3 api server with invalid address",
|
||||
name: "Serve-invalid-address",
|
||||
wantErr: true,
|
||||
sa: &S3ApiServer{
|
||||
app: fiber.New(),
|
||||
backend: backend.BackendUnsupported{},
|
||||
port: "Wrong address",
|
||||
port: "Invalid address",
|
||||
router: &S3ApiRouter{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Serve-invalid-address-with-certificate",
|
||||
wantErr: true,
|
||||
sa: &S3ApiServer{
|
||||
app: fiber.New(),
|
||||
backend: backend.BackendUnsupported{},
|
||||
port: "Invalid address",
|
||||
router: &S3ApiRouter{},
|
||||
cert: &tls.Certificate{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -19,10 +19,18 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/valyala/fasthttp"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
var (
|
||||
bucketNameRegexp = regexp.MustCompile(`^[a-z0-9][a-z0-9.-]+[a-z0-9]$`)
|
||||
bucketNameIpRegexp = regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`)
|
||||
)
|
||||
|
||||
func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
|
||||
@@ -72,6 +80,17 @@ func SetMetaHeaders(ctx *fiber.Ctx, meta map[string]string) {
|
||||
}
|
||||
}
|
||||
|
||||
func ParseUint(str string) (int32, error) {
|
||||
if str == "" {
|
||||
return -1, nil
|
||||
}
|
||||
num, err := strconv.ParseUint(str, 10, 16)
|
||||
if err != nil {
|
||||
return -1, s3err.GetAPIError(s3err.ErrInvalidMaxKeys)
|
||||
}
|
||||
return int32(num), nil
|
||||
}
|
||||
|
||||
type CustomHeader struct {
|
||||
Key string
|
||||
Value string
|
||||
@@ -83,6 +102,22 @@ func SetResponseHeaders(ctx *fiber.Ctx, headers []CustomHeader) {
|
||||
}
|
||||
}
|
||||
|
||||
func IsValidBucketName(bucket string) bool {
|
||||
if len(bucket) < 3 || len(bucket) > 63 {
|
||||
return false
|
||||
}
|
||||
// Checks to contain only digits, lowercase letters, dot, hyphen.
|
||||
// Checks to start and end with only digits and lowercase letters.
|
||||
if !bucketNameRegexp.MatchString(bucket) {
|
||||
return false
|
||||
}
|
||||
// Checks not to be a valid IP address
|
||||
if bucketNameIpRegexp.MatchString(bucket) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func includeHeader(hdr string, signedHdrs []string) bool {
|
||||
for _, shdr := range signedHdrs {
|
||||
if strings.EqualFold(hdr, shdr) {
|
||||
|
||||
@@ -117,3 +117,156 @@ func TestGetUserMetaData(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_includeHeader(t *testing.T) {
|
||||
type args struct {
|
||||
hdr string
|
||||
signedHdrs []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "include-header-falsy-case",
|
||||
args: args{
|
||||
hdr: "Content-Type",
|
||||
signedHdrs: []string{"X-Amz-Acl", "Content-Encoding"},
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "include-header-falsy-case",
|
||||
args: args{
|
||||
hdr: "Content-Type",
|
||||
signedHdrs: []string{"X-Amz-Acl", "Content-Type"},
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := includeHeader(tt.args.hdr, tt.args.signedHdrs); got != tt.want {
|
||||
t.Errorf("includeHeader() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsValidBucketName(t *testing.T) {
|
||||
type args struct {
|
||||
bucket string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "IsValidBucketName-short-name",
|
||||
args: args{
|
||||
bucket: "a",
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "IsValidBucketName-start-with-hyphen",
|
||||
args: args{
|
||||
bucket: "-bucket",
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "IsValidBucketName-start-with-dot",
|
||||
args: args{
|
||||
bucket: ".bucket",
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "IsValidBucketName-contain-invalid-character",
|
||||
args: args{
|
||||
bucket: "my@bucket",
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "IsValidBucketName-end-with-hyphen",
|
||||
args: args{
|
||||
bucket: "bucket-",
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "IsValidBucketName-end-with-dot",
|
||||
args: args{
|
||||
bucket: "bucket.",
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "IsValidBucketName-valid-bucket-name",
|
||||
args: args{
|
||||
bucket: "my-bucket",
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := IsValidBucketName(tt.args.bucket); got != tt.want {
|
||||
t.Errorf("IsValidBucketName() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseUint(t *testing.T) {
|
||||
type args struct {
|
||||
str string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want int32
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Parse-uint-empty-string",
|
||||
args: args{
|
||||
str: "",
|
||||
},
|
||||
want: -1,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Parse-uint-invalid-number-string",
|
||||
args: args{
|
||||
str: "bla",
|
||||
},
|
||||
want: -1,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Parse-uint-invalid-negative-number",
|
||||
args: args{
|
||||
str: "-5",
|
||||
},
|
||||
want: -1,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := ParseUint(tt.args.str)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ParseMaxKeys() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("ParseMaxKeys() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,6 +97,9 @@ const (
|
||||
ErrNegativeExpires
|
||||
ErrMaximumExpires
|
||||
ErrSignatureDoesNotMatch
|
||||
ErrSignatureDateDoesNotMatch
|
||||
ErrSignatureTerminationStr
|
||||
ErrSignatureIncorrService
|
||||
ErrContentSHA256Mismatch
|
||||
ErrInvalidAccessKeyID
|
||||
ErrRequestNotReadyYet
|
||||
@@ -106,7 +109,10 @@ const (
|
||||
ErrNotImplemented
|
||||
ErrPreconditionFailed
|
||||
ErrInvalidObjectState
|
||||
ErrInvalidRange
|
||||
ErrInvalidURI
|
||||
|
||||
// Non-AWS errors
|
||||
ErrExistingObjectIsDirectory
|
||||
ErrObjectParentIsFile
|
||||
)
|
||||
@@ -187,13 +193,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
|
||||
Description: "We encountered an internal error, please try again.",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
},
|
||||
|
||||
ErrInvalidPart: {
|
||||
Code: "InvalidPart",
|
||||
Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
|
||||
ErrInvalidCopyDest: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.",
|
||||
@@ -284,7 +288,6 @@ var errorCodeResponse = map[ErrorCode]APIError{
|
||||
Description: "Signature header missing Signature field.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
|
||||
ErrUnsignedHeaders: {
|
||||
Code: "AccessDenied",
|
||||
Description: "There were headers present in the request which were not signed",
|
||||
@@ -320,25 +323,36 @@ var errorCodeResponse = map[ErrorCode]APIError{
|
||||
Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
|
||||
ErrInvalidAccessKeyID: {
|
||||
Code: "InvalidAccessKeyId",
|
||||
Description: "The access key ID you provided does not exist in our records.",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
|
||||
ErrRequestNotReadyYet: {
|
||||
Code: "AccessDenied",
|
||||
Description: "Request is not valid yet",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
|
||||
ErrSignatureDoesNotMatch: {
|
||||
Code: "SignatureDoesNotMatch",
|
||||
Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
|
||||
ErrSignatureDateDoesNotMatch: {
|
||||
Code: "SignatureDoesNotMatch",
|
||||
Description: "Date in Credential scope does not match YYYYMMDD from ISO-8601 version of date from HTTP",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrSignatureTerminationStr: {
|
||||
Code: "SignatureDoesNotMatch",
|
||||
Description: "Credential should be scoped with a valid terminator: 'aws4_request'",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrSignatureIncorrService: {
|
||||
Code: "SignatureDoesNotMatch",
|
||||
Description: "Credential should be scoped to correct service: s3",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrContentSHA256Mismatch: {
|
||||
Code: "XAmzContentSHA256Mismatch",
|
||||
Description: "The provided 'x-amz-content-sha256' header does not match what was computed.",
|
||||
@@ -374,6 +388,16 @@ var errorCodeResponse = map[ErrorCode]APIError{
|
||||
Description: "The operation is not valid for the current state of the object",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrInvalidRange: {
|
||||
Code: "InvalidRange",
|
||||
Description: "The requested range is not valid for the request. Try another range.",
|
||||
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
|
||||
},
|
||||
ErrInvalidURI: {
|
||||
Code: "InvalidURI",
|
||||
Description: "The specified URI couldn't be parsed.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrExistingObjectIsDirectory: {
|
||||
Code: "ExistingObjectIsDirectory",
|
||||
Description: "Existing Object is a directory.",
|
||||
|
||||
130
s3event/event.go
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3event
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
type S3EventSender interface {
|
||||
SendEvent(ctx *fiber.Ctx, meta EventMeta)
|
||||
}
|
||||
|
||||
type EventMeta struct {
|
||||
BucketOwner string
|
||||
EventName EventType
|
||||
ObjectSize int64
|
||||
ObjectETag *string
|
||||
VersionId *string
|
||||
}
|
||||
|
||||
type EventFields struct {
|
||||
Records []EventSchema
|
||||
}
|
||||
|
||||
type EventType string
|
||||
|
||||
const (
|
||||
EventObjectPut EventType = "s3:ObjectCreated:Put"
|
||||
EventObjectCopy EventType = "s3:ObjectCreated:Copy"
|
||||
EventCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
|
||||
EventObjectDelete EventType = "s3:ObjectRemoved:Delete"
|
||||
EventObjectRestoreCompleted EventType = "s3:ObjectRestore:Completed"
|
||||
EventObjectTaggingPut EventType = "s3:ObjectTagging:Put"
|
||||
EventObjectTaggingDelete EventType = "s3:ObjectTagging:Delete"
|
||||
EventObjectAclPut EventType = "s3:ObjectAcl:Put"
|
||||
// Not supported
|
||||
// EventObjectRestorePost EventType = "s3:ObjectRestore:Post"
|
||||
// EventObjectRestoreDelete EventType = "s3:ObjectRestore:Delete"
|
||||
)
|
||||
|
||||
type EventSchema struct {
|
||||
EventVersion string `json:"eventVersion"`
|
||||
EventSource string `json:"eventSource"`
|
||||
AwsRegion string `json:"awsRegion"`
|
||||
EventTime string `json:"eventTime"`
|
||||
EventName EventType `json:"eventName"`
|
||||
UserIdentity EventUserIdentity `json:"userIdentity"`
|
||||
RequestParameters EventRequestParams `json:"requestParameters"`
|
||||
ResponseElements EventResponseElements `json:"responseElements"`
|
||||
S3 EventS3Data `json:"s3"`
|
||||
GlacierEventData EventGlacierData `json:"glacierEventData"`
|
||||
}
|
||||
|
||||
type EventUserIdentity struct {
|
||||
PrincipalId string `json:"PrincipalId"`
|
||||
}
|
||||
|
||||
type EventRequestParams struct {
|
||||
SourceIPAddress string `json:"sourceIPAddress"`
|
||||
}
|
||||
|
||||
type EventResponseElements struct {
|
||||
RequestId string `json:"x-amz-request-id"`
|
||||
HostId string `json:"x-amz-id-2"`
|
||||
}
|
||||
|
||||
type EventS3Data struct {
|
||||
S3SchemaVersion string `json:"s3SchemaVersion"`
|
||||
ConfigurationId string `json:"configurationId"`
|
||||
Bucket EventS3BucketData `json:"bucket"`
|
||||
Object EventObjectData `json:"object"`
|
||||
}
|
||||
|
||||
type EventGlacierData struct {
|
||||
RestoreEventData EventRestoreData `json:"restoreEventData"`
|
||||
}
|
||||
|
||||
type EventRestoreData struct {
|
||||
LifecycleRestorationExpiryTime string `json:"lifecycleRestorationExpiryTime"`
|
||||
LifecycleRestoreStorageClass string `json:"lifecycleRestoreStorageClass"`
|
||||
}
|
||||
|
||||
type EventS3BucketData struct {
|
||||
Name string `json:"name"`
|
||||
OwnerIdentity EventUserIdentity `json:"ownerIdentity"`
|
||||
Arn string `json:"arn"`
|
||||
}
|
||||
|
||||
type EventObjectData struct {
|
||||
Key string `json:"key"`
|
||||
Size int64 `json:"size"`
|
||||
ETag *string `json:"eTag"`
|
||||
VersionId *string `json:"versionId"`
|
||||
Sequencer string `json:"sequencer"`
|
||||
}
|
||||
|
||||
type EventConfig struct {
|
||||
KafkaURL string
|
||||
KafkaTopic string
|
||||
KafkaTopicKey string
|
||||
NatsURL string
|
||||
NatsTopic string
|
||||
}
|
||||
|
||||
func InitEventSender(cfg *EventConfig) (S3EventSender, error) {
|
||||
if cfg.KafkaURL != "" && cfg.NatsURL != "" {
|
||||
return nil, fmt.Errorf("there should be specified one of the following: kafka, nats")
|
||||
}
|
||||
if cfg.NatsURL != "" {
|
||||
return InitNatsEventService(cfg.NatsURL, cfg.NatsTopic)
|
||||
}
|
||||
if cfg.KafkaURL != "" {
|
||||
return InitKafkaEventService(cfg.KafkaURL, cfg.KafkaTopic, cfg.KafkaTopicKey)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
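For orientation, a minimal sketch (not part of this change) of how a gateway process might call `InitEventSender` at startup. The broker address, topic, and key below are placeholder values; only one backend (Kafka or NATS) may be configured at a time, and a nil sender simply means notifications are disabled.

```go
package main

import (
	"log"

	"github.com/versity/versitygw/s3event"
)

func main() {
	// Placeholder endpoint/topic values; set NatsURL/NatsTopic instead
	// (but not both) to publish to NATS rather than Kafka.
	sender, err := s3event.InitEventSender(&s3event.EventConfig{
		KafkaURL:      "localhost:9092",
		KafkaTopic:    "versitygw-events",
		KafkaTopicKey: "versitygw",
	})
	if err != nil {
		log.Fatalf("init event sender: %v", err)
	}
	if sender == nil {
		// Neither backend configured: bucket event notifications are disabled.
		log.Println("event notifications disabled")
	}
}
```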
s3event/kafka.go (new file, 153 lines)
@@ -0,0 +1,153 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package s3event

import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "strings"
    "sync"
    "time"

    "github.com/gofiber/fiber/v2"
    "github.com/segmentio/kafka-go"
)

var sequencer = 0

type Kafka struct {
    key    string
    writer *kafka.Writer
    mu     sync.Mutex
}

func InitKafkaEventService(url, topic, key string) (S3EventSender, error) {
    if topic == "" {
        return nil, fmt.Errorf("kafka message topic should be specified")
    }

    w := kafka.NewWriter(kafka.WriterConfig{
        Brokers:      []string{url},
        Topic:        topic,
        Balancer:     &kafka.LeastBytes{},
        BatchTimeout: 5 * time.Millisecond,
    })

    msg := map[string]string{
        "Service": "S3",
        "Event":   "s3:TestEvent",
        "Time":    time.Now().Format(time.RFC3339),
        "Bucket":  "Test-Bucket",
    }

    msgJSON, err := json.Marshal(msg)
    if err != nil {
        return nil, err
    }

    message := kafka.Message{
        Key:   []byte(key),
        Value: msgJSON,
    }

    ctx := context.Background()

    err = w.WriteMessages(ctx, message)
    if err != nil {
        return nil, err
    }

    return &Kafka{
        key:    key,
        writer: w,
    }, nil
}

func (ks *Kafka) SendEvent(ctx *fiber.Ctx, meta EventMeta) {
    ks.mu.Lock()
    defer ks.mu.Unlock()

    path := strings.Split(ctx.Path(), "/")
    bucket, object := path[1], strings.Join(path[2:], "/")

    schema := EventSchema{
        EventVersion: "2.2",
        EventSource:  "aws:s3",
        AwsRegion:    ctx.Locals("region").(string),
        EventTime:    time.Now().Format(time.RFC3339),
        EventName:    meta.EventName,
        UserIdentity: EventUserIdentity{
            PrincipalId: ctx.Locals("access").(string),
        },
        RequestParameters: EventRequestParams{
            SourceIPAddress: ctx.IP(),
        },
        ResponseElements: EventResponseElements{
            RequestId: ctx.Get("X-Amz-Request-Id"),
            HostId:    ctx.Get("X-Amz-Id-2"),
        },
        S3: EventS3Data{
            S3SchemaVersion: "1.0",
            // This field will be populated once per-bucket notification
            // configuration is implemented
            ConfigurationId: "kafka-global",
            Bucket: EventS3BucketData{
                Name: bucket,
                OwnerIdentity: EventUserIdentity{
                    PrincipalId: ctx.Locals("access").(string),
                },
                Arn: fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/")),
            },
            Object: EventObjectData{
                Key:       object,
                Size:      meta.ObjectSize,
                ETag:      meta.ObjectETag,
                VersionId: meta.VersionId,
                Sequencer: genSequencer(),
            },
        },
        GlacierEventData: EventGlacierData{
            // Not supported
            RestoreEventData: EventRestoreData{},
        },
    }

    ks.send([]EventSchema{schema})
}

func (ks *Kafka) send(evnt []EventSchema) {
    msg, err := json.Marshal(evnt)
    if err != nil {
        fmt.Fprintf(os.Stderr, "failed to parse the event data: %v\n", err.Error())
        return
    }

    message := kafka.Message{
        Key:   []byte(ks.key),
        Value: msg,
    }

    ctx := context.Background()
    err = ks.writer.WriteMessages(ctx, message)
    if err != nil {
        fmt.Fprintf(os.Stderr, "failed to send kafka event: %v\n", err.Error())
    }
}

func genSequencer() string {
    sequencer = sequencer + 1
    return fmt.Sprintf("%X", sequencer)
}
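For the other side of this pipe, a consumer built on the same segmentio/kafka-go library can decode the published JSON back into `EventSchema` records. This is a rough sketch, not part of the change; the broker, topic, and group ID are assumed placeholder values, and the skip branch also covers the single `s3:TestEvent` probe published at init time (which is a JSON object, not an array).

```go
package main

import (
	"context"
	"encoding/json"
	"log"

	"github.com/segmentio/kafka-go"
	"github.com/versity/versitygw/s3event"
)

func main() {
	// Placeholder broker/topic/group values.
	r := kafka.NewReader(kafka.ReaderConfig{
		Brokers: []string{"localhost:9092"},
		Topic:   "versitygw-events",
		GroupID: "audit-consumer",
	})
	defer r.Close()

	for {
		m, err := r.ReadMessage(context.Background())
		if err != nil {
			log.Fatalf("read: %v", err)
		}
		// Each event message body is a JSON array of EventSchema records.
		var events []s3event.EventSchema
		if err := json.Unmarshal(m.Value, &events); err != nil {
			log.Printf("skipping non-event message: %v", err)
			continue
		}
		for _, ev := range events {
			log.Printf("%s %s/%s", ev.EventName, ev.S3.Bucket.Name, ev.S3.Object.Key)
		}
	}
}
```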
s3event/nats.go (new file, 112 lines)
@@ -0,0 +1,112 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package s3event

import (
    "encoding/json"
    "fmt"
    "os"
    "strings"
    "sync"
    "time"

    "github.com/gofiber/fiber/v2"
    "github.com/nats-io/nats.go"
)

type NatsEventSender struct {
    topic  string
    client *nats.Conn
    mu     sync.Mutex
}

func InitNatsEventService(url, topic string) (S3EventSender, error) {
    if topic == "" {
        return nil, fmt.Errorf("nats message topic should be specified")
    }

    client, err := nats.Connect(url)
    if err != nil {
        return nil, err
    }

    return &NatsEventSender{
        topic:  topic,
        client: client,
    }, nil
}

func (ns *NatsEventSender) SendEvent(ctx *fiber.Ctx, meta EventMeta) {
    ns.mu.Lock()
    defer ns.mu.Unlock()

    path := strings.Split(ctx.Path(), "/")
    bucket, object := path[1], strings.Join(path[2:], "/")

    schema := EventSchema{
        EventVersion: "2.2",
        EventSource:  "aws:s3",
        AwsRegion:    ctx.Locals("region").(string),
        EventTime:    time.Now().Format(time.RFC3339),
        EventName:    meta.EventName,
        UserIdentity: EventUserIdentity{
            PrincipalId: ctx.Locals("access").(string),
        },
        RequestParameters: EventRequestParams{
            SourceIPAddress: ctx.IP(),
        },
        ResponseElements: EventResponseElements{
            RequestId: ctx.Get("X-Amz-Request-Id"),
            HostId:    ctx.Get("X-Amz-Id-2"),
        },
        S3: EventS3Data{
            S3SchemaVersion: "1.0",
            // This field will be populated once per-bucket notification
            // configuration is implemented
            ConfigurationId: "nats-global",
            Bucket: EventS3BucketData{
                Name: bucket,
                OwnerIdentity: EventUserIdentity{
                    PrincipalId: ctx.Locals("access").(string),
                },
                Arn: fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/")),
            },
            Object: EventObjectData{
                Key:       object,
                Size:      meta.ObjectSize,
                ETag:      meta.ObjectETag,
                VersionId: meta.VersionId,
                Sequencer: genSequencer(),
            },
        },
        GlacierEventData: EventGlacierData{
            // Not supported
            RestoreEventData: EventRestoreData{},
        },
    }

    ns.send([]EventSchema{schema})
}

func (ns *NatsEventSender) send(evnt []EventSchema) {
    msg, err := json.Marshal(evnt)
    if err != nil {
        fmt.Fprintf(os.Stderr, "failed to parse the event data: %v\n", err.Error())
        return
    }

    err = ns.client.Publish(ns.topic, msg)
    if err != nil {
        fmt.Fprintf(os.Stderr, "failed to send nats event: %v\n", err.Error())
    }
}
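A matching NATS subscriber is a few lines with the same nats.go client library. Again a sketch, not part of the change; the server URL and subject are placeholders and should match `NatsURL`/`NatsTopic` passed to the gateway.

```go
package main

import (
	"encoding/json"
	"log"
	"os"
	"os/signal"

	"github.com/nats-io/nats.go"
	"github.com/versity/versitygw/s3event"
)

func main() {
	// Placeholder server URL and subject.
	nc, err := nats.Connect("nats://localhost:4222")
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer nc.Drain()

	_, err = nc.Subscribe("versitygw-events", func(m *nats.Msg) {
		var events []s3event.EventSchema
		if err := json.Unmarshal(m.Data, &events); err != nil {
			log.Printf("skipping malformed event: %v", err)
			return
		}
		for _, ev := range events {
			log.Printf("%s %s/%s", ev.EventName, ev.S3.Bucket.Name, ev.S3.Object.Key)
		}
	})
	if err != nil {
		log.Fatalf("subscribe: %v", err)
	}

	// Block until interrupted; events arrive on the subscription callback.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	<-stop
}
```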
s3log/audit-logger.go (new file, 112 lines)
@@ -0,0 +1,112 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package s3log

import (
    "crypto/tls"
    "encoding/hex"
    "fmt"
    "math/rand"
    "strings"
    "time"

    "github.com/gofiber/fiber/v2"
)

type AuditLogger interface {
    Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta)
    HangUp() error
    Shutdown() error
}

type LogMeta struct {
    BucketOwner string
    ObjectSize  int64
    Action      string
}

type LogConfig struct {
    LogFile    string
    WebhookURL string
}

type LogFields struct {
    BucketOwner        string
    Bucket             string
    Time               time.Time
    RemoteIP           string
    Requester          string
    RequestID          string
    Operation          string
    Key                string
    RequestURI         string
    HttpStatus         int
    ErrorCode          string
    BytesSent          int
    ObjectSize         int64
    TotalTime          int64
    TurnAroundTime     int64
    Referer            string
    UserAgent          string
    VersionID          string
    HostID             string
    SignatureVersion   string
    CipherSuite        string
    AuthenticationType string
    HostHeader         string
    TLSVersion         string
    AccessPointARN     string
    AclRequired        string
}

func InitLogger(cfg *LogConfig) (AuditLogger, error) {
    if cfg.WebhookURL != "" && cfg.LogFile != "" {
        return nil, fmt.Errorf("there should be specified one of the following: file, webhook")
    }
    if cfg.WebhookURL != "" {
        return InitWebhookLogger(cfg.WebhookURL)
    }
    if cfg.LogFile != "" {
        return InitFileLogger(cfg.LogFile)
    }

    return nil, nil
}

func genID() string {
    src := rand.New(rand.NewSource(time.Now().UnixNano()))
    b := make([]byte, 8)

    if _, err := src.Read(b); err != nil {
        panic(err)
    }

    return strings.ToUpper(hex.EncodeToString(b))
}

func getTLSVersionName(version uint16) string {
    switch version {
    case tls.VersionTLS10:
        return "TLSv1.0"
    case tls.VersionTLS11:
        return "TLSv1.1"
    case tls.VersionTLS12:
        return "TLSv1.2"
    case tls.VersionTLS13:
        return "TLSv1.3"
    default:
        return ""
    }
}
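Mirroring the event sender, `InitLogger` picks exactly one audit target from the config. A minimal usage sketch (not part of the change; the log path is a placeholder):

```go
package main

import (
	"log"

	"github.com/versity/versitygw/s3log"
)

func main() {
	// Placeholder log path; set WebhookURL instead (but not both) to ship
	// audit records to an HTTP endpoint rather than a local file.
	logger, err := s3log.InitLogger(&s3log.LogConfig{
		LogFile: "/var/log/versitygw/access.log",
	})
	if err != nil {
		log.Fatalf("init audit logger: %v", err)
	}
	if logger == nil {
		// Neither target configured: audit logging is disabled.
		log.Println("audit logging disabled")
		return
	}
	defer logger.Shutdown()

	// ... request handlers call logger.Log(ctx, err, body, meta) ...
}
```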
s3log/file.go (new file, 230 lines)
@@ -0,0 +1,230 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package s3log

import (
    "crypto/tls"
    "fmt"
    "os"
    "strings"
    "sync"
    "time"

    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/s3err"
)

const (
    logFileMode = 0600
    timeFormat  = "02/January/2006:15:04:05 -0700"
)

// FileLogger is a local file audit log
type FileLogger struct {
    logfile string
    f       *os.File
    gotErr  bool
    mu      sync.Mutex
}

var _ AuditLogger = &FileLogger{}

// InitFileLogger initializes audit logs to local file
func InitFileLogger(logname string) (AuditLogger, error) {
    f, err := os.OpenFile(logname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
    if err != nil {
        return nil, fmt.Errorf("open log: %w", err)
    }

    f.WriteString(fmt.Sprintf("log starts %v\n", time.Now()))

    return &FileLogger{logfile: logname, f: f}, nil
}

// Log sends log message to file logger
func (f *FileLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
    f.mu.Lock()
    defer f.mu.Unlock()

    if f.gotErr {
        return
    }

    lf := LogFields{}

    access := "-"
    reqURI := ctx.OriginalURL()
    path := strings.Split(ctx.Path(), "/")
    bucket, object := path[1], strings.Join(path[2:], "/")
    errorCode := ""
    httpStatus := 200
    startTime := ctx.Locals("startTime").(time.Time)
    tlsConnState := ctx.Context().TLSConnectionState()
    if tlsConnState != nil {
        lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)
        lf.TLSVersion = getTLSVersionName(tlsConnState.Version)
    }

    if err != nil {
        serr, ok := err.(s3err.APIError)
        if ok {
            errorCode = serr.Code
            httpStatus = serr.HTTPStatusCode
        } else {
            errorCode = err.Error()
            httpStatus = 500
        }
    }

    switch ctx.Locals("access").(type) {
    case string:
        access = ctx.Locals("access").(string)
    }

    lf.BucketOwner = meta.BucketOwner
    lf.Bucket = bucket
    lf.Time = time.Now()
    lf.RemoteIP = ctx.IP()
    lf.Requester = access
    lf.RequestID = genID()
    lf.Operation = meta.Action
    lf.Key = object
    lf.RequestURI = reqURI
    lf.HttpStatus = httpStatus
    lf.ErrorCode = errorCode
    lf.BytesSent = len(body)
    lf.ObjectSize = meta.ObjectSize
    lf.TotalTime = time.Since(startTime).Milliseconds()
    lf.TurnAroundTime = time.Since(startTime).Milliseconds()
    lf.Referer = ctx.Get("Referer")
    lf.UserAgent = ctx.Get("User-Agent")
    lf.VersionID = ctx.Query("versionId")
    lf.HostID = ctx.Get("X-Amz-Id-2")
    lf.SignatureVersion = "SigV4"
    lf.AuthenticationType = "AuthHeader"
    lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", ctx.Locals("region").(string))
    lf.AccessPointARN = fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/"))
    lf.AclRequired = "Yes"

    f.writeLog(lf)
}

func (f *FileLogger) writeLog(lf LogFields) {
    if lf.BucketOwner == "" {
        lf.BucketOwner = "-"
    }
    if lf.Bucket == "" {
        lf.Bucket = "-"
    }
    if lf.RemoteIP == "" {
        lf.RemoteIP = "-"
    }
    if lf.Requester == "" {
        lf.Requester = "-"
    }
    if lf.Operation == "" {
        lf.Operation = "-"
    }
    if lf.Key == "" {
        lf.Key = "-"
    }
    if lf.RequestURI == "" {
        lf.RequestURI = "-"
    }
    if lf.ErrorCode == "" {
        lf.ErrorCode = "-"
    }
    if lf.Referer == "" {
        lf.Referer = "-"
    }
    if lf.UserAgent == "" {
        lf.UserAgent = "-"
    }
    if lf.VersionID == "" {
        lf.VersionID = "-"
    }
    if lf.HostID == "" {
        lf.HostID = "-"
    }
    if lf.CipherSuite == "" {
        lf.CipherSuite = "-"
    }
    if lf.HostHeader == "" {
        lf.HostHeader = "-"
    }
    if lf.TLSVersion == "" {
        lf.TLSVersion = "-"
    }

    log := fmt.Sprintf("%v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v %v\n",
        lf.BucketOwner,
        lf.Bucket,
        fmt.Sprintf("[%v]", lf.Time.Format(timeFormat)),
        lf.RemoteIP,
        lf.Requester,
        lf.RequestID,
        lf.Operation,
        lf.Key,
        lf.RequestURI,
        lf.HttpStatus,
        lf.ErrorCode,
        lf.BytesSent,
        lf.ObjectSize,
        lf.TotalTime,
        lf.TurnAroundTime,
        lf.Referer,
        lf.UserAgent,
        lf.VersionID,
        lf.HostID,
        lf.SignatureVersion,
        lf.CipherSuite,
        lf.AuthenticationType,
        lf.HostHeader,
        lf.TLSVersion,
        lf.AccessPointARN,
        lf.AclRequired,
    )

    _, err := f.f.WriteString(log)
    if err != nil {
        fmt.Fprintf(os.Stderr, "error writing to log file: %v\n", err)
        // TODO: do we need to terminate on log error?
        // set err for now so that we don't spew errors
        f.gotErr = true
    }
}

// HangUp closes current logfile handle and opens a new one
// typically needed for log rotations
func (f *FileLogger) HangUp() error {
    err := f.f.Close()
    if err != nil {
        return fmt.Errorf("close log: %w", err)
    }

    f.f, err = os.OpenFile(f.logfile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
    if err != nil {
        return fmt.Errorf("open log: %w", err)
    }

    f.f.WriteString(fmt.Sprintf("log starts %v\n", time.Now()))

    return nil
}

// Shutdown closes logfile handle
func (f *FileLogger) Shutdown() error {
    return f.f.Close()
}
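`HangUp` reopens the file so an external tool such as logrotate can move the old log aside. One plausible wiring, sketched here under the assumption that the hosting process forwards SIGHUP to the logger (the file path is a placeholder and this wiring is not part of the change):

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/versity/versitygw/s3log"
)

func main() {
	logger, err := s3log.InitFileLogger("/tmp/versitygw-access.log") // placeholder path
	if err != nil {
		log.Fatalf("init file logger: %v", err)
	}
	defer logger.Shutdown()

	// Reopen the log file whenever SIGHUP arrives (e.g. after rotation).
	hup := make(chan os.Signal, 1)
	signal.Notify(hup, syscall.SIGHUP)
	go func() {
		for range hup {
			if err := logger.HangUp(); err != nil {
				log.Printf("log reopen failed: %v", err)
			}
		}
	}()

	// Block until interrupted; request handlers would call logger.Log here.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	<-stop
}
```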
s3log/webhook.go (new file, 156 lines)
@@ -0,0 +1,156 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package s3log

import (
    "bytes"
    "crypto/tls"
    "encoding/json"
    "fmt"
    "net"
    "net/http"
    "os"
    "strings"
    "sync"
    "time"

    "github.com/gofiber/fiber/v2"
    "github.com/versity/versitygw/s3err"
)

// WebhookLogger is a webhook URL audit log
type WebhookLogger struct {
    mu  sync.Mutex
    url string
}

var _ AuditLogger = &WebhookLogger{}

// InitWebhookLogger initializes audit logs to webhook URL
func InitWebhookLogger(url string) (AuditLogger, error) {
    client := &http.Client{
        Timeout: 3 * time.Second,
    }
    _, err := client.Post(url, "application/json", nil)
    if err != nil {
        if err, ok := err.(net.Error); ok && !err.Timeout() {
            return nil, fmt.Errorf("unreachable webhook url: %w", err)
        }
    }
    return &WebhookLogger{
        url: url,
    }, nil
}

// Log sends log message to webhook
func (wl *WebhookLogger) Log(ctx *fiber.Ctx, err error, body []byte, meta LogMeta) {
    wl.mu.Lock()
    defer wl.mu.Unlock()

    lf := LogFields{}

    access := "-"
    reqURI := ctx.OriginalURL()
    path := strings.Split(ctx.Path(), "/")
    bucket, object := path[1], strings.Join(path[2:], "/")
    errorCode := ""
    httpStatus := 200
    startTime := ctx.Locals("startTime").(time.Time)
    tlsConnState := ctx.Context().TLSConnectionState()
    if tlsConnState != nil {
        lf.CipherSuite = tls.CipherSuiteName(tlsConnState.CipherSuite)
        lf.TLSVersion = getTLSVersionName(tlsConnState.Version)
    }

    if err != nil {
        serr, ok := err.(s3err.APIError)
        if ok {
            errorCode = serr.Code
            httpStatus = serr.HTTPStatusCode
        } else {
            errorCode = err.Error()
            httpStatus = 500
        }
    }

    switch ctx.Locals("access").(type) {
    case string:
        access = ctx.Locals("access").(string)
    }

    lf.BucketOwner = meta.BucketOwner
    lf.Bucket = bucket
    lf.Time = time.Now()
    lf.RemoteIP = ctx.IP()
    lf.Requester = access
    lf.RequestID = genID()
    lf.Operation = meta.Action
    lf.Key = object
    lf.RequestURI = reqURI
    lf.HttpStatus = httpStatus
    lf.ErrorCode = errorCode
    lf.BytesSent = len(body)
    lf.ObjectSize = meta.ObjectSize
    lf.TotalTime = time.Since(startTime).Milliseconds()
    lf.TurnAroundTime = time.Since(startTime).Milliseconds()
    lf.Referer = ctx.Get("Referer")
    lf.UserAgent = ctx.Get("User-Agent")
    lf.VersionID = ctx.Query("versionId")
    lf.HostID = ctx.Get("X-Amz-Id-2")
    lf.SignatureVersion = "SigV4"
    lf.AuthenticationType = "AuthHeader"
    lf.HostHeader = fmt.Sprintf("s3.%v.amazonaws.com", ctx.Locals("region").(string))
    lf.AccessPointARN = fmt.Sprintf("arn:aws:s3:::%v", strings.Join(path, "/"))
    lf.AclRequired = "Yes"

    wl.sendLog(lf)
}

func (wl *WebhookLogger) sendLog(lf LogFields) {
    jsonLog, err := json.Marshal(lf)
    if err != nil {
        fmt.Fprintf(os.Stderr, "failed to parse the log data: %v\n", err.Error())
        return
    }

    req, err := http.NewRequest(http.MethodPost, wl.url, bytes.NewReader(jsonLog))
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    req.Header.Set("Content-Type", "application/json; charset=utf-8")

    go makeRequest(req)
}

func makeRequest(req *http.Request) {
    client := &http.Client{
        Timeout: 1 * time.Second,
    }
    _, err := client.Do(req)
    if err != nil {
        if err, ok := err.(net.Error); ok && !err.Timeout() {
            fmt.Fprintf(os.Stderr, "error sending webhook log: %v\n", err)
        }
    }
}

// HangUp does nothing for webhooks
func (wl *WebhookLogger) HangUp() error {
    return nil
}

// Shutdown does nothing for webhooks
func (wl *WebhookLogger) Shutdown() error {
    return nil
}
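On the receiving end, any HTTP endpoint that accepts a JSON POST of `LogFields` will do. A minimal sketch of such a receiver (not part of the change; the path and port are arbitrary). Note it also tolerates the empty-body reachability probe sent by `InitWebhookLogger`.

```go
package main

import (
	"encoding/json"
	"io"
	"log"
	"net/http"

	"github.com/versity/versitygw/s3log"
)

func main() {
	http.HandleFunc("/audit", func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()

		var lf s3log.LogFields
		if err := json.NewDecoder(r.Body).Decode(&lf); err != nil {
			if err == io.EOF {
				// Empty body: the startup probe from InitWebhookLogger.
				w.WriteHeader(http.StatusOK)
				return
			}
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("%s %s %s/%s -> %d", lf.Requester, lf.Operation, lf.Bucket, lf.Key, lf.HttpStatus)
		w.WriteHeader(http.StatusOK)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```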
@@ -16,6 +16,8 @@ package s3response

import (
    "encoding/xml"

    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// Part describes part metadata.
@@ -27,7 +29,7 @@ type Part struct {
}

// ListPartsResponse - s3 api list parts response.
-type ListPartsResponse struct {
+type ListPartsResult struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`

    Bucket string
@@ -50,7 +52,7 @@ type ListPartsResponse struct {
}

// ListMultipartUploadsResponse - s3 api list multipart uploads response.
-type ListMultipartUploadsResponse struct {
+type ListMultipartUploadsResult struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"`

    Bucket string
@@ -94,3 +96,41 @@ type Owner struct {
    ID          string
    DisplayName string
}

type Tag struct {
    Key   string `xml:"Key"`
    Value string `xml:"Value"`
}

type TagSet struct {
    Tags []Tag `xml:"Tag"`
}

type Tagging struct {
    TagSet TagSet `xml:"TagSet"`
}

type DeleteObjects struct {
    Objects []types.ObjectIdentifier `xml:"Object"`
}

type DeleteObjectsResult struct {
    Deleted []types.DeletedObject
    Error   []types.Error
}
type SelectObjectContentPayload struct {
    Expression          *string
    ExpressionType      types.ExpressionType
    RequestProgress     *types.RequestProgress
    InputSerialization  *types.InputSerialization
    OutputSerialization *types.OutputSerialization
    ScanRange           *types.ScanRange
}

type SelectObjectContentResult struct {
    Records  *types.RecordsEvent
    Stats    *types.StatsEvent
    Progress *types.ProgressEvent
    Cont     *string
    End      *string
}
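The new Tag/TagSet/Tagging types line up with the S3 tagging document shape. A quick marshalling check (values arbitrary, assuming the s3response package path follows the module layout used elsewhere in the repo) illustrates the XML they produce:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"log"

	"github.com/versity/versitygw/s3response"
)

func main() {
	t := s3response.Tagging{
		TagSet: s3response.TagSet{
			Tags: []s3response.Tag{{Key: "project", Value: "demo"}},
		},
	}

	out, err := xml.MarshalIndent(t, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// Produces:
	// <Tagging>
	//   <TagSet>
	//     <Tag>
	//       <Key>project</Key>
	//       <Value>demo</Value>
	//     </Tag>
	//   </TagSet>
	// </Tagging>
	fmt.Println(string(out))
}
```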