Mirror of https://github.com/versity/versitygw.git (synced 2026-01-24 20:12:01 +00:00)

Compare commits: api-unit-t ... v0.2 (103 commits)
| Author | SHA1 | Date |
|---|---|---|
| | a95d03c498 | |
| | feace16fa9 | |
| | 33e1d39138 | |
| | 115910eafe | |
| | ef06d11d7c | |
| | 2697edd40a | |
| | f88cb9fa7f | |
| | 38bb042a32 | |
| | 7682defa95 | |
| | 12df87577b | |
| | 92a763e53a | |
| | c3aaf1538e | |
| | c7625c9b58 | |
| | 50357ce61a | |
| | 160a99cbbd | |
| | 0350215e2e | |
| | de346816fc | |
| | f1ac6b808b | |
| | 8ade0c96cf | |
| | f4400edaa0 | |
| | f337aa288d | |
| | cd45036ebf | |
| | 002c427e7d | |
| | e75baad56c | |
| | 6b16dd76bd | |
| | 20b6c1c266 | |
| | 1717d45664 | |
| | 8f27e88198 | |
| | 39e1399664 | |
| | d526569d13 | |
| | 69be1dcd1e | |
| | a0f3b0bf2c | |
| | 83b494a91f | |
| | bec87757a3 | |
| | 3cfee3a032 | |
| | 07ddf620a4 | |
| | b6f3ea3350 | |
| | ffd7c20223 | |
| | 40f0aa8b05 | |
| | 7dc1c7f4c1 | |
| | 9c9fb95892 | |
| | c3f181d22c | |
| | c9e72f4080 | |
| | f9a52a5a3c | |
| | 5f914a68e6 | |
| | 489bb3e899 | |
| | 04bbe61826 | |
| | 8e86acf20b | |
| | f174308e3f | |
| | ecd28bc2f7 | |
| | 510cf6ed57 | |
| | c0cc170f78 | |
| | 04ab589aeb | |
| | b8cb3f774d | |
| | 981894aef2 | |
| | 4d7c12def3 | |
| | a20413c5e4 | |
| | 88372f36c8 | |
| | 9f66269b2e | |
| | a881893dc2 | |
| | a04689e53d | |
| | effda027af | |
| | 130bb4b013 | |
| | 93212ccce9 | |
| | 5cbcf0c900 | |
| | 380b4e476b | |
| | 8b79fb24de | |
| | f08da34711 | |
| | 74b28283bf | |
| | c9320ea6ce | |
| | 83ddf5c82a | |
| | 207088fade | |
| | 0a35aaf428 | |
| | 89d613b268 | |
| | 2ca274b850 | |
| | c21c7be439 | |
| | aa00a89e5c | |
| | cc1fb2cffe | |
| | 0bab1117d4 | |
| | 355e99a7ef | |
| | 9469dbc76f | |
| | c16fe6f110 | |
| | 3c3516822f | |
| | 0121ea6c7f | |
| | 296aeb1960 | |
| | 7391dccf58 | |
| | 56a8638933 | |
| | 41db361f86 | |
| | 2664ed6e96 | |
| | d2c2cdbabc | |
| | c5de938637 | |
| | 70f5e0fac9 | |
| | b41dfd653c | |
| | dcdc62411e | |
| | 09d42c92fd | |
| | 50b0e454e6 | |
| | e85f764f08 | |
| | 264096becf | |
| | 01be7a2a6b | |
| | 16df0311e9 | |
| | 7e4521f1ee | |
| | 6d7fffffaf | |
| | e3828fbeb6 | |
.github/workflows/go.yml (vendored, 11 lines changed)

@@ -23,5 +23,16 @@ jobs:
      run: |
        go get -v -t -d ./...

    - name: Build
      run: go build -o versitygw cmd/versitygw/*.go

    - name: Test
      run: go test -v -timeout 30s -tags=github ./...

    - name: Install govulncheck
      run: go install golang.org/x/vuln/cmd/govulncheck@latest
      shell: bash

    - name: Run govulncheck
      run: govulncheck ./...
      shell: bash
.gitignore (vendored, 10 lines changed)

@@ -7,6 +7,8 @@
*.dll
*.so
*.dylib
cmd/versitygw/versitygw
/versitygw

# Test binary, built with `go test -c`
*.test
@@ -22,3 +24,11 @@ go.work

# ignore IntelliJ directories
.idea

# auto generated VERSION file
VERSION

# build output
/versitygw.spec
*.tar
*.tar.gz
Makefile (new file, 73 lines)

@@ -0,0 +1,73 @@
# Copyright 2023 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# Go parameters
GOCMD=go
GOBUILD=$(GOCMD) build
GOCLEAN=$(GOCMD) clean
GOTEST=$(GOCMD) test

BIN=versitygw

VERSION := $(shell if test -e VERSION; then cat VERSION; else git describe --abbrev=0 --tags HEAD; fi)
BUILD := $(shell git rev-parse --short HEAD || echo release-rpm)
TIME := `date -u '+%Y-%m-%d_%I:%M:%S%p'`

LDFLAGS=-ldflags "-X=main.Build=$(BUILD) -X=main.BuildTime=$(TIME) -X=main.Version=$(VERSION)"

all: build

build: $(BIN)

.PHONY: $(BIN)
$(BIN):
	$(GOBUILD) $(LDFLAGS) -o $(BIN) cmd/$(BIN)/*.go

.PHONY: test
test:
	$(GOTEST) ./...

.PHONY: check
check:
	# note this requires staticcheck be in your PATH:
	# export PATH=$PATH:~/go/bin
	# go install honnef.co/go/tools/cmd/staticcheck@latest
	staticcheck ./...
	golint ./...
	gofmt -s -l .

.PHONY: clean
clean:
	$(GOCLEAN)

.PHONY: cleanall
cleanall: clean
	rm -f $(BIN)
	rm -f versitygw-*.tar
	rm -f versitygw-*.tar.gz
	rm -f versitygw.spec

%.spec: %.spec.in
	sed -e 's/@@VERSION@@/$(VERSION)/g' < $< > $@+
	mv $@+ $@

TARFILE = $(BIN)-$(VERSION).tar

dist: $(BIN).spec
	echo $(VERSION) >VERSION
	git archive --format=tar --prefix $(BIN)-$(VERSION)/ HEAD > $(TARFILE)
	@ tar rf $(TARFILE) --transform="s@\(.*\)@$(BIN)-$(VERSION)/\1@" $(BIN).spec VERSION
	rm -f VERSION
	rm -f $(BIN).spec
	gzip -f $(TARFILE)
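The LDFLAGS definition above stamps the binary with version metadata at link time via -X. The gateway's cmd sources are not part of this diff, so the following is only a sketch, under the assumption that package main declares matching string variables:

```go
package main

import "fmt"

// These defaults are overwritten at link time by
// -ldflags "-X=main.Build=... -X=main.BuildTime=... -X=main.Version=...".
var (
	Build     = "dev"
	BuildTime = "unknown"
	Version   = "v0.0.0"
)

func main() {
	fmt.Printf("versitygw %s (build %s, built %s)\n", Version, Build, BuildTime)
}
```

With variables like these in place, `make build` would produce a binary whose reported version reflects the VERSION file or the latest git tag.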
NOTICE (new file, 2 lines)

@@ -0,0 +1,2 @@
versitygw - Versity S3 Gateway
Copyright 2023 Versity Software
README.md (new file, 64 lines)

@@ -0,0 +1,64 @@
# The Versity Gateway: A High-Performance Open Source S3 to File Translation Tool

<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/versity/versitygw/blob/assets/assets/logo-white.svg">
<source media="(prefers-color-scheme: light)" srcset="https://github.com/versity/versitygw/blob/assets/assets/logo.svg">
<a href="https://www.versity.com"><img alt="Versity Software logo image." src="https://github.com/versity/versitygw/blob/assets/assets/logo.svg"></a>
</picture>

[](https://github.com/versity/versitygw/blob/main/LICENSE)

The Versity Gateway: A High-Performance Open Source S3 to File Translation Tool

Current status: Alpha, in development not yet suited for production use

See project [documentation](https://github.com/versity/versitygw/wiki) on the wiki.

Versity Gateway, a simple to use tool for seamless inline translation between AWS S3 object commands and file-based storage systems. The Versity Gateway bridges the gap between S3-reliant applications and file storage systems, enabling enhanced compatibility and integration with file based systems while offering exceptional scalability.

The server translates incoming S3 API requests and transforms them into equivalent operations to the backend service. By leveraging this gateway server, applications can interact with the S3-compatible API on top of already existing storage systems. This project enables leveraging existing infrastructure investments while seamlessly integrating with S3-compatible systems, offering increased flexibility and compatibility in managing data storage.

The Versity Gateway is focused on performance, simplicity, and expandability. The Versity Gateway is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX file backend storage and Versity’s open source ScoutFS filesystem.

The gateway is completely stateless. Multiple Versity Gateway instances may be deployed in a cluster to increase aggregate throughput. The Versity Gateway’s stateless architecture allows any request to be serviced by any gateway thereby distributing workloads and enhancing performance. Load balancers may be used to evenly distribute requests across the cluster of gateways for optimal performance.

The S3 HTTP(S) server and routing is implemented using the [Fiber](https://gofiber.io) web framework. This framework is actively developed with a focus on performance. S3 API compatibility leverages the official [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2) whenever possible for maximum service compatibility with AWS S3.

## Getting Started
See the [Quickstart](https://github.com/versity/versitygw/wiki/Quickstart) documentation.

### Run the gateway with posix backend:

```
mkdir /tmp/vgw
ROOT_ACCESS_KEY="testuser" ROOT_SECRET_KEY="secret" ./versitygw --port :10000 posix /tmp/vgw
```
This will enable an S3 server on the current host listening on port 10000 and hosting the directory `/tmp/vgw`.

To get the usage output, run the following:

```
./versitygw --help
```

The command format is

```
versitygw [global options] command [command options] [arguments...]
```
The global options are specified before the backend type and the backend options are specified after.
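Because the gateway exposes the standard S3 API (served by Fiber and exercised with aws-sdk-go-v2, as the README notes), any S3 client can point at the quickstart instance. The following is a rough, hypothetical sketch, not part of the repository: it assumes the gateway from the quickstart is listening on localhost:10000 with the testuser/secret root credentials, and the bucket name demo and key hello.txt are invented for the example.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Credentials and endpoint match the quickstart command above.
	creds := credentials.NewStaticCredentialsProvider("testuser", "secret", "")

	client := s3.New(s3.Options{
		Region:       "us-east-1",
		Credentials:  creds,
		BaseEndpoint: aws.String("http://localhost:10000"),
		UsePathStyle: true, // a local gateway has no DNS-style bucket hostnames
	})

	ctx := context.Background()

	// Create a bucket, upload a small object, then list buckets.
	if _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{Bucket: aws.String("demo")}); err != nil {
		log.Fatal(err)
	}
	if _, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String("demo"),
		Key:    aws.String("hello.txt"),
		Body:   strings.NewReader("hello from versitygw"),
	}); err != nil {
		log.Fatal(err)
	}

	out, err := client.ListBuckets(ctx, &s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(*b.Name)
	}
}
```

The BaseEndpoint option is available in recent aws-sdk-go-v2 releases; older SDK versions would need a custom endpoint resolver instead.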
***

#### Versity gives you clarity and control over your archival storage, so you can allocate more resources to your core mission.

### Contact
info@versity.com <br />
+1 844 726 8826

### @versitysoftware
[](https://www.linkedin.com/company/versity/)
[](https://twitter.com/VersitySoftware)
[](https://www.facebook.com/versitysoftware)
[](https://www.instagram.com/versitysoftware/)
backend/auth/iam.go (new file, 137 lines)

@@ -0,0 +1,137 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package auth

import (
	"encoding/json"
	"fmt"
	"os"
	"sync"

	"github.com/versity/versitygw/s3err"
)

type Account struct {
	Secret string `json:"secret"`
	Role   string `json:"role"`
	Region string `json:"region"`
}

type IAMConfig struct {
	AccessAccounts map[string]Account `json:"accessAccounts"`
}

type AccountsCache struct {
	mu       sync.Mutex
	Accounts map[string]Account
}

func (c *AccountsCache) getAccount(access string) *Account {
	c.mu.Lock()
	defer c.mu.Unlock()

	acc, ok := c.Accounts[access]
	if !ok {
		return nil
	}

	return &acc
}

func (c *AccountsCache) updateAccounts() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	var data IAMConfig

	file, err := os.ReadFile("users.json")
	if err != nil {
		return fmt.Errorf("error reading config file: %w", err)
	}

	if err := json.Unmarshal(file, &data); err != nil {
		return fmt.Errorf("error parsing the data: %w", err)
	}

	c.Accounts = data.AccessAccounts

	return nil
}

type IAMService interface {
	GetIAMConfig() (*IAMConfig, error)
	CreateAccount(access string, account *Account) error
	GetUserAccount(access string) *Account
}

type IAMServiceUnsupported struct {
	accCache *AccountsCache
}

var _ IAMService = &IAMServiceUnsupported{}

func New() IAMService {
	return &IAMServiceUnsupported{accCache: &AccountsCache{Accounts: map[string]Account{}}}
}

func (IAMServiceUnsupported) GetIAMConfig() (*IAMConfig, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (s IAMServiceUnsupported) CreateAccount(access string, account *Account) error {
	var data IAMConfig

	file, err := os.ReadFile("users.json")
	if err != nil {
		data = IAMConfig{AccessAccounts: map[string]Account{
			access: *account,
		}}
	} else {
		if err := json.Unmarshal(file, &data); err != nil {
			return err
		}

		_, ok := data.AccessAccounts[access]
		if ok {
			return fmt.Errorf("user with the given access already exists")
		}

		data.AccessAccounts[access] = *account
	}

	updatedJSON, err := json.MarshalIndent(data, "", " ")
	if err != nil {
		return err
	}

	if err := os.WriteFile("users.json", updatedJSON, 0644); err != nil {
		return err
	}
	return nil
}

func (s IAMServiceUnsupported) GetUserAccount(access string) *Account {
	acc := s.accCache.getAccount(access)
	if acc == nil {
		err := s.accCache.updateAccounts()
		if err != nil {
			return nil
		}

		return s.accCache.getAccount(access)
	}

	return acc
}
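To illustrate how the IAMService above is meant to be driven, here is a small hypothetical caller, not part of the diff; the access key, secret, and role values are invented, and CreateAccount persists them to users.json in the process working directory exactly as the code above shows.

```go
package main

import (
	"fmt"
	"log"

	"github.com/versity/versitygw/backend/auth"
)

func main() {
	iam := auth.New()

	// CreateAccount writes (or updates) users.json in the working directory.
	err := iam.CreateAccount("AKEXAMPLE", &auth.Account{
		Secret: "supersecret",
		Role:   "admin",
		Region: "us-east-1",
	})
	if err != nil {
		log.Fatal(err)
	}

	// GetUserAccount first checks the in-memory cache, then falls back to
	// re-reading users.json via updateAccounts.
	acc := iam.GetUserAccount("AKEXAMPLE")
	if acc == nil {
		log.Fatal("account not found")
	}
	fmt.Println(acc.Role, acc.Region)
}
```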
@@ -1,3 +1,17 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package backend

import (
@@ -6,47 +20,46 @@ import (

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/scoutgw/s3err"
	"github.com/versity/versitygw/s3err"
	"github.com/versity/versitygw/s3response"
)

//go:generate moq -out backend_moq_test.go . Backend
//go:generate moq -out ../s3api/controllers/backend_moq_test.go . Backend
//go:generate moq -out ../s3api/controllers/backend_moq_test.go -pkg controllers . Backend
type Backend interface {
	fmt.Stringer
	GetIAMConfig() ([]byte, error)
	Shutdown()

	ListBuckets() (*s3.ListBucketsOutput, error)
	HeadBucket(bucket string) (*s3.HeadBucketOutput, error)
	GetBucketAcl(bucket string) (*s3.GetBucketAclOutput, error)
	PutBucket(bucket string) error
	PutBucketAcl(*s3.PutBucketAclInput) error
	DeleteBucket(bucket string) error

	CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
	CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error)
	AbortMultipartUpload(*s3.AbortMultipartUploadInput) error
	ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
	ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error)
	ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
	ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
	CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error)
	PutObjectPart(bucket, object, uploadID string, part int, r io.Reader) (etag string, err error)
	PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error)

	PutObject(bucket, object string, r io.Reader) (string, error)
	HeadObject(bucket, object string, etag string) (*s3.HeadObjectOutput, error)
	GetObject(bucket, object string, startOffset, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error)
	PutObject(*s3.PutObjectInput) (string, error)
	HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
	GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error)
	GetObjectAcl(bucket, object string) (*s3.GetObjectAclOutput, error)
	GetObjectAttributes(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error)
	CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error)
	ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
	ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
	ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error)
	ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error)
	DeleteObject(bucket, object string) error
	DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error
	PutBucketAcl(*s3.PutBucketAclInput) error
	PutObjectAcl(*s3.PutObjectAclInput) error
	RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error
	UploadPart(bucket, object, uploadId string, Body io.ReadSeeker) (*s3.UploadPartOutput, error)
	UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)

	IsTaggingSupported() bool
	GetTags(bucket, object string) (map[string]string, error)
	SetTags(bucket, object string, tags map[string]string) error
	RemoveTags(bucket, object string) error
@@ -59,10 +72,6 @@ var _ Backend = &BackendUnsupported{}
func New() Backend {
	return &BackendUnsupported{}
}

func (BackendUnsupported) GetIAMConfig() ([]byte, error) {
	return nil, fmt.Errorf("not supported")
}
func (BackendUnsupported) Shutdown() {}
func (BackendUnsupported) String() string {
	return "Unsupported"
@@ -107,20 +116,20 @@ func (BackendUnsupported) CompleteMultipartUpload(bucket, object, uploadID strin
func (BackendUnsupported) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
	return s3response.ListMultipartUploadsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
	return s3response.ListPartsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectPart(bucket, object, uploadID string, part int, r io.Reader) (etag string, err error) {
func (BackendUnsupported) PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error) {
	return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) PutObject(bucket, object string, r io.Reader) (string, error) {
func (BackendUnsupported) PutObject(*s3.PutObjectInput) (string, error) {
	return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObject(bucket, object string) error {
@@ -129,10 +138,10 @@ func (BackendUnsupported) DeleteObject(bucket, object string) error {
func (BackendUnsupported) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error {
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObject(bucket, object string, startOffset, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
func (BackendUnsupported) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadObject(bucket, object string, etag string) (*s3.HeadObjectOutput, error) {
func (BackendUnsupported) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAcl(bucket, object string) (*s3.GetObjectAclOutput, error) {
@@ -144,20 +153,19 @@ func (BackendUnsupported) GetObjectAttributes(bucket, object string, attributes
func (BackendUnsupported) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
func (BackendUnsupported) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
func (BackendUnsupported) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) IsTaggingSupported() bool { return false }
func (BackendUnsupported) GetTags(bucket, object string) (map[string]string, error) {
	return nil, fmt.Errorf("not supported")
	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) SetTags(bucket, object string, tags map[string]string) error {
	return fmt.Errorf("not supported")
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) RemoveTags(bucket, object string) error {
	return fmt.Errorf("not supported")
	return s3err.GetAPIError(s3err.ErrNotImplemented)
}
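The BackendUnsupported type above doubles as an embeddable default: a concrete backend overrides only what it supports, which is exactly how the posix backend later in this diff embeds backend.BackendUnsupported. A minimal hypothetical sketch of that pattern (the package name and method bodies are invented):

```go
package mybackend

import (
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/backend"
)

// MyBackend picks up every Backend method from the embedded
// BackendUnsupported, which answers ErrNotImplemented, and overrides
// only the calls it actually implements.
type MyBackend struct {
	backend.BackendUnsupported
}

var _ backend.Backend = &MyBackend{}

func (MyBackend) String() string { return "example backend" }

func (MyBackend) ListBuckets() (*s3.ListBucketsOutput, error) {
	// A real backend would enumerate its buckets here.
	return &s3.ListBucketsOutput{}, nil
}
```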
@@ -6,6 +6,7 @@ package backend
import (
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/s3response"
	"io"
	"sync"
)
@@ -47,10 +48,7 @@ var _ Backend = &BackendMock{}
// GetBucketAclFunc: func(bucket string) (*s3.GetBucketAclOutput, error) {
// panic("mock out the GetBucketAcl method")
// },
// GetIAMConfigFunc: func() ([]byte, error) {
// panic("mock out the GetIAMConfig method")
// },
// GetObjectFunc: func(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
// GetObjectFunc: func(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
// panic("mock out the GetObject method")
// },
// GetObjectAclFunc: func(bucket string, object string) (*s3.GetObjectAclOutput, error) {
@@ -65,25 +63,22 @@ var _ Backend = &BackendMock{}
// HeadBucketFunc: func(bucket string) (*s3.HeadBucketOutput, error) {
// panic("mock out the HeadBucket method")
// },
// HeadObjectFunc: func(bucket string, object string, etag string) (*s3.HeadObjectOutput, error) {
// HeadObjectFunc: func(bucket string, object string) (*s3.HeadObjectOutput, error) {
// panic("mock out the HeadObject method")
// },
// IsTaggingSupportedFunc: func() bool {
// panic("mock out the IsTaggingSupported method")
// },
// ListBucketsFunc: func() (*s3.ListBucketsOutput, error) {
// panic("mock out the ListBuckets method")
// },
// ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
// ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
// panic("mock out the ListMultipartUploads method")
// },
// ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
// ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
// panic("mock out the ListObjectParts method")
// },
// ListObjectsFunc: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
// ListObjectsFunc: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
// panic("mock out the ListObjects method")
// },
// ListObjectsV2Func: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
// ListObjectsV2Func: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
// panic("mock out the ListObjectsV2 method")
// },
// PutBucketFunc: func(bucket string) error {
@@ -92,13 +87,13 @@ var _ Backend = &BackendMock{}
// PutBucketAclFunc: func(putBucketAclInput *s3.PutBucketAclInput) error {
// panic("mock out the PutBucketAcl method")
// },
// PutObjectFunc: func(bucket string, object string, r io.Reader) (string, error) {
// PutObjectFunc: func(putObjectInput *s3.PutObjectInput) (string, error) {
// panic("mock out the PutObject method")
// },
// PutObjectAclFunc: func(putObjectAclInput *s3.PutObjectAclInput) error {
// panic("mock out the PutObjectAcl method")
// },
// PutObjectPartFunc: func(bucket string, object string, uploadID string, part int, r io.Reader) (string, error) {
// PutObjectPartFunc: func(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error) {
// panic("mock out the PutObjectPart method")
// },
// RemoveTagsFunc: func(bucket string, object string) error {
@@ -156,11 +151,8 @@ type BackendMock struct {
	// GetBucketAclFunc mocks the GetBucketAcl method.
	GetBucketAclFunc func(bucket string) (*s3.GetBucketAclOutput, error)

	// GetIAMConfigFunc mocks the GetIAMConfig method.
	GetIAMConfigFunc func() ([]byte, error)

	// GetObjectFunc mocks the GetObject method.
	GetObjectFunc func(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error)
	GetObjectFunc func(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error)

	// GetObjectAclFunc mocks the GetObjectAcl method.
	GetObjectAclFunc func(bucket string, object string) (*s3.GetObjectAclOutput, error)
@@ -175,25 +167,22 @@ type BackendMock struct {
	HeadBucketFunc func(bucket string) (*s3.HeadBucketOutput, error)

	// HeadObjectFunc mocks the HeadObject method.
	HeadObjectFunc func(bucket string, object string, etag string) (*s3.HeadObjectOutput, error)

	// IsTaggingSupportedFunc mocks the IsTaggingSupported method.
	IsTaggingSupportedFunc func() bool
	HeadObjectFunc func(bucket string, object string) (*s3.HeadObjectOutput, error)

	// ListBucketsFunc mocks the ListBuckets method.
	ListBucketsFunc func() (*s3.ListBucketsOutput, error)

	// ListMultipartUploadsFunc mocks the ListMultipartUploads method.
	ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
	ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)

	// ListObjectPartsFunc mocks the ListObjectParts method.
	ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error)
	ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)

	// ListObjectsFunc mocks the ListObjects method.
	ListObjectsFunc func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
	ListObjectsFunc func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error)

	// ListObjectsV2Func mocks the ListObjectsV2 method.
	ListObjectsV2Func func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
	ListObjectsV2Func func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error)

	// PutBucketFunc mocks the PutBucket method.
	PutBucketFunc func(bucket string) error
@@ -202,13 +191,13 @@ type BackendMock struct {
	PutBucketAclFunc func(putBucketAclInput *s3.PutBucketAclInput) error

	// PutObjectFunc mocks the PutObject method.
	PutObjectFunc func(bucket string, object string, r io.Reader) (string, error)
	PutObjectFunc func(putObjectInput *s3.PutObjectInput) (string, error)

	// PutObjectAclFunc mocks the PutObjectAcl method.
	PutObjectAclFunc func(putObjectAclInput *s3.PutObjectAclInput) error

	// PutObjectPartFunc mocks the PutObjectPart method.
	PutObjectPartFunc func(bucket string, object string, uploadID string, part int, r io.Reader) (string, error)
	PutObjectPartFunc func(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error)

	// RemoveTagsFunc mocks the RemoveTags method.
	RemoveTagsFunc func(bucket string, object string) error
@@ -304,23 +293,16 @@ type BackendMock struct {
		// Bucket is the bucket argument value.
		Bucket string
	}
	// GetIAMConfig holds details about calls to the GetIAMConfig method.
	GetIAMConfig []struct {
	}
	// GetObject holds details about calls to the GetObject method.
	GetObject []struct {
		// Bucket is the bucket argument value.
		Bucket string
		// Object is the object argument value.
		Object string
		// StartOffset is the startOffset argument value.
		StartOffset int64
		// Length is the length argument value.
		Length int64
		// AcceptRange is the acceptRange argument value.
		AcceptRange string
		// Writer is the writer argument value.
		Writer io.Writer
		// Etag is the etag argument value.
		Etag string
	}
	// GetObjectAcl holds details about calls to the GetObjectAcl method.
	GetObjectAcl []struct {
@@ -356,11 +338,6 @@ type BackendMock struct {
		Bucket string
		// Object is the object argument value.
		Object string
		// Etag is the etag argument value.
		Etag string
	}
	// IsTaggingSupported holds details about calls to the IsTaggingSupported method.
	IsTaggingSupported []struct {
	}
	// ListBuckets holds details about calls to the ListBuckets method.
	ListBuckets []struct {
@@ -421,12 +398,8 @@ type BackendMock struct {
	}
	// PutObject holds details about calls to the PutObject method.
	PutObject []struct {
		// Bucket is the bucket argument value.
		Bucket string
		// Object is the object argument value.
		Object string
		// R is the r argument value.
		R io.Reader
		// PutObjectInput is the putObjectInput argument value.
		PutObjectInput *s3.PutObjectInput
	}
	// PutObjectAcl holds details about calls to the PutObjectAcl method.
	PutObjectAcl []struct {
@@ -443,6 +416,8 @@ type BackendMock struct {
		UploadID string
		// Part is the part argument value.
		Part int
		// Length is the length argument value.
		Length int64
		// R is the r argument value.
		R io.Reader
	}
@@ -503,14 +478,12 @@ type BackendMock struct {
	lockDeleteObject sync.RWMutex
	lockDeleteObjects sync.RWMutex
	lockGetBucketAcl sync.RWMutex
	lockGetIAMConfig sync.RWMutex
	lockGetObject sync.RWMutex
	lockGetObjectAcl sync.RWMutex
	lockGetObjectAttributes sync.RWMutex
	lockGetTags sync.RWMutex
	lockHeadBucket sync.RWMutex
	lockHeadObject sync.RWMutex
	lockIsTaggingSupported sync.RWMutex
	lockListBuckets sync.RWMutex
	lockListMultipartUploads sync.RWMutex
	lockListObjectParts sync.RWMutex
@@ -870,57 +843,26 @@ func (mock *BackendMock) GetBucketAclCalls() []struct {
	return calls
}

// GetIAMConfig calls GetIAMConfigFunc.
func (mock *BackendMock) GetIAMConfig() ([]byte, error) {
	if mock.GetIAMConfigFunc == nil {
		panic("BackendMock.GetIAMConfigFunc: method is nil but Backend.GetIAMConfig was just called")
	}
	callInfo := struct {
	}{}
	mock.lockGetIAMConfig.Lock()
	mock.calls.GetIAMConfig = append(mock.calls.GetIAMConfig, callInfo)
	mock.lockGetIAMConfig.Unlock()
	return mock.GetIAMConfigFunc()
}

// GetIAMConfigCalls gets all the calls that were made to GetIAMConfig.
// Check the length with:
//
// len(mockedBackend.GetIAMConfigCalls())
func (mock *BackendMock) GetIAMConfigCalls() []struct {
} {
	var calls []struct {
	}
	mock.lockGetIAMConfig.RLock()
	calls = mock.calls.GetIAMConfig
	mock.lockGetIAMConfig.RUnlock()
	return calls
}

// GetObject calls GetObjectFunc.
func (mock *BackendMock) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
func (mock *BackendMock) GetObject(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
	if mock.GetObjectFunc == nil {
		panic("BackendMock.GetObjectFunc: method is nil but Backend.GetObject was just called")
	}
	callInfo := struct {
		Bucket string
		Object string
		StartOffset int64
		Length int64
		AcceptRange string
		Writer io.Writer
		Etag string
	}{
		Bucket: bucket,
		Object: object,
		StartOffset: startOffset,
		Length: length,
		AcceptRange: acceptRange,
		Writer: writer,
		Etag: etag,
	}
	mock.lockGetObject.Lock()
	mock.calls.GetObject = append(mock.calls.GetObject, callInfo)
	mock.lockGetObject.Unlock()
	return mock.GetObjectFunc(bucket, object, startOffset, length, writer, etag)
	return mock.GetObjectFunc(bucket, object, acceptRange, writer)
}

// GetObjectCalls gets all the calls that were made to GetObject.
@@ -930,18 +872,14 @@ func (mock *BackendMock) GetObject(bucket string, object string, startOffset int
func (mock *BackendMock) GetObjectCalls() []struct {
	Bucket string
	Object string
	StartOffset int64
	Length int64
	AcceptRange string
	Writer io.Writer
	Etag string
} {
	var calls []struct {
		Bucket string
		Object string
		StartOffset int64
		Length int64
		AcceptRange string
		Writer io.Writer
		Etag string
	}
	mock.lockGetObject.RLock()
	calls = mock.calls.GetObject
@@ -1094,23 +1032,21 @@ func (mock *BackendMock) HeadBucketCalls() []struct {
}

// HeadObject calls HeadObjectFunc.
func (mock *BackendMock) HeadObject(bucket string, object string, etag string) (*s3.HeadObjectOutput, error) {
func (mock *BackendMock) HeadObject(bucket string, object string) (*s3.HeadObjectOutput, error) {
	if mock.HeadObjectFunc == nil {
		panic("BackendMock.HeadObjectFunc: method is nil but Backend.HeadObject was just called")
	}
	callInfo := struct {
		Bucket string
		Object string
		Etag string
	}{
		Bucket: bucket,
		Object: object,
		Etag: etag,
	}
	mock.lockHeadObject.Lock()
	mock.calls.HeadObject = append(mock.calls.HeadObject, callInfo)
	mock.lockHeadObject.Unlock()
	return mock.HeadObjectFunc(bucket, object, etag)
	return mock.HeadObjectFunc(bucket, object)
}

// HeadObjectCalls gets all the calls that were made to HeadObject.
@@ -1120,12 +1056,10 @@ func (mock *BackendMock) HeadObject(bucket string, object string, etag string) (
func (mock *BackendMock) HeadObjectCalls() []struct {
	Bucket string
	Object string
	Etag string
} {
	var calls []struct {
		Bucket string
		Object string
		Etag string
	}
	mock.lockHeadObject.RLock()
	calls = mock.calls.HeadObject
@@ -1133,33 +1067,6 @@ func (mock *BackendMock) HeadObjectCalls() []struct {
	return calls
}

// IsTaggingSupported calls IsTaggingSupportedFunc.
func (mock *BackendMock) IsTaggingSupported() bool {
	if mock.IsTaggingSupportedFunc == nil {
		panic("BackendMock.IsTaggingSupportedFunc: method is nil but Backend.IsTaggingSupported was just called")
	}
	callInfo := struct {
	}{}
	mock.lockIsTaggingSupported.Lock()
	mock.calls.IsTaggingSupported = append(mock.calls.IsTaggingSupported, callInfo)
	mock.lockIsTaggingSupported.Unlock()
	return mock.IsTaggingSupportedFunc()
}

// IsTaggingSupportedCalls gets all the calls that were made to IsTaggingSupported.
// Check the length with:
//
// len(mockedBackend.IsTaggingSupportedCalls())
func (mock *BackendMock) IsTaggingSupportedCalls() []struct {
} {
	var calls []struct {
	}
	mock.lockIsTaggingSupported.RLock()
	calls = mock.calls.IsTaggingSupported
	mock.lockIsTaggingSupported.RUnlock()
	return calls
}

// ListBuckets calls ListBucketsFunc.
func (mock *BackendMock) ListBuckets() (*s3.ListBucketsOutput, error) {
	if mock.ListBucketsFunc == nil {
@@ -1188,7 +1095,7 @@ func (mock *BackendMock) ListBucketsCalls() []struct {
}

// ListMultipartUploads calls ListMultipartUploadsFunc.
func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
	if mock.ListMultipartUploadsFunc == nil {
		panic("BackendMock.ListMultipartUploadsFunc: method is nil but Backend.ListMultipartUploads was just called")
	}
@@ -1220,7 +1127,7 @@ func (mock *BackendMock) ListMultipartUploadsCalls() []struct {
}

// ListObjectParts calls ListObjectPartsFunc.
func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
	if mock.ListObjectPartsFunc == nil {
		panic("BackendMock.ListObjectPartsFunc: method is nil but Backend.ListObjectParts was just called")
	}
@@ -1268,7 +1175,7 @@ func (mock *BackendMock) ListObjectPartsCalls() []struct {
}

// ListObjects calls ListObjectsFunc.
func (mock *BackendMock) ListObjects(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
func (mock *BackendMock) ListObjects(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
	if mock.ListObjectsFunc == nil {
		panic("BackendMock.ListObjectsFunc: method is nil but Backend.ListObjects was just called")
	}
@@ -1316,7 +1223,7 @@ func (mock *BackendMock) ListObjectsCalls() []struct {
}

// ListObjectsV2 calls ListObjectsV2Func.
func (mock *BackendMock) ListObjectsV2(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
func (mock *BackendMock) ListObjectsV2(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
	if mock.ListObjectsV2Func == nil {
		panic("BackendMock.ListObjectsV2Func: method is nil but Backend.ListObjectsV2 was just called")
	}
@@ -1428,23 +1335,19 @@ func (mock *BackendMock) PutBucketAclCalls() []struct {
}

// PutObject calls PutObjectFunc.
func (mock *BackendMock) PutObject(bucket string, object string, r io.Reader) (string, error) {
func (mock *BackendMock) PutObject(putObjectInput *s3.PutObjectInput) (string, error) {
	if mock.PutObjectFunc == nil {
		panic("BackendMock.PutObjectFunc: method is nil but Backend.PutObject was just called")
	}
	callInfo := struct {
		Bucket string
		Object string
		R io.Reader
		PutObjectInput *s3.PutObjectInput
	}{
		Bucket: bucket,
		Object: object,
		R: r,
		PutObjectInput: putObjectInput,
	}
	mock.lockPutObject.Lock()
	mock.calls.PutObject = append(mock.calls.PutObject, callInfo)
	mock.lockPutObject.Unlock()
	return mock.PutObjectFunc(bucket, object, r)
	return mock.PutObjectFunc(putObjectInput)
}

// PutObjectCalls gets all the calls that were made to PutObject.
@@ -1452,14 +1355,10 @@ func (mock *BackendMock) PutObject(bucket string, object string, r io.Reader) (s
//
// len(mockedBackend.PutObjectCalls())
func (mock *BackendMock) PutObjectCalls() []struct {
	Bucket string
	Object string
	R io.Reader
	PutObjectInput *s3.PutObjectInput
} {
	var calls []struct {
		Bucket string
		Object string
		R io.Reader
		PutObjectInput *s3.PutObjectInput
	}
	mock.lockPutObject.RLock()
	calls = mock.calls.PutObject
@@ -1500,7 +1399,7 @@ func (mock *BackendMock) PutObjectAclCalls() []struct {
}

// PutObjectPart calls PutObjectPartFunc.
func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID string, part int, r io.Reader) (string, error) {
func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error) {
	if mock.PutObjectPartFunc == nil {
		panic("BackendMock.PutObjectPartFunc: method is nil but Backend.PutObjectPart was just called")
	}
@@ -1509,18 +1408,20 @@ func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID st
		Object string
		UploadID string
		Part int
		Length int64
		R io.Reader
	}{
		Bucket: bucket,
		Object: object,
		UploadID: uploadID,
		Part: part,
		Length: length,
		R: r,
	}
	mock.lockPutObjectPart.Lock()
	mock.calls.PutObjectPart = append(mock.calls.PutObjectPart, callInfo)
	mock.lockPutObjectPart.Unlock()
	return mock.PutObjectPartFunc(bucket, object, uploadID, part, r)
	return mock.PutObjectPartFunc(bucket, object, uploadID, part, length, r)
}

// PutObjectPartCalls gets all the calls that were made to PutObjectPart.
@@ -1532,6 +1433,7 @@ func (mock *BackendMock) PutObjectPartCalls() []struct {
	Object string
	UploadID string
	Part int
	Length int64
	R io.Reader
} {
	var calls []struct {
@@ -1539,6 +1441,7 @@ func (mock *BackendMock) PutObjectPartCalls() []struct {
		Object string
		UploadID string
		Part int
		Length int64
		R io.Reader
	}
	mock.lockPutObjectPart.RLock()
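The moq-generated mock above is what the controller tests program against; the commented constructor snippet shows the intent. As a quick hypothetical illustration (not a test from the repository) of the updated PutObjectPart signature and its call recorder:

```go
package backend

import (
	"io"
	"strings"
	"testing"
)

func TestPutObjectPartMockSketch(t *testing.T) {
	mock := &BackendMock{
		PutObjectPartFunc: func(bucket, object, uploadID string, part int, length int64, r io.Reader) (string, error) {
			// Drain the part body the way a real backend would.
			if _, err := io.Copy(io.Discard, r); err != nil {
				return "", err
			}
			return "etag-1", nil
		},
	}

	etag, err := mock.PutObjectPart("bkt", "obj", "upload-1", 1, 5, strings.NewReader("hello"))
	if err != nil || etag != "etag-1" {
		t.Fatalf("unexpected result: %v %v", etag, err)
	}

	// The generated *Calls accessor records every invocation,
	// including the newly added length argument.
	calls := mock.PutObjectPartCalls()
	if len(calls) != 1 || calls[0].Length != 5 {
		t.Fatalf("unexpected call record: %+v", calls)
	}
}
```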
@@ -1,3 +1,17 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package backend

import (
@@ -7,7 +21,7 @@ import (

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/scoutgw/s3err"
	"github.com/versity/versitygw/s3err"
)

func TestBackend_ListBuckets(t *testing.T) {
@@ -1,11 +1,34 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package backend

import (
	"errors"
	"io/fs"
	"strconv"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

var (
	// RFC3339TimeFormat RFC3339 time format
	RFC3339TimeFormat = "2006-01-02T15:04:05.999Z"
)

func IsValidBucketName(name string) bool { return true }

type ByBucketName []types.Bucket
@@ -14,6 +37,12 @@ func (d ByBucketName) Len() int { return len(d) }
func (d ByBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByBucketName) Less(i, j int) bool { return *d[i].Name < *d[j].Name }

type ByObjectName []types.Object

func (d ByObjectName) Len() int { return len(d) }
func (d ByObjectName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByObjectName) Less(i, j int) bool { return *d[i].Key < *d[j].Key }

func GetStringPtr(s string) *string {
	return &s
}
@@ -21,3 +50,36 @@ func GetStringPtr(s string) *string {
func GetTimePtr(t time.Time) *time.Time {
	return &t
}

func ParseRange(file fs.FileInfo, acceptRange string) (int64, int64, error) {
	if acceptRange == "" {
		return 0, file.Size(), nil
	}

	rangeKv := strings.Split(acceptRange, "=")

	if len(rangeKv) < 2 {
		return 0, 0, errors.New("invalid range parameter")
	}

	bRange := strings.Split(rangeKv[1], "-")
	if len(bRange) < 2 {
		return 0, 0, errors.New("invalid range parameter")
	}

	startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
	if err != nil {
		return 0, 0, errors.New("invalid range parameter")
	}

	endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
	if err != nil {
		return 0, 0, errors.New("invalid range parameter")
	}

	if endOffset < startOffset {
		return 0, 0, errors.New("invalid range parameter")
	}

	return int64(startOffset), int64(endOffset - startOffset + 1), nil
}
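ParseRange above converts a Range-style header into an (offset, length) pair, where the length is endOffset - startOffset + 1. A small hypothetical test, assuming the file lives in package backend (the common.go filename is an assumption; any fs.FileInfo works, since only Size() is consulted for the empty-header case):

```go
package backend

import (
	"os"
	"testing"
)

func TestParseRangeSketch(t *testing.T) {
	fi, err := os.Stat("common.go")
	if err != nil {
		t.Skip(err)
	}

	// "bytes=0-99" means the first 100 bytes: offset 0, length 99-0+1 = 100.
	off, length, err := ParseRange(fi, "bytes=0-99")
	if err != nil || off != 0 || length != 100 {
		t.Fatalf("got off=%d len=%d err=%v", off, length, err)
	}

	// An empty header falls back to the whole file.
	off, length, err = ParseRange(fi, "")
	if err != nil || off != 0 || length != fi.Size() {
		t.Fatalf("got off=%d len=%d err=%v", off, length, err)
	}
}
```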
@@ -1,11 +1,28 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package posix
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@@ -17,11 +34,14 @@ import (
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pkg/xattr"
|
||||
"github.com/versity/scoutgw/backend"
|
||||
"github.com/versity/scoutgw/s3err"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3response"
|
||||
)
|
||||
|
||||
type Posix struct {
|
||||
rootfd *os.File
|
||||
rootdir string
|
||||
backend.BackendUnsupported
|
||||
}
|
||||
|
||||
@@ -32,13 +52,30 @@ const (
|
||||
metaTmpMultipartDir = metaTmpDir + "/multipart"
|
||||
onameAttr = "user.objname"
|
||||
tagHdr = "X-Amz-Tagging"
|
||||
dirObjKey = "user.dirisobject"
|
||||
emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e"
|
||||
)
|
||||
|
||||
var (
|
||||
newObjUID = 0
|
||||
newObjGID = 0
|
||||
)
|
||||
func New(rootdir string) (*Posix, error) {
|
||||
err := os.Chdir(rootdir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("chdir %v: %w", rootdir, err)
|
||||
}
|
||||
|
||||
f, err := os.Open(rootdir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open %v: %w", rootdir, err)
|
||||
}
|
||||
|
||||
return &Posix{rootfd: f, rootdir: rootdir}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) Shutdown() {
|
||||
p.rootfd.Close()
|
||||
}
|
||||
|
||||
func (p *Posix) Sring() string {
|
||||
return "Posix Gateway"
|
||||
}
|
||||
|
||||
func (p *Posix) ListBuckets() (*s3.ListBucketsOutput, error) {
|
||||
entries, err := os.ReadDir(".")
|
||||
@@ -74,7 +111,7 @@ func (p *Posix) ListBuckets() (*s3.ListBucketsOutput, error) {
|
||||
|
||||
func (p *Posix) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
|
||||
_, err := os.Lstat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -98,7 +135,7 @@ func (p *Posix) PutBucket(bucket string) error {
|
||||
|
||||
func (p *Posix) DeleteBucket(bucket string) error {
|
||||
names, err := os.ReadDir(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -109,7 +146,7 @@ func (p *Posix) DeleteBucket(bucket string) error {
|
||||
// if .sgwtmp is only item in directory
|
||||
// then clean this up before trying to remove the bucket
|
||||
err = os.RemoveAll(filepath.Join(bucket, metaTmpDir))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("remove temp dir: %w", err)
|
||||
}
|
||||
}
|
||||
@@ -130,7 +167,7 @@ func (p *Posix) CreateMultipartUpload(mpu *s3.CreateMultipartUploadInput) (*s3.C
|
||||
object := *mpu.Key
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -178,7 +215,7 @@ func (p *Posix) CreateMultipartUpload(mpu *s3.CreateMultipartUploadInput) (*s3.C
|
||||
|
||||
func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -195,8 +232,10 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
|
||||
// check all parts ok
|
||||
last := len(parts) - 1
|
||||
partsize := int64(0)
|
||||
var totalsize int64
|
||||
for i, p := range parts {
|
||||
fi, err := os.Lstat(filepath.Join(objdir, uploadID, fmt.Sprintf("%v", p.PartNumber)))
|
||||
partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", p.PartNumber))
|
||||
fi, err := os.Lstat(partPath)
|
||||
if err != nil {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
|
||||
}
|
||||
@@ -204,17 +243,25 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
|
||||
if i == 0 {
|
||||
partsize = fi.Size()
|
||||
}
|
||||
totalsize += fi.Size()
|
||||
// all parts except the last need to be the same size
|
||||
if i < last && partsize != fi.Size() {
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
|
||||
}
|
||||
|
||||
b, err := xattr.Get(partPath, "user.etag")
|
||||
etag := string(b)
|
||||
if err != nil {
|
||||
etag = ""
|
||||
}
|
||||
parts[i].ETag = &etag
|
||||
}
|
||||
|
||||
f, err := openTmpFile(".")
|
||||
f, err := openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, totalsize)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open temp file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
defer f.cleanup()
|
||||
|
||||
for _, p := range parts {
|
||||
pf, err := os.Open(filepath.Join(objdir, uploadID, fmt.Sprintf("%v", p.PartNumber)))
|
||||
@@ -236,15 +283,12 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
|
||||
dir := filepath.Dir(objname)
|
||||
if dir != "" {
|
||||
if err = mkdirAll(dir, os.FileMode(0755), bucket, object); err != nil {
|
||||
if err != nil && os.IsExist(err) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrObjectParentIsFile)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("make object parent directories: %w", err)
|
||||
return nil, s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)
|
||||
}
|
||||
}
|
||||
}
|
||||
err = linkTmpFile(f, objname)
|
||||
err = f.link()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("link object in namespace: %w", err)
|
||||
}
|
||||
@@ -268,15 +312,6 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
|
||||
return nil, fmt.Errorf("set etag attr: %w", err)
|
||||
}
|
||||
|
||||
if newObjUID != 0 || newObjGID != 0 {
|
||||
err = os.Chown(objname, newObjUID, newObjGID)
|
||||
if err != nil {
|
||||
// cleanup object if returning error
|
||||
os.Remove(objname)
|
||||
return nil, fmt.Errorf("set object uid/gid: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup tmp dirs
|
||||
os.RemoveAll(upiddir)
|
||||
// use Remove for objdir in case there are still other uploads
|
||||
@@ -295,7 +330,7 @@ func (p *Posix) checkUploadIDExists(bucket, object, uploadID string) ([32]byte,
|
||||
objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
|
||||
|
||||
_, err := os.Stat(filepath.Join(objdir, uploadID))
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return [32]byte{}, s3err.GetAPIError(s3err.ErrNoSuchUpload)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -304,7 +339,7 @@ func (p *Posix) checkUploadIDExists(bucket, object, uploadID string) ([32]byte,
|
||||
return sum, nil
|
||||
}
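Aside on the hunks above: the multipart code keys its scratch area off a sha256 of the object name, so every upload for the same key lands under one directory. A minimal, hypothetical sketch of that path layout follows; tmpDir stands in for the backend's metaTmpMultipartDir constant, whose value is defined outside this diff, and the helper name is illustrative, not the gateway's API.

package main

import (
	"crypto/sha256"
	"fmt"
	"path/filepath"
)

// multipartPaths sketches the on-disk layout used by the multipart code:
// bucket/<tmpDir>/<sha256(object)>/<uploadID>/<part>.
func multipartPaths(bucket, tmpDir, object, uploadID string, part int) (string, string) {
	sum := sha256.Sum256([]byte(object))
	objdir := filepath.Join(bucket, tmpDir, fmt.Sprintf("%x", sum))
	partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", part))
	return objdir, partPath
}

func main() {
	objdir, partPath := multipartPaths("mybucket", ".tmp-multipart", "photos/cat.jpg", "upload-1", 2)
	fmt.Println(objdir)
	fmt.Println(partPath)
}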
|
||||
|
||||
func loadUserMetaData(path string, m map[string]string) (tag, contentType, contentEncoding string) {
|
||||
func loadUserMetaData(path string, m map[string]string) (contentType, contentEncoding string) {
|
||||
ents, err := xattr.List(path)
|
||||
if err != nil || len(ents) == 0 {
|
||||
return
|
||||
@@ -324,16 +359,7 @@ func loadUserMetaData(path string, m map[string]string) (tag, contentType, conte
|
||||
m[strings.TrimPrefix(e, "user.")] = string(b)
|
||||
}
|
||||
|
||||
b, err := xattr.Get(path, "user."+tagHdr)
|
||||
tag = string(b)
|
||||
if err != nil {
|
||||
tag = ""
|
||||
}
|
||||
if tag != "" {
|
||||
m[tagHdr] = tag
|
||||
}
|
||||
|
||||
b, err = xattr.Get(path, "user.content-type")
|
||||
b, err := xattr.Get(path, "user.content-type")
|
||||
contentType = string(b)
|
||||
if err != nil {
|
||||
contentType = ""
|
||||
@@ -406,12 +432,6 @@ func mkdirAll(path string, perm os.FileMode, bucket, object string) error {
|
||||
}
|
||||
return s3err.GetAPIError(s3err.ErrObjectParentIsFile)
|
||||
}
|
||||
if newObjUID != 0 || newObjGID != 0 {
|
||||
err = os.Chown(path, newObjUID, newObjGID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("set parent ownership: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -443,7 +463,7 @@ func (p *Posix) AbortMultipartUpload(mpu *s3.AbortMultipartUploadInput) error {
|
||||
uploadID := *mpu.UploadId
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -467,24 +487,40 @@ func (p *Posix) AbortMultipartUpload(mpu *s3.AbortMultipartUploadInput) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
|
||||
func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
|
||||
bucket := *mpu.Bucket
|
||||
var delimiter string
|
||||
if mpu.Delimiter != nil {
|
||||
delimiter = *mpu.Delimiter
|
||||
}
|
||||
var prefix string
|
||||
if mpu.Prefix != nil {
|
||||
prefix = *mpu.Prefix
|
||||
}
|
||||
|
||||
var lmu s3response.ListMultipartUploadsResponse
|
||||
|
||||
_, err := os.Stat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return lmu, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
return lmu, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
// ignore readdir error and use the empty list returned
|
||||
objs, _ := os.ReadDir(filepath.Join(bucket, metaTmpMultipartDir))
|
||||
|
||||
var uploads []types.MultipartUpload
|
||||
var uploads []s3response.Upload
|
||||
|
||||
keyMarker := *mpu.KeyMarker
|
||||
uploadIDMarker := *mpu.UploadIdMarker
|
||||
var keyMarker string
|
||||
if mpu.KeyMarker != nil {
|
||||
keyMarker = *mpu.KeyMarker
|
||||
}
|
||||
var uploadIDMarker string
|
||||
if mpu.UploadIdMarker != nil {
|
||||
uploadIDMarker = *mpu.UploadIdMarker
|
||||
}
|
||||
var pastMarker bool
|
||||
if keyMarker == "" && uploadIDMarker == "" {
|
||||
pastMarker = true
|
||||
@@ -500,7 +536,7 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (*s3.Lis
|
||||
continue
|
||||
}
|
||||
objectName := string(b)
|
||||
if !strings.HasPrefix(objectName, *mpu.Prefix) {
|
||||
if mpu.Prefix != nil && !strings.HasPrefix(objectName, *mpu.Prefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -526,64 +562,71 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (*s3.Lis
|
||||
upiddir := filepath.Join(bucket, metaTmpMultipartDir, obj.Name(), upid.Name())
|
||||
loadUserMetaData(upiddir, userMetaData)
|
||||
|
||||
fi, err := upid.Info()
|
||||
if err != nil {
|
||||
return lmu, fmt.Errorf("stat %q: %w", upid.Name(), err)
|
||||
}
|
||||
|
||||
uploadID := upid.Name()
|
||||
uploads = append(uploads, types.MultipartUpload{
|
||||
Key: &objectName,
|
||||
UploadId: &uploadID,
|
||||
uploads = append(uploads, s3response.Upload{
|
||||
Key: objectName,
|
||||
UploadID: uploadID,
|
||||
Initiated: fi.ModTime().Format(backend.RFC3339TimeFormat),
|
||||
})
|
||||
if len(uploads) == int(mpu.MaxUploads) {
|
||||
return &s3.ListMultipartUploadsOutput{
|
||||
Bucket: &bucket,
|
||||
Delimiter: mpu.Delimiter,
|
||||
return s3response.ListMultipartUploadsResponse{
|
||||
Bucket: bucket,
|
||||
Delimiter: delimiter,
|
||||
IsTruncated: i != len(objs) || j != len(upids),
|
||||
KeyMarker: &keyMarker,
|
||||
MaxUploads: mpu.MaxUploads,
|
||||
NextKeyMarker: &objectName,
|
||||
NextUploadIdMarker: &uploadID,
|
||||
Prefix: mpu.Prefix,
|
||||
UploadIdMarker: mpu.UploadIdMarker,
|
||||
KeyMarker: keyMarker,
|
||||
MaxUploads: int(mpu.MaxUploads),
|
||||
NextKeyMarker: objectName,
|
||||
NextUploadIDMarker: uploadID,
|
||||
Prefix: prefix,
|
||||
UploadIDMarker: uploadIDMarker,
|
||||
Uploads: uploads,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &s3.ListMultipartUploadsOutput{
|
||||
Bucket: &bucket,
|
||||
Delimiter: mpu.Delimiter,
|
||||
KeyMarker: &keyMarker,
|
||||
MaxUploads: mpu.MaxUploads,
|
||||
Prefix: mpu.Prefix,
|
||||
UploadIdMarker: mpu.UploadIdMarker,
|
||||
return s3response.ListMultipartUploadsResponse{
|
||||
Bucket: bucket,
|
||||
Delimiter: delimiter,
|
||||
KeyMarker: keyMarker,
|
||||
MaxUploads: int(mpu.MaxUploads),
|
||||
Prefix: prefix,
|
||||
UploadIDMarker: uploadIDMarker,
|
||||
Uploads: uploads,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
|
||||
func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
|
||||
var lpr s3response.ListPartsResponse
|
||||
_, err := os.Stat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return lpr, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
return lpr, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
sum, err := p.checkUploadIDExists(bucket, object, uploadID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return lpr, err
|
||||
}
|
||||
|
||||
objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
|
||||
|
||||
ents, err := os.ReadDir(filepath.Join(objdir, uploadID))
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchUpload)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return lpr, s3err.GetAPIError(s3err.ErrNoSuchUpload)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("readdir upload: %w", err)
|
||||
return lpr, fmt.Errorf("readdir upload: %w", err)
|
||||
}
|
||||
|
||||
var parts []types.Part
|
||||
var parts []s3response.Part
|
||||
for _, e := range ents {
|
||||
pn, _ := strconv.Atoi(e.Name())
|
||||
if pn <= partNumberMarker {
|
||||
@@ -602,10 +645,10 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
|
||||
continue
|
||||
}
|
||||
|
||||
parts = append(parts, types.Part{
|
||||
PartNumber: int32(pn),
|
||||
ETag: &etag,
|
||||
LastModified: backend.GetTimePtr(fi.ModTime()),
|
||||
parts = append(parts, s3response.Part{
|
||||
PartNumber: pn,
|
||||
ETag: etag,
|
||||
LastModified: fi.ModTime().Format(backend.RFC3339TimeFormat),
|
||||
Size: fi.Size(),
|
||||
})
|
||||
}
|
||||
@@ -614,12 +657,12 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
|
||||
func(i int, j int) bool { return parts[i].PartNumber < parts[j].PartNumber })
|
||||
|
||||
oldLen := len(parts)
|
||||
if len(parts) > maxParts {
|
||||
if maxParts > 0 && len(parts) > maxParts {
|
||||
parts = parts[:maxParts]
|
||||
}
|
||||
newLen := len(parts)
|
||||
|
||||
nextpart := int32(0)
|
||||
nextpart := 0
|
||||
if len(parts) != 0 {
|
||||
nextpart = parts[len(parts)-1].PartNumber
|
||||
}
|
||||
@@ -628,15 +671,15 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
|
||||
upiddir := filepath.Join(objdir, uploadID)
|
||||
loadUserMetaData(upiddir, userMetaData)
|
||||
|
||||
return &s3.ListPartsOutput{
|
||||
Bucket: &bucket,
|
||||
return s3response.ListPartsResponse{
|
||||
Bucket: bucket,
|
||||
IsTruncated: oldLen != newLen,
|
||||
Key: &object,
|
||||
MaxParts: int32(maxParts),
|
||||
NextPartNumberMarker: backend.GetStringPtr(fmt.Sprintf("%v", nextpart)),
|
||||
PartNumberMarker: backend.GetStringPtr(fmt.Sprintf("%v", partNumberMarker)),
|
||||
Key: object,
|
||||
MaxParts: maxParts,
|
||||
NextPartNumberMarker: nextpart,
|
||||
PartNumberMarker: partNumberMarker,
|
||||
Parts: parts,
|
||||
UploadId: &uploadID,
|
||||
UploadID: uploadID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -644,20 +687,25 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
|
||||
// func (p *Posix) CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error) {
|
||||
// }
|
||||
|
||||
func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, r io.Reader) (string, error) {
|
||||
func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (string, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
f, err := openTmpFile(".")
|
||||
sum := sha256.Sum256([]byte(object))
|
||||
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
|
||||
partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", part))
|
||||
|
||||
f, err := openTmpFile(filepath.Join(bucket, objdir),
|
||||
bucket, partPath, length)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("open temp file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
defer f.cleanup()
|
||||
|
||||
hash := md5.New()
|
||||
tr := io.TeeReader(r, hash)
|
||||
@@ -666,103 +714,87 @@ func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, r io.Re
|
||||
return "", fmt.Errorf("write part data: %w", err)
|
||||
}
|
||||
|
||||
sum := sha256.Sum256([]byte(object))
|
||||
objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
|
||||
partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", part))
|
||||
|
||||
err = linkTmpFile(f, partPath)
|
||||
err = f.link()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("link object in namespace: %w", err)
|
||||
}
|
||||
|
||||
dataSum := hash.Sum(nil)
|
||||
etag := hex.EncodeToString(dataSum[:])
|
||||
etag := hex.EncodeToString(dataSum)
|
||||
xattr.Set(partPath, "user.etag", []byte(etag))
|
||||
|
||||
return etag, nil
|
||||
}
|
||||
|
||||
func (p *Posix) PutObject(bucket, object string, r io.Reader) (string, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
|
||||
_, err := os.Stat(*po.Bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
name := filepath.Join(bucket, object)
|
||||
name := filepath.Join(*po.Bucket, *po.Key)
|
||||
|
||||
etag := ""
|
||||
|
||||
if strings.HasSuffix(object, "/") {
|
||||
if strings.HasSuffix(*po.Key, "/") {
|
||||
// object is directory
|
||||
err = mkdirAll(name, os.FileMode(0755), bucket, object)
|
||||
err = mkdirAll(name, os.FileMode(0755), *po.Bucket, *po.Key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// TODO: set user attrs
|
||||
//for k, v := range metadata {
|
||||
// xattr.Set(name, "user."+k, []byte(v))
|
||||
//}
|
||||
for k, v := range po.Metadata {
|
||||
xattr.Set(name, "user."+k, []byte(v))
|
||||
}
|
||||
|
||||
// set our tag that this dir was specifically put
|
||||
xattr.Set(name, dirObjKey, nil)
|
||||
} else {
|
||||
// object is file
|
||||
f, err := openTmpFile(".")
|
||||
// set etag attribute to signify this dir was specifically put
|
||||
xattr.Set(name, "user.etag", []byte(emptyMD5))
|
||||
|
||||
return emptyMD5, nil
|
||||
}
|
||||
|
||||
// object is file
|
||||
f, err := openTmpFile(filepath.Join(*po.Bucket, metaTmpDir),
|
||||
*po.Bucket, *po.Key, po.ContentLength)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("open temp file: %w", err)
|
||||
}
|
||||
defer f.cleanup()
|
||||
|
||||
hash := md5.New()
|
||||
rdr := io.TeeReader(po.Body, hash)
|
||||
_, err = io.Copy(f, rdr)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("write object data: %w", err)
|
||||
}
|
||||
dir := filepath.Dir(name)
|
||||
if dir != "" {
|
||||
err = mkdirAll(dir, os.FileMode(0755), *po.Bucket, *po.Key)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("open temp file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// TODO: fallocate based on content length
|
||||
|
||||
hash := md5.New()
|
||||
rdr := io.TeeReader(r, hash)
|
||||
_, err = io.Copy(f, rdr)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return "", fmt.Errorf("write object data: %w", err)
|
||||
}
|
||||
dir := filepath.Dir(name)
|
||||
if dir != "" {
|
||||
err = mkdirAll(dir, os.FileMode(0755), bucket, object)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return "", fmt.Errorf("make object parent directories: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = linkTmpFile(f, name)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("link object in namespace: %w", err)
|
||||
}
|
||||
|
||||
// TODO: set user attrs
|
||||
//for k, v := range metadata {
|
||||
// xattr.Set(name, "user."+k, []byte(v))
|
||||
//}
|
||||
|
||||
dataSum := hash.Sum(nil)
|
||||
etag := hex.EncodeToString(dataSum[:])
|
||||
xattr.Set(name, "user.etag", []byte(etag))
|
||||
|
||||
if newObjUID != 0 || newObjGID != 0 {
|
||||
err = os.Chown(name, newObjUID, newObjGID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("set object uid/gid: %v", err)
|
||||
}
|
||||
return "", s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)
|
||||
}
|
||||
}
|
||||
|
||||
err = f.link()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("link object in namespace: %w", err)
|
||||
}
|
||||
|
||||
for k, v := range po.Metadata {
|
||||
xattr.Set(name, "user."+k, []byte(v))
|
||||
}
|
||||
|
||||
dataSum := hash.Sum(nil)
|
||||
etag := hex.EncodeToString(dataSum[:])
|
||||
xattr.Set(name, "user.etag", []byte(etag))
|
||||
|
||||
return etag, nil
|
||||
}
|
||||
|
||||
func (p *Posix) DeleteObject(bucket, object string) error {
|
||||
_, err := os.Stat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -770,7 +802,7 @@ func (p *Posix) DeleteObject(bucket, object string) error {
|
||||
}
|
||||
|
||||
os.Remove(filepath.Join(bucket, object))
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -795,7 +827,7 @@ func (p *Posix) removeParents(bucket, object string) error {
|
||||
break
|
||||
}
|
||||
|
||||
_, err := xattr.Get(parent, dirObjKey)
|
||||
_, err := xattr.Get(parent, "user.etag")
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
@@ -822,9 +854,9 @@ func (p *Posix) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) GetObject(bucket, object string, startOffset, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
|
||||
func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -833,20 +865,25 @@ func (p *Posix) GetObject(bucket, object string, startOffset, length int64, writ
|
||||
|
||||
objPath := filepath.Join(bucket, object)
|
||||
fi, err := os.Stat(objPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat object: %w", err)
|
||||
}
|
||||
|
||||
startOffset, length, err := backend.ParseRange(fi, acceptRange)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if startOffset+length > fi.Size() {
|
||||
// TODO: is ErrInvalidRequest correct here?
|
||||
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
|
||||
}
|
||||
|
||||
f, err := os.Open(objPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -862,23 +899,34 @@ func (p *Posix) GetObject(bucket, object string, startOffset, length int64, writ
|
||||
|
||||
userMetaData := make(map[string]string)
|
||||
|
||||
_, contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
|
||||
contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
|
||||
|
||||
b, err := xattr.Get(objPath, "user.etag")
|
||||
etag := string(b)
|
||||
if err != nil {
|
||||
etag = ""
|
||||
}
|
||||
|
||||
tags, err := p.getXattrTags(bucket, object)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get object tags: %w", err)
|
||||
}
|
||||
|
||||
// TODO: fill range request header?
|
||||
// TODO: parse tags for tag count?
|
||||
return &s3.GetObjectOutput{
|
||||
AcceptRanges: &acceptRange,
|
||||
ContentLength: length,
|
||||
ContentEncoding: &contentEncoding,
|
||||
ContentType: &contentType,
|
||||
ETag: &etag,
|
||||
LastModified: backend.GetTimePtr(fi.ModTime()),
|
||||
Metadata: userMetaData,
|
||||
TagCount: int32(len(tags)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) HeadObject(bucket, object string, etag string) (*s3.HeadObjectOutput, error) {
|
||||
func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -887,7 +935,7 @@ func (p *Posix) HeadObject(bucket, object string, etag string) (*s3.HeadObjectOu
|
||||
|
||||
objPath := filepath.Join(bucket, object)
|
||||
fi, err := os.Stat(objPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -895,10 +943,14 @@ func (p *Posix) HeadObject(bucket, object string, etag string) (*s3.HeadObjectOu
|
||||
}
|
||||
|
||||
userMetaData := make(map[string]string)
|
||||
_, contentType, contentEncoding := loadUserMetaData(filepath.Join(bucket, objPath), userMetaData)
|
||||
contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
|
||||
|
||||
b, err := xattr.Get(objPath, "user.etag")
|
||||
etag := string(b)
|
||||
if err != nil {
|
||||
etag = ""
|
||||
}
|
||||
|
||||
// TODO: fill accept ranges request header?
|
||||
// TODO: do we need to get etag from xattr?
|
||||
return &s3.HeadObjectOutput{
|
||||
ContentLength: fi.Size(),
|
||||
ContentType: &contentType,
|
||||
@@ -911,7 +963,7 @@ func (p *Posix) HeadObject(bucket, object string, etag string) (*s3.HeadObjectOu
|
||||
|
||||
func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
|
||||
_, err := os.Stat(srcBucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -919,7 +971,7 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
|
||||
}
|
||||
|
||||
_, err = os.Stat(DstBucket)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -928,7 +980,7 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
|
||||
|
||||
objPath := filepath.Join(srcBucket, srcObject)
|
||||
f, err := os.Open(objPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -936,7 +988,7 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
etag, err := p.PutObject(DstBucket, dstObject, f)
|
||||
etag, err := p.PutObject(&s3.PutObjectInput{Bucket: &DstBucket, Key: &dstObject, Body: f})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -954,9 +1006,171 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
func (p *Posix) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
fileSystem := os.DirFS(bucket)
|
||||
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
|
||||
func(path string) (bool, error) {
|
||||
_, err := xattr.Get(filepath.Join(bucket, path), "user.etag")
|
||||
if isNoAttr(err) {
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}, func(path string) (string, error) {
|
||||
etag, err := xattr.Get(filepath.Join(bucket, path), "user.etag")
|
||||
return string(etag), err
|
||||
}, []string{metaTmpDir})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("walk %v: %w", bucket, err)
|
||||
}
|
||||
|
||||
return &s3.ListObjectsOutput{
|
||||
CommonPrefixes: results.CommonPrefixes,
|
||||
Contents: results.Objects,
|
||||
Delimiter: &delim,
|
||||
IsTruncated: results.Truncated,
|
||||
Marker: &marker,
|
||||
MaxKeys: int32(maxkeys),
|
||||
Name: &bucket,
|
||||
NextMarker: &results.NextMarker,
|
||||
Prefix: &prefix,
|
||||
}, nil
|
||||
}
|
||||
func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
|
||||
func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
fileSystem := os.DirFS(bucket)
|
||||
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
|
||||
func(path string) (bool, error) {
|
||||
_, err := xattr.Get(filepath.Join(bucket, path), "user.etag")
|
||||
if isNoAttr(err) {
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}, func(path string) (string, error) {
|
||||
etag, err := xattr.Get(filepath.Join(bucket, path), "user.etag")
|
||||
return string(etag), err
|
||||
}, []string{metaTmpDir})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("walk %v: %w", bucket, err)
|
||||
}
|
||||
|
||||
return &s3.ListObjectsV2Output{
|
||||
CommonPrefixes: results.CommonPrefixes,
|
||||
Contents: results.Objects,
|
||||
Delimiter: &delim,
|
||||
IsTruncated: results.Truncated,
|
||||
ContinuationToken: &marker,
|
||||
MaxKeys: int32(maxkeys),
|
||||
Name: &bucket,
|
||||
NextContinuationToken: &results.NextMarker,
|
||||
Prefix: &prefix,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Posix) GetTags(bucket, object string) (map[string]string, error) {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
return p.getXattrTags(bucket, object)
|
||||
}
|
||||
|
||||
func (p *Posix) getXattrTags(bucket, object string) (map[string]string, error) {
|
||||
tags := make(map[string]string)
|
||||
b, err := xattr.Get(filepath.Join(bucket, object), "user."+tagHdr)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if isNoAttr(err) {
|
||||
return tags, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get tags: %w", err)
|
||||
}
|
||||
|
||||
err = json.Unmarshal(b, &tags)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unmarshal tags: %w", err)
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
func (p *Posix) SetTags(bucket, object string, tags map[string]string) error {
|
||||
_, err := os.Stat(bucket)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("stat bucket: %w", err)
|
||||
}
|
||||
|
||||
if tags == nil {
|
||||
err = xattr.Remove(filepath.Join(bucket, object), "user."+tagHdr)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("remove tags: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err := json.Marshal(tags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal tags: %w", err)
|
||||
}
|
||||
|
||||
err = xattr.Set(filepath.Join(bucket, object), "user."+tagHdr, b)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return s3err.GetAPIError(s3err.ErrNoSuchKey)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("set tags: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Posix) RemoveTags(bucket, object string) error {
|
||||
return p.SetTags(bucket, object, nil)
|
||||
}
|
||||
|
||||
func isNoAttr(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
xerr, ok := err.(*xattr.Error)
|
||||
if ok && xerr.Err == xattr.ENOATTR {
|
||||
return true
|
||||
}
|
||||
if err == syscall.ENODATA {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
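The tagging helpers above persist the whole tag set as a single JSON blob in one user xattr. A small, self-contained sketch of that round trip, assuming the github.com/pkg/xattr package that provides the xattr calls used above; the attribute name is a placeholder, since the real one is built from the tagHdr constant, which is not shown in this diff.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/pkg/xattr"
)

// tagAttr is a placeholder; the gateway builds the real name as "user."+tagHdr.
const tagAttr = "user.example-tagging"

// setTags and getTags sketch the JSON-in-xattr round trip used above.
func setTags(path string, tags map[string]string) error {
	b, err := json.Marshal(tags)
	if err != nil {
		return fmt.Errorf("marshal tags: %w", err)
	}
	return xattr.Set(path, tagAttr, b)
}

func getTags(path string) (map[string]string, error) {
	b, err := xattr.Get(path, tagAttr)
	if err != nil {
		return nil, fmt.Errorf("get tags: %w", err)
	}
	tags := make(map[string]string)
	if err := json.Unmarshal(b, &tags); err != nil {
		return nil, fmt.Errorf("unmarshal tags: %w", err)
	}
	return tags, nil
}

func main() {
	if err := setTags("testfile", map[string]string{"env": "dev"}); err != nil {
		log.Fatal(err)
	}
	tags, err := getTags("testfile")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tags)
}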
|
||||
|
||||
@@ -1,14 +1,89 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package posix
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func openTmpFile(dir string) (*os.File, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
type tmpfile struct {
|
||||
f *os.File
|
||||
bucket string
|
||||
objname string
|
||||
size int64
|
||||
}
|
||||
|
||||
func linkTmpFile(f *os.File, path string) error {
|
||||
return fmt.Errorf("not implemented")
|
||||
func openTmpFile(dir, bucket, obj string, size int64) (*tmpfile, error) {
|
||||
// Create a temp file for upload while in progress (see link comments below).
|
||||
err := os.MkdirAll(dir, 0700)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("make temp dir: %w", err)
|
||||
}
|
||||
f, err := os.CreateTemp(dir,
|
||||
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tmpfile{f: f, bucket: bucket, objname: obj, size: size}, nil
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) link() error {
|
||||
tempname := tmp.f.Name()
|
||||
// cleanup in case anything goes wrong, if rename succeeds then
|
||||
// this will no longer exist
|
||||
defer os.Remove(tempname)
|
||||
|
||||
// We use Rename as the atomic operation for object puts. The upload is
|
||||
// written to a temp file to not conflict with any other simultaneous
|
||||
// uploads. The final operation is to move the temp file into place for
|
||||
// the object. This ensures the object semantics of last upload completed
|
||||
// wins and is not some combination of writes from simultaneous uploads.
|
||||
objPath := filepath.Join(tmp.bucket, tmp.objname)
|
||||
err := os.Remove(objPath)
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("remove stale path: %w", err)
|
||||
}
|
||||
|
||||
err = tmp.f.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("close tmpfile: %w", err)
|
||||
}
|
||||
|
||||
err = os.Rename(tempname, objPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rename tmpfile: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) Write(b []byte) (int, error) {
|
||||
if int64(len(b)) > tmp.size {
|
||||
return 0, fmt.Errorf("write exceeds content length %v", tmp.size)
|
||||
}
|
||||
|
||||
n, err := tmp.f.Write(b)
|
||||
tmp.size -= int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) cleanup() {
|
||||
tmp.f.Close()
|
||||
}
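The comments in the portable (non-O_TMPFILE) implementation above describe the put pattern: stream into a CreateTemp file, then Rename it over the destination so the last completed upload wins and readers never observe partial data. A minimal standalone sketch of that pattern; the helper and file names are illustrative, not the gateway's API.

package main

import (
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// atomicPut writes r to a temp file in dir and renames it over dst, so the
// destination only ever holds a fully written object.
func atomicPut(dir, dst string, r io.Reader) error {
	tmp, err := os.CreateTemp(dir, ".upload-*")
	if err != nil {
		return fmt.Errorf("create temp: %w", err)
	}
	// cleanup if anything fails before the rename; after a successful
	// rename the temp name no longer exists and this is a no-op
	defer os.Remove(tmp.Name())

	if _, err := io.Copy(tmp, r); err != nil {
		tmp.Close()
		return fmt.Errorf("write temp: %w", err)
	}
	if err := tmp.Close(); err != nil {
		return fmt.Errorf("close temp: %w", err)
	}
	// rename is the atomic "commit" of the upload
	if err := os.Rename(tmp.Name(), dst); err != nil {
		return fmt.Errorf("rename: %w", err)
	}
	return nil
}

func main() {
	dir := os.TempDir()
	dst := filepath.Join(dir, "object")
	if err := atomicPut(dir, dst, strings.NewReader("hello")); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote", dst)
}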
|
||||
|
||||
@@ -1,31 +1,105 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package posix
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const procfddir = "/proc/self/fd"
|
||||
|
||||
func openTmpFile(dir string) (*os.File, error) {
|
||||
fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0666)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return os.NewFile(uintptr(fd), filepath.Join(procfddir, strconv.Itoa(fd))), nil
|
||||
type tmpfile struct {
|
||||
f *os.File
|
||||
bucket string
|
||||
objname string
|
||||
isOTmp bool
|
||||
size int64
|
||||
}
|
||||
|
||||
func linkTmpFile(f *os.File, path string) error {
|
||||
err := os.Remove(path)
|
||||
func openTmpFile(dir, bucket, obj string, size int64) (*tmpfile, error) {
|
||||
// O_TMPFILE allows for a file handle to an unnamed file in the filesystem.
|
||||
// This can help reduce contention within the namespace (parent directories),
|
||||
// etc. And will auto cleanup the inode on close if we never link this
|
||||
// file descriptor into the namespace.
|
||||
// Not all filesystems support this, so fallback to CreateTemp for when
|
||||
// this is not supported.
|
||||
fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0666)
|
||||
if err != nil {
|
||||
// O_TMPFILE not supported, try fallback
|
||||
err := os.MkdirAll(dir, 0700)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("make temp dir: %w", err)
|
||||
}
|
||||
f, err := os.CreateTemp(dir,
|
||||
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmp := &tmpfile{f: f, bucket: bucket, objname: obj, size: size}
|
||||
// falloc is best effort, it's fine if this fails
|
||||
if size > 0 {
|
||||
tmp.falloc()
|
||||
}
|
||||
return tmp, nil
|
||||
}
|
||||
|
||||
// for O_TMPFILE, filename is /proc/self/fd/<fd> to be used
|
||||
// later to link file into namespace
|
||||
f := os.NewFile(uintptr(fd), filepath.Join(procfddir, strconv.Itoa(fd)))
|
||||
|
||||
tmp := &tmpfile{f: f, isOTmp: true, size: size}
|
||||
// falloc is best effort, it's fine if this fails
|
||||
if size > 0 {
|
||||
tmp.falloc()
|
||||
}
|
||||
return tmp, nil
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) falloc() error {
|
||||
err := syscall.Fallocate(int(tmp.f.Fd()), 0, 0, tmp.size)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fallocate: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) link() error {
|
||||
// We use Linkat/Rename as the atomic operation for object puts. The
|
||||
// upload is written to a temp (or unnamed/O_TMPFILE) file to not conflict
|
||||
// with any other simultaneous uploads. The final operation is to move the
|
||||
// temp file into place for the object. This ensures the object semantics
|
||||
// of last upload completed wins and is not some combination of writes
|
||||
// from simultaneous uploads.
|
||||
objPath := filepath.Join(tmp.bucket, tmp.objname)
|
||||
err := os.Remove(objPath)
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("remove stale part: %w", err)
|
||||
return fmt.Errorf("remove stale path: %w", err)
|
||||
}
|
||||
|
||||
if !tmp.isOTmp {
|
||||
// O_TMPFILE not supported, use fallback
|
||||
return tmp.fallbackLink()
|
||||
}
|
||||
|
||||
procdir, err := os.Open(procfddir)
|
||||
@@ -34,17 +108,56 @@ func linkTmpFile(f *os.File, path string) error {
|
||||
}
|
||||
defer procdir.Close()
|
||||
|
||||
dir, err := os.Open(filepath.Dir(path))
|
||||
dir, err := os.Open(filepath.Dir(objPath))
|
||||
if err != nil {
|
||||
return fmt.Errorf("open parent dir: %w", err)
|
||||
}
|
||||
defer dir.Close()
|
||||
|
||||
err = unix.Linkat(int(procdir.Fd()), filepath.Base(f.Name()),
|
||||
int(dir.Fd()), filepath.Base(path), unix.AT_SYMLINK_FOLLOW)
|
||||
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
|
||||
int(dir.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
|
||||
if err != nil {
|
||||
return fmt.Errorf("link tmpfile: %w", err)
|
||||
}
|
||||
|
||||
err = tmp.f.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("close tmpfile: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) fallbackLink() error {
|
||||
tempname := tmp.f.Name()
|
||||
// cleanup in case anything goes wrong, if rename succeeds then
|
||||
// this will no longer exist
|
||||
defer os.Remove(tempname)
|
||||
|
||||
err := tmp.f.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("close tmpfile: %w", err)
|
||||
}
|
||||
|
||||
objPath := filepath.Join(tmp.bucket, tmp.objname)
|
||||
err = os.Rename(tempname, objPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rename tmpfile: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) Write(b []byte) (int, error) {
|
||||
if int64(len(b)) > tmp.size {
|
||||
return 0, fmt.Errorf("write exceeds content length %v", tmp.size)
|
||||
}
|
||||
|
||||
n, err := tmp.f.Write(b)
|
||||
tmp.size -= int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) cleanup() {
|
||||
tmp.f.Close()
|
||||
}
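The O_TMPFILE-based implementation above writes into an unnamed file and later gives it a name through /proc/self/fd and linkat(2). A compressed, hypothetical sketch of just that mechanism, assuming golang.org/x/sys/unix; the real code also removes any stale destination first (linkat fails with EEXIST otherwise) and falls back to CreateTemp when O_TMPFILE is unsupported.

//go:build linux

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strconv"

	"golang.org/x/sys/unix"
)

// putUnnamed writes data into an unnamed O_TMPFILE in dir, then links it into
// the namespace as dst via its /proc/self/fd path.
func putUnnamed(dir, dst string, data []byte) error {
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0666)
	if err != nil {
		return fmt.Errorf("open O_TMPFILE: %w", err)
	}
	f := os.NewFile(uintptr(fd), filepath.Join("/proc/self/fd", strconv.Itoa(fd)))
	defer f.Close() // if never linked, the inode is discarded on close

	if _, err := f.Write(data); err != nil {
		return fmt.Errorf("write: %w", err)
	}
	// AT_SYMLINK_FOLLOW makes linkat resolve the /proc/self/fd symlink to the
	// underlying unnamed inode, giving it a real name in the filesystem.
	err = unix.Linkat(unix.AT_FDCWD, f.Name(), unix.AT_FDCWD, dst, unix.AT_SYMLINK_FOLLOW)
	if err != nil {
		return fmt.Errorf("linkat: %w", err)
	}
	return nil
}

func main() {
	if err := putUnnamed(".", "linked-object", []byte("hello")); err != nil {
		log.Fatal(err)
	}
	fmt.Println("linked-object created")
}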
|
||||
|
||||
89
backend/posix/posix_windows.go
Normal file
@@ -0,0 +1,89 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package posix
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
type tmpfile struct {
|
||||
f *os.File
|
||||
bucket string
|
||||
objname string
|
||||
size int64
|
||||
}
|
||||
|
||||
func openTmpFile(dir, bucket, obj string, size int64) (*tmpfile, error) {
|
||||
// Create a temp file for upload while in progress (see link comments below).
|
||||
err := os.MkdirAll(dir, 0700)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("make temp dir: %w", err)
|
||||
}
|
||||
f, err := os.CreateTemp(dir,
|
||||
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tmpfile{f: f, bucket: bucket, objname: obj, size: size}, nil
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) link() error {
|
||||
tempname := tmp.f.Name()
|
||||
// cleanup in case anything goes wrong, if rename succeeds then
|
||||
// this will no longer exist
|
||||
defer os.Remove(tempname)
|
||||
|
||||
// We use Rename as the atomic operation for object puts. The upload is
|
||||
// written to a temp file to not conflict with any other simultaneous
|
||||
// uploads. The final operation is to move the temp file into place for
|
||||
// the object. This ensures the object semantics of last upload completed
|
||||
// wins and is not some combination of writes from simultaneous uploads.
|
||||
objPath := filepath.Join(tmp.bucket, tmp.objname)
|
||||
err := os.Remove(objPath)
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("remove stale path: %w", err)
|
||||
}
|
||||
|
||||
err = tmp.f.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("close tmpfile: %w", err)
|
||||
}
|
||||
|
||||
err = os.Rename(tempname, objPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rename tmpfile: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) Write(b []byte) (int, error) {
|
||||
if int64(len(b)) > tmp.size {
|
||||
return 0, fmt.Errorf("write exceeds content length")
|
||||
}
|
||||
|
||||
n, err := tmp.f.Write(b)
|
||||
tmp.size -= int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (tmp *tmpfile) cleanup() {
|
||||
tmp.f.Close()
|
||||
}
|
||||
@@ -1,8 +1,22 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package scoutfs
|
||||
|
||||
import (
|
||||
"github.com/versity/scoutgw/backend"
|
||||
"github.com/versity/scoutgw/backend/posix"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/backend/posix"
|
||||
)
|
||||
|
||||
type ScoutFS struct {
|
||||
|
||||
242
backend/walk.go
Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package backend
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
)
|
||||
|
||||
type WalkResults struct {
|
||||
CommonPrefixes []types.CommonPrefix
|
||||
Objects []types.Object
|
||||
Truncated bool
|
||||
NextMarker string
|
||||
}
|
||||
|
||||
type DirObjCheck func(path string) (bool, error)
|
||||
type GetETag func(path string) (string, error)
|
||||
|
||||
// Walk walks the supplied fs.FS and returns results compatible with list
|
||||
// objects responses
|
||||
func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, dirchk DirObjCheck, getetag GetETag, skipdirs []string) (WalkResults, error) {
|
||||
cpmap := make(map[string]struct{})
|
||||
var objects []types.Object
|
||||
|
||||
var pastMarker bool
|
||||
if marker == "" {
|
||||
pastMarker = true
|
||||
}
|
||||
|
||||
var pastMax bool
|
||||
var newMarker string
|
||||
var truncated bool
|
||||
|
||||
err := fs.WalkDir(fileSystem, ".", func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pastMax {
|
||||
newMarker = path
|
||||
truncated = true
|
||||
return fs.SkipAll
|
||||
}
|
||||
|
||||
if d.IsDir() {
|
||||
// Ignore the root directory
|
||||
if path == "." {
|
||||
return nil
|
||||
}
|
||||
|
||||
if contains(d.Name(), skipdirs) {
|
||||
return fs.SkipDir
|
||||
}
|
||||
|
||||
// If prefix is defined and the directory does not match prefix,
|
||||
// do not descend into the directory because nothing will
|
||||
// match this prefix. Make sure to append the / at the end of
|
||||
// directories since this is implied as a directory path name.
|
||||
// If path is a prefix of prefix, then path could still be
|
||||
// building to match. So only skip if path isn't a prefix of prefix
|
||||
// and prefix isn't a prefix of path.
|
||||
if prefix != "" &&
|
||||
!strings.HasPrefix(path+string(os.PathSeparator), prefix) &&
|
||||
!strings.HasPrefix(prefix, path+string(os.PathSeparator)) {
|
||||
return fs.SkipDir
|
||||
}
|
||||
|
||||
// TODO: can we do better here rather than a second readdir
|
||||
// per directory?
|
||||
ents, err := fs.ReadDir(fileSystem, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("readdir %q: %w", path, err)
|
||||
}
|
||||
if len(ents) == 0 {
|
||||
dirobj, err := dirchk(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("directory object check %q: %w", path, err)
|
||||
}
|
||||
if dirobj {
|
||||
fi, err := d.Info()
|
||||
if err != nil {
|
||||
return fmt.Errorf("dir info %q: %w", path, err)
|
||||
}
|
||||
etag, err := getetag(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get etag %q: %w", path, err)
|
||||
}
|
||||
path := path + "/"
|
||||
objects = append(objects, types.Object{
|
||||
ETag: &etag,
|
||||
Key: &path,
|
||||
LastModified: GetTimePtr(fi.ModTime()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if !pastMarker {
|
||||
if path != marker {
|
||||
return nil
|
||||
}
|
||||
pastMarker = true
|
||||
}
|
||||
|
||||
// If object doesn't have the prefix, don't include it in results.
|
||||
if prefix != "" && !strings.HasPrefix(path, prefix) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if delimiter == "" {
|
||||
// If no delimiter specified, then all files with matching
|
||||
// prefix are included in results
|
||||
fi, err := d.Info()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get info for %v: %w", path, err)
|
||||
}
|
||||
etag, err := getetag(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get etag %q: %w", path, err)
|
||||
}
|
||||
|
||||
objects = append(objects, types.Object{
|
||||
ETag: &etag,
|
||||
Key: &path,
|
||||
LastModified: GetTimePtr(fi.ModTime()),
|
||||
Size: fi.Size(),
|
||||
})
|
||||
|
||||
if max > 0 && (len(objects)+len(cpmap)) == max {
|
||||
pastMax = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Since delimiter is specified, we only want results that
|
||||
// do not contain the delimiter beyond the prefix. If the
|
||||
// delimiter exists past the prefix, then the substring
|
||||
// between the prefix and delimiter is part of common prefixes.
|
||||
//
|
||||
// For example:
|
||||
// prefix = A/
|
||||
// delimiter = /
|
||||
// and objects:
|
||||
// A/file
|
||||
// A/B/file
|
||||
// B/C
|
||||
// would return:
|
||||
// objects: A/file
|
||||
// common prefix: A/B/
|
||||
//
|
||||
// Note: No objects are included past the common prefix since
|
||||
// these are all rolled up into the common prefix.
|
||||
// Note: The delimiter can be anything, so we have to operate on
|
||||
// the full path without any assumptions on posix directory hierarchy
|
||||
// here. Usually the delimiter will be "/", but that's not required.
|
||||
suffix := strings.TrimPrefix(path, prefix)
|
||||
before, _, found := strings.Cut(suffix, delimiter)
|
||||
if !found {
|
||||
fi, err := d.Info()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get info for %v: %w", path, err)
|
||||
}
|
||||
etag, err := getetag(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get etag %q: %w", path, err)
|
||||
}
|
||||
objects = append(objects, types.Object{
|
||||
ETag: &etag,
|
||||
Key: &path,
|
||||
LastModified: GetTimePtr(fi.ModTime()),
|
||||
Size: fi.Size(),
|
||||
})
|
||||
if (len(objects) + len(cpmap)) == max {
|
||||
pastMax = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Common prefixes are a set, so should not have duplicates.
|
||||
// These are abstractly a "directory", so need to include the
|
||||
// delimiter at the end.
|
||||
cpmap[prefix+before+delimiter] = struct{}{}
|
||||
if (len(objects) + len(cpmap)) == max {
|
||||
pastMax = true
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return WalkResults{}, err
|
||||
}
|
||||
|
||||
var commonPrefixStrings []string
|
||||
for k := range cpmap {
|
||||
commonPrefixStrings = append(commonPrefixStrings, k)
|
||||
}
|
||||
sort.Strings(commonPrefixStrings)
|
||||
commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
|
||||
for _, cp := range commonPrefixStrings {
|
||||
pfx := cp
|
||||
commonPrefixes = append(commonPrefixes, types.CommonPrefix{
|
||||
Prefix: &pfx,
|
||||
})
|
||||
}
|
||||
|
||||
return WalkResults{
|
||||
CommonPrefixes: commonPrefixes,
|
||||
Objects: objects,
|
||||
Truncated: truncated,
|
||||
NextMarker: newMarker,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func contains(a string, strs []string) bool {
|
||||
for _, s := range strs {
|
||||
if s == a {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
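The worked example in the Walk comments (prefix "A/", delimiter "/") reduces to one string operation per key: trim the prefix, then cut at the first delimiter. A self-contained sketch of that classification step, with illustrative names; it reproduces the example's result of one object and one common prefix.

package main

import (
	"fmt"
	"strings"
)

// classify mirrors the split used in Walk: a key with no delimiter past the
// prefix is an object; otherwise everything up to and including the first
// delimiter rolls up into a common prefix.
func classify(keys []string, prefix, delimiter string) ([]string, map[string]struct{}) {
	var objects []string
	common := make(map[string]struct{})
	for _, key := range keys {
		if !strings.HasPrefix(key, prefix) {
			continue
		}
		suffix := strings.TrimPrefix(key, prefix)
		before, _, found := strings.Cut(suffix, delimiter)
		if !found {
			objects = append(objects, key)
			continue
		}
		common[prefix+before+delimiter] = struct{}{}
	}
	return objects, common
}

func main() {
	objs, cps := classify([]string{"A/file", "A/B/file", "B/C"}, "A/", "/")
	fmt.Println(objs) // [A/file]
	for cp := range cps {
		fmt.Println(cp) // A/B/
	}
}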
|
||||
167
backend/walk_test.go
Normal file
@@ -0,0 +1,167 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package backend_test
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/versity/versitygw/backend"
|
||||
)
|
||||
|
||||
type walkTest struct {
|
||||
fsys fs.FS
|
||||
expected backend.WalkResults
|
||||
dc backend.DirObjCheck
|
||||
}
|
||||
|
||||
func gettag(string) (string, error) { return "myetag", nil }
|
||||
|
||||
func TestWalk(t *testing.T) {
|
||||
tests := []walkTest{
|
||||
{
|
||||
// test case from
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html
|
||||
fsys: fstest.MapFS{
|
||||
"sample.jpg": {},
|
||||
"photos/2006/January/sample.jpg": {},
|
||||
"photos/2006/February/sample2.jpg": {},
|
||||
"photos/2006/February/sample3.jpg": {},
|
||||
"photos/2006/February/sample4.jpg": {},
|
||||
},
|
||||
expected: backend.WalkResults{
|
||||
CommonPrefixes: []types.CommonPrefix{{
|
||||
Prefix: backend.GetStringPtr("photos/"),
|
||||
}},
|
||||
Objects: []types.Object{{
|
||||
Key: backend.GetStringPtr("sample.jpg"),
|
||||
}},
|
||||
},
|
||||
dc: func(string) (bool, error) { return false, nil },
|
||||
},
|
||||
{
|
||||
// test case single dir/single file
|
||||
fsys: fstest.MapFS{
|
||||
"test/file": {},
|
||||
},
|
||||
expected: backend.WalkResults{
|
||||
CommonPrefixes: []types.CommonPrefix{{
|
||||
Prefix: backend.GetStringPtr("test/"),
|
||||
}},
|
||||
Objects: []types.Object{},
|
||||
},
|
||||
dc: func(string) (bool, error) { return true, nil },
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
res, err := backend.Walk(tt.fsys, "", "/", "", 1000, tt.dc, gettag, []string{})
|
||||
if err != nil {
|
||||
t.Fatalf("walk: %v", err)
|
||||
}
|
||||
|
||||
compareResults(res, tt.expected, t)
|
||||
}
|
||||
}
|
||||
|
||||
func compareResults(got, wanted backend.WalkResults, t *testing.T) {
|
||||
if !compareCommonPrefix(got.CommonPrefixes, wanted.CommonPrefixes) {
|
||||
t.Errorf("unexpected common prefix, got %v wanted %v",
|
||||
printCommonPrefixes(got.CommonPrefixes),
|
||||
printCommonPrefixes(wanted.CommonPrefixes))
|
||||
}
|
||||
|
||||
if !compareObjects(got.Objects, wanted.Objects) {
|
||||
t.Errorf("unexpected object, got %v wanted %v",
|
||||
printObjects(got.Objects),
|
||||
printObjects(wanted.Objects))
|
||||
}
|
||||
}
|
||||
|
||||
func compareCommonPrefix(a, b []types.CommonPrefix) bool {
|
||||
if len(a) == 0 && len(b) == 0 {
|
||||
return true
|
||||
}
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, cp := range a {
|
||||
if containsCommonPrefix(cp, b) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func containsCommonPrefix(c types.CommonPrefix, list []types.CommonPrefix) bool {
|
||||
for _, cp := range list {
|
||||
if *c.Prefix == *cp.Prefix {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func printCommonPrefixes(list []types.CommonPrefix) string {
|
||||
res := "["
|
||||
for _, cp := range list {
|
||||
if res == "[" {
|
||||
res = res + *cp.Prefix
|
||||
} else {
|
||||
res = res + ", " + *cp.Prefix
|
||||
}
|
||||
}
|
||||
return res + "]"
|
||||
}
|
||||
|
||||
func compareObjects(a, b []types.Object) bool {
|
||||
if len(a) == 0 && len(b) == 0 {
|
||||
return true
|
||||
}
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, cp := range a {
|
||||
if containsObject(cp, b) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func containsObject(c types.Object, list []types.Object) bool {
|
||||
for _, cp := range list {
|
||||
if *c.Key == *cp.Key {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func printObjects(list []types.Object) string {
|
||||
res := "["
|
||||
for _, cp := range list {
|
||||
if res == "[" {
|
||||
res = res + *cp.Key
|
||||
} else {
|
||||
res = res + ", " + *cp.Key
|
||||
}
|
||||
}
|
||||
return res + "]"
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/scoutgw/backend"
|
||||
"github.com/versity/scoutgw/s3api"
|
||||
"log"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := fiber.New(fiber.Config{})
|
||||
back := backend.New()
|
||||
if api, err := s3api.New(app, back, ":7070"); err != nil {
|
||||
log.Fatalln(err)
|
||||
} else if err = api.Serve(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
145
cmd/versitygw/admin.go
Normal file
@@ -0,0 +1,145 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
adminAccess string
|
||||
adminSecret string
|
||||
adminRegion string
|
||||
)
|
||||
|
||||
func adminCommand() *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "admin",
|
||||
Usage: "admin CLI tool",
|
||||
Description: `admin CLI tool for interacting with admin api.
|
||||
Here is the available api list:
|
||||
create-user
|
||||
`,
|
||||
Subcommands: []*cli.Command{
|
||||
{
|
||||
Name: "create-user",
|
||||
Usage: "Create a new user",
|
||||
Action: createUser,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "access",
|
||||
Usage: "access value for the new user",
|
||||
Required: true,
|
||||
Aliases: []string{"a"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "secret",
|
||||
Usage: "secret value for the new user",
|
||||
Required: true,
|
||||
Aliases: []string{"s"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "role",
|
||||
Usage: "role for the new user",
|
||||
Required: true,
|
||||
Aliases: []string{"r"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "region",
|
||||
Usage: "s3 region string for the user",
|
||||
Value: "us-east-1",
|
||||
Aliases: []string{"rg"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
// TODO: create a configuration file for this
|
||||
&cli.StringFlag{
|
||||
Name: "adminAccess",
|
||||
Usage: "admin access account",
|
||||
EnvVars: []string{"ADMIN_ACCESS_KEY_ID", "ADMIN_ACCESS_KEY"},
|
||||
Aliases: []string{"aa"},
|
||||
Destination: &adminAccess,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "adminSecret",
|
||||
Usage: "admin secret access key",
|
||||
EnvVars: []string{"ADMIN_SECRET_ACCESS_KEY", "ADMIN_SECRET_KEY"},
|
||||
Aliases: []string{"as"},
|
||||
Destination: &adminSecret,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "adminRegion",
|
||||
Usage: "s3 region string",
|
||||
Value: "us-east-1",
|
||||
Destination: &adminRegion,
|
||||
Aliases: []string{"ar"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createUser(ctx *cli.Context) error {
|
||||
access, secret, role, region := ctx.String("access"), ctx.String("secret"), ctx.String("role"), ctx.String("region")
|
||||
if access == "" || secret == "" || region == "" {
|
||||
return fmt.Errorf("invalid input parameters for the new user")
|
||||
}
|
||||
if role != "admin" && role != "user" {
|
||||
return fmt.Errorf("invalid input parameter for role")
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://localhost:7070/create-user?access=%v&secret=%v&role=%v®ion=%v", access, secret, role, region), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
|
||||
hashedPayload := sha256.Sum256([]byte{})
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
|
||||
if signErr != nil {
|
||||
return fmt.Errorf("failed to sign the request: %w", err)
|
||||
}
|
||||
|
||||
client := http.Client{}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send the request: %w", err)
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s", body)
|
||||
|
||||
return nil
|
||||
}
|
||||
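The createUser helper above signs an empty-body request with SigV4, so the X-Amz-Content-Sha256 header has to carry the SHA-256 of a zero-length payload. A minimal, self-contained sketch of just that hashing step follows; the well-known empty-payload constant is included only for comparison, everything else mirrors the code above.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// emptyPayloadSHA256 is the documented SigV4 hash of a zero-length body.
const emptyPayloadSHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

func main() {
	sum := sha256.Sum256([]byte{})           // hash of the empty payload
	hexPayload := hex.EncodeToString(sum[:]) // value for X-Amz-Content-Sha256

	fmt.Println(hexPayload)
	fmt.Println(hexPayload == emptyPayloadSHA256) // true
}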
173
cmd/versitygw/main.go
Normal file
@@ -0,0 +1,173 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/backend/auth"
|
||||
"github.com/versity/versitygw/s3api"
|
||||
"github.com/versity/versitygw/s3api/middlewares"
|
||||
)
|
||||
|
||||
var (
|
||||
port string
|
||||
rootUserAccess string
|
||||
rootUserSecret string
|
||||
region string
|
||||
certFile, keyFile string
|
||||
debug bool
|
||||
)
|
||||
|
||||
var (
|
||||
// Version is the latest tag (set within Makefile)
|
||||
Version = "git"
|
||||
// Build is the commit hash (set within Makefile)
|
||||
Build = "norev"
|
||||
// BuildTime is the date/time of build (set within Makefile)
|
||||
BuildTime = "none"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := initApp()
|
||||
|
||||
app.Commands = []*cli.Command{
|
||||
posixCommand(),
|
||||
adminCommand(),
|
||||
}
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func initApp() *cli.App {
|
||||
return &cli.App{
|
||||
Name: "versitygw",
|
||||
Usage: "Start S3 gateway service with specified backend storage.",
|
||||
Description: `The S3 gateway is an S3 protocol translator that allows an S3 client
to access the supported backend storage as if it were a native S3 service.`,
|
||||
Action: func(ctx *cli.Context) error {
|
||||
return ctx.App.Command("help").Run(ctx)
|
||||
},
|
||||
Flags: initFlags(),
|
||||
}
|
||||
}
|
||||
|
||||
func initFlags() []cli.Flag {
|
||||
return []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "version",
|
||||
Usage: "list versitygw version",
|
||||
Aliases: []string{"v"},
|
||||
Action: func(*cli.Context, bool) error {
|
||||
fmt.Println("Version :", Version)
|
||||
fmt.Println("Build :", Build)
|
||||
fmt.Println("BuildTime:", BuildTime)
|
||||
os.Exit(0)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "port",
|
||||
Usage: "gateway listen address <ip>:<port> or :<port>",
|
||||
Value: ":7070",
|
||||
Destination: &port,
|
||||
Aliases: []string{"p"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "access",
|
||||
Usage: "root user access key",
|
||||
EnvVars: []string{"ROOT_ACCESS_KEY_ID", "ROOT_ACCESS_KEY"},
|
||||
Aliases: []string{"a"},
|
||||
Destination: &rootUserAccess,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "secret",
|
||||
Usage: "root user secret access key",
|
||||
EnvVars: []string{"ROOT_SECRET_ACCESS_KEY", "ROOT_SECRET_KEY"},
|
||||
Aliases: []string{"s"},
|
||||
Destination: &rootUserSecret,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "region",
|
||||
Usage: "s3 region string",
|
||||
Value: "us-east-1",
|
||||
Destination: ®ion,
|
||||
Aliases: []string{"r"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cert",
|
||||
Usage: "TLS cert file",
|
||||
Destination: &certFile,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "TLS key file",
|
||||
Destination: &keyFile,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "enable debug output",
|
||||
Destination: &debug,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func runGateway(be backend.Backend) error {
|
||||
app := fiber.New(fiber.Config{
|
||||
AppName: "versitygw",
|
||||
ServerHeader: "VERSITYGW",
|
||||
BodyLimit: 5 * 1024 * 1024 * 1024,
|
||||
})
|
||||
|
||||
var opts []s3api.Option
|
||||
|
||||
if certFile != "" || keyFile != "" {
|
||||
if certFile == "" {
|
||||
return fmt.Errorf("TLS key specified without cert file")
|
||||
}
|
||||
if keyFile == "" {
|
||||
return fmt.Errorf("TLS cert specified without key file")
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tls: load certs: %v", err)
|
||||
}
|
||||
opts = append(opts, s3api.WithTLS(cert))
|
||||
}
|
||||
|
||||
if debug {
|
||||
opts = append(opts, s3api.WithDebug())
|
||||
}
|
||||
|
||||
srv, err := s3api.New(app, be, middlewares.RootUserConfig{
|
||||
Access: rootUserAccess,
|
||||
Secret: rootUserSecret,
|
||||
Region: region,
|
||||
}, port, auth.New(), opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init gateway: %v", err)
|
||||
}
|
||||
|
||||
return srv.Serve()
|
||||
}
|
||||
53
cmd/versitygw/posix.go
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/versity/versitygw/backend/posix"
|
||||
)
|
||||
|
||||
func posixCommand() *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "posix",
|
||||
Usage: "posix filesystem storage backend",
|
||||
Description: `Any posix filesystem that supports extended attributes. The top level
directory for the gateway must be provided. All subdirectories of the
top level directory are treated as buckets, and all files/directories
below the "bucket directory" are treated as objects. The object
name is split on the "/" separator to translate to posix storage.
For example:
top level: /mnt/fs/gwroot
bucket: mybucket
object: a/b/c/myobject
will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
|
||||
Action: runPosix,
|
||||
}
|
||||
}
|
||||
|
||||
func runPosix(ctx *cli.Context) error {
|
||||
if ctx.NArg() == 0 {
|
||||
return fmt.Errorf("no directory provided for operation")
|
||||
}
|
||||
|
||||
be, err := posix.New(ctx.Args().Get(0))
|
||||
if err != nil {
|
||||
return fmt.Errorf("init posix: %v", err)
|
||||
}
|
||||
|
||||
return runGateway(be)
|
||||
}
|
||||
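The posix command description above spells out how bucket and object names map onto the filesystem. A small illustrative sketch of that translation; the objectPath helper and sample paths are for demonstration only, the real backend lives in backend/posix and is not part of this diff.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// objectPath shows the documented mapping: gateway root + bucket + object,
// with the object key split on "/" and rejoined as path components.
func objectPath(root, bucket, object string) string {
	parts := append([]string{root, bucket}, strings.Split(object, "/")...)
	return filepath.Join(parts...)
}

func main() {
	// Matches the example in the command description.
	fmt.Println(objectPath("/mnt/fs/gwroot", "mybucket", "a/b/c/myobject"))
	// Output: /mnt/fs/gwroot/mybucket/a/b/c/myobject
}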
14
go.mod
@@ -1,14 +1,16 @@
|
||||
module github.com/versity/scoutgw
|
||||
module github.com/versity/versitygw
|
||||
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go-v2 v1.18.0
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.1
|
||||
github.com/gofiber/fiber/v2 v2.45.0
|
||||
github.com/valyala/fasthttp v1.47.0
|
||||
github.com/aws/smithy-go v1.13.5
|
||||
github.com/gofiber/fiber/v2 v2.46.0
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/pkg/xattr v0.4.9
|
||||
github.com/urfave/cli/v2 v2.25.4
|
||||
github.com/valyala/fasthttp v1.47.0
|
||||
golang.org/x/sys v0.8.0
|
||||
)
|
||||
|
||||
@@ -22,16 +24,18 @@ require (
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.28 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.2 // indirect
|
||||
github.com/aws/smithy-go v1.13.5 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/klauspost/compress v1.16.5 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.18 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/philhofer/fwd v1.1.2 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 // indirect
|
||||
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee // indirect
|
||||
github.com/tinylib/msgp v1.1.8 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/tcplisten v1.0.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
)
|
||||
|
||||
16
go.sum
@@ -22,9 +22,11 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.33.1 h1:O+9nAy9Bb6bJFTpeNFtd9UfHbgxO1
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.1/go.mod h1:J9kLNzEiHSeGMyN7238EjJmBpCniVzFda75Gxl/NqB8=
|
||||
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
|
||||
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/gofiber/fiber/v2 v2.45.0 h1:p4RpkJT9GAW6parBSbcNFH2ApnAuW3OzaQzbOCoDu+s=
|
||||
github.com/gofiber/fiber/v2 v2.45.0/go.mod h1:DNl0/c37WLe0g92U6lx1VMQuxGUQY5V7EIaVoEsUffc=
|
||||
github.com/gofiber/fiber/v2 v2.46.0 h1:wkkWotblsGVlLjXj2dpgKQAYHtXumsK/HyFugQM68Ns=
|
||||
github.com/gofiber/fiber/v2 v2.46.0/go.mod h1:DNl0/c37WLe0g92U6lx1VMQuxGUQY5V7EIaVoEsUffc=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
@@ -36,8 +38,8 @@ github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQs
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
|
||||
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
@@ -49,6 +51,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 h1:rmMl4fXJhKMNWl+K+r/fq4FbbKI+Ia2m9hYBLm2h4G4=
|
||||
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94/go.mod h1:90zrgN3D/WJsDd1iXHT96alCoN2KJo6/4x1DZC3wZs8=
|
||||
github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d/go.mod h1:Gy+0tqhJvgGlqnTF8CVGP0AaGRjwBtXs/a5PA0Y3+A4=
|
||||
@@ -58,12 +62,16 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
||||
github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw=
|
||||
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
|
||||
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
|
||||
github.com/urfave/cli/v2 v2.25.4 h1:HyYwPrTO3im9rYhUff/ZNs78eolxt0nJ4LN+9yJKSH4=
|
||||
github.com/urfave/cli/v2 v2.25.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.47.0 h1:y7moDoxYzMooFpT5aHgNgVOQDrS3qlkfiP9mDtGGK9c=
|
||||
github.com/valyala/fasthttp v1.47.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA=
|
||||
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
|
||||
49
s3api/controllers/admin.go
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/backend/auth"
|
||||
)
|
||||
|
||||
type AdminController struct {
|
||||
IAMService auth.IAMService
|
||||
}
|
||||
|
||||
func NewAdminController() AdminController {
|
||||
return AdminController{IAMService: auth.New()}
|
||||
}
|
||||
|
||||
func (c AdminController) CreateUser(ctx *fiber.Ctx) error {
|
||||
access, secret, role, region := ctx.Query("access"), ctx.Query("secret"), ctx.Query("role"), ctx.Query("region")
|
||||
requesterRole := ctx.Locals("role")
|
||||
|
||||
if requesterRole != "admin" {
|
||||
return fmt.Errorf("access denied: only admin users have access to this resource")
|
||||
}
|
||||
|
||||
user := auth.Account{Secret: secret, Role: role, Region: region}
|
||||
|
||||
err := c.IAMService.CreateAccount(access, &user)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create a user: %w", err)
|
||||
}
|
||||
|
||||
return ctx.SendString("The user has been created successfully")
|
||||
}
|
||||
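The CreateUser handler above rejects any caller whose role is not "admin" before touching the IAM service. A rough sketch of exercising that check through fiber's test harness; the "/create-user" route and the middleware that stores the caller's role in ctx.Locals are assumptions for illustration, since the real routing and authentication are wired up elsewhere in s3api.

package controllers_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3api/controllers"
)

func TestCreateUserRequiresAdmin(t *testing.T) {
	app := fiber.New()
	ctrl := controllers.NewAdminController()

	// Assumed middleware: fake a non-admin caller.
	app.Use(func(c *fiber.Ctx) error {
		c.Locals("role", "user")
		return c.Next()
	})
	// Assumed route path for the admin API.
	app.Post("/create-user", ctrl.CreateUser)

	req := httptest.NewRequest(http.MethodPost,
		"/create-user?access=foo&secret=bar&role=user&region=us-east-1", nil)
	resp, err := app.Test(req)
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode == http.StatusOK {
		t.Fatalf("expected an error status for a non-admin caller, got %d", resp.StatusCode)
	}
}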
@@ -4,23 +4,23 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/versity/scoutgw/backend"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3response"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Ensure, that BackendMock does implement Backend.
|
||||
// Ensure, that BackendMock does implement backend.Backend.
|
||||
// If this is not the case, regenerate this file with moq.
|
||||
var _ backend.Backend = &BackendMock{}
|
||||
|
||||
// BackendMock is a mock implementation of Backend.
|
||||
// BackendMock is a mock implementation of backend.Backend.
|
||||
//
|
||||
// func TestSomethingThatUsesBackend(t *testing.T) {
|
||||
//
|
||||
// // make and configure a mocked Backend
|
||||
// // make and configure a mocked backend.Backend
|
||||
// mockedBackend := &BackendMock{
|
||||
// AbortMultipartUploadFunc: func(abortMultipartUploadInput *s3.AbortMultipartUploadInput) error {
|
||||
// panic("mock out the AbortMultipartUpload method")
|
||||
@@ -49,10 +49,7 @@ var _ backend.Backend = &BackendMock{}
|
||||
// GetBucketAclFunc: func(bucket string) (*s3.GetBucketAclOutput, error) {
|
||||
// panic("mock out the GetBucketAcl method")
|
||||
// },
|
||||
// GetIAMConfigFunc: func() ([]byte, error) {
|
||||
// panic("mock out the GetIAMConfig method")
|
||||
// },
|
||||
// GetObjectFunc: func(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
|
||||
// GetObjectFunc: func(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
|
||||
// panic("mock out the GetObject method")
|
||||
// },
|
||||
// GetObjectAclFunc: func(bucket string, object string) (*s3.GetObjectAclOutput, error) {
|
||||
@@ -67,25 +64,22 @@ var _ backend.Backend = &BackendMock{}
|
||||
// HeadBucketFunc: func(bucket string) (*s3.HeadBucketOutput, error) {
|
||||
// panic("mock out the HeadBucket method")
|
||||
// },
|
||||
// HeadObjectFunc: func(bucket string, object string, etag string) (*s3.HeadObjectOutput, error) {
|
||||
// HeadObjectFunc: func(bucket string, object string) (*s3.HeadObjectOutput, error) {
|
||||
// panic("mock out the HeadObject method")
|
||||
// },
|
||||
// IsTaggingSupportedFunc: func() bool {
|
||||
// panic("mock out the IsTaggingSupported method")
|
||||
// },
|
||||
// ListBucketsFunc: func() (*s3.ListBucketsOutput, error) {
|
||||
// panic("mock out the ListBuckets method")
|
||||
// },
|
||||
// ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
|
||||
// ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
|
||||
// panic("mock out the ListMultipartUploads method")
|
||||
// },
|
||||
// ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
|
||||
// ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
|
||||
// panic("mock out the ListObjectParts method")
|
||||
// },
|
||||
// ListObjectsFunc: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
|
||||
// ListObjectsFunc: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
|
||||
// panic("mock out the ListObjects method")
|
||||
// },
|
||||
// ListObjectsV2Func: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
|
||||
// ListObjectsV2Func: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
|
||||
// panic("mock out the ListObjectsV2 method")
|
||||
// },
|
||||
// PutBucketFunc: func(bucket string) error {
|
||||
@@ -94,13 +88,13 @@ var _ backend.Backend = &BackendMock{}
|
||||
// PutBucketAclFunc: func(putBucketAclInput *s3.PutBucketAclInput) error {
|
||||
// panic("mock out the PutBucketAcl method")
|
||||
// },
|
||||
// PutObjectFunc: func(bucket string, object string, r io.Reader) (string, error) {
|
||||
// PutObjectFunc: func(putObjectInput *s3.PutObjectInput) (string, error) {
|
||||
// panic("mock out the PutObject method")
|
||||
// },
|
||||
// PutObjectAclFunc: func(putObjectAclInput *s3.PutObjectAclInput) error {
|
||||
// panic("mock out the PutObjectAcl method")
|
||||
// },
|
||||
// PutObjectPartFunc: func(bucket string, object string, uploadID string, part int, r io.Reader) (string, error) {
|
||||
// PutObjectPartFunc: func(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error) {
|
||||
// panic("mock out the PutObjectPart method")
|
||||
// },
|
||||
// RemoveTagsFunc: func(bucket string, object string) error {
|
||||
@@ -126,7 +120,7 @@ var _ backend.Backend = &BackendMock{}
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// // use mockedBackend in code that requires Backend
|
||||
// // use mockedBackend in code that requires backend.Backend
|
||||
// // and then make assertions.
|
||||
//
|
||||
// }
|
||||
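The generated doc comment above describes the intended moq workflow for BackendMock. A condensed sketch of that pattern, assuming a test in the same package as the mock; the test name and stubbed behavior are illustrative only.

package controllers

import (
	"testing"

	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// Stub only the method the code under test touches, then assert on the
// recorded calls, exactly as the comment above suggests.
func TestListBucketsWithMock(t *testing.T) {
	mockedBackend := &BackendMock{
		ListBucketsFunc: func() (*s3.ListBucketsOutput, error) {
			return &s3.ListBucketsOutput{}, nil
		},
	}

	if _, err := mockedBackend.ListBuckets(); err != nil {
		t.Fatal(err)
	}
	if calls := len(mockedBackend.ListBucketsCalls()); calls != 1 {
		t.Fatalf("expected 1 ListBuckets call, got %d", calls)
	}
}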
@@ -158,11 +152,8 @@ type BackendMock struct {
|
||||
// GetBucketAclFunc mocks the GetBucketAcl method.
|
||||
GetBucketAclFunc func(bucket string) (*s3.GetBucketAclOutput, error)
|
||||
|
||||
// GetIAMConfigFunc mocks the GetIAMConfig method.
|
||||
GetIAMConfigFunc func() ([]byte, error)
|
||||
|
||||
// GetObjectFunc mocks the GetObject method.
|
||||
GetObjectFunc func(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error)
|
||||
GetObjectFunc func(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error)
|
||||
|
||||
// GetObjectAclFunc mocks the GetObjectAcl method.
|
||||
GetObjectAclFunc func(bucket string, object string) (*s3.GetObjectAclOutput, error)
|
||||
@@ -177,25 +168,22 @@ type BackendMock struct {
|
||||
HeadBucketFunc func(bucket string) (*s3.HeadBucketOutput, error)
|
||||
|
||||
// HeadObjectFunc mocks the HeadObject method.
|
||||
HeadObjectFunc func(bucket string, object string, etag string) (*s3.HeadObjectOutput, error)
|
||||
|
||||
// IsTaggingSupportedFunc mocks the IsTaggingSupported method.
|
||||
IsTaggingSupportedFunc func() bool
|
||||
HeadObjectFunc func(bucket string, object string) (*s3.HeadObjectOutput, error)
|
||||
|
||||
// ListBucketsFunc mocks the ListBuckets method.
|
||||
ListBucketsFunc func() (*s3.ListBucketsOutput, error)
|
||||
|
||||
// ListMultipartUploadsFunc mocks the ListMultipartUploads method.
|
||||
ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
|
||||
ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
|
||||
|
||||
// ListObjectPartsFunc mocks the ListObjectParts method.
|
||||
ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error)
|
||||
ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
|
||||
|
||||
// ListObjectsFunc mocks the ListObjects method.
|
||||
ListObjectsFunc func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
|
||||
ListObjectsFunc func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error)
|
||||
|
||||
// ListObjectsV2Func mocks the ListObjectsV2 method.
|
||||
ListObjectsV2Func func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
|
||||
ListObjectsV2Func func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error)
|
||||
|
||||
// PutBucketFunc mocks the PutBucket method.
|
||||
PutBucketFunc func(bucket string) error
|
||||
@@ -204,13 +192,13 @@ type BackendMock struct {
|
||||
PutBucketAclFunc func(putBucketAclInput *s3.PutBucketAclInput) error
|
||||
|
||||
// PutObjectFunc mocks the PutObject method.
|
||||
PutObjectFunc func(bucket string, object string, r io.Reader) (string, error)
|
||||
PutObjectFunc func(putObjectInput *s3.PutObjectInput) (string, error)
|
||||
|
||||
// PutObjectAclFunc mocks the PutObjectAcl method.
|
||||
PutObjectAclFunc func(putObjectAclInput *s3.PutObjectAclInput) error
|
||||
|
||||
// PutObjectPartFunc mocks the PutObjectPart method.
|
||||
PutObjectPartFunc func(bucket string, object string, uploadID string, part int, r io.Reader) (string, error)
|
||||
PutObjectPartFunc func(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error)
|
||||
|
||||
// RemoveTagsFunc mocks the RemoveTags method.
|
||||
RemoveTagsFunc func(bucket string, object string) error
|
||||
@@ -306,23 +294,16 @@ type BackendMock struct {
|
||||
// Bucket is the bucket argument value.
|
||||
Bucket string
|
||||
}
|
||||
// GetIAMConfig holds details about calls to the GetIAMConfig method.
|
||||
GetIAMConfig []struct {
|
||||
}
|
||||
// GetObject holds details about calls to the GetObject method.
|
||||
GetObject []struct {
|
||||
// Bucket is the bucket argument value.
|
||||
Bucket string
|
||||
// Object is the object argument value.
|
||||
Object string
|
||||
// StartOffset is the startOffset argument value.
|
||||
StartOffset int64
|
||||
// Length is the length argument value.
|
||||
Length int64
|
||||
// AcceptRange is the acceptRange argument value.
|
||||
AcceptRange string
|
||||
// Writer is the writer argument value.
|
||||
Writer io.Writer
|
||||
// Etag is the etag argument value.
|
||||
Etag string
|
||||
}
|
||||
// GetObjectAcl holds details about calls to the GetObjectAcl method.
|
||||
GetObjectAcl []struct {
|
||||
@@ -358,11 +339,6 @@ type BackendMock struct {
|
||||
Bucket string
|
||||
// Object is the object argument value.
|
||||
Object string
|
||||
// Etag is the etag argument value.
|
||||
Etag string
|
||||
}
|
||||
// IsTaggingSupported holds details about calls to the IsTaggingSupported method.
|
||||
IsTaggingSupported []struct {
|
||||
}
|
||||
// ListBuckets holds details about calls to the ListBuckets method.
|
||||
ListBuckets []struct {
|
||||
@@ -423,12 +399,8 @@ type BackendMock struct {
|
||||
}
|
||||
// PutObject holds details about calls to the PutObject method.
|
||||
PutObject []struct {
|
||||
// Bucket is the bucket argument value.
|
||||
Bucket string
|
||||
// Object is the object argument value.
|
||||
Object string
|
||||
// R is the r argument value.
|
||||
R io.Reader
|
||||
// PutObjectInput is the putObjectInput argument value.
|
||||
PutObjectInput *s3.PutObjectInput
|
||||
}
|
||||
// PutObjectAcl holds details about calls to the PutObjectAcl method.
|
||||
PutObjectAcl []struct {
|
||||
@@ -445,6 +417,8 @@ type BackendMock struct {
|
||||
UploadID string
|
||||
// Part is the part argument value.
|
||||
Part int
|
||||
// Length is the length argument value.
|
||||
Length int64
|
||||
// R is the r argument value.
|
||||
R io.Reader
|
||||
}
|
||||
@@ -505,14 +479,12 @@ type BackendMock struct {
|
||||
lockDeleteObject sync.RWMutex
|
||||
lockDeleteObjects sync.RWMutex
|
||||
lockGetBucketAcl sync.RWMutex
|
||||
lockGetIAMConfig sync.RWMutex
|
||||
lockGetObject sync.RWMutex
|
||||
lockGetObjectAcl sync.RWMutex
|
||||
lockGetObjectAttributes sync.RWMutex
|
||||
lockGetTags sync.RWMutex
|
||||
lockHeadBucket sync.RWMutex
|
||||
lockHeadObject sync.RWMutex
|
||||
lockIsTaggingSupported sync.RWMutex
|
||||
lockListBuckets sync.RWMutex
|
||||
lockListMultipartUploads sync.RWMutex
|
||||
lockListObjectParts sync.RWMutex
|
||||
@@ -872,57 +844,26 @@ func (mock *BackendMock) GetBucketAclCalls() []struct {
|
||||
return calls
|
||||
}
|
||||
|
||||
// GetIAMConfig calls GetIAMConfigFunc.
|
||||
func (mock *BackendMock) GetIAMConfig() ([]byte, error) {
|
||||
if mock.GetIAMConfigFunc == nil {
|
||||
panic("BackendMock.GetIAMConfigFunc: method is nil but Backend.GetIAMConfig was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
}{}
|
||||
mock.lockGetIAMConfig.Lock()
|
||||
mock.calls.GetIAMConfig = append(mock.calls.GetIAMConfig, callInfo)
|
||||
mock.lockGetIAMConfig.Unlock()
|
||||
return mock.GetIAMConfigFunc()
|
||||
}
|
||||
|
||||
// GetIAMConfigCalls gets all the calls that were made to GetIAMConfig.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedBackend.GetIAMConfigCalls())
|
||||
func (mock *BackendMock) GetIAMConfigCalls() []struct {
|
||||
} {
|
||||
var calls []struct {
|
||||
}
|
||||
mock.lockGetIAMConfig.RLock()
|
||||
calls = mock.calls.GetIAMConfig
|
||||
mock.lockGetIAMConfig.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
// GetObject calls GetObjectFunc.
|
||||
func (mock *BackendMock) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
|
||||
func (mock *BackendMock) GetObject(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
|
||||
if mock.GetObjectFunc == nil {
|
||||
panic("BackendMock.GetObjectFunc: method is nil but Backend.GetObject was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
Bucket string
|
||||
Object string
|
||||
StartOffset int64
|
||||
Length int64
|
||||
AcceptRange string
|
||||
Writer io.Writer
|
||||
Etag string
|
||||
}{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
StartOffset: startOffset,
|
||||
Length: length,
|
||||
AcceptRange: acceptRange,
|
||||
Writer: writer,
|
||||
Etag: etag,
|
||||
}
|
||||
mock.lockGetObject.Lock()
|
||||
mock.calls.GetObject = append(mock.calls.GetObject, callInfo)
|
||||
mock.lockGetObject.Unlock()
|
||||
return mock.GetObjectFunc(bucket, object, startOffset, length, writer, etag)
|
||||
return mock.GetObjectFunc(bucket, object, acceptRange, writer)
|
||||
}
|
||||
|
||||
// GetObjectCalls gets all the calls that were made to GetObject.
|
||||
@@ -932,18 +873,14 @@ func (mock *BackendMock) GetObject(bucket string, object string, startOffset int
|
||||
func (mock *BackendMock) GetObjectCalls() []struct {
|
||||
Bucket string
|
||||
Object string
|
||||
StartOffset int64
|
||||
Length int64
|
||||
AcceptRange string
|
||||
Writer io.Writer
|
||||
Etag string
|
||||
} {
|
||||
var calls []struct {
|
||||
Bucket string
|
||||
Object string
|
||||
StartOffset int64
|
||||
Length int64
|
||||
AcceptRange string
|
||||
Writer io.Writer
|
||||
Etag string
|
||||
}
|
||||
mock.lockGetObject.RLock()
|
||||
calls = mock.calls.GetObject
|
||||
@@ -1096,23 +1033,21 @@ func (mock *BackendMock) HeadBucketCalls() []struct {
|
||||
}
|
||||
|
||||
// HeadObject calls HeadObjectFunc.
|
||||
func (mock *BackendMock) HeadObject(bucket string, object string, etag string) (*s3.HeadObjectOutput, error) {
|
||||
func (mock *BackendMock) HeadObject(bucket string, object string) (*s3.HeadObjectOutput, error) {
|
||||
if mock.HeadObjectFunc == nil {
|
||||
panic("BackendMock.HeadObjectFunc: method is nil but Backend.HeadObject was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
Bucket string
|
||||
Object string
|
||||
Etag string
|
||||
}{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
Etag: etag,
|
||||
}
|
||||
mock.lockHeadObject.Lock()
|
||||
mock.calls.HeadObject = append(mock.calls.HeadObject, callInfo)
|
||||
mock.lockHeadObject.Unlock()
|
||||
return mock.HeadObjectFunc(bucket, object, etag)
|
||||
return mock.HeadObjectFunc(bucket, object)
|
||||
}
|
||||
|
||||
// HeadObjectCalls gets all the calls that were made to HeadObject.
|
||||
@@ -1122,12 +1057,10 @@ func (mock *BackendMock) HeadObject(bucket string, object string, etag string) (
|
||||
func (mock *BackendMock) HeadObjectCalls() []struct {
|
||||
Bucket string
|
||||
Object string
|
||||
Etag string
|
||||
} {
|
||||
var calls []struct {
|
||||
Bucket string
|
||||
Object string
|
||||
Etag string
|
||||
}
|
||||
mock.lockHeadObject.RLock()
|
||||
calls = mock.calls.HeadObject
|
||||
@@ -1135,33 +1068,6 @@ func (mock *BackendMock) HeadObjectCalls() []struct {
|
||||
return calls
|
||||
}
|
||||
|
||||
// IsTaggingSupported calls IsTaggingSupportedFunc.
|
||||
func (mock *BackendMock) IsTaggingSupported() bool {
|
||||
if mock.IsTaggingSupportedFunc == nil {
|
||||
panic("BackendMock.IsTaggingSupportedFunc: method is nil but Backend.IsTaggingSupported was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
}{}
|
||||
mock.lockIsTaggingSupported.Lock()
|
||||
mock.calls.IsTaggingSupported = append(mock.calls.IsTaggingSupported, callInfo)
|
||||
mock.lockIsTaggingSupported.Unlock()
|
||||
return mock.IsTaggingSupportedFunc()
|
||||
}
|
||||
|
||||
// IsTaggingSupportedCalls gets all the calls that were made to IsTaggingSupported.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedBackend.IsTaggingSupportedCalls())
|
||||
func (mock *BackendMock) IsTaggingSupportedCalls() []struct {
|
||||
} {
|
||||
var calls []struct {
|
||||
}
|
||||
mock.lockIsTaggingSupported.RLock()
|
||||
calls = mock.calls.IsTaggingSupported
|
||||
mock.lockIsTaggingSupported.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
// ListBuckets calls ListBucketsFunc.
|
||||
func (mock *BackendMock) ListBuckets() (*s3.ListBucketsOutput, error) {
|
||||
if mock.ListBucketsFunc == nil {
|
||||
@@ -1190,7 +1096,7 @@ func (mock *BackendMock) ListBucketsCalls() []struct {
|
||||
}
|
||||
|
||||
// ListMultipartUploads calls ListMultipartUploadsFunc.
|
||||
func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
|
||||
func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
|
||||
if mock.ListMultipartUploadsFunc == nil {
|
||||
panic("BackendMock.ListMultipartUploadsFunc: method is nil but Backend.ListMultipartUploads was just called")
|
||||
}
|
||||
@@ -1222,7 +1128,7 @@ func (mock *BackendMock) ListMultipartUploadsCalls() []struct {
|
||||
}
|
||||
|
||||
// ListObjectParts calls ListObjectPartsFunc.
|
||||
func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
|
||||
func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
|
||||
if mock.ListObjectPartsFunc == nil {
|
||||
panic("BackendMock.ListObjectPartsFunc: method is nil but Backend.ListObjectParts was just called")
|
||||
}
|
||||
@@ -1270,7 +1176,7 @@ func (mock *BackendMock) ListObjectPartsCalls() []struct {
|
||||
}
|
||||
|
||||
// ListObjects calls ListObjectsFunc.
|
||||
func (mock *BackendMock) ListObjects(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
|
||||
func (mock *BackendMock) ListObjects(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
|
||||
if mock.ListObjectsFunc == nil {
|
||||
panic("BackendMock.ListObjectsFunc: method is nil but Backend.ListObjects was just called")
|
||||
}
|
||||
@@ -1318,7 +1224,7 @@ func (mock *BackendMock) ListObjectsCalls() []struct {
|
||||
}
|
||||
|
||||
// ListObjectsV2 calls ListObjectsV2Func.
|
||||
func (mock *BackendMock) ListObjectsV2(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
|
||||
func (mock *BackendMock) ListObjectsV2(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
|
||||
if mock.ListObjectsV2Func == nil {
|
||||
panic("BackendMock.ListObjectsV2Func: method is nil but Backend.ListObjectsV2 was just called")
|
||||
}
|
||||
@@ -1430,23 +1336,19 @@ func (mock *BackendMock) PutBucketAclCalls() []struct {
|
||||
}
|
||||
|
||||
// PutObject calls PutObjectFunc.
|
||||
func (mock *BackendMock) PutObject(bucket string, object string, r io.Reader) (string, error) {
|
||||
func (mock *BackendMock) PutObject(putObjectInput *s3.PutObjectInput) (string, error) {
|
||||
if mock.PutObjectFunc == nil {
|
||||
panic("BackendMock.PutObjectFunc: method is nil but Backend.PutObject was just called")
|
||||
}
|
||||
callInfo := struct {
|
||||
Bucket string
|
||||
Object string
|
||||
R io.Reader
|
||||
PutObjectInput *s3.PutObjectInput
|
||||
}{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
R: r,
|
||||
PutObjectInput: putObjectInput,
|
||||
}
|
||||
mock.lockPutObject.Lock()
|
||||
mock.calls.PutObject = append(mock.calls.PutObject, callInfo)
|
||||
mock.lockPutObject.Unlock()
|
||||
return mock.PutObjectFunc(bucket, object, r)
|
||||
return mock.PutObjectFunc(putObjectInput)
|
||||
}
|
||||
|
||||
// PutObjectCalls gets all the calls that were made to PutObject.
|
||||
@@ -1454,14 +1356,10 @@ func (mock *BackendMock) PutObject(bucket string, object string, r io.Reader) (s
|
||||
//
|
||||
// len(mockedBackend.PutObjectCalls())
|
||||
func (mock *BackendMock) PutObjectCalls() []struct {
|
||||
Bucket string
|
||||
Object string
|
||||
R io.Reader
|
||||
PutObjectInput *s3.PutObjectInput
|
||||
} {
|
||||
var calls []struct {
|
||||
Bucket string
|
||||
Object string
|
||||
R io.Reader
|
||||
PutObjectInput *s3.PutObjectInput
|
||||
}
|
||||
mock.lockPutObject.RLock()
|
||||
calls = mock.calls.PutObject
|
||||
@@ -1502,7 +1400,7 @@ func (mock *BackendMock) PutObjectAclCalls() []struct {
|
||||
}
|
||||
|
||||
// PutObjectPart calls PutObjectPartFunc.
|
||||
func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID string, part int, r io.Reader) (string, error) {
|
||||
func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error) {
|
||||
if mock.PutObjectPartFunc == nil {
|
||||
panic("BackendMock.PutObjectPartFunc: method is nil but Backend.PutObjectPart was just called")
|
||||
}
|
||||
@@ -1511,18 +1409,20 @@ func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID st
|
||||
Object string
|
||||
UploadID string
|
||||
Part int
|
||||
Length int64
|
||||
R io.Reader
|
||||
}{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
UploadID: uploadID,
|
||||
Part: part,
|
||||
Length: length,
|
||||
R: r,
|
||||
}
|
||||
mock.lockPutObjectPart.Lock()
|
||||
mock.calls.PutObjectPart = append(mock.calls.PutObjectPart, callInfo)
|
||||
mock.lockPutObjectPart.Unlock()
|
||||
return mock.PutObjectPartFunc(bucket, object, uploadID, part, r)
|
||||
return mock.PutObjectPartFunc(bucket, object, uploadID, part, length, r)
|
||||
}
|
||||
|
||||
// PutObjectPartCalls gets all the calls that were made to PutObjectPart.
|
||||
@@ -1534,6 +1434,7 @@ func (mock *BackendMock) PutObjectPartCalls() []struct {
|
||||
Object string
|
||||
UploadID string
|
||||
Part int
|
||||
Length int64
|
||||
R io.Reader
|
||||
} {
|
||||
var calls []struct {
|
||||
@@ -1541,6 +1442,7 @@ func (mock *BackendMock) PutObjectPartCalls() []struct {
|
||||
Object string
|
||||
UploadID string
|
||||
Part int
|
||||
Length int64
|
||||
R io.Reader
|
||||
}
|
||||
mock.lockPutObjectPart.RLock()
|
||||
|
||||
@@ -1,20 +1,37 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/scoutgw/backend"
|
||||
"github.com/versity/scoutgw/s3err"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
type S3ApiController struct {
|
||||
@@ -27,82 +44,111 @@ func New(be backend.Backend) S3ApiController {
|
||||
|
||||
func (c S3ApiController) ListBuckets(ctx *fiber.Ctx) error {
|
||||
res, err := c.be.ListBuckets()
|
||||
return responce(ctx, res, err)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
|
||||
func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
|
||||
bucket, key, keyEnd, uploadId, maxPartsStr, partNumberMarkerStr := ctx.Params("bucket"), ctx.Params("key"), ctx.Params("*1"), ctx.Query("uploadId"), ctx.Query("max-parts"), ctx.Query("part-number-marker")
|
||||
bucket := ctx.Params("bucket")
|
||||
key := ctx.Params("key")
|
||||
keyEnd := ctx.Params("*1")
|
||||
uploadId := ctx.Query("uploadId")
|
||||
maxParts := ctx.QueryInt("max-parts", 0)
|
||||
partNumberMarker := ctx.QueryInt("part-number-marker", 0)
|
||||
acceptRange := ctx.Get("Range")
|
||||
if keyEnd != "" {
|
||||
key = strings.Join([]string{key, keyEnd}, "/")
|
||||
}
|
||||
|
||||
if uploadId != "" {
|
||||
maxParts, err := strconv.Atoi(maxPartsStr)
|
||||
if err != nil && maxPartsStr != "" {
|
||||
return errors.New("wrong api call")
|
||||
if maxParts < 0 || (maxParts == 0 && ctx.Query("max-parts") != "") {
|
||||
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidMaxParts))
|
||||
}
|
||||
|
||||
partNumberMarker, err := strconv.Atoi(partNumberMarkerStr)
|
||||
if err != nil && partNumberMarkerStr != "" {
|
||||
return errors.New("wrong api call")
|
||||
if partNumberMarker < 0 || (partNumberMarker == 0 && ctx.Query("part-number-marker") != "") {
|
||||
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPartNumberMarker))
|
||||
}
|
||||
|
||||
res, err := c.be.ListObjectParts(bucket, "", uploadId, partNumberMarker, maxParts)
|
||||
return responce(ctx, res, err)
|
||||
res, err := c.be.ListObjectParts(bucket, key, uploadId, partNumberMarker, maxParts)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("acl") {
|
||||
res, err := c.be.GetObjectAcl(bucket, key)
|
||||
return responce(ctx, res, err)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
|
||||
if attrs := ctx.Get("X-Amz-Object-Attributes"); attrs != "" {
|
||||
res, err := c.be.GetObjectAttributes(bucket, key, strings.Split(attrs, ","))
|
||||
return responce(ctx, res, err)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
|
||||
bRangeSl := strings.Split(ctx.Get("Range"), "=")
|
||||
if len(bRangeSl) < 2 {
|
||||
return errors.New("wrong api call")
|
||||
}
|
||||
|
||||
bRange := strings.Split(bRangeSl[1], "-")
|
||||
if len(bRange) < 2 {
|
||||
return errors.New("wrong api call")
|
||||
}
|
||||
|
||||
startOffset, err := strconv.Atoi(bRange[0])
|
||||
res, err := c.be.GetObject(bucket, key, acceptRange, ctx.Response().BodyWriter())
|
||||
if err != nil {
|
||||
return errors.New("wrong api call")
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
if res == nil {
|
||||
return SendResponse(ctx, fmt.Errorf("get object nil response"))
|
||||
}
|
||||
|
||||
length, err := strconv.Atoi(bRange[1])
|
||||
if err != nil {
|
||||
return errors.New("wrong api call")
|
||||
utils.SetMetaHeaders(ctx, res.Metadata)
|
||||
var lastmod string
|
||||
if res.LastModified != nil {
|
||||
lastmod = res.LastModified.Format(timefmt)
|
||||
}
|
||||
utils.SetResponseHeaders(ctx, []utils.CustomHeader{
|
||||
{
|
||||
Key: "Content-Length",
|
||||
Value: fmt.Sprint(res.ContentLength),
|
||||
},
|
||||
{
|
||||
Key: "Content-Type",
|
||||
Value: getstring(res.ContentType),
|
||||
},
|
||||
{
|
||||
Key: "Content-Encoding",
|
||||
Value: getstring(res.ContentEncoding),
|
||||
},
|
||||
{
|
||||
Key: "ETag",
|
||||
Value: getstring(res.ETag),
|
||||
},
|
||||
{
|
||||
Key: "Last-Modified",
|
||||
Value: lastmod,
|
||||
},
|
||||
})
|
||||
return ctx.SendStatus(http.StatusOK)
|
||||
}
|
||||
|
||||
res, err := c.be.GetObject(bucket, key, int64(startOffset), int64(length), ctx.Response().BodyWriter(), "")
|
||||
return responce(ctx, res, err)
|
||||
func getstring(s *string) string {
|
||||
if s == nil {
|
||||
return ""
|
||||
}
|
||||
return *s
|
||||
}
|
||||
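GetActions and HeadObject lean on the s3api/utils helpers, which are not part of this diff. The following is only a guess at their shape, inferred from how they are called here; the real implementations may differ.

package utils

import "github.com/gofiber/fiber/v2"

// CustomHeader pairs a response header name with its value.
type CustomHeader struct {
	Key   string
	Value string
}

// SetResponseHeaders copies each header onto the outgoing response.
func SetResponseHeaders(ctx *fiber.Ctx, headers []CustomHeader) {
	for _, h := range headers {
		ctx.Response().Header.Set(h.Key, h.Value)
	}
}

// SetMetaHeaders exposes stored user metadata as X-Amz-Meta-* headers.
func SetMetaHeaders(ctx *fiber.Ctx, meta map[string]string) {
	for k, v := range meta {
		ctx.Response().Header.Set("X-Amz-Meta-"+k, v)
	}
}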
|
||||
func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
|
||||
bucket := ctx.Params("bucket")
|
||||
prefix := ctx.Query("prefix")
|
||||
marker := ctx.Query("continuation-token")
|
||||
delimiter := ctx.Query("delimiter")
|
||||
maxkeys := ctx.QueryInt("max-keys")
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("acl") {
|
||||
res, err := c.be.GetBucketAcl(ctx.Params("bucket"))
|
||||
return responce(ctx, res, err)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
|
||||
if ctx.Request().URI().QueryArgs().Has("uploads") {
|
||||
res, err := c.be.ListMultipartUploads(&s3.ListMultipartUploadsInput{Bucket: aws.String(ctx.Params("bucket"))})
|
||||
return responce(ctx, res, err)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
|
||||
if ctx.QueryInt("list-type") == 2 {
|
||||
res, err := c.be.ListObjectsV2(ctx.Params("bucket"), "", "", "", 1)
|
||||
return responce(ctx, res, err)
|
||||
res, err := c.be.ListObjectsV2(bucket, prefix, marker, delimiter, maxkeys)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
|
||||
res, err := c.be.ListObjects(ctx.Params("bucket"), "", "", "", 1)
|
||||
return responce(ctx, res, err)
|
||||
res, err := c.be.ListObjects(bucket, prefix, marker, delimiter, maxkeys)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
|
||||
func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
@@ -131,74 +177,68 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
|
||||
GrantWriteACP: &grantWriteACP,
|
||||
})
|
||||
|
||||
return responce[any](ctx, nil, err)
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
err := c.be.PutBucket(bucket)
|
||||
return responce[any](ctx, nil, err)
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
|
||||
dstBucket, dstKeyStart, dstKeyEnd, uploadId, partNumberStr := ctx.Params("bucket"), ctx.Params("key"), ctx.Params("*1"), ctx.Query("uploadId"), ctx.Query("partNumber")
|
||||
copySource, copySrcIfMatch, copySrcIfNoneMatch,
|
||||
copySrcModifSince, copySrcUnmodifSince, acl,
|
||||
grantFullControl, grantRead, grantReadACP,
|
||||
granWrite, grantWriteACP :=
|
||||
// Copy source headers
|
||||
ctx.Get("X-Amz-Copy-Source"),
|
||||
ctx.Get("X-Amz-Copy-Source-If-Match"),
|
||||
ctx.Get("X-Amz-Copy-Source-If-None-Match"),
|
||||
ctx.Get("X-Amz-Copy-Source-If-Modified-Since"),
|
||||
ctx.Get("X-Amz-Copy-Source-If-Unmodified-Since"),
|
||||
// Permission headers
|
||||
ctx.Get("X-Amz-Acl"),
|
||||
ctx.Get("X-Amz-Grant-Full-Control"),
|
||||
ctx.Get("X-Amz-Grant-Read"),
|
||||
ctx.Get("X-Amz-Grant-Read-Acp"),
|
||||
ctx.Get("X-Amz-Grant-Write"),
|
||||
ctx.Get("X-Amz-Grant-Write-Acp")
|
||||
bucket := ctx.Params("bucket")
|
||||
keyStart := ctx.Params("key")
|
||||
keyEnd := ctx.Params("*1")
|
||||
uploadId := ctx.Query("uploadId")
|
||||
partNumberStr := ctx.Query("partNumber")
|
||||
|
||||
// Copy source headers
|
||||
copySource := ctx.Get("X-Amz-Copy-Source")
|
||||
copySrcIfMatch := ctx.Get("X-Amz-Copy-Source-If-Match")
|
||||
copySrcIfNoneMatch := ctx.Get("X-Amz-Copy-Source-If-None-Match")
|
||||
copySrcModifSince := ctx.Get("X-Amz-Copy-Source-If-Modified-Since")
|
||||
copySrcUnmodifSince := ctx.Get("X-Amz-Copy-Source-If-Unmodified-Since")
|
||||
|
||||
// Permission headers
|
||||
acl := ctx.Get("X-Amz-Acl")
|
||||
grantFullControl := ctx.Get("X-Amz-Grant-Full-Control")
|
||||
grantRead := ctx.Get("X-Amz-Grant-Read")
|
||||
grantReadACP := ctx.Get("X-Amz-Grant-Read-Acp")
|
||||
granWrite := ctx.Get("X-Amz-Grant-Write")
|
||||
grantWriteACP := ctx.Get("X-Amz-Grant-Write-Acp")
|
||||
|
||||
// Other headers
|
||||
contentLengthStr := ctx.Get("Content-Length")
|
||||
|
||||
grants := grantFullControl + grantRead + grantReadACP + granWrite + grantWriteACP
|
||||
|
||||
if dstKeyEnd != "" {
|
||||
dstKeyStart = strings.Join([]string{dstKeyStart, dstKeyEnd}, "/")
|
||||
if keyEnd != "" {
|
||||
keyStart = strings.Join([]string{keyStart, keyEnd}, "/")
|
||||
}
|
||||
path := ctx.Path()
|
||||
if path[len(path)-1:] == "/" && keyStart[len(keyStart)-1:] != "/" {
|
||||
keyStart = keyStart + "/"
|
||||
}
|
||||
|
||||
if partNumberStr != "" {
|
||||
copySrcModifSinceDate, err := time.Parse(time.RFC3339, copySrcModifSince)
|
||||
if err != nil && copySrcModifSince != "" {
|
||||
return errors.New("wrong api call")
|
||||
}
|
||||
|
||||
copySrcUnmodifSinceDate, err := time.Parse(time.RFC3339, copySrcUnmodifSince)
|
||||
if err != nil && copySrcUnmodifSince != "" {
|
||||
return errors.New("wrong api call")
|
||||
}
|
||||
|
||||
partNumber, err := strconv.ParseInt(partNumberStr, 10, 64)
|
||||
var contentLength int64
|
||||
if contentLengthStr != "" {
|
||||
var err error
|
||||
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
|
||||
if err != nil {
|
||||
return errors.New("wrong api call")
|
||||
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest))
|
||||
}
|
||||
|
||||
res, err := c.be.UploadPartCopy(&s3.UploadPartCopyInput{
|
||||
Bucket: &dstBucket,
|
||||
Key: &dstKeyStart,
|
||||
PartNumber: int32(partNumber),
|
||||
UploadId: &uploadId,
|
||||
CopySource: ©Source,
|
||||
CopySourceIfMatch: ©SrcIfMatch,
|
||||
CopySourceIfNoneMatch: ©SrcIfNoneMatch,
|
||||
CopySourceIfModifiedSince: ©SrcModifSinceDate,
|
||||
CopySourceIfUnmodifiedSince: ©SrcUnmodifSinceDate,
|
||||
})
|
||||
|
||||
return responce(ctx, res, err)
|
||||
}
|
||||
|
||||
if uploadId != "" {
|
||||
if uploadId != "" && partNumberStr != "" {
|
||||
partNumber := ctx.QueryInt("partNumber", -1)
|
||||
if partNumber < 1 {
|
||||
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPart))
|
||||
}
|
||||
|
||||
body := io.ReadSeeker(bytes.NewReader([]byte(ctx.Body())))
|
||||
res, err := c.be.UploadPart(dstBucket, dstKeyStart, uploadId, body)
|
||||
return responce(ctx, res, err)
|
||||
etag, err := c.be.PutObjectPart(bucket, keyStart, uploadId,
|
||||
partNumber, contentLength, body)
|
||||
ctx.Response().Header.Set("Etag", etag)
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
if grants != "" || acl != "" {
|
||||
@@ -207,8 +247,8 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
err := c.be.PutObjectAcl(&s3.PutObjectAclInput{
|
||||
Bucket: &dstBucket,
|
||||
Key: &dstKeyStart,
|
||||
Bucket: &bucket,
|
||||
Key: &keyStart,
|
||||
ACL: types.ObjectCannedACL(acl),
|
||||
GrantFullControl: &grantFullControl,
|
||||
GrantRead: &grantRead,
|
||||
@@ -216,24 +256,35 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
|
||||
GrantWrite: &granWrite,
|
||||
GrantWriteACP: &grantWriteACP,
|
||||
})
|
||||
return responce[any](ctx, nil, err)
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
if copySource != "" {
|
||||
_, _, _, _ = copySrcIfMatch, copySrcIfNoneMatch,
|
||||
copySrcModifSince, copySrcUnmodifSince
|
||||
copySourceSplit := strings.Split(copySource, "/")
|
||||
srcBucket, srcObject := copySourceSplit[0], copySourceSplit[1:]
|
||||
|
||||
res, err := c.be.CopyObject(srcBucket, strings.Join(srcObject, "/"), dstBucket, dstKeyStart)
|
||||
return responce(ctx, res, err)
|
||||
res, err := c.be.CopyObject(srcBucket, strings.Join(srcObject, "/"), bucket, keyStart)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
|
||||
res, err := c.be.PutObject(dstBucket, dstKeyStart, bytes.NewReader(ctx.Request().Body()))
|
||||
return responce(ctx, res, err)
|
||||
metadata := utils.GetUserMetaData(&ctx.Request().Header)
|
||||
|
||||
etag, err := c.be.PutObject(&s3.PutObjectInput{
|
||||
Bucket: &bucket,
|
||||
Key: &keyStart,
|
||||
ContentLength: contentLength,
|
||||
Metadata: metadata,
|
||||
Body: bytes.NewReader(ctx.Request().Body()),
|
||||
})
|
||||
ctx.Response().Header.Set("ETag", etag)
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
|
||||
err := c.be.DeleteBucket(ctx.Params("bucket"))
|
||||
return responce[any](ctx, nil, err)
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
|
||||
@@ -243,11 +294,14 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
|
||||
}
|
||||
|
||||
err := c.be.DeleteObjects(ctx.Params("bucket"), &s3.DeleteObjectsInput{Delete: &dObj})
|
||||
return responce[any](ctx, nil, err)
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
|
||||
bucket, key, keyEnd, uploadId := ctx.Params("bucket"), ctx.Params("key"), ctx.Params("*1"), ctx.Query("uploadId")
|
||||
bucket := ctx.Params("bucket")
|
||||
key := ctx.Params("key")
|
||||
keyEnd := ctx.Params("*1")
|
||||
uploadId := ctx.Query("uploadId")
|
||||
|
||||
if keyEnd != "" {
|
||||
key = strings.Join([]string{key, keyEnd}, "/")
|
||||
@@ -263,60 +317,107 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
|
||||
ExpectedBucketOwner: &expectedBucketOwner,
|
||||
RequestPayer: types.RequestPayer(requestPayer),
|
||||
})
|
||||
return responce[any](ctx, nil, err)
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
err := c.be.DeleteObject(bucket, key)
|
||||
return responce[any](ctx, nil, err)
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) error {
|
||||
res, err := c.be.HeadBucket(ctx.Params("bucket"))
|
||||
return responce(ctx, res, err)
|
||||
_, err := c.be.HeadBucket(ctx.Params("bucket"))
|
||||
// TODO: set bucket response headers
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
const (
|
||||
timefmt = "Mon, 02 Jan 2006 15:04:05 GMT"
|
||||
)
|
||||
|
||||
func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
|
||||
bucket, key, keyEnd := ctx.Params("bucket"), ctx.Params("key"), ctx.Params("*1")
|
||||
bucket := ctx.Params("bucket")
|
||||
key := ctx.Params("key")
|
||||
keyEnd := ctx.Params("*1")
|
||||
if keyEnd != "" {
|
||||
key = strings.Join([]string{key, keyEnd}, "/")
|
||||
}
|
||||
|
||||
res, err := c.be.HeadObject(bucket, key, "")
|
||||
return responce(ctx, res, err)
|
||||
res, err := c.be.HeadObject(bucket, key)
|
||||
if err != nil {
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
if res == nil {
|
||||
return SendResponse(ctx, fmt.Errorf("head object nil response"))
|
||||
}
|
||||
|
||||
utils.SetMetaHeaders(ctx, res.Metadata)
|
||||
var lastmod string
|
||||
if res.LastModified != nil {
|
||||
lastmod = res.LastModified.Format(timefmt)
|
||||
}
|
||||
utils.SetResponseHeaders(ctx, []utils.CustomHeader{
|
||||
{
|
||||
Key: "Content-Length",
|
||||
Value: fmt.Sprint(res.ContentLength),
|
||||
},
|
||||
{
|
||||
Key: "Content-Type",
|
||||
Value: getstring(res.ContentType),
|
||||
},
|
||||
{
|
||||
Key: "Content-Encoding",
|
||||
Value: getstring(res.ContentEncoding),
|
||||
},
|
||||
{
|
||||
Key: "ETag",
|
||||
Value: getstring(res.ETag),
|
||||
},
|
||||
{
|
||||
Key: "Last-Modified",
|
||||
Value: lastmod,
|
||||
},
|
||||
})
|
||||
|
||||
return SendResponse(ctx, nil)
|
||||
}
|
||||
|
||||
func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
|
||||
bucket, key, keyEnd, uploadId := ctx.Params("bucket"), ctx.Params("key"), ctx.Params("*1"), ctx.Query("uploadId")
|
||||
var restoreRequest s3.RestoreObjectInput
|
||||
bucket := ctx.Params("bucket")
|
||||
key := ctx.Params("key")
|
||||
keyEnd := ctx.Params("*1")
|
||||
uploadId := ctx.Query("uploadId")
|
||||
|
||||
if keyEnd != "" {
|
||||
key = strings.Join([]string{key, keyEnd}, "/")
|
||||
}
|
||||
|
||||
var restoreRequest s3.RestoreObjectInput
|
||||
if ctx.Request().URI().QueryArgs().Has("restore") {
|
||||
xmlErr := xml.Unmarshal(ctx.Body(), &restoreRequest)
|
||||
if xmlErr != nil {
|
||||
return errors.New("wrong api call")
|
||||
}
|
||||
err := c.be.RestoreObject(bucket, key, &restoreRequest)
|
||||
return responce[any](ctx, nil, err)
|
||||
return SendResponse(ctx, err)
|
||||
}
|
||||
|
||||
if uploadId != "" {
|
||||
var parts []types.Part
|
||||
data := struct {
|
||||
Parts []types.Part `xml:"Part"`
|
||||
}{}
|
||||
|
||||
if err := xml.Unmarshal(ctx.Body(), &parts); err != nil {
|
||||
if err := xml.Unmarshal(ctx.Body(), &data); err != nil {
|
||||
return errors.New("wrong api call")
|
||||
}
|
||||
|
||||
res, err := c.be.CompleteMultipartUpload(bucket, "", uploadId, parts)
|
||||
return responce(ctx, res, err)
|
||||
res, err := c.be.CompleteMultipartUpload(bucket, key, uploadId, data.Parts)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
res, err := c.be.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: &bucket, Key: &key})
|
||||
return responce(ctx, res, err)
|
||||
return SendXMLResponse(ctx, res, err)
|
||||
}
|
||||
|
||||
func responce[R comparable](ctx *fiber.Ctx, resp R, err error) error {
func SendResponse(ctx *fiber.Ctx, err error) error {
	if err != nil {
		serr, ok := err.(s3err.APIError)
		if ok {
@@ -327,9 +428,37 @@ func responce[R comparable](ctx *fiber.Ctx, resp R, err error) error {
			s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
	}

	// https://github.com/gofiber/fiber/issues/2080
	// ctx.SendStatus() sets incorrect content length on HEAD request
	ctx.Status(http.StatusOK)
	return nil
}

func SendXMLResponse(ctx *fiber.Ctx, resp any, err error) error {
	if err != nil {
		serr, ok := err.(s3err.APIError)
		if ok {
			ctx.Status(serr.HTTPStatusCode)
			return ctx.Send(s3err.GetAPIErrorResponse(serr, "", "", ""))
		}

		fmt.Fprintf(os.Stderr, "Internal Error, req:\n%v\nerr:\n%v\n",
			ctx.Request(), err)

		return ctx.Send(s3err.GetAPIErrorResponse(
			s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
	}

	var b []byte
	if b, err = xml.Marshal(resp); err != nil {
		return err

	if resp != nil {
		if b, err = xml.Marshal(resp); err != nil {
			return err
		}

		if len(b) > 0 {
			ctx.Response().Header.SetContentType(fiber.MIMEApplicationXML)
		}
	}

	return ctx.Send(b)

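SendResponse and SendXMLResponse split the old generic responce helper into a status-only path and an XML-body path. A short sketch of how a handler in the controllers package is expected to pick between them (the handler itself is illustrative, not part of this change):

// exampleHandler is illustrative only; it shows the intended division of
// labor between the two response helpers.
func (c S3ApiController) exampleHandler(ctx *fiber.Ctx) error {
	// Actions that return no body report only a status or an APIError.
	if ctx.Query("delete") != "" {
		err := c.be.DeleteObject(ctx.Params("bucket"), ctx.Params("key"))
		return SendResponse(ctx, err)
	}

	// Actions that return a payload are marshaled to XML; Content-Type is
	// only set when the marshaled body is non-empty.
	res, err := c.be.GetBucketAcl(ctx.Params("bucket"))
	return SendXMLResponse(ctx, res, err)
}
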
@@ -1,3 +1,17 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
@@ -7,13 +21,15 @@ import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/valyala/fasthttp"
|
||||
"github.com/versity/scoutgw/backend"
|
||||
"github.com/versity/scoutgw/s3err"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
"github.com/versity/versitygw/s3response"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
@@ -113,8 +129,8 @@ func TestS3ApiController_GetActions(t *testing.T) {
|
||||
|
||||
app := fiber.New()
|
||||
s3ApiController := S3ApiController{be: &BackendMock{
|
||||
ListObjectPartsFunc: func(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
|
||||
return &s3.ListPartsOutput{}, nil
|
||||
ListObjectPartsFunc: func(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
|
||||
return s3response.ListPartsResponse{}, nil
|
||||
},
|
||||
GetObjectAclFunc: func(bucket, object string) (*s3.GetObjectAclOutput, error) {
|
||||
return &s3.GetObjectAclOutput{}, nil
|
||||
@@ -122,7 +138,7 @@ func TestS3ApiController_GetActions(t *testing.T) {
|
||||
GetObjectAttributesFunc: func(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error) {
|
||||
return &s3.GetObjectAttributesOutput{}, nil
|
||||
},
|
||||
GetObjectFunc: func(bucket, object string, startOffset, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
|
||||
GetObjectFunc: func(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
|
||||
return &s3.GetObjectOutput{Metadata: nil}, nil
|
||||
},
|
||||
}}
|
||||
@@ -154,16 +170,16 @@ func TestS3ApiController_GetActions(t *testing.T) {
|
||||
req: httptest.NewRequest(http.MethodGet, "/my-bucket/key?uploadId=hello&max-parts=InvalidMaxParts", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
statusCode: 400,
|
||||
},
|
||||
{
|
||||
name: "Get-actions-invalid-part-number",
|
||||
name: "Get-actions-invalid-part-number-marker",
|
||||
app: app,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodGet, "/my-bucket/key?uploadId=hello&max-parts=200&part-number-marker=InvalidPartNumber", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
statusCode: 400,
|
||||
},
|
||||
{
|
||||
name: "Get-actions-list-object-parts-success",
|
||||
@@ -184,22 +200,13 @@ func TestS3ApiController_GetActions(t *testing.T) {
|
||||
statusCode: 200,
|
||||
},
|
||||
{
|
||||
name: "Get-actions-invalid-range-header",
|
||||
app: app,
|
||||
args: args{
|
||||
req: getObjectReq,
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
{
|
||||
name: "Get-actions-get-object-error",
|
||||
name: "Get-actions-get-object-success",
|
||||
app: app,
|
||||
args: args{
|
||||
req: getObjectSuccessReq,
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
statusCode: 200,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
@@ -227,21 +234,21 @@ func TestS3ApiController_ListActions(t *testing.T) {
|
||||
GetBucketAclFunc: func(bucket string) (*s3.GetBucketAclOutput, error) {
|
||||
return &s3.GetBucketAclOutput{}, nil
|
||||
},
|
||||
ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
|
||||
return &s3.ListMultipartUploadsOutput{}, nil
|
||||
ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
|
||||
return s3response.ListMultipartUploadsResponse{}, nil
|
||||
},
|
||||
ListObjectsV2Func: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
|
||||
return &s3.ListBucketsOutput{}, nil
|
||||
ListObjectsV2Func: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
|
||||
return &s3.ListObjectsV2Output{}, nil
|
||||
},
|
||||
ListObjectsFunc: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
|
||||
return &s3.ListBucketsOutput{}, nil
|
||||
ListObjectsFunc: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
|
||||
return &s3.ListObjectsOutput{}, nil
|
||||
},
|
||||
}}
|
||||
app.Get("/:bucket", s3ApiController.ListActions)
|
||||
|
||||
//Error case
|
||||
s3ApiControllerError := S3ApiController{be: &BackendMock{
|
||||
ListObjectsFunc: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
|
||||
ListObjectsFunc: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
|
||||
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
|
||||
},
|
||||
}}
|
||||
@@ -306,11 +313,11 @@ func TestS3ApiController_ListActions(t *testing.T) {
|
||||
resp, err := tt.app.Test(tt.args.req)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("S3ApiController.ListActions() error = %v, wantErr %v", err, tt.wantErr)
|
||||
t.Errorf("S3ApiController.GetActions() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
if resp.StatusCode != tt.statusCode {
|
||||
t.Errorf("S3ApiController.ListActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
|
||||
t.Errorf("S3ApiController.GetActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -380,11 +387,11 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
|
||||
resp, err := tt.app.Test(tt.args.req)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("S3ApiController.PutBucketActions() error = %v, wantErr %v", err, tt.wantErr)
|
||||
t.Errorf("S3ApiController.GetActions() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
if resp.StatusCode != tt.statusCode {
|
||||
t.Errorf("S3ApiController.PutBucketActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
|
||||
t.Errorf("S3ApiController.GetActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -408,8 +415,8 @@ func TestS3ApiController_PutActions(t *testing.T) {
|
||||
CopyObjectFunc: func(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
|
||||
return &s3.CopyObjectOutput{}, nil
|
||||
},
|
||||
PutObjectFunc: func(bucket, object string, r io.Reader) (string, error) {
|
||||
return "hello", nil
|
||||
PutObjectFunc: func(*s3.PutObjectInput) (string, error) {
|
||||
return "Hey", nil
|
||||
},
|
||||
}}
|
||||
app.Put("/:bucket/:key/*", s3ApiController.PutActions)
|
||||
@@ -435,13 +442,13 @@ func TestS3ApiController_PutActions(t *testing.T) {
|
||||
statusCode int
|
||||
}{
|
||||
{
|
||||
name: "Upload-copy-part-error-case",
|
||||
name: "Upload-put-part-error-case",
|
||||
app: app,
|
||||
args: args{
|
||||
req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?partNumber=invalid", nil),
|
||||
req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?uploadId=abc&partNumber=invalid", nil),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
statusCode: 400,
|
||||
},
|
||||
{
|
||||
name: "Upload-copy-part-success",
|
||||
@@ -511,11 +518,13 @@ func TestS3ApiController_PutActions(t *testing.T) {
|
||||
resp, err := tt.app.Test(tt.args.req)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("S3ApiController.PutActions() error = %v, wantErr %v", err, tt.wantErr)
|
||||
t.Errorf("S3ApiController.GetActions() %v error = %v, wantErr %v",
|
||||
tt.name, err, tt.wantErr)
|
||||
}
|
||||
|
||||
if resp.StatusCode != tt.statusCode {
|
||||
t.Errorf("S3ApiController.PutActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
|
||||
t.Errorf("S3ApiController.GetActions() %v statusCode = %v, wantStatusCode = %v",
|
||||
tt.name, resp.StatusCode, tt.statusCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -789,9 +798,22 @@ func TestS3ApiController_HeadObject(t *testing.T) {
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
|
||||
// Mock values
|
||||
contentEncoding := "gzip"
|
||||
contentType := "application/xml"
|
||||
eTag := "Valid etag"
|
||||
lastModifie := time.Now()
|
||||
|
||||
s3ApiController := S3ApiController{be: &BackendMock{
|
||||
HeadObjectFunc: func(bucket, object string, etag string) (*s3.HeadObjectOutput, error) {
|
||||
return nil, nil
|
||||
HeadObjectFunc: func(bucket, object string) (*s3.HeadObjectOutput, error) {
|
||||
return &s3.HeadObjectOutput{
|
||||
ContentEncoding: &contentEncoding,
|
||||
ContentLength: 64,
|
||||
ContentType: &contentType,
|
||||
LastModified: &lastModifie,
|
||||
ETag: &eTag,
|
||||
}, nil
|
||||
},
|
||||
}}
|
||||
|
||||
@@ -801,7 +823,7 @@ func TestS3ApiController_HeadObject(t *testing.T) {
|
||||
appErr := fiber.New()
|
||||
|
||||
s3ApiControllerErr := S3ApiController{be: &BackendMock{
|
||||
HeadObjectFunc: func(bucket, object string, etag string) (*s3.HeadObjectOutput, error) {
|
||||
HeadObjectFunc: func(bucket, object string) (*s3.HeadObjectOutput, error) {
|
||||
return nil, s3err.GetAPIError(42)
|
||||
},
|
||||
}}
|
||||
@@ -932,7 +954,7 @@ func TestS3ApiController_CreateActions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_responce(t *testing.T) {
|
||||
func Test_XMLresponse(t *testing.T) {
|
||||
type args struct {
|
||||
ctx *fiber.Ctx
|
||||
resp any
|
||||
@@ -989,14 +1011,74 @@ func Test_responce(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := responce(tt.args.ctx, tt.args.resp, tt.args.err); (err != nil) != tt.wantErr {
|
||||
t.Errorf("responce() error = %v, wantErr %v", err, tt.wantErr)
|
||||
if err := SendXMLResponse(tt.args.ctx, tt.args.resp, tt.args.err); (err != nil) != tt.wantErr {
|
||||
t.Errorf("responce() %v error = %v, wantErr %v", tt.name, err, tt.wantErr)
|
||||
}
|
||||
|
||||
statusCode := tt.args.ctx.Response().StatusCode()
|
||||
|
||||
if statusCode != tt.statusCode {
|
||||
t.Errorf("responce() code = %v, wantErr %v", statusCode, tt.wantErr)
|
||||
t.Errorf("responce() %v code = %v, wantErr %v", tt.name, statusCode, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_response(t *testing.T) {
|
||||
type args struct {
|
||||
ctx *fiber.Ctx
|
||||
resp any
|
||||
err error
|
||||
}
|
||||
app := fiber.New()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantErr bool
|
||||
statusCode int
|
||||
}{
|
||||
{
|
||||
name: "Internal-server-error",
|
||||
args: args{
|
||||
ctx: app.AcquireCtx(&fasthttp.RequestCtx{}),
|
||||
resp: nil,
|
||||
err: s3err.GetAPIError(16),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 500,
|
||||
},
|
||||
{
|
||||
name: "Error-not-implemented",
|
||||
args: args{
|
||||
ctx: app.AcquireCtx(&fasthttp.RequestCtx{}),
|
||||
resp: nil,
|
||||
err: s3err.GetAPIError(50),
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 501,
|
||||
},
|
||||
{
|
||||
name: "Successful-response",
|
||||
args: args{
|
||||
ctx: app.AcquireCtx(&fasthttp.RequestCtx{}),
|
||||
resp: "Valid response",
|
||||
err: nil,
|
||||
},
|
||||
wantErr: false,
|
||||
statusCode: 200,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := SendResponse(tt.args.ctx, tt.args.err); (err != nil) != tt.wantErr {
|
||||
t.Errorf("responce() %v error = %v, wantErr %v", tt.name, err, tt.wantErr)
|
||||
}
|
||||
|
||||
statusCode := tt.args.ctx.Response().StatusCode()
|
||||
|
||||
if statusCode != tt.statusCode {
|
||||
t.Errorf("responce() %v code = %v, wantErr %v", tt.name, statusCode, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
160  s3api/middlewares/authentication.go  Normal file
@@ -0,0 +1,160 @@
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
|
||||
"github.com/aws/smithy-go/logging"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/versitygw/backend/auth"
|
||||
"github.com/versity/versitygw/s3api/controllers"
|
||||
"github.com/versity/versitygw/s3api/utils"
|
||||
"github.com/versity/versitygw/s3err"
|
||||
)
|
||||
|
||||
const (
|
||||
iso8601Format = "20060102T150405Z"
|
||||
)
|
||||
|
||||
type RootUserConfig struct {
|
||||
Access string
|
||||
Secret string
|
||||
Region string
|
||||
}
|
||||
|
||||
func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, debug bool) fiber.Handler {
|
||||
acct := accounts{root: root, iam: iam}
|
||||
|
||||
return func(ctx *fiber.Ctx) error {
|
||||
authorization := ctx.Get("Authorization")
|
||||
if authorization == "" {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty))
|
||||
}
|
||||
|
||||
// Check the signature version
|
||||
authParts := strings.Split(authorization, " ")
|
||||
if len(authParts) < 4 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields))
|
||||
}
|
||||
if authParts[0] != "AWS4-HMAC-SHA256" {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported))
|
||||
}
|
||||
|
||||
credKv := strings.Split(authParts[1], "=")
|
||||
if len(credKv) != 2 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
|
||||
}
|
||||
creds := strings.Split(credKv[1], "/")
|
||||
if len(creds) < 4 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
|
||||
}
|
||||
|
||||
signHdrKv := strings.Split(authParts[2], "=")
|
||||
if len(signHdrKv) != 2 {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
|
||||
}
|
||||
signedHdrs := strings.Split(signHdrKv[1], ";")
|
||||
|
||||
account := acct.getAccount(creds[0])
|
||||
if account == nil {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID))
|
||||
}
|
||||
|
||||
// Check X-Amz-Date header
|
||||
date := ctx.Get("X-Amz-Date")
|
||||
if date == "" {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingDateHeader))
|
||||
}
|
||||
|
||||
// Parse the date and check the date validity
|
||||
tdate, err := time.Parse(iso8601Format, date)
|
||||
if err != nil {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedDate))
|
||||
}
|
||||
|
||||
// Calculate the hash of the request payload
|
||||
hashedPayload := sha256.Sum256(ctx.Body())
|
||||
hexPayload := hex.EncodeToString(hashedPayload[:])
|
||||
|
||||
hashPayloadHeader := ctx.Get("X-Amz-Content-Sha256")
|
||||
|
||||
// Compare the calculated hash with the hash provided
|
||||
if hashPayloadHeader != hexPayload {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch))
|
||||
}
|
||||
|
||||
// Create a new http request instance from fasthttp request
|
||||
req, err := utils.CreateHttpRequestFromCtx(ctx, signedHdrs)
|
||||
if err != nil {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError))
|
||||
}
|
||||
|
||||
signer := v4.NewSigner()
|
||||
|
||||
signErr := signer.SignHTTP(req.Context(), aws.Credentials{
|
||||
AccessKeyID: creds[0],
|
||||
SecretAccessKey: account.Secret,
|
||||
}, req, hexPayload, creds[3], account.Region, tdate, func(options *v4.SignerOptions) {
|
||||
if debug {
|
||||
options.LogSigning = true
|
||||
options.Logger = logging.NewStandardLogger(os.Stderr)
|
||||
}
|
||||
})
|
||||
if signErr != nil {
|
||||
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError))
|
||||
}
|
||||
|
||||
		parts := strings.Split(req.Header.Get("Authorization"), " ")
		if len(parts) < 4 {
			return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields))
		}
		calculatedSign := strings.Split(parts[3], "=")[1]
		expectedSign := strings.Split(authParts[3], "=")[1]

		if expectedSign != calculatedSign {
			return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch))
		}

		ctx.Locals("role", account.Role)

		return ctx.Next()
	}
}

type accounts struct {
	root RootUserConfig
	iam  auth.IAMService
}

func (a accounts) getAccount(access string) *auth.Account {
	var account *auth.Account
	if access == a.root.Access {
		account = &auth.Account{
			Secret: a.root.Secret,
			Role:   "admin",
			Region: a.root.Region,
		}
	} else {
		account = a.iam.GetUserAccount(access)
	}
	return account
}
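For reference, the signature that VerifyV4Signature recomputes is a standard SigV4 header signature, produced on the client with the same aws-sdk-go-v2 signer. A rough sketch of that client side, with placeholder credentials, region, and service scope:

package example

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

// signRequest adds the X-Amz-Date, X-Amz-Content-Sha256, and Authorization
// headers that the middleware parses and checks against its own signing pass.
func signRequest(req *http.Request, body []byte) error {
	sum := sha256.Sum256(body)
	payloadHash := hex.EncodeToString(sum[:])

	now := time.Now().UTC()
	req.Header.Set("X-Amz-Date", now.Format("20060102T150405Z"))
	req.Header.Set("X-Amz-Content-Sha256", payloadHash)

	// Placeholder credentials; these must match an account the gateway knows.
	creds := aws.Credentials{AccessKeyID: "rootaccess", SecretAccessKey: "rootsecret"}
	return v4.NewSigner().SignHTTP(context.Background(), creds, req, payloadHash, "s3", "us-east-1", now)
}
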
43  s3api/middlewares/md5.go  Normal file
@@ -0,0 +1,43 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package middlewares

import (
	"crypto/md5"
	"encoding/base64"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/s3api/controllers"
	"github.com/versity/versitygw/s3err"
)

func VerifyMD5Body() fiber.Handler {
	return func(ctx *fiber.Ctx) error {
		incomingSum := ctx.Get("Content-Md5")
		if incomingSum == "" {
			return ctx.Next()
		}

		sum := md5.Sum(ctx.Body())
		calculatedSum := base64.StdEncoding.EncodeToString(sum[:])

		if incomingSum != calculatedSum {
			return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest))
		}

		return ctx.Next()

	}
}
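VerifyMD5Body only runs when a Content-Md5 header is present, and it expects the base64 encoding of the raw MD5 digest of the body. A small sketch of producing that header on the client side (the URL is a placeholder):

package example

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"net/http"
)

// newPutWithMD5 builds a PUT request carrying the Content-Md5 value the
// middleware validates: base64(md5(body)), mirroring md5.Sum plus
// base64.StdEncoding in VerifyMD5Body.
func newPutWithMD5(url string, body []byte) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	sum := md5.Sum(body)
	req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sum[:]))
	return req, nil
}
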
@@ -1,15 +1,34 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/scoutgw/backend"
|
||||
"github.com/versity/scoutgw/s3api/controllers"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/backend/auth"
|
||||
"github.com/versity/versitygw/s3api/controllers"
|
||||
)
|
||||
|
||||
type S3ApiRouter struct{}
|
||||
|
||||
func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend) {
|
||||
func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService) {
|
||||
s3ApiController := controllers.New(be)
|
||||
adminController := controllers.AdminController{IAMService: iam}
|
||||
|
||||
// TODO: think of better routing system
|
||||
app.Post("/create-user", adminController.CreateUser)
|
||||
// ListBuckets action
|
||||
app.Get("/", s3ApiController.ListBuckets)
|
||||
|
||||
|
||||
@@ -1,16 +1,32 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/scoutgw/backend"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/backend/auth"
|
||||
)
|
||||
|
||||
func TestS3ApiRouter_Init(t *testing.T) {
|
||||
type args struct {
|
||||
app *fiber.App
|
||||
be backend.Backend
|
||||
iam auth.IAMService
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -23,12 +39,13 @@ func TestS3ApiRouter_Init(t *testing.T) {
|
||||
args: args{
|
||||
app: fiber.New(),
|
||||
be: backend.BackendUnsupported{},
|
||||
iam: auth.IAMServiceUnsupported{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.sa.Init(tt.args.app, tt.args.be)
|
||||
tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,27 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/gofiber/fiber/v2/middleware/logger"
|
||||
"github.com/versity/scoutgw/backend"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/backend/auth"
|
||||
"github.com/versity/versitygw/s3api/middlewares"
|
||||
)
|
||||
|
||||
type S3ApiServer struct {
|
||||
@@ -11,16 +29,45 @@ type S3ApiServer struct {
	backend backend.Backend
	router  *S3ApiRouter
	port    string
	cert    *tls.Certificate
	debug   bool
}

func New(app *fiber.App, be backend.Backend, port string) (s3ApiServer *S3ApiServer, err error) {
	s3ApiServer = &S3ApiServer{app, be, new(S3ApiRouter), port}
func New(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port string, iam auth.IAMService, opts ...Option) (*S3ApiServer, error) {
	server := &S3ApiServer{
		app:     app,
		backend: be,
		router:  new(S3ApiRouter),
		port:    port,
	}

	for _, opt := range opts {
		opt(server)
	}

	app.Use(middlewares.VerifyV4Signature(root, iam, server.debug))
	app.Use(logger.New())
	s3ApiServer.router.Init(app, be)
	return
	app.Use(middlewares.VerifyMD5Body())
	server.router.Init(app, be, iam)
	return server, nil
}

// Option sets various options for New()
type Option func(*S3ApiServer)

// WithTLS sets TLS Credentials
func WithTLS(cert tls.Certificate) Option {
	return func(s *S3ApiServer) { s.cert = &cert }
}

// WithDebug sets debug output
func WithDebug() Option {
	return func(s *S3ApiServer) { s.debug = true }
}

func (sa *S3ApiServer) Serve() (err error) {
	if sa.cert != nil {
		return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)
	}
	return sa.app.Listen(sa.port)
}

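New now takes the root credentials, an IAMService, and functional options instead of a bare port. A hedged wiring sketch (certificate paths, keys, and the unsupported backend/IAM stubs are placeholders for real implementations):

package example

import (
	"crypto/tls"

	"github.com/gofiber/fiber/v2"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/backend/auth"
	"github.com/versity/versitygw/s3api"
	"github.com/versity/versitygw/s3api/middlewares"
)

func run() error {
	// Placeholder certificate files.
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
	if err != nil {
		return err
	}

	root := middlewares.RootUserConfig{Access: "rootaccess", Secret: "rootsecret", Region: "us-east-1"}

	srv, err := s3api.New(fiber.New(), backend.BackendUnsupported{}, root, ":7070",
		auth.IAMServiceUnsupported{}, s3api.WithTLS(cert), s3api.WithDebug())
	if err != nil {
		return err
	}
	return srv.Serve()
}
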
@@ -1,3 +1,17 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3api
|
||||
|
||||
import (
|
||||
@@ -5,7 +19,9 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/versity/scoutgw/backend"
|
||||
"github.com/versity/versitygw/backend"
|
||||
"github.com/versity/versitygw/backend/auth"
|
||||
"github.com/versity/versitygw/s3api/middlewares"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
@@ -13,6 +29,7 @@ func TestNew(t *testing.T) {
|
||||
app *fiber.App
|
||||
be backend.Backend
|
||||
port string
|
||||
root middlewares.RootUserConfig
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
@@ -32,6 +49,7 @@ func TestNew(t *testing.T) {
|
||||
app: app,
|
||||
be: be,
|
||||
port: port,
|
||||
root: middlewares.RootUserConfig{},
|
||||
},
|
||||
wantS3ApiServer: &S3ApiServer{
|
||||
app: app,
|
||||
@@ -44,7 +62,8 @@ func TestNew(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotS3ApiServer, err := New(tt.args.app, tt.args.be, tt.args.port)
|
||||
gotS3ApiServer, err := New(tt.args.app, tt.args.be, tt.args.root,
|
||||
tt.args.port, auth.IAMServiceUnsupported{})
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
|
||||
93  s3api/utils/utils.go  Normal file
@@ -0,0 +1,93 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/valyala/fasthttp"
|
||||
)
|
||||
|
||||
func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
|
||||
metadata = make(map[string]string)
|
||||
headers.VisitAll(func(key, value []byte) {
|
||||
if strings.HasPrefix(string(key), "X-Amz-Meta-") {
|
||||
trimmedKey := strings.TrimPrefix(string(key), "X-Amz-Meta-")
|
||||
headerValue := string(value)
|
||||
metadata[trimmedKey] = headerValue
|
||||
}
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func CreateHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string) (*http.Request, error) {
|
||||
req := ctx.Request()
|
||||
|
||||
httpReq, err := http.NewRequest(string(req.Header.Method()), req.URI().String(), bytes.NewReader(req.Body()))
|
||||
if err != nil {
|
||||
return nil, errors.New("error in creating an http request")
|
||||
}
|
||||
|
||||
// Set the request headers
|
||||
req.Header.VisitAll(func(key, value []byte) {
|
||||
keyStr := string(key)
|
||||
if includeHeader(keyStr, signedHdrs) {
|
||||
httpReq.Header.Add(keyStr, string(value))
|
||||
}
|
||||
})
|
||||
|
||||
// Check if Content-Length in signed headers
|
||||
// If content length is non 0, then the header will be included
|
||||
if !includeHeader("Content-Length", signedHdrs) {
|
||||
httpReq.ContentLength = 0
|
||||
}
|
||||
|
||||
// Set the Host header
|
||||
httpReq.Host = string(req.Header.Host())
|
||||
|
||||
return httpReq, nil
|
||||
}
|
||||
|
||||
func SetMetaHeaders(ctx *fiber.Ctx, meta map[string]string) {
|
||||
for key, val := range meta {
|
||||
ctx.Set(fmt.Sprintf("X-Amz-Meta-%s", key), val)
|
||||
}
|
||||
}
|
||||
|
||||
type CustomHeader struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
func SetResponseHeaders(ctx *fiber.Ctx, headers []CustomHeader) {
|
||||
for _, header := range headers {
|
||||
ctx.Set(header.Key, header.Value)
|
||||
}
|
||||
}
|
||||
|
||||
func includeHeader(hdr string, signedHdrs []string) bool {
|
||||
for _, shdr := range signedHdrs {
|
||||
if strings.EqualFold(hdr, shdr) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
119  s3api/utils/utils_test.go  Normal file
@@ -0,0 +1,119 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/valyala/fasthttp"
|
||||
)
|
||||
|
||||
func TestCreateHttpRequestFromCtx(t *testing.T) {
|
||||
type args struct {
|
||||
ctx *fiber.Ctx
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
|
||||
// Expected output, Case 1
|
||||
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})
|
||||
req := ctx.Request()
|
||||
request, _ := http.NewRequest(string(req.Header.Method()), req.URI().String(), bytes.NewReader(req.Body()))
|
||||
|
||||
// Case 2
|
||||
ctx2 := app.AcquireCtx(&fasthttp.RequestCtx{})
|
||||
req2 := ctx2.Request()
|
||||
req2.Header.Add("X-Amz-Mfa", "Some valid Mfa")
|
||||
|
||||
request2, _ := http.NewRequest(string(req2.Header.Method()), req2.URI().String(), bytes.NewReader(req2.Body()))
|
||||
request2.Header.Add("X-Amz-Mfa", "Some valid Mfa")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want *http.Request
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Success-response",
|
||||
args: args{
|
||||
ctx: ctx,
|
||||
},
|
||||
want: request,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Success-response-With-Headers",
|
||||
args: args{
|
||||
ctx: ctx2,
|
||||
},
|
||||
want: request2,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := CreateHttpRequestFromCtx(tt.args.ctx, []string{"X-Amz-Mfa"})
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("CreateHttpRequestFromCtx() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(got.Header, tt.want.Header) {
|
||||
t.Errorf("CreateHttpRequestFromCtx() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetUserMetaData(t *testing.T) {
|
||||
type args struct {
|
||||
headers *fasthttp.RequestHeader
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
|
||||
// Case 1
|
||||
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})
|
||||
req := ctx.Request()
|
||||
|
||||
// Case 2
|
||||
ctx2 := app.AcquireCtx(&fasthttp.RequestCtx{})
|
||||
req2 := ctx2.Request()
|
||||
|
||||
req2.Header.Add("X-Amz-Meta-Name", "Nick")
|
||||
req2.Header.Add("X-Amz-Meta-Age", "27")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantMetadata map[string]string
|
||||
}{
|
||||
{
|
||||
name: "Success-empty-response",
|
||||
args: args{
|
||||
headers: &req.Header,
|
||||
},
|
||||
wantMetadata: map[string]string{},
|
||||
},
|
||||
{
|
||||
name: "Success-non-empty-response",
|
||||
args: args{
|
||||
headers: &req2.Header,
|
||||
},
|
||||
wantMetadata: map[string]string{
|
||||
"Age": "27",
|
||||
"Name": "Nick",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if gotMetadata := GetUserMetaData(tt.args.headers); !reflect.DeepEqual(gotMetadata, tt.wantMetadata) {
|
||||
t.Errorf("GetUserMetaData() = %v, want %v", gotMetadata, tt.wantMetadata)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,17 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3err
|
||||
|
||||
import (
|
||||
|
||||
96  s3response/s3response.go  Normal file
@@ -0,0 +1,96 @@
|
||||
// Copyright 2023 Versity Software
|
||||
// This file is licensed under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package s3response
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
)
|
||||
|
||||
// Part describes part metadata.
|
||||
type Part struct {
|
||||
PartNumber int
|
||||
LastModified string
|
||||
ETag string
|
||||
Size int64
|
||||
}
|
||||
|
||||
// ListPartsResponse - s3 api list parts response.
|
||||
type ListPartsResponse struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`
|
||||
|
||||
Bucket string
|
||||
Key string
|
||||
UploadID string `xml:"UploadId"`
|
||||
|
||||
Initiator Initiator
|
||||
Owner Owner
|
||||
|
||||
// The class of storage used to store the object.
|
||||
StorageClass string
|
||||
|
||||
PartNumberMarker int
|
||||
NextPartNumberMarker int
|
||||
MaxParts int
|
||||
IsTruncated bool
|
||||
|
||||
// List of parts.
|
||||
Parts []Part `xml:"Part"`
|
||||
}
|
||||
|
||||
// ListMultipartUploadsResponse - s3 api list multipart uploads response.
|
||||
type ListMultipartUploadsResponse struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"`
|
||||
|
||||
Bucket string
|
||||
KeyMarker string
|
||||
UploadIDMarker string `xml:"UploadIdMarker"`
|
||||
NextKeyMarker string
|
||||
NextUploadIDMarker string `xml:"NextUploadIdMarker"`
|
||||
Delimiter string
|
||||
Prefix string
|
||||
EncodingType string `xml:"EncodingType,omitempty"`
|
||||
MaxUploads int
|
||||
IsTruncated bool
|
||||
|
||||
// List of pending uploads.
|
||||
Uploads []Upload `xml:"Upload"`
|
||||
|
||||
// Delimed common prefixes.
|
||||
CommonPrefixes []CommonPrefix
|
||||
}
|
||||
|
||||
// Upload desribes in progress multipart upload
|
||||
type Upload struct {
|
||||
Key string
|
||||
UploadID string `xml:"UploadId"`
|
||||
Initiator Initiator
|
||||
Owner Owner
|
||||
StorageClass string
|
||||
Initiated string
|
||||
}
|
||||
|
||||
// CommonPrefix ListObjectsResponse common prefixes (directory abstraction)
|
||||
type CommonPrefix struct {
|
||||
Prefix string
|
||||
}
|
||||
|
||||
// Initiator same fields as Owner
|
||||
type Initiator Owner
|
||||
|
||||
// Owner bucket ownership
|
||||
type Owner struct {
|
||||
ID string
|
||||
DisplayName string
|
||||
}
|
||||
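These response structs are what SendXMLResponse serializes for the multipart listing calls. A quick sketch of the XML shape they produce (all values made up):

package example

import (
	"encoding/xml"
	"fmt"

	"github.com/versity/versitygw/s3response"
)

func printListParts() error {
	res := s3response.ListPartsResponse{
		Bucket:   "my-bucket",
		Key:      "my-key",
		UploadID: "upload-1",
		MaxParts: 1000,
		Parts: []s3response.Part{
			{PartNumber: 1, ETag: "etag-1", Size: 5},
		},
	}
	b, err := xml.Marshal(res)
	if err != nil {
		return err
	}
	// Produces <ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
	// with an <UploadId> element and one <Part> child.
	fmt.Println(string(b))
	return nil
}
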
31  versitygw.spec.in  Normal file
@@ -0,0 +1,31 @@
%global debug_package %{nil}
%define pkg_version @@VERSION@@

Name: versitygw
Version: %{pkg_version}
Release: 1%{?dist}
Summary: Versity S3 Gateway

License: Apache-2.0
URL: https://github.com/versity/versitygw
Source0: %{name}-%{version}.tar.gz
%description
The S3 gateway is an S3 protocol translator that allows an S3 client
to access the supported backend storage as if it was a native S3 service.

BuildRequires: golang >= 1.20

%prep
%setup

%build
make

%install
mkdir -p %{buildroot}%{_bindir}
install -m 0755 %{name} %{buildroot}%{_bindir}/

%post

%files
%{_bindir}/%{name}