Compare commits


1 Commit

38 changed files with 735 additions and 3118 deletions


@@ -23,16 +23,5 @@ jobs:
run: |
go get -v -t -d ./...
- name: Build
run: go build -o versitygw cmd/versitygw/*.go
- name: Test
run: go test -v -timeout 30s -tags=github ./...
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
shell: bash
- name: Run govulncheck
run: govulncheck ./...
shell: bash

.gitignore vendored

@@ -7,8 +7,6 @@
*.dll
*.so
*.dylib
cmd/versitygw/versitygw
/versitygw
# Test binary, built with `go test -c`
*.test
@@ -24,11 +22,3 @@ go.work
# ignore IntelliJ directories
.idea
# auto generated VERSION file
VERSION
# build output
/versitygw.spec
*.tar
*.tar.gz


@@ -1,73 +0,0 @@
# Copyright 2023 Versity Software
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Go parameters
GOCMD=go
GOBUILD=$(GOCMD) build
GOCLEAN=$(GOCMD) clean
GOTEST=$(GOCMD) test
BIN=versitygw
VERSION := $(shell if test -e VERSION; then cat VERSION; else git describe --abbrev=0 --tags HEAD; fi)
BUILD := $(shell git rev-parse --short HEAD || echo release-rpm)
TIME := `date -u '+%Y-%m-%d_%I:%M:%S%p'`
LDFLAGS=-ldflags "-X=main.Build=$(BUILD) -X=main.BuildTime=$(TIME) -X=main.Version=$(VERSION)"
all: build
build: $(BIN)
.PHONY: $(BIN)
$(BIN):
$(GOBUILD) $(LDFLAGS) -o $(BIN) cmd/$(BIN)/*.go
.PHONY: test
test:
$(GOTEST) ./...
.PHONY: check
check:
# note this requires staticcheck be in your PATH:
# export PATH=$PATH:~/go/bin
# go install honnef.co/go/tools/cmd/staticcheck@latest
staticcheck ./...
golint ./...
gofmt -s -l .
.PHONY: clean
clean:
$(GOCLEAN)
.PHONY: cleanall
cleanall: clean
rm -f $(BIN)
rm -f versitygw-*.tar
rm -f versitygw-*.tar.gz
rm -f versitygw.spec
%.spec: %.spec.in
sed -e 's/@@VERSION@@/$(VERSION)/g' < $< > $@+
mv $@+ $@
TARFILE = $(BIN)-$(VERSION).tar
dist: $(BIN).spec
echo $(VERSION) >VERSION
git archive --format=tar --prefix $(BIN)-$(VERSION)/ HEAD > $(TARFILE)
@ tar rf $(TARFILE) --transform="s@\(.*\)@$(BIN)-$(VERSION)/\1@" $(BIN).spec VERSION
rm -f VERSION
rm -f $(BIN).spec
gzip -f $(TARFILE)
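Note that the -X flags in LDFLAGS above assume string variables named Build, BuildTime, and Version in package main; a minimal sketch of what those declarations might look like (illustrative only, not the project's actual main package):

```
package main

import "fmt"

// Populated at link time via:
//   -ldflags "-X=main.Build=... -X=main.BuildTime=... -X=main.Version=..."
var (
	Build     string
	BuildTime string
	Version   string
)

func main() {
	fmt.Printf("versitygw %s (build %s, %s)\n", Version, Build, BuildTime)
}
```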

NOTICE

@@ -1,2 +0,0 @@
versitygw - Versity S3 Gateway
Copyright 2023 Versity Software


@@ -1,64 +0,0 @@
# The Versity Gateway: A High-Performance Open Source S3 to File Translation Tool
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/versity/versitygw/blob/assets/assets/logo-white.svg">
<source media="(prefers-color-scheme: light)" srcset="https://github.com/versity/versitygw/blob/assets/assets/logo.svg">
<a href="https://www.versity.com"><img alt="Versity Software logo image." src="https://github.com/versity/versitygw/blob/assets/assets/logo.svg"></a>
</picture>
[![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/versity/versitygw/blob/main/LICENSE)
The Versity Gateway: A High-Performance Open Source S3 to File Translation Tool
Current status: Alpha, in development and not yet suited for production use
See project [documentation](https://github.com/versity/versitygw/wiki) on the wiki.
The Versity Gateway is a simple-to-use tool for seamless inline translation between AWS S3 object commands and file-based storage systems. It bridges the gap between S3-reliant applications and file storage systems, enabling enhanced compatibility and integration with file-based systems while offering exceptional scalability.
The server translates incoming S3 API requests into equivalent operations on the backend service. Through this gateway, applications can use an S3-compatible API on top of existing storage systems, preserving existing infrastructure investments while integrating seamlessly with S3-compatible tooling and offering increased flexibility in managing data storage.
The Versity Gateway is focused on performance, simplicity, and expandability. It is designed with modularity in mind, enabling future extensions to support additional backend storage systems. At present, the Versity Gateway supports any generic POSIX filesystem backend as well as Versity's open source ScoutFS filesystem.
The gateway is completely stateless. Multiple Versity Gateway instances may be deployed in a cluster to increase aggregate throughput. The Versity Gateway's stateless architecture allows any request to be serviced by any gateway, distributing workloads and enhancing performance. Load balancers may be used to distribute requests evenly across the cluster of gateways for optimal performance.
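As a minimal illustration of the stateless design (the ports and directory are only examples, using the quickstart invocation shown below), two independent instances can export the same directory and sit behind a load balancer:
```
# two stateless gateway instances serving the same backing directory
ROOT_ACCESS_KEY="testuser" ROOT_SECRET_KEY="secret" ./versitygw --port :10000 posix /tmp/vgw &
ROOT_ACCESS_KEY="testuser" ROOT_SECRET_KEY="secret" ./versitygw --port :10001 posix /tmp/vgw &
```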
The S3 HTTP(S) server and routing are implemented using the [Fiber](https://gofiber.io) web framework. This framework is actively developed with a focus on performance. S3 API compatibility leverages the official [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2) whenever possible for maximum compatibility with the AWS S3 service.
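For readers unfamiliar with Fiber, a standalone route handler looks roughly like the sketch below; this is generic Fiber usage for illustration, not the gateway's actual routing code:
```
package main

import (
	"log"

	"github.com/gofiber/fiber/v2"
)

func main() {
	app := fiber.New()
	// Stand-in route: the gateway's real S3 routing is far more involved.
	app.Get("/:bucket", func(c *fiber.Ctx) error {
		return c.SendString("bucket: " + c.Params("bucket"))
	})
	log.Fatal(app.Listen(":10000"))
}
```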
## Getting Started
See the [Quickstart](https://github.com/versity/versitygw/wiki/Quickstart) documentation.
### Run the gateway with posix backend:
```
mkdir /tmp/vgw
ROOT_ACCESS_KEY="testuser" ROOT_SECRET_KEY="secret" ./versitygw --port :10000 posix /tmp/vgw
```
This will enable an S3 server on the current host listening on port 10000 and hosting the directory `/tmp/vgw`.
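Any S3-compatible client pointed at that address should then work. For example, with the AWS CLI installed and the credentials from the command above (illustrative only):
```
AWS_ACCESS_KEY_ID=testuser AWS_SECRET_ACCESS_KEY=secret \
  aws --endpoint-url http://localhost:10000 s3 mb s3://mybucket
AWS_ACCESS_KEY_ID=testuser AWS_SECRET_ACCESS_KEY=secret \
  aws --endpoint-url http://localhost:10000 s3 ls
```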
To get the usage output, run the following:
```
./versitygw --help
```
The command format is
```
versitygw [global options] command [command options] [arguments...]
```
The global options are specified before the backend type and the backend options are specified after.
***
#### Versity gives you clarity and control over your archival storage, so you can allocate more resources to your core mission.
### Contact
info@versity.com <br />
+1 844 726 8826
### @versitysoftware
[![linkedin](https://github.com/versity/versitygw/blob/assets/assets/linkedin.jpg)](https://www.linkedin.com/company/versity/) &nbsp;
[![twitter](https://github.com/versity/versitygw/blob/assets/assets/twitter.jpg)](https://twitter.com/VersitySoftware) &nbsp;
[![facebook](https://github.com/versity/versitygw/blob/assets/assets/facebook.jpg)](https://www.facebook.com/versitysoftware) &nbsp;
[![instagram](https://github.com/versity/versitygw/blob/assets/assets/instagram.jpg)](https://www.instagram.com/versitysoftware/) &nbsp;


@@ -1,137 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auth
import (
"encoding/json"
"fmt"
"os"
"sync"
"github.com/versity/versitygw/s3err"
)
type Account struct {
Secret string `json:"secret"`
Role string `json:"role"`
Region string `json:"region"`
}
type IAMConfig struct {
AccessAccounts map[string]Account `json:"accessAccounts"`
}
type AccountsCache struct {
mu sync.Mutex
Accounts map[string]Account
}
func (c *AccountsCache) getAccount(access string) *Account {
c.mu.Lock()
defer c.mu.Unlock()
acc, ok := c.Accounts[access]
if !ok {
return nil
}
return &acc
}
func (c *AccountsCache) updateAccounts() error {
c.mu.Lock()
defer c.mu.Unlock()
var data IAMConfig
file, err := os.ReadFile("users.json")
if err != nil {
return fmt.Errorf("error reading config file: %w", err)
}
if err := json.Unmarshal(file, &data); err != nil {
return fmt.Errorf("error parsing the data: %w", err)
}
c.Accounts = data.AccessAccounts
return nil
}
type IAMService interface {
GetIAMConfig() (*IAMConfig, error)
CreateAccount(access string, account *Account) error
GetUserAccount(access string) *Account
}
type IAMServiceUnsupported struct {
accCache *AccountsCache
}
var _ IAMService = &IAMServiceUnsupported{}
func New() IAMService {
return &IAMServiceUnsupported{accCache: &AccountsCache{Accounts: map[string]Account{}}}
}
func (IAMServiceUnsupported) GetIAMConfig() (*IAMConfig, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (s IAMServiceUnsupported) CreateAccount(access string, account *Account) error {
var data IAMConfig
file, err := os.ReadFile("users.json")
if err != nil {
data = IAMConfig{AccessAccounts: map[string]Account{
access: *account,
}}
} else {
if err := json.Unmarshal(file, &data); err != nil {
return err
}
_, ok := data.AccessAccounts[access]
if ok {
return fmt.Errorf("user with the given access already exists")
}
data.AccessAccounts[access] = *account
}
updatedJSON, err := json.MarshalIndent(data, "", " ")
if err != nil {
return err
}
if err := os.WriteFile("users.json", updatedJSON, 0644); err != nil {
return err
}
return nil
}
func (s IAMServiceUnsupported) GetUserAccount(access string) *Account {
acc := s.accCache.getAccount(access)
if acc == nil {
err := s.accCache.updateAccounts()
if err != nil {
return nil
}
return s.accCache.getAccount(access)
}
return acc
}
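For reference, the Account and IAMConfig struct tags above determine the on-disk shape of users.json; the sketch below (the role and region values are made up) prints that shape exactly as updateAccounts would read it back:

```
package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the structs above, for illustration only.
type Account struct {
	Secret string `json:"secret"`
	Role   string `json:"role"`
	Region string `json:"region"`
}

type IAMConfig struct {
	AccessAccounts map[string]Account `json:"accessAccounts"`
}

func main() {
	// One access key mapped to its account, as CreateAccount would store it.
	cfg := IAMConfig{AccessAccounts: map[string]Account{
		"testuser": {Secret: "secret", Role: "user", Region: "us-east-1"},
	}}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
}
```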


@@ -1,17 +1,3 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package backend
import (
@@ -20,46 +6,47 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
"github.com/versity/scoutgw/s3err"
)
//go:generate moq -out backend_moq_test.go . Backend
//go:generate moq -out ../s3api/controllers/backend_moq_test.go -pkg controllers . Backend
//go:generate moq -out ../s3api/controllers/backend_moq_test.go . Backend
type Backend interface {
fmt.Stringer
GetIAMConfig() ([]byte, error)
Shutdown()
ListBuckets() (*s3.ListBucketsOutput, error)
HeadBucket(bucket string) (*s3.HeadBucketOutput, error)
GetBucketAcl(bucket string) (*s3.GetBucketAclOutput, error)
PutBucket(bucket string) error
PutBucketAcl(*s3.PutBucketAclInput) error
DeleteBucket(bucket string) error
CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(*s3.AbortMultipartUploadInput) error
ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error)
CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error)
PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error)
PutObjectPart(bucket, object, uploadID string, part int, r io.Reader) (etag string, err error)
PutObject(*s3.PutObjectInput) (string, error)
HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error)
PutObject(bucket, object string, r io.Reader) (string, error)
HeadObject(bucket, object string, etag string) (*s3.HeadObjectOutput, error)
GetObject(bucket, object string, startOffset, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error)
GetObjectAcl(bucket, object string) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error)
CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error)
ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error)
ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error)
ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
DeleteObject(bucket, object string) error
DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error
PutBucketAcl(*s3.PutBucketAclInput) error
PutObjectAcl(*s3.PutObjectAclInput) error
RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error
UploadPart(bucket, object, uploadId string, Body io.ReadSeeker) (*s3.UploadPartOutput, error)
UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
IsTaggingSupported() bool
GetTags(bucket, object string) (map[string]string, error)
SetTags(bucket, object string, tags map[string]string) error
RemoveTags(bucket, object string) error
@@ -72,6 +59,10 @@ var _ Backend = &BackendUnsupported{}
func New() Backend {
return &BackendUnsupported{}
}
func (BackendUnsupported) GetIAMConfig() ([]byte, error) {
return nil, fmt.Errorf("not supported")
}
func (BackendUnsupported) Shutdown() {}
func (BackendUnsupported) String() string {
return "Unsupported"
@@ -116,20 +107,20 @@ func (BackendUnsupported) CompleteMultipartUpload(bucket, object, uploadID strin
func (BackendUnsupported) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
return s3response.ListMultipartUploadsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
return s3response.ListPartsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
func (BackendUnsupported) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error) {
func (BackendUnsupported) PutObjectPart(bucket, object, uploadID string, part int, r io.Reader) (etag string, err error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObject(*s3.PutObjectInput) (string, error) {
func (BackendUnsupported) PutObject(bucket, object string, r io.Reader) (string, error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObject(bucket, object string) error {
@@ -138,10 +129,10 @@ func (BackendUnsupported) DeleteObject(bucket, object string) error {
func (BackendUnsupported) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
func (BackendUnsupported) GetObject(bucket, object string, startOffset, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
func (BackendUnsupported) HeadObject(bucket, object string, etag string) (*s3.HeadObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAcl(bucket, object string) (*s3.GetObjectAclOutput, error) {
@@ -153,19 +144,20 @@ func (BackendUnsupported) GetObjectAttributes(bucket, object string, attributes
func (BackendUnsupported) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
func (BackendUnsupported) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
func (BackendUnsupported) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) IsTaggingSupported() bool { return false }
func (BackendUnsupported) GetTags(bucket, object string) (map[string]string, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
return nil, fmt.Errorf("not supported")
}
func (BackendUnsupported) SetTags(bucket, object string, tags map[string]string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
return fmt.Errorf("not supported")
}
func (BackendUnsupported) RemoveTags(bucket, object string) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
return fmt.Errorf("not supported")
}
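A sketch of how a concrete backend builds on BackendUnsupported (the same embedding pattern the posix backend uses further down); the type name and the versitygw import path are assumptions for illustration:

```
package mybackend

import (
	"github.com/aws/aws-sdk-go-v2/service/s3"

	"github.com/versity/versitygw/backend"
)

// MyBackend embeds BackendUnsupported, so every Backend method it does not
// override returns ErrNotImplemented.
type MyBackend struct {
	backend.BackendUnsupported
}

func (MyBackend) String() string { return "MyBackend" }

// ListBuckets is the only call implemented in this sketch.
func (MyBackend) ListBuckets() (*s3.ListBucketsOutput, error) {
	return &s3.ListBucketsOutput{}, nil
}
```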


@@ -6,7 +6,6 @@ package backend
import (
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3response"
"io"
"sync"
)
@@ -48,7 +47,10 @@ var _ Backend = &BackendMock{}
// GetBucketAclFunc: func(bucket string) (*s3.GetBucketAclOutput, error) {
// panic("mock out the GetBucketAcl method")
// },
// GetObjectFunc: func(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
// GetIAMConfigFunc: func() ([]byte, error) {
// panic("mock out the GetIAMConfig method")
// },
// GetObjectFunc: func(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
// panic("mock out the GetObject method")
// },
// GetObjectAclFunc: func(bucket string, object string) (*s3.GetObjectAclOutput, error) {
@@ -63,22 +65,25 @@ var _ Backend = &BackendMock{}
// HeadBucketFunc: func(bucket string) (*s3.HeadBucketOutput, error) {
// panic("mock out the HeadBucket method")
// },
// HeadObjectFunc: func(bucket string, object string) (*s3.HeadObjectOutput, error) {
// HeadObjectFunc: func(bucket string, object string, etag string) (*s3.HeadObjectOutput, error) {
// panic("mock out the HeadObject method")
// },
// IsTaggingSupportedFunc: func() bool {
// panic("mock out the IsTaggingSupported method")
// },
// ListBucketsFunc: func() (*s3.ListBucketsOutput, error) {
// panic("mock out the ListBuckets method")
// },
// ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
// ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
// panic("mock out the ListMultipartUploads method")
// },
// ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
// ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
// panic("mock out the ListObjectParts method")
// },
// ListObjectsFunc: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
// ListObjectsFunc: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
// panic("mock out the ListObjects method")
// },
// ListObjectsV2Func: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
// ListObjectsV2Func: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
// panic("mock out the ListObjectsV2 method")
// },
// PutBucketFunc: func(bucket string) error {
@@ -87,13 +92,13 @@ var _ Backend = &BackendMock{}
// PutBucketAclFunc: func(putBucketAclInput *s3.PutBucketAclInput) error {
// panic("mock out the PutBucketAcl method")
// },
// PutObjectFunc: func(putObjectInput *s3.PutObjectInput) (string, error) {
// PutObjectFunc: func(bucket string, object string, r io.Reader) (string, error) {
// panic("mock out the PutObject method")
// },
// PutObjectAclFunc: func(putObjectAclInput *s3.PutObjectAclInput) error {
// panic("mock out the PutObjectAcl method")
// },
// PutObjectPartFunc: func(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error) {
// PutObjectPartFunc: func(bucket string, object string, uploadID string, part int, r io.Reader) (string, error) {
// panic("mock out the PutObjectPart method")
// },
// RemoveTagsFunc: func(bucket string, object string) error {
@@ -151,8 +156,11 @@ type BackendMock struct {
// GetBucketAclFunc mocks the GetBucketAcl method.
GetBucketAclFunc func(bucket string) (*s3.GetBucketAclOutput, error)
// GetIAMConfigFunc mocks the GetIAMConfig method.
GetIAMConfigFunc func() ([]byte, error)
// GetObjectFunc mocks the GetObject method.
GetObjectFunc func(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error)
GetObjectFunc func(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error)
// GetObjectAclFunc mocks the GetObjectAcl method.
GetObjectAclFunc func(bucket string, object string) (*s3.GetObjectAclOutput, error)
@@ -167,22 +175,25 @@ type BackendMock struct {
HeadBucketFunc func(bucket string) (*s3.HeadBucketOutput, error)
// HeadObjectFunc mocks the HeadObject method.
HeadObjectFunc func(bucket string, object string) (*s3.HeadObjectOutput, error)
HeadObjectFunc func(bucket string, object string, etag string) (*s3.HeadObjectOutput, error)
// IsTaggingSupportedFunc mocks the IsTaggingSupported method.
IsTaggingSupportedFunc func() bool
// ListBucketsFunc mocks the ListBuckets method.
ListBucketsFunc func() (*s3.ListBucketsOutput, error)
// ListMultipartUploadsFunc mocks the ListMultipartUploads method.
ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
// ListObjectPartsFunc mocks the ListObjectParts method.
ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error)
// ListObjectsFunc mocks the ListObjects method.
ListObjectsFunc func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error)
ListObjectsFunc func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
// ListObjectsV2Func mocks the ListObjectsV2 method.
ListObjectsV2Func func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error)
ListObjectsV2Func func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
// PutBucketFunc mocks the PutBucket method.
PutBucketFunc func(bucket string) error
@@ -191,13 +202,13 @@ type BackendMock struct {
PutBucketAclFunc func(putBucketAclInput *s3.PutBucketAclInput) error
// PutObjectFunc mocks the PutObject method.
PutObjectFunc func(putObjectInput *s3.PutObjectInput) (string, error)
PutObjectFunc func(bucket string, object string, r io.Reader) (string, error)
// PutObjectAclFunc mocks the PutObjectAcl method.
PutObjectAclFunc func(putObjectAclInput *s3.PutObjectAclInput) error
// PutObjectPartFunc mocks the PutObjectPart method.
PutObjectPartFunc func(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error)
PutObjectPartFunc func(bucket string, object string, uploadID string, part int, r io.Reader) (string, error)
// RemoveTagsFunc mocks the RemoveTags method.
RemoveTagsFunc func(bucket string, object string) error
@@ -293,16 +304,23 @@ type BackendMock struct {
// Bucket is the bucket argument value.
Bucket string
}
// GetIAMConfig holds details about calls to the GetIAMConfig method.
GetIAMConfig []struct {
}
// GetObject holds details about calls to the GetObject method.
GetObject []struct {
// Bucket is the bucket argument value.
Bucket string
// Object is the object argument value.
Object string
// AcceptRange is the acceptRange argument value.
AcceptRange string
// StartOffset is the startOffset argument value.
StartOffset int64
// Length is the length argument value.
Length int64
// Writer is the writer argument value.
Writer io.Writer
// Etag is the etag argument value.
Etag string
}
// GetObjectAcl holds details about calls to the GetObjectAcl method.
GetObjectAcl []struct {
@@ -338,6 +356,11 @@ type BackendMock struct {
Bucket string
// Object is the object argument value.
Object string
// Etag is the etag argument value.
Etag string
}
// IsTaggingSupported holds details about calls to the IsTaggingSupported method.
IsTaggingSupported []struct {
}
// ListBuckets holds details about calls to the ListBuckets method.
ListBuckets []struct {
@@ -398,8 +421,12 @@ type BackendMock struct {
}
// PutObject holds details about calls to the PutObject method.
PutObject []struct {
// PutObjectInput is the putObjectInput argument value.
PutObjectInput *s3.PutObjectInput
// Bucket is the bucket argument value.
Bucket string
// Object is the object argument value.
Object string
// R is the r argument value.
R io.Reader
}
// PutObjectAcl holds details about calls to the PutObjectAcl method.
PutObjectAcl []struct {
@@ -416,8 +443,6 @@ type BackendMock struct {
UploadID string
// Part is the part argument value.
Part int
// Length is the length argument value.
Length int64
// R is the r argument value.
R io.Reader
}
@@ -478,12 +503,14 @@ type BackendMock struct {
lockDeleteObject sync.RWMutex
lockDeleteObjects sync.RWMutex
lockGetBucketAcl sync.RWMutex
lockGetIAMConfig sync.RWMutex
lockGetObject sync.RWMutex
lockGetObjectAcl sync.RWMutex
lockGetObjectAttributes sync.RWMutex
lockGetTags sync.RWMutex
lockHeadBucket sync.RWMutex
lockHeadObject sync.RWMutex
lockIsTaggingSupported sync.RWMutex
lockListBuckets sync.RWMutex
lockListMultipartUploads sync.RWMutex
lockListObjectParts sync.RWMutex
@@ -843,26 +870,57 @@ func (mock *BackendMock) GetBucketAclCalls() []struct {
return calls
}
// GetIAMConfig calls GetIAMConfigFunc.
func (mock *BackendMock) GetIAMConfig() ([]byte, error) {
if mock.GetIAMConfigFunc == nil {
panic("BackendMock.GetIAMConfigFunc: method is nil but Backend.GetIAMConfig was just called")
}
callInfo := struct {
}{}
mock.lockGetIAMConfig.Lock()
mock.calls.GetIAMConfig = append(mock.calls.GetIAMConfig, callInfo)
mock.lockGetIAMConfig.Unlock()
return mock.GetIAMConfigFunc()
}
// GetIAMConfigCalls gets all the calls that were made to GetIAMConfig.
// Check the length with:
//
// len(mockedBackend.GetIAMConfigCalls())
func (mock *BackendMock) GetIAMConfigCalls() []struct {
} {
var calls []struct {
}
mock.lockGetIAMConfig.RLock()
calls = mock.calls.GetIAMConfig
mock.lockGetIAMConfig.RUnlock()
return calls
}
// GetObject calls GetObjectFunc.
func (mock *BackendMock) GetObject(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
func (mock *BackendMock) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
if mock.GetObjectFunc == nil {
panic("BackendMock.GetObjectFunc: method is nil but Backend.GetObject was just called")
}
callInfo := struct {
Bucket string
Object string
AcceptRange string
StartOffset int64
Length int64
Writer io.Writer
Etag string
}{
Bucket: bucket,
Object: object,
AcceptRange: acceptRange,
StartOffset: startOffset,
Length: length,
Writer: writer,
Etag: etag,
}
mock.lockGetObject.Lock()
mock.calls.GetObject = append(mock.calls.GetObject, callInfo)
mock.lockGetObject.Unlock()
return mock.GetObjectFunc(bucket, object, acceptRange, writer)
return mock.GetObjectFunc(bucket, object, startOffset, length, writer, etag)
}
// GetObjectCalls gets all the calls that were made to GetObject.
@@ -872,14 +930,18 @@ func (mock *BackendMock) GetObject(bucket string, object string, acceptRange str
func (mock *BackendMock) GetObjectCalls() []struct {
Bucket string
Object string
AcceptRange string
StartOffset int64
Length int64
Writer io.Writer
Etag string
} {
var calls []struct {
Bucket string
Object string
AcceptRange string
StartOffset int64
Length int64
Writer io.Writer
Etag string
}
mock.lockGetObject.RLock()
calls = mock.calls.GetObject
@@ -1032,21 +1094,23 @@ func (mock *BackendMock) HeadBucketCalls() []struct {
}
// HeadObject calls HeadObjectFunc.
func (mock *BackendMock) HeadObject(bucket string, object string) (*s3.HeadObjectOutput, error) {
func (mock *BackendMock) HeadObject(bucket string, object string, etag string) (*s3.HeadObjectOutput, error) {
if mock.HeadObjectFunc == nil {
panic("BackendMock.HeadObjectFunc: method is nil but Backend.HeadObject was just called")
}
callInfo := struct {
Bucket string
Object string
Etag string
}{
Bucket: bucket,
Object: object,
Etag: etag,
}
mock.lockHeadObject.Lock()
mock.calls.HeadObject = append(mock.calls.HeadObject, callInfo)
mock.lockHeadObject.Unlock()
return mock.HeadObjectFunc(bucket, object)
return mock.HeadObjectFunc(bucket, object, etag)
}
// HeadObjectCalls gets all the calls that were made to HeadObject.
@@ -1056,10 +1120,12 @@ func (mock *BackendMock) HeadObject(bucket string, object string) (*s3.HeadObjec
func (mock *BackendMock) HeadObjectCalls() []struct {
Bucket string
Object string
Etag string
} {
var calls []struct {
Bucket string
Object string
Etag string
}
mock.lockHeadObject.RLock()
calls = mock.calls.HeadObject
@@ -1067,6 +1133,33 @@ func (mock *BackendMock) HeadObjectCalls() []struct {
return calls
}
// IsTaggingSupported calls IsTaggingSupportedFunc.
func (mock *BackendMock) IsTaggingSupported() bool {
if mock.IsTaggingSupportedFunc == nil {
panic("BackendMock.IsTaggingSupportedFunc: method is nil but Backend.IsTaggingSupported was just called")
}
callInfo := struct {
}{}
mock.lockIsTaggingSupported.Lock()
mock.calls.IsTaggingSupported = append(mock.calls.IsTaggingSupported, callInfo)
mock.lockIsTaggingSupported.Unlock()
return mock.IsTaggingSupportedFunc()
}
// IsTaggingSupportedCalls gets all the calls that were made to IsTaggingSupported.
// Check the length with:
//
// len(mockedBackend.IsTaggingSupportedCalls())
func (mock *BackendMock) IsTaggingSupportedCalls() []struct {
} {
var calls []struct {
}
mock.lockIsTaggingSupported.RLock()
calls = mock.calls.IsTaggingSupported
mock.lockIsTaggingSupported.RUnlock()
return calls
}
// ListBuckets calls ListBucketsFunc.
func (mock *BackendMock) ListBuckets() (*s3.ListBucketsOutput, error) {
if mock.ListBucketsFunc == nil {
@@ -1095,7 +1188,7 @@ func (mock *BackendMock) ListBucketsCalls() []struct {
}
// ListMultipartUploads calls ListMultipartUploadsFunc.
func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
if mock.ListMultipartUploadsFunc == nil {
panic("BackendMock.ListMultipartUploadsFunc: method is nil but Backend.ListMultipartUploads was just called")
}
@@ -1127,7 +1220,7 @@ func (mock *BackendMock) ListMultipartUploadsCalls() []struct {
}
// ListObjectParts calls ListObjectPartsFunc.
func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
if mock.ListObjectPartsFunc == nil {
panic("BackendMock.ListObjectPartsFunc: method is nil but Backend.ListObjectParts was just called")
}
@@ -1175,7 +1268,7 @@ func (mock *BackendMock) ListObjectPartsCalls() []struct {
}
// ListObjects calls ListObjectsFunc.
func (mock *BackendMock) ListObjects(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
func (mock *BackendMock) ListObjects(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
if mock.ListObjectsFunc == nil {
panic("BackendMock.ListObjectsFunc: method is nil but Backend.ListObjects was just called")
}
@@ -1223,7 +1316,7 @@ func (mock *BackendMock) ListObjectsCalls() []struct {
}
// ListObjectsV2 calls ListObjectsV2Func.
func (mock *BackendMock) ListObjectsV2(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
func (mock *BackendMock) ListObjectsV2(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
if mock.ListObjectsV2Func == nil {
panic("BackendMock.ListObjectsV2Func: method is nil but Backend.ListObjectsV2 was just called")
}
@@ -1335,19 +1428,23 @@ func (mock *BackendMock) PutBucketAclCalls() []struct {
}
// PutObject calls PutObjectFunc.
func (mock *BackendMock) PutObject(putObjectInput *s3.PutObjectInput) (string, error) {
func (mock *BackendMock) PutObject(bucket string, object string, r io.Reader) (string, error) {
if mock.PutObjectFunc == nil {
panic("BackendMock.PutObjectFunc: method is nil but Backend.PutObject was just called")
}
callInfo := struct {
PutObjectInput *s3.PutObjectInput
Bucket string
Object string
R io.Reader
}{
PutObjectInput: putObjectInput,
Bucket: bucket,
Object: object,
R: r,
}
mock.lockPutObject.Lock()
mock.calls.PutObject = append(mock.calls.PutObject, callInfo)
mock.lockPutObject.Unlock()
return mock.PutObjectFunc(putObjectInput)
return mock.PutObjectFunc(bucket, object, r)
}
// PutObjectCalls gets all the calls that were made to PutObject.
@@ -1355,10 +1452,14 @@ func (mock *BackendMock) PutObject(putObjectInput *s3.PutObjectInput) (string, e
//
// len(mockedBackend.PutObjectCalls())
func (mock *BackendMock) PutObjectCalls() []struct {
PutObjectInput *s3.PutObjectInput
Bucket string
Object string
R io.Reader
} {
var calls []struct {
PutObjectInput *s3.PutObjectInput
Bucket string
Object string
R io.Reader
}
mock.lockPutObject.RLock()
calls = mock.calls.PutObject
@@ -1399,7 +1500,7 @@ func (mock *BackendMock) PutObjectAclCalls() []struct {
}
// PutObjectPart calls PutObjectPartFunc.
func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error) {
func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID string, part int, r io.Reader) (string, error) {
if mock.PutObjectPartFunc == nil {
panic("BackendMock.PutObjectPartFunc: method is nil but Backend.PutObjectPart was just called")
}
@@ -1408,20 +1509,18 @@ func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID st
Object string
UploadID string
Part int
Length int64
R io.Reader
}{
Bucket: bucket,
Object: object,
UploadID: uploadID,
Part: part,
Length: length,
R: r,
}
mock.lockPutObjectPart.Lock()
mock.calls.PutObjectPart = append(mock.calls.PutObjectPart, callInfo)
mock.lockPutObjectPart.Unlock()
return mock.PutObjectPartFunc(bucket, object, uploadID, part, length, r)
return mock.PutObjectPartFunc(bucket, object, uploadID, part, r)
}
// PutObjectPartCalls gets all the calls that were made to PutObjectPart.
@@ -1433,7 +1532,6 @@ func (mock *BackendMock) PutObjectPartCalls() []struct {
Object string
UploadID string
Part int
Length int64
R io.Reader
} {
var calls []struct {
@@ -1441,7 +1539,6 @@ func (mock *BackendMock) PutObjectPartCalls() []struct {
Object string
UploadID string
Part int
Length int64
R io.Reader
}
mock.lockPutObjectPart.RLock()


@@ -1,17 +1,3 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package backend
import (
@@ -21,7 +7,7 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
"github.com/versity/scoutgw/s3err"
)
func TestBackend_ListBuckets(t *testing.T) {


@@ -1,34 +1,11 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package backend
import (
"errors"
"io/fs"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
var (
// RFC3339TimeFormat RFC3339 time format
RFC3339TimeFormat = "2006-01-02T15:04:05.999Z"
)
func IsValidBucketName(name string) bool { return true }
type ByBucketName []types.Bucket
@@ -37,12 +14,6 @@ func (d ByBucketName) Len() int { return len(d) }
func (d ByBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByBucketName) Less(i, j int) bool { return *d[i].Name < *d[j].Name }
type ByObjectName []types.Object
func (d ByObjectName) Len() int { return len(d) }
func (d ByObjectName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByObjectName) Less(i, j int) bool { return *d[i].Key < *d[j].Key }
func GetStringPtr(s string) *string {
return &s
}
@@ -50,36 +21,3 @@ func GetStringPtr(s string) *string {
func GetTimePtr(t time.Time) *time.Time {
return &t
}
func ParseRange(file fs.FileInfo, acceptRange string) (int64, int64, error) {
if acceptRange == "" {
return 0, file.Size(), nil
}
rangeKv := strings.Split(acceptRange, "=")
if len(rangeKv) < 2 {
return 0, 0, errors.New("invalid range parameter")
}
bRange := strings.Split(rangeKv[1], "-")
if len(bRange) < 2 {
return 0, 0, errors.New("invalid range parameter")
}
startOffset, err := strconv.ParseInt(bRange[0], 10, 64)
if err != nil {
return 0, 0, errors.New("invalid range parameter")
}
endOffset, err := strconv.ParseInt(bRange[1], 10, 64)
if err != nil {
return 0, 0, errors.New("invalid range parameter")
}
if endOffset < startOffset {
return 0, 0, errors.New("invalid range parameter")
}
return int64(startOffset), int64(endOffset - startOffset + 1), nil
}


@@ -1,28 +1,11 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package posix
import (
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"sort"
@@ -34,14 +17,11 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/google/uuid"
"github.com/pkg/xattr"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
"github.com/versity/scoutgw/backend"
"github.com/versity/scoutgw/s3err"
)
type Posix struct {
rootfd *os.File
rootdir string
backend.BackendUnsupported
}
@@ -52,30 +32,13 @@ const (
metaTmpMultipartDir = metaTmpDir + "/multipart"
onameAttr = "user.objname"
tagHdr = "X-Amz-Tagging"
emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e"
dirObjKey = "user.dirisobject"
)
func New(rootdir string) (*Posix, error) {
err := os.Chdir(rootdir)
if err != nil {
return nil, fmt.Errorf("chdir %v: %w", rootdir, err)
}
f, err := os.Open(rootdir)
if err != nil {
return nil, fmt.Errorf("open %v: %w", rootdir, err)
}
return &Posix{rootfd: f, rootdir: rootdir}, nil
}
func (p *Posix) Shutdown() {
p.rootfd.Close()
}
func (p *Posix) Sring() string {
return "Posix Gateway"
}
var (
newObjUID = 0
newObjGID = 0
)
func (p *Posix) ListBuckets() (*s3.ListBucketsOutput, error) {
entries, err := os.ReadDir(".")
@@ -111,7 +74,7 @@ func (p *Posix) ListBuckets() (*s3.ListBucketsOutput, error) {
func (p *Posix) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
_, err := os.Lstat(bucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
@@ -135,7 +98,7 @@ func (p *Posix) PutBucket(bucket string) error {
func (p *Posix) DeleteBucket(bucket string) error {
names, err := os.ReadDir(bucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
@@ -146,7 +109,7 @@ func (p *Posix) DeleteBucket(bucket string) error {
// if .sgwtmp is only item in directory
// then clean this up before trying to remove the bucket
err = os.RemoveAll(filepath.Join(bucket, metaTmpDir))
if err != nil && !errors.Is(err, fs.ErrNotExist) {
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("remove temp dir: %w", err)
}
}
@@ -167,7 +130,7 @@ func (p *Posix) CreateMultipartUpload(mpu *s3.CreateMultipartUploadInput) (*s3.C
object := *mpu.Key
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
@@ -215,7 +178,7 @@ func (p *Posix) CreateMultipartUpload(mpu *s3.CreateMultipartUploadInput) (*s3.C
func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
@@ -232,10 +195,8 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
// check all parts ok
last := len(parts) - 1
partsize := int64(0)
var totalsize int64
for i, p := range parts {
partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", p.PartNumber))
fi, err := os.Lstat(partPath)
fi, err := os.Lstat(filepath.Join(objdir, uploadID, fmt.Sprintf("%v", p.PartNumber)))
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
@@ -243,25 +204,17 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
if i == 0 {
partsize = fi.Size()
}
totalsize += fi.Size()
// all parts except the last need to be the same size
if i < last && partsize != fi.Size() {
return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
}
b, err := xattr.Get(partPath, "user.etag")
etag := string(b)
if err != nil {
etag = ""
}
parts[i].ETag = &etag
}
f, err := openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, totalsize)
f, err := openTmpFile(".")
if err != nil {
return nil, fmt.Errorf("open temp file: %w", err)
}
defer f.cleanup()
defer f.Close()
for _, p := range parts {
pf, err := os.Open(filepath.Join(objdir, uploadID, fmt.Sprintf("%v", p.PartNumber)))
@@ -283,12 +236,15 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
dir := filepath.Dir(objname)
if dir != "" {
if err = mkdirAll(dir, os.FileMode(0755), bucket, object); err != nil {
if err != nil && os.IsExist(err) {
return nil, s3err.GetAPIError(s3err.ErrObjectParentIsFile)
}
if err != nil {
return nil, s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)
return nil, fmt.Errorf("make object parent directories: %w", err)
}
}
}
err = f.link()
err = linkTmpFile(f, objname)
if err != nil {
return nil, fmt.Errorf("link object in namespace: %w", err)
}
@@ -312,6 +268,15 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
return nil, fmt.Errorf("set etag attr: %w", err)
}
if newObjUID != 0 || newObjGID != 0 {
err = os.Chown(objname, newObjUID, newObjGID)
if err != nil {
// cleanup object if returning error
os.Remove(objname)
return nil, fmt.Errorf("set object uid/gid: %w", err)
}
}
// cleanup tmp dirs
os.RemoveAll(upiddir)
// use Remove for objdir in case there are still other uploads
@@ -330,7 +295,7 @@ func (p *Posix) checkUploadIDExists(bucket, object, uploadID string) ([32]byte,
objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
_, err := os.Stat(filepath.Join(objdir, uploadID))
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return [32]byte{}, s3err.GetAPIError(s3err.ErrNoSuchUpload)
}
if err != nil {
@@ -339,7 +304,7 @@ func (p *Posix) checkUploadIDExists(bucket, object, uploadID string) ([32]byte,
return sum, nil
}
func loadUserMetaData(path string, m map[string]string) (contentType, contentEncoding string) {
func loadUserMetaData(path string, m map[string]string) (tag, contentType, contentEncoding string) {
ents, err := xattr.List(path)
if err != nil || len(ents) == 0 {
return
@@ -359,7 +324,16 @@ func loadUserMetaData(path string, m map[string]string) (contentType, contentEnc
m[strings.TrimPrefix(e, "user.")] = string(b)
}
b, err := xattr.Get(path, "user.content-type")
b, err := xattr.Get(path, "user."+tagHdr)
tag = string(b)
if err != nil {
tag = ""
}
if tag != "" {
m[tagHdr] = tag
}
b, err = xattr.Get(path, "user.content-type")
contentType = string(b)
if err != nil {
contentType = ""
@@ -432,6 +406,12 @@ func mkdirAll(path string, perm os.FileMode, bucket, object string) error {
}
return s3err.GetAPIError(s3err.ErrObjectParentIsFile)
}
if newObjUID != 0 || newObjGID != 0 {
err = os.Chown(path, newObjUID, newObjGID)
if err != nil {
return fmt.Errorf("set parent ownership: %w", err)
}
}
return nil
}
@@ -463,7 +443,7 @@ func (p *Posix) AbortMultipartUpload(mpu *s3.AbortMultipartUploadInput) error {
uploadID := *mpu.UploadId
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
@@ -487,40 +467,24 @@ func (p *Posix) AbortMultipartUpload(mpu *s3.AbortMultipartUploadInput) error {
return nil
}
func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
bucket := *mpu.Bucket
var delimiter string
if mpu.Delimiter != nil {
delimiter = *mpu.Delimiter
}
var prefix string
if mpu.Prefix != nil {
prefix = *mpu.Prefix
}
var lmu s3response.ListMultipartUploadsResponse
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return lmu, s3err.GetAPIError(s3err.ErrNoSuchBucket)
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return lmu, fmt.Errorf("stat bucket: %w", err)
return nil, fmt.Errorf("stat bucket: %w", err)
}
// ignore readdir error and use the empty list returned
objs, _ := os.ReadDir(filepath.Join(bucket, metaTmpMultipartDir))
var uploads []s3response.Upload
var uploads []types.MultipartUpload
var keyMarker string
if mpu.KeyMarker != nil {
keyMarker = *mpu.KeyMarker
}
var uploadIDMarker string
if mpu.UploadIdMarker != nil {
uploadIDMarker = *mpu.UploadIdMarker
}
keyMarker := *mpu.KeyMarker
uploadIDMarker := *mpu.UploadIdMarker
var pastMarker bool
if keyMarker == "" && uploadIDMarker == "" {
pastMarker = true
@@ -536,7 +500,7 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
continue
}
objectName := string(b)
if mpu.Prefix != nil && !strings.HasPrefix(objectName, *mpu.Prefix) {
if !strings.HasPrefix(objectName, *mpu.Prefix) {
continue
}
@@ -562,71 +526,64 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
upiddir := filepath.Join(bucket, metaTmpMultipartDir, obj.Name(), upid.Name())
loadUserMetaData(upiddir, userMetaData)
fi, err := upid.Info()
if err != nil {
return lmu, fmt.Errorf("stat %q: %w", upid.Name(), err)
}
uploadID := upid.Name()
uploads = append(uploads, s3response.Upload{
Key: objectName,
UploadID: uploadID,
Initiated: fi.ModTime().Format(backend.RFC3339TimeFormat),
uploads = append(uploads, types.MultipartUpload{
Key: &objectName,
UploadId: &uploadID,
})
if len(uploads) == int(mpu.MaxUploads) {
return s3response.ListMultipartUploadsResponse{
Bucket: bucket,
Delimiter: delimiter,
return &s3.ListMultipartUploadsOutput{
Bucket: &bucket,
Delimiter: mpu.Delimiter,
IsTruncated: i != len(objs) || j != len(upids),
KeyMarker: keyMarker,
MaxUploads: int(mpu.MaxUploads),
NextKeyMarker: objectName,
NextUploadIDMarker: uploadID,
Prefix: prefix,
UploadIDMarker: uploadIDMarker,
KeyMarker: &keyMarker,
MaxUploads: mpu.MaxUploads,
NextKeyMarker: &objectName,
NextUploadIdMarker: &uploadID,
Prefix: mpu.Prefix,
UploadIdMarker: mpu.UploadIdMarker,
Uploads: uploads,
}, nil
}
}
}
return s3response.ListMultipartUploadsResponse{
Bucket: bucket,
Delimiter: delimiter,
KeyMarker: keyMarker,
MaxUploads: int(mpu.MaxUploads),
Prefix: prefix,
UploadIDMarker: uploadIDMarker,
return &s3.ListMultipartUploadsOutput{
Bucket: &bucket,
Delimiter: mpu.Delimiter,
KeyMarker: &keyMarker,
MaxUploads: mpu.MaxUploads,
Prefix: mpu.Prefix,
UploadIdMarker: mpu.UploadIdMarker,
Uploads: uploads,
}, nil
}
func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
var lpr s3response.ListPartsResponse
func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return lpr, s3err.GetAPIError(s3err.ErrNoSuchBucket)
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return lpr, fmt.Errorf("stat bucket: %w", err)
return nil, fmt.Errorf("stat bucket: %w", err)
}
sum, err := p.checkUploadIDExists(bucket, object, uploadID)
if err != nil {
return lpr, err
return nil, err
}
objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
ents, err := os.ReadDir(filepath.Join(objdir, uploadID))
if errors.Is(err, fs.ErrNotExist) {
return lpr, s3err.GetAPIError(s3err.ErrNoSuchUpload)
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchUpload)
}
if err != nil {
return lpr, fmt.Errorf("readdir upload: %w", err)
return nil, fmt.Errorf("readdir upload: %w", err)
}
var parts []s3response.Part
var parts []types.Part
for _, e := range ents {
pn, _ := strconv.Atoi(e.Name())
if pn <= partNumberMarker {
@@ -645,10 +602,10 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
continue
}
parts = append(parts, s3response.Part{
PartNumber: pn,
ETag: etag,
LastModified: fi.ModTime().Format(backend.RFC3339TimeFormat),
parts = append(parts, types.Part{
PartNumber: int32(pn),
ETag: &etag,
LastModified: backend.GetTimePtr(fi.ModTime()),
Size: fi.Size(),
})
}
@@ -657,12 +614,12 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
func(i int, j int) bool { return parts[i].PartNumber < parts[j].PartNumber })
oldLen := len(parts)
if maxParts > 0 && len(parts) > maxParts {
if len(parts) > maxParts {
parts = parts[:maxParts]
}
newLen := len(parts)
nextpart := 0
nextpart := int32(0)
if len(parts) != 0 {
nextpart = parts[len(parts)-1].PartNumber
}
@@ -671,15 +628,15 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
upiddir := filepath.Join(objdir, uploadID)
loadUserMetaData(upiddir, userMetaData)
return s3response.ListPartsResponse{
Bucket: bucket,
return &s3.ListPartsOutput{
Bucket: &bucket,
IsTruncated: oldLen != newLen,
Key: object,
MaxParts: maxParts,
NextPartNumberMarker: nextpart,
PartNumberMarker: partNumberMarker,
Key: &object,
MaxParts: int32(maxParts),
NextPartNumberMarker: backend.GetStringPtr(fmt.Sprintf("%v", nextpart)),
PartNumberMarker: backend.GetStringPtr(fmt.Sprintf("%v", partNumberMarker)),
Parts: parts,
UploadID: uploadID,
UploadId: &uploadID,
}, nil
}
@@ -687,25 +644,20 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
// func (p *Posix) CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error) {
// }
func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (string, error) {
func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, r io.Reader) (string, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return "", fmt.Errorf("stat bucket: %w", err)
}
sum := sha256.Sum256([]byte(object))
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", part))
f, err := openTmpFile(filepath.Join(bucket, objdir),
bucket, partPath, length)
f, err := openTmpFile(".")
if err != nil {
return "", fmt.Errorf("open temp file: %w", err)
}
defer f.cleanup()
defer f.Close()
hash := md5.New()
tr := io.TeeReader(r, hash)
@@ -714,87 +666,103 @@ func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length
return "", fmt.Errorf("write part data: %w", err)
}
err = f.link()
sum := sha256.Sum256([]byte(object))
objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", part))
err = linkTmpFile(f, partPath)
if err != nil {
return "", fmt.Errorf("link object in namespace: %w", err)
}
dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum)
etag := hex.EncodeToString(dataSum[:])
xattr.Set(partPath, "user.etag", []byte(etag))
return etag, nil
}
func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
_, err := os.Stat(*po.Bucket)
if errors.Is(err, fs.ErrNotExist) {
func (p *Posix) PutObject(bucket, object string, r io.Reader) (string, error) {
_, err := os.Stat(bucket)
if err != nil && os.IsNotExist(err) {
return "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return "", fmt.Errorf("stat bucket: %w", err)
}
name := filepath.Join(*po.Bucket, *po.Key)
name := filepath.Join(bucket, object)
if strings.HasSuffix(*po.Key, "/") {
etag := ""
if strings.HasSuffix(object, "/") {
// object is directory
err = mkdirAll(name, os.FileMode(0755), *po.Bucket, *po.Key)
err = mkdirAll(name, os.FileMode(0755), bucket, object)
if err != nil {
return "", err
}
for k, v := range po.Metadata {
xattr.Set(name, "user."+k, []byte(v))
}
// TODO: set user attrs
//for k, v := range metadata {
// xattr.Set(name, "user."+k, []byte(v))
//}
// set etag attribute to signify this dir was specifically put
xattr.Set(name, "user.etag", []byte(emptyMD5))
return emptyMD5, nil
}
// object is file
f, err := openTmpFile(filepath.Join(*po.Bucket, metaTmpDir),
*po.Bucket, *po.Key, po.ContentLength)
if err != nil {
return "", fmt.Errorf("open temp file: %w", err)
}
defer f.cleanup()
hash := md5.New()
rdr := io.TeeReader(po.Body, hash)
_, err = io.Copy(f, rdr)
if err != nil {
return "", fmt.Errorf("write object data: %w", err)
}
dir := filepath.Dir(name)
if dir != "" {
err = mkdirAll(dir, os.FileMode(0755), *po.Bucket, *po.Key)
// set our tag that this dir was specifically put
xattr.Set(name, dirObjKey, nil)
} else {
// object is file
f, err := openTmpFile(".")
if err != nil {
return "", s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)
return "", fmt.Errorf("open temp file: %w", err)
}
defer f.Close()
// TODO: fallocate based on content length
hash := md5.New()
rdr := io.TeeReader(r, hash)
_, err = io.Copy(f, rdr)
if err != nil {
f.Close()
return "", fmt.Errorf("write object data: %w", err)
}
dir := filepath.Dir(name)
if dir != "" {
err = mkdirAll(dir, os.FileMode(0755), bucket, object)
if err != nil {
f.Close()
return "", fmt.Errorf("make object parent directories: %w", err)
}
}
err = linkTmpFile(f, name)
if err != nil {
return "", fmt.Errorf("link object in namespace: %w", err)
}
// TODO: set user attrs
//for k, v := range metadata {
// xattr.Set(name, "user."+k, []byte(v))
//}
dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum[:])
xattr.Set(name, "user.etag", []byte(etag))
if newObjUID != 0 || newObjGID != 0 {
err = os.Chown(name, newObjUID, newObjGID)
if err != nil {
return "", fmt.Errorf("set object uid/gid: %v", err)
}
}
}
err = f.link()
if err != nil {
return "", fmt.Errorf("link object in namespace: %w", err)
}
for k, v := range po.Metadata {
xattr.Set(name, "user."+k, []byte(v))
}
dataSum := hash.Sum(nil)
etag := hex.EncodeToString(dataSum[:])
xattr.Set(name, "user.etag", []byte(etag))
return etag, nil
}
func (p *Posix) DeleteObject(bucket, object string) error {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
@@ -802,7 +770,7 @@ func (p *Posix) DeleteObject(bucket, object string) error {
}
os.Remove(filepath.Join(bucket, object))
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
@@ -827,7 +795,7 @@ func (p *Posix) removeParents(bucket, object string) error {
break
}
_, err := xattr.Get(parent, "user.etag")
_, err := xattr.Get(parent, dirObjKey)
if err == nil {
break
}
@@ -854,9 +822,9 @@ func (p *Posix) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) err
return nil
}
func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
func (p *Posix) GetObject(bucket, object string, startOffset, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
@@ -865,25 +833,20 @@ func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer)
objPath := filepath.Join(bucket, object)
fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
return nil, fmt.Errorf("stat object: %w", err)
}
startOffset, length, err := backend.ParseRange(fi, acceptRange)
if err != nil {
return nil, err
}
if startOffset+length > fi.Size() {
// TODO: is ErrInvalidRequest correct here?
return nil, s3err.GetAPIError(s3err.ErrInvalidRequest)
}
f, err := os.Open(objPath)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
@@ -899,34 +862,23 @@ func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer)
userMetaData := make(map[string]string)
contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
b, err := xattr.Get(objPath, "user.etag")
etag := string(b)
if err != nil {
etag = ""
}
tags, err := p.getXattrTags(bucket, object)
if err != nil {
return nil, fmt.Errorf("get object tags: %w", err)
}
_, contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
// TODO: fill range request header?
// TODO: parse tags for tag count?
return &s3.GetObjectOutput{
AcceptRanges: &acceptRange,
ContentLength: length,
ContentEncoding: &contentEncoding,
ContentType: &contentType,
ETag: &etag,
LastModified: backend.GetTimePtr(fi.ModTime()),
Metadata: userMetaData,
TagCount: int32(len(tags)),
}, nil
}
func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
func (p *Posix) HeadObject(bucket, object string, etag string) (*s3.HeadObjectOutput, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
@@ -935,7 +887,7 @@ func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
objPath := filepath.Join(bucket, object)
fi, err := os.Stat(objPath)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
@@ -943,14 +895,10 @@ func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
}
userMetaData := make(map[string]string)
contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
b, err := xattr.Get(objPath, "user.etag")
etag := string(b)
if err != nil {
etag = ""
}
_, contentType, contentEncoding := loadUserMetaData(objPath, userMetaData)
// TODO: fill accept ranges request header?
// TODO: do we need to get etag from xattr?
return &s3.HeadObjectOutput{
ContentLength: fi.Size(),
ContentType: &contentType,
@@ -963,7 +911,7 @@ func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
_, err := os.Stat(srcBucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
@@ -971,7 +919,7 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
}
_, err = os.Stat(DstBucket)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
@@ -980,7 +928,7 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
objPath := filepath.Join(srcBucket, srcObject)
f, err := os.Open(objPath)
if errors.Is(err, fs.ErrNotExist) {
if err != nil && os.IsNotExist(err) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
@@ -988,7 +936,7 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
}
defer f.Close()
etag, err := p.PutObject(&s3.PutObjectInput{Bucket: &DstBucket, Key: &dstObject, Body: f})
etag, err := p.PutObject(DstBucket, dstObject, f)
if err != nil {
return nil, err
}
@@ -1006,171 +954,9 @@ func (p *Posix) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*
}, nil
}
func (p *Posix) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return nil, fmt.Errorf("stat bucket: %w", err)
}
fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
func(path string) (bool, error) {
_, err := xattr.Get(filepath.Join(bucket, path), "user.etag")
if isNoAttr(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}, func(path string) (string, error) {
etag, err := xattr.Get(filepath.Join(bucket, path), "user.etag")
return string(etag), err
}, []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
}
return &s3.ListObjectsOutput{
CommonPrefixes: results.CommonPrefixes,
Contents: results.Objects,
Delimiter: &delim,
IsTruncated: results.Truncated,
Marker: &marker,
MaxKeys: int32(maxkeys),
Name: &bucket,
NextMarker: &results.NextMarker,
Prefix: &prefix,
}, nil
func (p *Posix) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return nil, fmt.Errorf("stat bucket: %w", err)
}
fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
func(path string) (bool, error) {
_, err := xattr.Get(filepath.Join(bucket, path), "user.etag")
if isNoAttr(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}, func(path string) (string, error) {
etag, err := xattr.Get(filepath.Join(bucket, path), "user.etag")
return string(etag), err
}, []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
}
return &s3.ListObjectsV2Output{
CommonPrefixes: results.CommonPrefixes,
Contents: results.Objects,
Delimiter: &delim,
IsTruncated: results.Truncated,
ContinuationToken: &marker,
MaxKeys: int32(maxkeys),
Name: &bucket,
NextContinuationToken: &results.NextMarker,
Prefix: &prefix,
}, nil
}
func (p *Posix) GetTags(bucket, object string) (map[string]string, error) {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return nil, fmt.Errorf("stat bucket: %w", err)
}
return p.getXattrTags(bucket, object)
}
func (p *Posix) getXattrTags(bucket, object string) (map[string]string, error) {
tags := make(map[string]string)
b, err := xattr.Get(filepath.Join(bucket, object), "user."+tagHdr)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if isNoAttr(err) {
return tags, nil
}
if err != nil {
return nil, fmt.Errorf("get tags: %w", err)
}
err = json.Unmarshal(b, &tags)
if err != nil {
return nil, fmt.Errorf("unmarshal tags: %w", err)
}
return tags, nil
}
func (p *Posix) SetTags(bucket, object string, tags map[string]string) error {
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return fmt.Errorf("stat bucket: %w", err)
}
if tags == nil {
err = xattr.Remove(filepath.Join(bucket, object), "user."+tagHdr)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
return fmt.Errorf("remove tags: %w", err)
}
return nil
}
b, err := json.Marshal(tags)
if err != nil {
return fmt.Errorf("marshal tags: %w", err)
}
err = xattr.Set(filepath.Join(bucket, object), "user."+tagHdr, b)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if err != nil {
return fmt.Errorf("set tags: %w", err)
}
return nil
}
func (p *Posix) RemoveTags(bucket, object string) error {
return p.SetTags(bucket, object, nil)
}
func isNoAttr(err error) bool {
if err == nil {
return false
}
xerr, ok := err.(*xattr.Error)
if ok && xerr.Err == xattr.ENOATTR {
return true
}
if err == syscall.ENODATA {
return true
}
return false
func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
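
The put paths above all share one pattern: the object body is streamed through an MD5 hash with io.TeeReader while it is written, and the hex digest is stored back on the file in a user.etag extended attribute. A minimal standalone sketch of that pattern, not the gateway's own code (the helper name and destination path are made up for illustration):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"

	"github.com/pkg/xattr"
)

// writeWithETag streams r to dst, computing the MD5 ETag in the same pass
// and recording it in the user.etag xattr, mirroring the posix backend.
func writeWithETag(dst string, r io.Reader) (string, error) {
	f, err := os.Create(dst)
	if err != nil {
		return "", fmt.Errorf("create: %w", err)
	}
	defer f.Close()

	hash := md5.New()
	// TeeReader feeds every byte copied into the file through the hash too.
	if _, err := io.Copy(f, io.TeeReader(r, hash)); err != nil {
		return "", fmt.Errorf("write object data: %w", err)
	}

	etag := hex.EncodeToString(hash.Sum(nil))
	if err := xattr.Set(dst, "user.etag", []byte(etag)); err != nil {
		return "", fmt.Errorf("set etag xattr: %w", err)
	}
	return etag, nil
}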

View File

@@ -1,89 +1,14 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package posix
import (
"crypto/sha256"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
)
type tmpfile struct {
f *os.File
bucket string
objname string
size int64
func openTmpFile(dir string) (*os.File, error) {
return nil, fmt.Errorf("not implemented")
}
func openTmpFile(dir, bucket, obj string, size int64) (*tmpfile, error) {
// Create a temp file for upload while in progress (see link comments below).
err := os.MkdirAll(dir, 0700)
if err != nil {
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
return nil, err
}
return &tmpfile{f: f, bucket: bucket, objname: obj, size: size}, nil
}
func (tmp *tmpfile) link() error {
tempname := tmp.f.Name()
// cleanup in case anything goes wrong, if rename succeeds then
// this will no longer exist
defer os.Remove(tempname)
// We use Rename as the atomic operation for object puts. The upload is
// written to a temp file to not conflict with any other simultaneous
// uploads. The final operation is to move the temp file into place for
// the object. This ensures the object semantics of last upload completed
// wins and is not some combination of writes from simultaneous uploads.
objPath := filepath.Join(tmp.bucket, tmp.objname)
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
err = tmp.f.Close()
if err != nil {
return fmt.Errorf("close tmpfile: %w", err)
}
err = os.Rename(tempname, objPath)
if err != nil {
return fmt.Errorf("rename tmpfile: %w", err)
}
return nil
}
func (tmp *tmpfile) Write(b []byte) (int, error) {
if int64(len(b)) > tmp.size {
return 0, fmt.Errorf("write exceeds content length %v", tmp.size)
}
n, err := tmp.f.Write(b)
tmp.size -= int64(n)
return n, err
}
func (tmp *tmpfile) cleanup() {
tmp.f.Close()
func linkTmpFile(f *os.File, path string) error {
return fmt.Errorf("not implemented")
}
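
The link comments in the posix implementations describe the portable strategy when O_TMPFILE is unavailable: write the upload to a CreateTemp file on the same filesystem, then Rename it into place so the last completed upload wins atomically. A minimal sketch of that fallback using only the standard library (function name hypothetical):

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// atomicPut writes r to a temp file next to objPath and renames it into
// place; Rename is the atomic step, so concurrent uploads never interleave.
func atomicPut(objPath string, r io.Reader) error {
	f, err := os.CreateTemp(filepath.Dir(objPath), ".upload-*")
	if err != nil {
		return fmt.Errorf("create temp: %w", err)
	}
	// If the rename succeeds, this Remove is a harmless no-op on a missing file.
	defer os.Remove(f.Name())

	if _, err := io.Copy(f, r); err != nil {
		f.Close()
		return fmt.Errorf("write temp: %w", err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("close temp: %w", err)
	}
	return os.Rename(f.Name(), objPath)
}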

View File

@@ -1,105 +1,31 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package posix
import (
"crypto/sha256"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"strconv"
"syscall"
"golang.org/x/sys/unix"
)
const procfddir = "/proc/self/fd"
type tmpfile struct {
f *os.File
bucket string
objname string
isOTmp bool
size int64
}
func openTmpFile(dir, bucket, obj string, size int64) (*tmpfile, error) {
// O_TMPFILE gives us a file handle to an unnamed file in the filesystem.
// This can help reduce contention within the namespace (parent directories),
// and the inode is automatically cleaned up on close if we never link the
// file descriptor into the namespace.
// Not all filesystems support this, so fall back to CreateTemp when
// O_TMPFILE is not supported.
func openTmpFile(dir string) (*os.File, error) {
fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0666)
if err != nil {
// O_TMPFILE not supported, try fallback
err := os.MkdirAll(dir, 0700)
if err != nil {
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
return nil, err
}
tmp := &tmpfile{f: f, bucket: bucket, objname: obj, size: size}
// falloc is best effort, it's fine if this fails
if size > 0 {
tmp.falloc()
}
return tmp, nil
return nil, err
}
// For O_TMPFILE, the filename is /proc/self/fd/<fd>, to be used
// later to link the file into the namespace.
f := os.NewFile(uintptr(fd), filepath.Join(procfddir, strconv.Itoa(fd)))
tmp := &tmpfile{f: f, isOTmp: true, size: size}
// falloc is best effort, it's fine if this fails
if size > 0 {
tmp.falloc()
}
return tmp, nil
return os.NewFile(uintptr(fd), filepath.Join(procfddir, strconv.Itoa(fd))), nil
}
func (tmp *tmpfile) falloc() error {
err := syscall.Fallocate(int(tmp.f.Fd()), 0, 0, tmp.size)
if err != nil {
return fmt.Errorf("fallocate: %v", err)
}
return nil
}
func (tmp *tmpfile) link() error {
// We use Linkat/Rename as the atomic operation for object puts. The
// upload is written to a temp (or unnamed/O_TMPFILE) file to not conflict
// with any other simultaneous uploads. The final operation is to move the
// temp file into place for the object. This ensures the object semantics
// of last upload completed wins and is not some combination of writes
// from simultaneous uploads.
objPath := filepath.Join(tmp.bucket, tmp.objname)
err := os.Remove(objPath)
func linkTmpFile(f *os.File, path string) error {
err := os.Remove(path)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
if !tmp.isOTmp {
// O_TMPFILE not supported, use fallback
return tmp.fallbackLink()
return fmt.Errorf("remove stale part: %w", err)
}
procdir, err := os.Open(procfddir)
@@ -108,56 +34,17 @@ func (tmp *tmpfile) link() error {
}
defer procdir.Close()
dir, err := os.Open(filepath.Dir(objPath))
dir, err := os.Open(filepath.Dir(path))
if err != nil {
return fmt.Errorf("open parent dir: %w", err)
}
defer dir.Close()
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dir.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
err = unix.Linkat(int(procdir.Fd()), filepath.Base(f.Name()),
int(dir.Fd()), filepath.Base(path), unix.AT_SYMLINK_FOLLOW)
if err != nil {
return fmt.Errorf("link tmpfile: %w", err)
}
err = tmp.f.Close()
if err != nil {
return fmt.Errorf("close tmpfile: %w", err)
}
return nil
}
func (tmp *tmpfile) fallbackLink() error {
tempname := tmp.f.Name()
// cleanup in case anything goes wrong, if rename succeeds then
// this will no longer exist
defer os.Remove(tempname)
err := tmp.f.Close()
if err != nil {
return fmt.Errorf("close tmpfile: %w", err)
}
objPath := filepath.Join(tmp.bucket, tmp.objname)
err = os.Rename(tempname, objPath)
if err != nil {
return fmt.Errorf("rename tmpfile: %w", err)
}
return nil
}
func (tmp *tmpfile) Write(b []byte) (int, error) {
if int64(len(b)) > tmp.size {
return 0, fmt.Errorf("write exceeds content length %v", tmp.size)
}
n, err := tmp.f.Write(b)
tmp.size -= int64(n)
return n, err
}
func (tmp *tmpfile) cleanup() {
tmp.f.Close()
}
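
For the Linux path, the flow the comments describe is: open an unnamed O_TMPFILE descriptor in the target filesystem, write the data, then Linkat the /proc/self/fd entry into the namespace. A compact sketch of that sequence (Linux-only, helper name made up; the real code also handles the fallback and fallocate):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// putUnnamed writes data into an unnamed O_TMPFILE inode under dir and then
// links it into place at dest via its /proc/self/fd entry.
func putUnnamed(dir, dest string, data []byte) error {
	fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, 0666)
	if err != nil {
		return fmt.Errorf("open O_TMPFILE: %w", err)
	}
	f := os.NewFile(uintptr(fd), "unnamed-tmpfile")
	defer f.Close() // discards the inode if we never link it

	if _, err := f.Write(data); err != nil {
		return fmt.Errorf("write: %w", err)
	}

	// Linkat on an existing name fails, so remove any stale object first.
	if err := os.Remove(dest); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("remove stale path: %w", err)
	}
	proc := fmt.Sprintf("/proc/self/fd/%d", fd)
	if err := unix.Linkat(unix.AT_FDCWD, proc, unix.AT_FDCWD, dest, unix.AT_SYMLINK_FOLLOW); err != nil {
		return fmt.Errorf("linkat: %w", err)
	}
	return nil
}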

View File

@@ -1,89 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package posix
import (
"crypto/sha256"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
)
type tmpfile struct {
f *os.File
bucket string
objname string
size int64
}
func openTmpFile(dir, bucket, obj string, size int64) (*tmpfile, error) {
// Create a temp file for upload while in progress (see link comments below).
err := os.MkdirAll(dir, 0700)
if err != nil {
return nil, fmt.Errorf("make temp dir: %w", err)
}
f, err := os.CreateTemp(dir,
fmt.Sprintf("%x.", sha256.Sum256([]byte(obj))))
if err != nil {
return nil, err
}
return &tmpfile{f: f, bucket: bucket, objname: obj, size: size}, nil
}
func (tmp *tmpfile) link() error {
tempname := tmp.f.Name()
// cleanup in case anything goes wrong, if rename succeeds then
// this will no longer exist
defer os.Remove(tempname)
// We use Rename as the atomic operation for object puts. The upload is
// written to a temp file to not conflict with any other simultaneous
// uploads. The final operation is to move the temp file into place for
// the object. This ensures the object semantics of last upload completed
// wins and is not some combination of writes from simultaneous uploads.
objPath := filepath.Join(tmp.bucket, tmp.objname)
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
err = tmp.f.Close()
if err != nil {
return fmt.Errorf("close tmpfile: %w", err)
}
err = os.Rename(tempname, objPath)
if err != nil {
return fmt.Errorf("rename tmpfile: %w", err)
}
return nil
}
func (tmp *tmpfile) Write(b []byte) (int, error) {
if int64(len(b)) > tmp.size {
return 0, fmt.Errorf("write exceeds content length")
}
n, err := tmp.f.Write(b)
tmp.size -= int64(n)
return n, err
}
func (tmp *tmpfile) cleanup() {
tmp.f.Close()
}

View File

@@ -1,22 +1,8 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package scoutfs
import (
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/posix"
"github.com/versity/scoutgw/backend"
"github.com/versity/scoutgw/backend/posix"
)
type ScoutFS struct {

View File

@@ -1,242 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package backend
import (
"fmt"
"io/fs"
"os"
"sort"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
type WalkResults struct {
CommonPrefixes []types.CommonPrefix
Objects []types.Object
Truncated bool
NextMarker string
}
type DirObjCheck func(path string) (bool, error)
type GetETag func(path string) (string, error)
// Walk walks the supplied fs.FS and returns results compatible with list
// objects responses
func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, dirchk DirObjCheck, getetag GetETag, skipdirs []string) (WalkResults, error) {
cpmap := make(map[string]struct{})
var objects []types.Object
var pastMarker bool
if marker == "" {
pastMarker = true
}
var pastMax bool
var newMarker string
var truncated bool
err := fs.WalkDir(fileSystem, ".", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if pastMax {
newMarker = path
truncated = true
return fs.SkipAll
}
if d.IsDir() {
// Ignore the root directory
if path == "." {
return nil
}
if contains(d.Name(), skipdirs) {
return fs.SkipDir
}
// If prefix is defined and the directory does not match prefix,
// do not descend into the directory because nothing will
// match this prefix. Make sure to append the / at the end of
// directories since this is implied as a directory path name.
// If path is a prefix of prefix, then path could still be
// building to match. So only skip if path isn't a prefix of prefix
// and prefix isn't a prefix of path.
if prefix != "" &&
!strings.HasPrefix(path+string(os.PathSeparator), prefix) &&
!strings.HasPrefix(prefix, path+string(os.PathSeparator)) {
return fs.SkipDir
}
// TODO: can we do better here rather than a second readdir
// per directory?
ents, err := fs.ReadDir(fileSystem, path)
if err != nil {
return fmt.Errorf("readdir %q: %w", path, err)
}
if len(ents) == 0 {
dirobj, err := dirchk(path)
if err != nil {
return fmt.Errorf("directory object check %q: %w", path, err)
}
if dirobj {
fi, err := d.Info()
if err != nil {
return fmt.Errorf("dir info %q: %w", path, err)
}
etag, err := getetag(path)
if err != nil {
return fmt.Errorf("get etag %q: %w", path, err)
}
path := path + "/"
objects = append(objects, types.Object{
ETag: &etag,
Key: &path,
LastModified: GetTimePtr(fi.ModTime()),
})
}
}
return nil
}
if !pastMarker {
if path != marker {
return nil
}
pastMarker = true
}
// If the object doesn't have the prefix, don't include it in results.
if prefix != "" && !strings.HasPrefix(path, prefix) {
return nil
}
if delimiter == "" {
// If no delimiter is specified, then all files with a matching
// prefix are included in the results.
fi, err := d.Info()
if err != nil {
return fmt.Errorf("get info for %v: %w", path, err)
}
etag, err := getetag(path)
if err != nil {
return fmt.Errorf("get etag %q: %w", path, err)
}
objects = append(objects, types.Object{
ETag: &etag,
Key: &path,
LastModified: GetTimePtr(fi.ModTime()),
Size: fi.Size(),
})
if max > 0 && (len(objects)+len(cpmap)) == max {
pastMax = true
}
return nil
}
// Since delimiter is specified, we only want results that
// do not contain the delimiter beyond the prefix. If the
// delimiter exists past the prefix, then the substring
// between the prefix and delimiter is part of common prefixes.
//
// For example:
// prefix = A/
// delimiter = /
// and objects:
// A/file
// A/B/file
// B/C
// would return:
// objects: A/file
// common prefix: A/B/
//
// Note: No objects are included past the common prefix since
// these are all rolled up into the common prefix.
// Note: The delimiter can be anything, so we have to operate on
// the full path without any assumptions about posix directory hierarchy
// here. Usually the delimiter will be "/", but that's not required.
suffix := strings.TrimPrefix(path, prefix)
before, _, found := strings.Cut(suffix, delimiter)
if !found {
fi, err := d.Info()
if err != nil {
return fmt.Errorf("get info for %v: %w", path, err)
}
etag, err := getetag(path)
if err != nil {
return fmt.Errorf("get etag %q: %w", path, err)
}
objects = append(objects, types.Object{
ETag: &etag,
Key: &path,
LastModified: GetTimePtr(fi.ModTime()),
Size: fi.Size(),
})
if (len(objects) + len(cpmap)) == max {
pastMax = true
}
return nil
}
// Common prefixes are a set, so should not have duplicates.
// These are abstractly a "directory", so need to include the
// delimiter at the end.
cpmap[prefix+before+delimiter] = struct{}{}
if (len(objects) + len(cpmap)) == max {
pastMax = true
}
return nil
})
if err != nil {
return WalkResults{}, err
}
var commonPrefixStrings []string
for k := range cpmap {
commonPrefixStrings = append(commonPrefixStrings, k)
}
sort.Strings(commonPrefixStrings)
commonPrefixes := make([]types.CommonPrefix, 0, len(commonPrefixStrings))
for _, cp := range commonPrefixStrings {
pfx := cp
commonPrefixes = append(commonPrefixes, types.CommonPrefix{
Prefix: &pfx,
})
}
return WalkResults{
CommonPrefixes: commonPrefixes,
Objects: objects,
Truncated: truncated,
NextMarker: newMarker,
}, nil
}
func contains(a string, strs []string) bool {
for _, s := range strs {
if s == a {
return true
}
}
return false
}
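
A short usage sketch of Walk reproducing the prefix/delimiter example from the comments above (the callbacks are stand-ins; a real caller checks xattrs as the posix backend does): with prefix "A/" and delimiter "/", the objects A/file, A/B/file and B/C yield one object and one common prefix.

package main

import (
	"fmt"
	"testing/fstest"

	"github.com/versity/versitygw/backend"
)

func main() {
	fsys := fstest.MapFS{
		"A/file":   {},
		"A/B/file": {},
		"B/C":      {},
	}
	res, err := backend.Walk(fsys, "A/", "/", "", 1000,
		func(string) (bool, error) { return false, nil },    // no directory objects
		func(string) (string, error) { return "etag", nil }, // constant etag
		nil)
	if err != nil {
		panic(err)
	}
	for _, o := range res.Objects {
		fmt.Println("object:", *o.Key) // object: A/file
	}
	for _, cp := range res.CommonPrefixes {
		fmt.Println("common prefix:", *cp.Prefix) // common prefix: A/B/
	}
}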

View File

@@ -1,167 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package backend_test
import (
"io/fs"
"testing"
"testing/fstest"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/backend"
)
type walkTest struct {
fsys fs.FS
expected backend.WalkResults
dc backend.DirObjCheck
}
func gettag(string) (string, error) { return "myetag", nil }
func TestWalk(t *testing.T) {
tests := []walkTest{
{
// test case from
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html
fsys: fstest.MapFS{
"sample.jpg": {},
"photos/2006/January/sample.jpg": {},
"photos/2006/February/sample2.jpg": {},
"photos/2006/February/sample3.jpg": {},
"photos/2006/February/sample4.jpg": {},
},
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetStringPtr("photos/"),
}},
Objects: []types.Object{{
Key: backend.GetStringPtr("sample.jpg"),
}},
},
dc: func(string) (bool, error) { return false, nil },
},
{
// test case single dir/single file
fsys: fstest.MapFS{
"test/file": {},
},
expected: backend.WalkResults{
CommonPrefixes: []types.CommonPrefix{{
Prefix: backend.GetStringPtr("test/"),
}},
Objects: []types.Object{},
},
dc: func(string) (bool, error) { return true, nil },
},
}
for _, tt := range tests {
res, err := backend.Walk(tt.fsys, "", "/", "", 1000, tt.dc, gettag, []string{})
if err != nil {
t.Fatalf("walk: %v", err)
}
compareResults(res, tt.expected, t)
}
}
func compareResults(got, wanted backend.WalkResults, t *testing.T) {
if !compareCommonPrefix(got.CommonPrefixes, wanted.CommonPrefixes) {
t.Errorf("unexpected common prefix, got %v wanted %v",
printCommonPrefixes(got.CommonPrefixes),
printCommonPrefixes(wanted.CommonPrefixes))
}
if !compareObjects(got.Objects, wanted.Objects) {
t.Errorf("unexpected object, got %v wanted %v",
printObjects(got.Objects),
printObjects(wanted.Objects))
}
}
func compareCommonPrefix(a, b []types.CommonPrefix) bool {
if len(a) == 0 && len(b) == 0 {
return true
}
if len(a) != len(b) {
return false
}
for _, cp := range a {
if !containsCommonPrefix(cp, b) {
return false
}
}
return true
}
func containsCommonPrefix(c types.CommonPrefix, list []types.CommonPrefix) bool {
for _, cp := range list {
if *c.Prefix == *cp.Prefix {
return true
}
}
return false
}
func printCommonPrefixes(list []types.CommonPrefix) string {
res := "["
for _, cp := range list {
if res == "[" {
res = res + *cp.Prefix
} else {
res = res + ", " + *cp.Prefix
}
}
return res + "]"
}
func compareObjects(a, b []types.Object) bool {
if len(a) == 0 && len(b) == 0 {
return true
}
if len(a) != len(b) {
return false
}
for _, cp := range a {
if !containsObject(cp, b) {
return false
}
}
return true
}
func containsObject(c types.Object, list []types.Object) bool {
for _, cp := range list {
if *c.Key == *cp.Key {
return true
}
}
return false
}
func printObjects(list []types.Object) string {
res := "["
for _, cp := range list {
if res == "[" {
res = res + *cp.Key
} else {
res = res + ", " + *cp.Key
}
}
return res + "]"
}

18
cmd/scoutgw/main.go Normal file
View File

@@ -0,0 +1,18 @@
package main
import (
"log"

"github.com/gofiber/fiber/v2"
"github.com/versity/scoutgw/backend"
"github.com/versity/scoutgw/s3api"
)
func main() {
app := fiber.New(fiber.Config{})
back := backend.New()
if api, err := s3api.New(app, back, ":7070"); err != nil {
log.Fatalln(err)
} else if err = api.Serve(); err != nil {
log.Fatalln(err)
}
}

View File

@@ -1,145 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"net/http"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/urfave/cli/v2"
)
var (
adminAccess string
adminSecret string
adminRegion string
)
func adminCommand() *cli.Command {
return &cli.Command{
Name: "admin",
Usage: "admin CLI tool",
Description: `admin CLI tool for interacting with the admin API.
Available APIs:
create-user
`,
Subcommands: []*cli.Command{
{
Name: "create-user",
Usage: "Create a new user",
Action: createUser,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "access",
Usage: "access value for the new user",
Required: true,
Aliases: []string{"a"},
},
&cli.StringFlag{
Name: "secret",
Usage: "secret value for the new user",
Required: true,
Aliases: []string{"s"},
},
&cli.StringFlag{
Name: "role",
Usage: "role for the new user",
Required: true,
Aliases: []string{"r"},
},
&cli.StringFlag{
Name: "region",
Usage: "s3 region string for the user",
Value: "us-east-1",
Aliases: []string{"rg"},
},
},
},
},
Flags: []cli.Flag{
// TODO: create a configuration file for this
&cli.StringFlag{
Name: "adminAccess",
Usage: "admin access account",
EnvVars: []string{"ADMIN_ACCESS_KEY_ID", "ADMIN_ACCESS_KEY"},
Aliases: []string{"aa"},
Destination: &adminAccess,
},
&cli.StringFlag{
Name: "adminSecret",
Usage: "admin secret access key",
EnvVars: []string{"ADMIN_SECRET_ACCESS_KEY", "ADMIN_SECRET_KEY"},
Aliases: []string{"as"},
Destination: &adminSecret,
},
&cli.StringFlag{
Name: "adminRegion",
Usage: "s3 region string",
Value: "us-east-1",
Destination: &adminRegion,
Aliases: []string{"ar"},
},
},
}
}
func createUser(ctx *cli.Context) error {
access, secret, role, region := ctx.String("access"), ctx.String("secret"), ctx.String("role"), ctx.String("region")
if access == "" || secret == "" || region == "" {
return fmt.Errorf("invalid input parameters for the new user")
}
if role != "admin" && role != "user" {
return fmt.Errorf("invalid input parameter for role")
}
req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://localhost:7070/create-user?access=%v&secret=%v&role=%v&region=%v", access, secret, role, region), nil)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
signer := v4.NewSigner()
hashedPayload := sha256.Sum256([]byte{})
hexPayload := hex.EncodeToString(hashedPayload[:])
req.Header.Set("X-Amz-Content-Sha256", hexPayload)
signErr := signer.SignHTTP(req.Context(), aws.Credentials{AccessKeyID: adminAccess, SecretAccessKey: adminSecret}, req, hexPayload, "s3", adminRegion, time.Now())
if signErr != nil {
return fmt.Errorf("failed to sign the request: %w", err)
}
client := http.Client{}
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to send the request: %w", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
fmt.Printf("%s", body)
return nil
}

View File

@@ -1,173 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
"crypto/tls"
"fmt"
"log"
"os"
"github.com/gofiber/fiber/v2"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/auth"
"github.com/versity/versitygw/s3api"
"github.com/versity/versitygw/s3api/middlewares"
)
var (
port string
rootUserAccess string
rootUserSecret string
region string
certFile, keyFile string
debug bool
)
var (
// Version is the latest tag (set within Makefile)
Version = "git"
// Build is the commit hash (set within Makefile)
Build = "norev"
// BuildTime is the date/time of build (set within Makefile)
BuildTime = "none"
)
func main() {
app := initApp()
app.Commands = []*cli.Command{
posixCommand(),
adminCommand(),
}
if err := app.Run(os.Args); err != nil {
log.Fatal(err)
}
}
func initApp() *cli.App {
return &cli.App{
Name: "versitygw",
Usage: "Start S3 gateway service with specified backend storage.",
Description: `The S3 gateway is an S3 protocol translator that allows an S3 client
to access the supported backend storage as if it were a native S3 service.`,
Action: func(ctx *cli.Context) error {
return ctx.App.Command("help").Run(ctx)
},
Flags: initFlags(),
}
}
func initFlags() []cli.Flag {
return []cli.Flag{
&cli.BoolFlag{
Name: "version",
Usage: "list versitygw version",
Aliases: []string{"v"},
Action: func(*cli.Context, bool) error {
fmt.Println("Version :", Version)
fmt.Println("Build :", Build)
fmt.Println("BuildTime:", BuildTime)
os.Exit(0)
return nil
},
},
&cli.StringFlag{
Name: "port",
Usage: "gateway listen address <ip>:<port> or :<port>",
Value: ":7070",
Destination: &port,
Aliases: []string{"p"},
},
&cli.StringFlag{
Name: "access",
Usage: "root user access key",
EnvVars: []string{"ROOT_ACCESS_KEY_ID", "ROOT_ACCESS_KEY"},
Aliases: []string{"a"},
Destination: &rootUserAccess,
},
&cli.StringFlag{
Name: "secret",
Usage: "root user secret access key",
EnvVars: []string{"ROOT_SECRET_ACCESS_KEY", "ROOT_SECRET_KEY"},
Aliases: []string{"s"},
Destination: &rootUserSecret,
},
&cli.StringFlag{
Name: "region",
Usage: "s3 region string",
Value: "us-east-1",
Destination: &region,
Aliases: []string{"r"},
},
&cli.StringFlag{
Name: "cert",
Usage: "TLS cert file",
Destination: &certFile,
},
&cli.StringFlag{
Name: "key",
Usage: "TLS key file",
Destination: &keyFile,
},
&cli.BoolFlag{
Name: "debug",
Usage: "enable debug output",
Destination: &debug,
},
}
}
func runGateway(be backend.Backend) error {
app := fiber.New(fiber.Config{
AppName: "versitygw",
ServerHeader: "VERSITYGW",
BodyLimit: 5 * 1024 * 1024 * 1024,
})
var opts []s3api.Option
if certFile != "" || keyFile != "" {
if certFile == "" {
return fmt.Errorf("TLS key specified without cert file")
}
if keyFile == "" {
return fmt.Errorf("TLS cert specified without key file")
}
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return fmt.Errorf("tls: load certs: %v", err)
}
opts = append(opts, s3api.WithTLS(cert))
}
if debug {
opts = append(opts, s3api.WithDebug())
}
srv, err := s3api.New(app, be, middlewares.RootUserConfig{
Access: rootUserAccess,
Secret: rootUserSecret,
Region: region,
}, port, auth.New(), opts...)
if err != nil {
return fmt.Errorf("init gateway: %v", err)
}
return srv.Serve()
}

View File

@@ -1,53 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
"fmt"
"github.com/urfave/cli/v2"
"github.com/versity/versitygw/backend/posix"
)
func posixCommand() *cli.Command {
return &cli.Command{
Name: "posix",
Usage: "posix filesystem storage backend",
Description: `Any posix filesystem that supports extended attributes. The top level
directory for the gateway must be provided. All sub directories of the
top level directory are treated as buckets, and all files/directories
below the "bucket directory" are treated as the objects. The object
name is split on "/" separator to translate to posix storage.
For example:
top level: /mnt/fs/gwroot
bucket: mybucket
object: a/b/c/myobject
will be translated into the file /mnt/fs/gwroot/mybucket/a/b/c/myobject`,
Action: runPosix,
}
}
func runPosix(ctx *cli.Context) error {
if ctx.NArg() == 0 {
return fmt.Errorf("no directory provided for operation")
}
be, err := posix.New(ctx.Args().Get(0))
if err != nil {
return fmt.Errorf("init posix: %v", err)
}
return runGateway(be)
}
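
The bucket/object to path translation described in the help text is a straight join of the gateway root, the bucket, and the slash-separated object key. A tiny illustration using the example from the description (helper name hypothetical):

package main

import (
	"fmt"
	"path/filepath"
)

func objectPath(root, bucket, key string) string {
	// /mnt/fs/gwroot + mybucket + a/b/c/myobject ->
	// /mnt/fs/gwroot/mybucket/a/b/c/myobject
	return filepath.Join(root, bucket, filepath.FromSlash(key))
}

func main() {
	fmt.Println(objectPath("/mnt/fs/gwroot", "mybucket", "a/b/c/myobject"))
}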

14
go.mod
View File

@@ -1,16 +1,14 @@
module github.com/versity/versitygw
module github.com/versity/scoutgw
go 1.20
require (
github.com/aws/aws-sdk-go-v2 v1.18.0
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.1
github.com/aws/smithy-go v1.13.5
github.com/gofiber/fiber/v2 v2.46.0
github.com/gofiber/fiber/v2 v2.45.0
github.com/valyala/fasthttp v1.47.0
github.com/google/uuid v1.3.0
github.com/pkg/xattr v0.4.9
github.com/urfave/cli/v2 v2.25.4
github.com/valyala/fasthttp v1.47.0
golang.org/x/sys v0.8.0
)
@@ -24,18 +22,16 @@ require (
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.28 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 // indirect
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee // indirect
github.com/tinylib/msgp v1.1.8 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
)

16
go.sum
View File

@@ -22,11 +22,9 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.33.1 h1:O+9nAy9Bb6bJFTpeNFtd9UfHbgxO1
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.1/go.mod h1:J9kLNzEiHSeGMyN7238EjJmBpCniVzFda75Gxl/NqB8=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gofiber/fiber/v2 v2.46.0 h1:wkkWotblsGVlLjXj2dpgKQAYHtXumsK/HyFugQM68Ns=
github.com/gofiber/fiber/v2 v2.46.0/go.mod h1:DNl0/c37WLe0g92U6lx1VMQuxGUQY5V7EIaVoEsUffc=
github.com/gofiber/fiber/v2 v2.45.0 h1:p4RpkJT9GAW6parBSbcNFH2ApnAuW3OzaQzbOCoDu+s=
github.com/gofiber/fiber/v2 v2.45.0/go.mod h1:DNl0/c37WLe0g92U6lx1VMQuxGUQY5V7EIaVoEsUffc=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
@@ -38,8 +36,8 @@ github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQs
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
@@ -51,8 +49,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 h1:rmMl4fXJhKMNWl+K+r/fq4FbbKI+Ia2m9hYBLm2h4G4=
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94/go.mod h1:90zrgN3D/WJsDd1iXHT96alCoN2KJo6/4x1DZC3wZs8=
github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d/go.mod h1:Gy+0tqhJvgGlqnTF8CVGP0AaGRjwBtXs/a5PA0Y3+A4=
@@ -62,16 +58,12 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw=
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
github.com/urfave/cli/v2 v2.25.4 h1:HyYwPrTO3im9rYhUff/ZNs78eolxt0nJ4LN+9yJKSH4=
github.com/urfave/cli/v2 v2.25.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.47.0 h1:y7moDoxYzMooFpT5aHgNgVOQDrS3qlkfiP9mDtGGK9c=
github.com/valyala/fasthttp v1.47.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=

View File

@@ -1,49 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package controllers
import (
"fmt"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/backend/auth"
)
type AdminController struct {
IAMService auth.IAMService
}
func NewAdminController() AdminController {
return AdminController{IAMService: auth.New()}
}
func (c AdminController) CreateUser(ctx *fiber.Ctx) error {
access, secret, role, region := ctx.Query("access"), ctx.Query("secret"), ctx.Query("role"), ctx.Query("region")
requesterRole := ctx.Locals("role")
if requesterRole != "admin" {
return fmt.Errorf("access denied: only admin users have access to this resource")
}
user := auth.Account{Secret: secret, Role: role, Region: region}
err := c.IAMService.CreateAccount(access, &user)
if err != nil {
return fmt.Errorf("failed to create a user: %w", err)
}
ctx.SendString("The user has been created successfully")
return nil
}

View File

@@ -4,23 +4,23 @@
package controllers
import (
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3response"
"io"
"sync"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/scoutgw/backend"
)
// Ensure, that BackendMock does implement backend.Backend.
// Ensure, that BackendMock does implement Backend.
// If this is not the case, regenerate this file with moq.
var _ backend.Backend = &BackendMock{}
// BackendMock is a mock implementation of backend.Backend.
// BackendMock is a mock implementation of Backend.
//
// func TestSomethingThatUsesBackend(t *testing.T) {
//
// // make and configure a mocked backend.Backend
// // make and configure a mocked Backend
// mockedBackend := &BackendMock{
// AbortMultipartUploadFunc: func(abortMultipartUploadInput *s3.AbortMultipartUploadInput) error {
// panic("mock out the AbortMultipartUpload method")
@@ -49,7 +49,10 @@ var _ backend.Backend = &BackendMock{}
// GetBucketAclFunc: func(bucket string) (*s3.GetBucketAclOutput, error) {
// panic("mock out the GetBucketAcl method")
// },
// GetObjectFunc: func(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
// GetIAMConfigFunc: func() ([]byte, error) {
// panic("mock out the GetIAMConfig method")
// },
// GetObjectFunc: func(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
// panic("mock out the GetObject method")
// },
// GetObjectAclFunc: func(bucket string, object string) (*s3.GetObjectAclOutput, error) {
@@ -64,22 +67,25 @@ var _ backend.Backend = &BackendMock{}
// HeadBucketFunc: func(bucket string) (*s3.HeadBucketOutput, error) {
// panic("mock out the HeadBucket method")
// },
// HeadObjectFunc: func(bucket string, object string) (*s3.HeadObjectOutput, error) {
// HeadObjectFunc: func(bucket string, object string, etag string) (*s3.HeadObjectOutput, error) {
// panic("mock out the HeadObject method")
// },
// IsTaggingSupportedFunc: func() bool {
// panic("mock out the IsTaggingSupported method")
// },
// ListBucketsFunc: func() (*s3.ListBucketsOutput, error) {
// panic("mock out the ListBuckets method")
// },
// ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
// ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
// panic("mock out the ListMultipartUploads method")
// },
// ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
// ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
// panic("mock out the ListObjectParts method")
// },
// ListObjectsFunc: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
// ListObjectsFunc: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
// panic("mock out the ListObjects method")
// },
// ListObjectsV2Func: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
// ListObjectsV2Func: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
// panic("mock out the ListObjectsV2 method")
// },
// PutBucketFunc: func(bucket string) error {
@@ -88,13 +94,13 @@ var _ backend.Backend = &BackendMock{}
// PutBucketAclFunc: func(putBucketAclInput *s3.PutBucketAclInput) error {
// panic("mock out the PutBucketAcl method")
// },
// PutObjectFunc: func(putObjectInput *s3.PutObjectInput) (string, error) {
// PutObjectFunc: func(bucket string, object string, r io.Reader) (string, error) {
// panic("mock out the PutObject method")
// },
// PutObjectAclFunc: func(putObjectAclInput *s3.PutObjectAclInput) error {
// panic("mock out the PutObjectAcl method")
// },
// PutObjectPartFunc: func(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error) {
// PutObjectPartFunc: func(bucket string, object string, uploadID string, part int, r io.Reader) (string, error) {
// panic("mock out the PutObjectPart method")
// },
// RemoveTagsFunc: func(bucket string, object string) error {
@@ -120,7 +126,7 @@ var _ backend.Backend = &BackendMock{}
// },
// }
//
// // use mockedBackend in code that requires backend.Backend
// // use mockedBackend in code that requires Backend
// // and then make assertions.
//
// }
@@ -152,8 +158,11 @@ type BackendMock struct {
// GetBucketAclFunc mocks the GetBucketAcl method.
GetBucketAclFunc func(bucket string) (*s3.GetBucketAclOutput, error)
// GetIAMConfigFunc mocks the GetIAMConfig method.
GetIAMConfigFunc func() ([]byte, error)
// GetObjectFunc mocks the GetObject method.
GetObjectFunc func(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error)
GetObjectFunc func(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error)
// GetObjectAclFunc mocks the GetObjectAcl method.
GetObjectAclFunc func(bucket string, object string) (*s3.GetObjectAclOutput, error)
@@ -168,22 +177,25 @@ type BackendMock struct {
HeadBucketFunc func(bucket string) (*s3.HeadBucketOutput, error)
// HeadObjectFunc mocks the HeadObject method.
HeadObjectFunc func(bucket string, object string) (*s3.HeadObjectOutput, error)
HeadObjectFunc func(bucket string, object string, etag string) (*s3.HeadObjectOutput, error)
// IsTaggingSupportedFunc mocks the IsTaggingSupported method.
IsTaggingSupportedFunc func() bool
// ListBucketsFunc mocks the ListBuckets method.
ListBucketsFunc func() (*s3.ListBucketsOutput, error)
// ListMultipartUploadsFunc mocks the ListMultipartUploads method.
ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
// ListObjectPartsFunc mocks the ListObjectParts method.
ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error)
// ListObjectsFunc mocks the ListObjects method.
ListObjectsFunc func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error)
ListObjectsFunc func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
// ListObjectsV2Func mocks the ListObjectsV2 method.
ListObjectsV2Func func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error)
ListObjectsV2Func func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error)
// PutBucketFunc mocks the PutBucket method.
PutBucketFunc func(bucket string) error
@@ -192,13 +204,13 @@ type BackendMock struct {
PutBucketAclFunc func(putBucketAclInput *s3.PutBucketAclInput) error
// PutObjectFunc mocks the PutObject method.
PutObjectFunc func(putObjectInput *s3.PutObjectInput) (string, error)
PutObjectFunc func(bucket string, object string, r io.Reader) (string, error)
// PutObjectAclFunc mocks the PutObjectAcl method.
PutObjectAclFunc func(putObjectAclInput *s3.PutObjectAclInput) error
// PutObjectPartFunc mocks the PutObjectPart method.
PutObjectPartFunc func(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error)
PutObjectPartFunc func(bucket string, object string, uploadID string, part int, r io.Reader) (string, error)
// RemoveTagsFunc mocks the RemoveTags method.
RemoveTagsFunc func(bucket string, object string) error
@@ -294,16 +306,23 @@ type BackendMock struct {
// Bucket is the bucket argument value.
Bucket string
}
// GetIAMConfig holds details about calls to the GetIAMConfig method.
GetIAMConfig []struct {
}
// GetObject holds details about calls to the GetObject method.
GetObject []struct {
// Bucket is the bucket argument value.
Bucket string
// Object is the object argument value.
Object string
// AcceptRange is the acceptRange argument value.
AcceptRange string
// StartOffset is the startOffset argument value.
StartOffset int64
// Length is the length argument value.
Length int64
// Writer is the writer argument value.
Writer io.Writer
// Etag is the etag argument value.
Etag string
}
// GetObjectAcl holds details about calls to the GetObjectAcl method.
GetObjectAcl []struct {
@@ -339,6 +358,11 @@ type BackendMock struct {
Bucket string
// Object is the object argument value.
Object string
// Etag is the etag argument value.
Etag string
}
// IsTaggingSupported holds details about calls to the IsTaggingSupported method.
IsTaggingSupported []struct {
}
// ListBuckets holds details about calls to the ListBuckets method.
ListBuckets []struct {
@@ -399,8 +423,12 @@ type BackendMock struct {
}
// PutObject holds details about calls to the PutObject method.
PutObject []struct {
// PutObjectInput is the putObjectInput argument value.
PutObjectInput *s3.PutObjectInput
// Bucket is the bucket argument value.
Bucket string
// Object is the object argument value.
Object string
// R is the r argument value.
R io.Reader
}
// PutObjectAcl holds details about calls to the PutObjectAcl method.
PutObjectAcl []struct {
@@ -417,8 +445,6 @@ type BackendMock struct {
UploadID string
// Part is the part argument value.
Part int
// Length is the length argument value.
Length int64
// R is the r argument value.
R io.Reader
}
@@ -479,12 +505,14 @@ type BackendMock struct {
lockDeleteObject sync.RWMutex
lockDeleteObjects sync.RWMutex
lockGetBucketAcl sync.RWMutex
lockGetIAMConfig sync.RWMutex
lockGetObject sync.RWMutex
lockGetObjectAcl sync.RWMutex
lockGetObjectAttributes sync.RWMutex
lockGetTags sync.RWMutex
lockHeadBucket sync.RWMutex
lockHeadObject sync.RWMutex
lockIsTaggingSupported sync.RWMutex
lockListBuckets sync.RWMutex
lockListMultipartUploads sync.RWMutex
lockListObjectParts sync.RWMutex
@@ -844,26 +872,57 @@ func (mock *BackendMock) GetBucketAclCalls() []struct {
return calls
}
// GetIAMConfig calls GetIAMConfigFunc.
func (mock *BackendMock) GetIAMConfig() ([]byte, error) {
if mock.GetIAMConfigFunc == nil {
panic("BackendMock.GetIAMConfigFunc: method is nil but Backend.GetIAMConfig was just called")
}
callInfo := struct {
}{}
mock.lockGetIAMConfig.Lock()
mock.calls.GetIAMConfig = append(mock.calls.GetIAMConfig, callInfo)
mock.lockGetIAMConfig.Unlock()
return mock.GetIAMConfigFunc()
}
// GetIAMConfigCalls gets all the calls that were made to GetIAMConfig.
// Check the length with:
//
// len(mockedBackend.GetIAMConfigCalls())
func (mock *BackendMock) GetIAMConfigCalls() []struct {
} {
var calls []struct {
}
mock.lockGetIAMConfig.RLock()
calls = mock.calls.GetIAMConfig
mock.lockGetIAMConfig.RUnlock()
return calls
}
// GetObject calls GetObjectFunc.
func (mock *BackendMock) GetObject(bucket string, object string, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
func (mock *BackendMock) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
if mock.GetObjectFunc == nil {
panic("BackendMock.GetObjectFunc: method is nil but Backend.GetObject was just called")
}
callInfo := struct {
Bucket string
Object string
AcceptRange string
StartOffset int64
Length int64
Writer io.Writer
Etag string
}{
Bucket: bucket,
Object: object,
AcceptRange: acceptRange,
StartOffset: startOffset,
Length: length,
Writer: writer,
Etag: etag,
}
mock.lockGetObject.Lock()
mock.calls.GetObject = append(mock.calls.GetObject, callInfo)
mock.lockGetObject.Unlock()
return mock.GetObjectFunc(bucket, object, acceptRange, writer)
return mock.GetObjectFunc(bucket, object, startOffset, length, writer, etag)
}
// GetObjectCalls gets all the calls that were made to GetObject.
@@ -873,14 +932,18 @@ func (mock *BackendMock) GetObject(bucket string, object string, acceptRange str
func (mock *BackendMock) GetObjectCalls() []struct {
Bucket string
Object string
AcceptRange string
StartOffset int64
Length int64
Writer io.Writer
Etag string
} {
var calls []struct {
Bucket string
Object string
AcceptRange string
StartOffset int64
Length int64
Writer io.Writer
Etag string
}
mock.lockGetObject.RLock()
calls = mock.calls.GetObject
@@ -1033,21 +1096,23 @@ func (mock *BackendMock) HeadBucketCalls() []struct {
}
// HeadObject calls HeadObjectFunc.
func (mock *BackendMock) HeadObject(bucket string, object string) (*s3.HeadObjectOutput, error) {
func (mock *BackendMock) HeadObject(bucket string, object string, etag string) (*s3.HeadObjectOutput, error) {
if mock.HeadObjectFunc == nil {
panic("BackendMock.HeadObjectFunc: method is nil but Backend.HeadObject was just called")
}
callInfo := struct {
Bucket string
Object string
Etag string
}{
Bucket: bucket,
Object: object,
Etag: etag,
}
mock.lockHeadObject.Lock()
mock.calls.HeadObject = append(mock.calls.HeadObject, callInfo)
mock.lockHeadObject.Unlock()
return mock.HeadObjectFunc(bucket, object)
return mock.HeadObjectFunc(bucket, object, etag)
}
// HeadObjectCalls gets all the calls that were made to HeadObject.
@@ -1057,10 +1122,12 @@ func (mock *BackendMock) HeadObject(bucket string, object string) (*s3.HeadObjec
func (mock *BackendMock) HeadObjectCalls() []struct {
Bucket string
Object string
Etag string
} {
var calls []struct {
Bucket string
Object string
Etag string
}
mock.lockHeadObject.RLock()
calls = mock.calls.HeadObject
@@ -1068,6 +1135,33 @@ func (mock *BackendMock) HeadObjectCalls() []struct {
return calls
}
// IsTaggingSupported calls IsTaggingSupportedFunc.
func (mock *BackendMock) IsTaggingSupported() bool {
if mock.IsTaggingSupportedFunc == nil {
panic("BackendMock.IsTaggingSupportedFunc: method is nil but Backend.IsTaggingSupported was just called")
}
callInfo := struct {
}{}
mock.lockIsTaggingSupported.Lock()
mock.calls.IsTaggingSupported = append(mock.calls.IsTaggingSupported, callInfo)
mock.lockIsTaggingSupported.Unlock()
return mock.IsTaggingSupportedFunc()
}
// IsTaggingSupportedCalls gets all the calls that were made to IsTaggingSupported.
// Check the length with:
//
// len(mockedBackend.IsTaggingSupportedCalls())
func (mock *BackendMock) IsTaggingSupportedCalls() []struct {
} {
var calls []struct {
}
mock.lockIsTaggingSupported.RLock()
calls = mock.calls.IsTaggingSupported
mock.lockIsTaggingSupported.RUnlock()
return calls
}
// ListBuckets calls ListBucketsFunc.
func (mock *BackendMock) ListBuckets() (*s3.ListBucketsOutput, error) {
if mock.ListBucketsFunc == nil {
@@ -1096,7 +1190,7 @@ func (mock *BackendMock) ListBucketsCalls() []struct {
}
// ListMultipartUploads calls ListMultipartUploadsFunc.
func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
if mock.ListMultipartUploadsFunc == nil {
panic("BackendMock.ListMultipartUploadsFunc: method is nil but Backend.ListMultipartUploads was just called")
}
@@ -1128,7 +1222,7 @@ func (mock *BackendMock) ListMultipartUploadsCalls() []struct {
}
// ListObjectParts calls ListObjectPartsFunc.
func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
if mock.ListObjectPartsFunc == nil {
panic("BackendMock.ListObjectPartsFunc: method is nil but Backend.ListObjectParts was just called")
}
@@ -1176,7 +1270,7 @@ func (mock *BackendMock) ListObjectPartsCalls() []struct {
}
// ListObjects calls ListObjectsFunc.
func (mock *BackendMock) ListObjects(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
func (mock *BackendMock) ListObjects(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
if mock.ListObjectsFunc == nil {
panic("BackendMock.ListObjectsFunc: method is nil but Backend.ListObjects was just called")
}
@@ -1224,7 +1318,7 @@ func (mock *BackendMock) ListObjectsCalls() []struct {
}
// ListObjectsV2 calls ListObjectsV2Func.
func (mock *BackendMock) ListObjectsV2(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
func (mock *BackendMock) ListObjectsV2(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
if mock.ListObjectsV2Func == nil {
panic("BackendMock.ListObjectsV2Func: method is nil but Backend.ListObjectsV2 was just called")
}
@@ -1336,19 +1430,23 @@ func (mock *BackendMock) PutBucketAclCalls() []struct {
}
// PutObject calls PutObjectFunc.
func (mock *BackendMock) PutObject(putObjectInput *s3.PutObjectInput) (string, error) {
func (mock *BackendMock) PutObject(bucket string, object string, r io.Reader) (string, error) {
if mock.PutObjectFunc == nil {
panic("BackendMock.PutObjectFunc: method is nil but Backend.PutObject was just called")
}
callInfo := struct {
PutObjectInput *s3.PutObjectInput
Bucket string
Object string
R io.Reader
}{
PutObjectInput: putObjectInput,
Bucket: bucket,
Object: object,
R: r,
}
mock.lockPutObject.Lock()
mock.calls.PutObject = append(mock.calls.PutObject, callInfo)
mock.lockPutObject.Unlock()
return mock.PutObjectFunc(putObjectInput)
return mock.PutObjectFunc(bucket, object, r)
}
// PutObjectCalls gets all the calls that were made to PutObject.
@@ -1356,10 +1454,14 @@ func (mock *BackendMock) PutObject(putObjectInput *s3.PutObjectInput) (string, e
//
// len(mockedBackend.PutObjectCalls())
func (mock *BackendMock) PutObjectCalls() []struct {
PutObjectInput *s3.PutObjectInput
Bucket string
Object string
R io.Reader
} {
var calls []struct {
PutObjectInput *s3.PutObjectInput
Bucket string
Object string
R io.Reader
}
mock.lockPutObject.RLock()
calls = mock.calls.PutObject
@@ -1400,7 +1502,7 @@ func (mock *BackendMock) PutObjectAclCalls() []struct {
}
// PutObjectPart calls PutObjectPartFunc.
func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID string, part int, length int64, r io.Reader) (string, error) {
func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID string, part int, r io.Reader) (string, error) {
if mock.PutObjectPartFunc == nil {
panic("BackendMock.PutObjectPartFunc: method is nil but Backend.PutObjectPart was just called")
}
@@ -1409,20 +1511,18 @@ func (mock *BackendMock) PutObjectPart(bucket string, object string, uploadID st
Object string
UploadID string
Part int
Length int64
R io.Reader
}{
Bucket: bucket,
Object: object,
UploadID: uploadID,
Part: part,
Length: length,
R: r,
}
mock.lockPutObjectPart.Lock()
mock.calls.PutObjectPart = append(mock.calls.PutObjectPart, callInfo)
mock.lockPutObjectPart.Unlock()
return mock.PutObjectPartFunc(bucket, object, uploadID, part, length, r)
return mock.PutObjectPartFunc(bucket, object, uploadID, part, r)
}
// PutObjectPartCalls gets all the calls that were made to PutObjectPart.
@@ -1434,7 +1534,6 @@ func (mock *BackendMock) PutObjectPartCalls() []struct {
Object string
UploadID string
Part int
Length int64
R io.Reader
} {
var calls []struct {
@@ -1442,7 +1541,6 @@ func (mock *BackendMock) PutObjectPartCalls() []struct {
Object string
UploadID string
Part int
Length int64
R io.Reader
}
mock.lockPutObjectPart.RLock()

View File

@@ -1,37 +1,20 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package controllers
import (
"bytes"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/scoutgw/backend"
"github.com/versity/scoutgw/s3err"
)
type S3ApiController struct {
@@ -44,111 +27,82 @@ func New(be backend.Backend) S3ApiController {
func (c S3ApiController) ListBuckets(ctx *fiber.Ctx) error {
res, err := c.be.ListBuckets()
return SendXMLResponse(ctx, res, err)
return responce(ctx, res, err)
}
func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
bucket := ctx.Params("bucket")
key := ctx.Params("key")
keyEnd := ctx.Params("*1")
uploadId := ctx.Query("uploadId")
maxParts := ctx.QueryInt("max-parts", 0)
partNumberMarker := ctx.QueryInt("part-number-marker", 0)
acceptRange := ctx.Get("Range")
bucket, key, keyEnd, uploadId, maxPartsStr, partNumberMarkerStr := ctx.Params("bucket"), ctx.Params("key"), ctx.Params("*1"), ctx.Query("uploadId"), ctx.Query("max-parts"), ctx.Query("part-number-marker")
if keyEnd != "" {
key = strings.Join([]string{key, keyEnd}, "/")
}
if uploadId != "" {
if maxParts < 0 || (maxParts == 0 && ctx.Query("max-parts") != "") {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidMaxParts))
maxParts, err := strconv.Atoi(maxPartsStr)
if err != nil && maxPartsStr != "" {
return errors.New("wrong api call")
}
if partNumberMarker < 0 || (partNumberMarker == 0 && ctx.Query("part-number-marker") != "") {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPartNumberMarker))
partNumberMarker, err := strconv.Atoi(partNumberMarkerStr)
if err != nil && partNumberMarkerStr != "" {
return errors.New("wrong api call")
}
res, err := c.be.ListObjectParts(bucket, key, uploadId, partNumberMarker, maxParts)
return SendXMLResponse(ctx, res, err)
res, err := c.be.ListObjectParts(bucket, "", uploadId, partNumberMarker, maxParts)
return responce(ctx, res, err)
}
if ctx.Request().URI().QueryArgs().Has("acl") {
res, err := c.be.GetObjectAcl(bucket, key)
return SendXMLResponse(ctx, res, err)
return responce(ctx, res, err)
}
if attrs := ctx.Get("X-Amz-Object-Attributes"); attrs != "" {
res, err := c.be.GetObjectAttributes(bucket, key, strings.Split(attrs, ","))
return SendXMLResponse(ctx, res, err)
return responce(ctx, res, err)
}
res, err := c.be.GetObject(bucket, key, acceptRange, ctx.Response().BodyWriter())
bRangeSl := strings.Split(ctx.Get("Range"), "=")
if len(bRangeSl) < 2 {
return errors.New("wrong api call")
}
bRange := strings.Split(bRangeSl[1], "-")
if len(bRange) < 2 {
return errors.New("wrong api call")
}
startOffset, err := strconv.Atoi(bRange[0])
if err != nil {
return SendResponse(ctx, err)
}
if res == nil {
return SendResponse(ctx, fmt.Errorf("get object nil response"))
return errors.New("wrong api call")
}
utils.SetMetaHeaders(ctx, res.Metadata)
var lastmod string
if res.LastModified != nil {
lastmod = res.LastModified.Format(timefmt)
length, err := strconv.Atoi(bRange[1])
if err != nil {
return errors.New("wrong api call")
}
utils.SetResponseHeaders(ctx, []utils.CustomHeader{
{
Key: "Content-Length",
Value: fmt.Sprint(res.ContentLength),
},
{
Key: "Content-Type",
Value: getstring(res.ContentType),
},
{
Key: "Content-Encoding",
Value: getstring(res.ContentEncoding),
},
{
Key: "ETag",
Value: getstring(res.ETag),
},
{
Key: "Last-Modified",
Value: lastmod,
},
})
return ctx.SendStatus(http.StatusOK)
}
func getstring(s *string) string {
if s == nil {
return ""
}
return *s
res, err := c.be.GetObject(bucket, key, int64(startOffset), int64(length), ctx.Response().BodyWriter(), "")
return responce(ctx, res, err)
}
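
The handler above expects a single-range header of the form Range: bytes=&lt;start&gt;-&lt;end&gt; and forwards the two numbers to GetObject as a start offset and a length (note: not an inclusive end offset as in RFC 7233). A standalone sketch of that parsing, under the same single-range assumption; the function name is hypothetical:

// sketch: parse "bytes=<start>-<end>" the way the handler above does
func parseRange(header string) (start, length int64, err error) {
	parts := strings.Split(header, "=")
	if len(parts) != 2 || parts[0] != "bytes" {
		return 0, 0, errors.New("wrong api call")
	}
	bounds := strings.Split(parts[1], "-")
	if len(bounds) != 2 {
		return 0, 0, errors.New("wrong api call")
	}
	if start, err = strconv.ParseInt(bounds[0], 10, 64); err != nil {
		return 0, 0, err
	}
	if length, err = strconv.ParseInt(bounds[1], 10, 64); err != nil {
		return 0, 0, err
	}
	return start, length, nil
}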
func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
bucket := ctx.Params("bucket")
prefix := ctx.Query("prefix")
marker := ctx.Query("continuation-token")
delimiter := ctx.Query("delimiter")
maxkeys := ctx.QueryInt("max-keys")
if ctx.Request().URI().QueryArgs().Has("acl") {
res, err := c.be.GetBucketAcl(ctx.Params("bucket"))
return SendXMLResponse(ctx, res, err)
return responce(ctx, res, err)
}
if ctx.Request().URI().QueryArgs().Has("uploads") {
res, err := c.be.ListMultipartUploads(&s3.ListMultipartUploadsInput{Bucket: aws.String(ctx.Params("bucket"))})
return SendXMLResponse(ctx, res, err)
return responce(ctx, res, err)
}
if ctx.QueryInt("list-type") == 2 {
res, err := c.be.ListObjectsV2(bucket, prefix, marker, delimiter, maxkeys)
return SendXMLResponse(ctx, res, err)
res, err := c.be.ListObjectsV2(ctx.Params("bucket"), "", "", "", 1)
return responce(ctx, res, err)
}
res, err := c.be.ListObjects(bucket, prefix, marker, delimiter, maxkeys)
return SendXMLResponse(ctx, res, err)
res, err := c.be.ListObjects(ctx.Params("bucket"), "", "", "", 1)
return responce(ctx, res, err)
}
func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
@@ -177,68 +131,74 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
GrantWriteACP: &grantWriteACP,
})
return SendResponse(ctx, err)
return responce[any](ctx, nil, err)
}
err := c.be.PutBucket(bucket)
return SendResponse(ctx, err)
return responce[any](ctx, nil, err)
}
func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
bucket := ctx.Params("bucket")
keyStart := ctx.Params("key")
keyEnd := ctx.Params("*1")
uploadId := ctx.Query("uploadId")
partNumberStr := ctx.Query("partNumber")
// Copy source headers
copySource := ctx.Get("X-Amz-Copy-Source")
copySrcIfMatch := ctx.Get("X-Amz-Copy-Source-If-Match")
copySrcIfNoneMatch := ctx.Get("X-Amz-Copy-Source-If-None-Match")
copySrcModifSince := ctx.Get("X-Amz-Copy-Source-If-Modified-Since")
copySrcUnmodifSince := ctx.Get("X-Amz-Copy-Source-If-Unmodified-Since")
// Permission headers
acl := ctx.Get("X-Amz-Acl")
grantFullControl := ctx.Get("X-Amz-Grant-Full-Control")
grantRead := ctx.Get("X-Amz-Grant-Read")
grantReadACP := ctx.Get("X-Amz-Grant-Read-Acp")
granWrite := ctx.Get("X-Amz-Grant-Write")
grantWriteACP := ctx.Get("X-Amz-Grant-Write-Acp")
// Other headers
contentLengthStr := ctx.Get("Content-Length")
dstBucket, dstKeyStart, dstKeyEnd, uploadId, partNumberStr := ctx.Params("bucket"), ctx.Params("key"), ctx.Params("*1"), ctx.Query("uploadId"), ctx.Query("partNumber")
copySource, copySrcIfMatch, copySrcIfNoneMatch,
copySrcModifSince, copySrcUnmodifSince, acl,
grantFullControl, grantRead, grantReadACP,
granWrite, grantWriteACP :=
// Copy source headers
ctx.Get("X-Amz-Copy-Source"),
ctx.Get("X-Amz-Copy-Source-If-Match"),
ctx.Get("X-Amz-Copy-Source-If-None-Match"),
ctx.Get("X-Amz-Copy-Source-If-Modified-Since"),
ctx.Get("X-Amz-Copy-Source-If-Unmodified-Since"),
// Permission headers
ctx.Get("X-Amz-Acl"),
ctx.Get("X-Amz-Grant-Full-Control"),
ctx.Get("X-Amz-Grant-Read"),
ctx.Get("X-Amz-Grant-Read-Acp"),
ctx.Get("X-Amz-Grant-Write"),
ctx.Get("X-Amz-Grant-Write-Acp")
grants := grantFullControl + grantRead + grantReadACP + granWrite + grantWriteACP
if keyEnd != "" {
keyStart = strings.Join([]string{keyStart, keyEnd}, "/")
}
path := ctx.Path()
if path[len(path)-1:] == "/" && keyStart[len(keyStart)-1:] != "/" {
keyStart = keyStart + "/"
if dstKeyEnd != "" {
dstKeyStart = strings.Join([]string{dstKeyStart, dstKeyEnd}, "/")
}
var contentLength int64
if contentLengthStr != "" {
var err error
contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
if partNumberStr != "" {
copySrcModifSinceDate, err := time.Parse(time.RFC3339, copySrcModifSince)
if err != nil && copySrcModifSince != "" {
return errors.New("wrong api call")
}
copySrcUnmodifSinceDate, err := time.Parse(time.RFC3339, copySrcUnmodifSince)
if err != nil && copySrcUnmodifSince != "" {
return errors.New("wrong api call")
}
partNumber, err := strconv.ParseInt(partNumberStr, 10, 64)
if err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest))
return errors.New("wrong api call")
}
res, err := c.be.UploadPartCopy(&s3.UploadPartCopyInput{
Bucket: &dstBucket,
Key: &dstKeyStart,
PartNumber: int32(partNumber),
UploadId: &uploadId,
CopySource: &copySource,
CopySourceIfMatch: &copySrcIfMatch,
CopySourceIfNoneMatch: &copySrcIfNoneMatch,
CopySourceIfModifiedSince: &copySrcModifSinceDate,
CopySourceIfUnmodifiedSince: &copySrcUnmodifSinceDate,
})
return responce(ctx, res, err)
}
if uploadId != "" && partNumberStr != "" {
partNumber := ctx.QueryInt("partNumber", -1)
if partNumber < 1 {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPart))
}
if uploadId != "" {
body := io.ReadSeeker(bytes.NewReader([]byte(ctx.Body())))
etag, err := c.be.PutObjectPart(bucket, keyStart, uploadId,
partNumber, contentLength, body)
ctx.Response().Header.Set("Etag", etag)
return SendResponse(ctx, err)
res, err := c.be.UploadPart(dstBucket, dstKeyStart, uploadId, body)
return responce(ctx, res, err)
}
if grants != "" || acl != "" {
@@ -247,8 +207,8 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}
err := c.be.PutObjectAcl(&s3.PutObjectAclInput{
Bucket: &bucket,
Key: &keyStart,
Bucket: &dstBucket,
Key: &dstKeyStart,
ACL: types.ObjectCannedACL(acl),
GrantFullControl: &grantFullControl,
GrantRead: &grantRead,
@@ -256,35 +216,24 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
GrantWrite: &granWrite,
GrantWriteACP: &grantWriteACP,
})
return SendResponse(ctx, err)
return responce[any](ctx, nil, err)
}
if copySource != "" {
_, _, _, _ = copySrcIfMatch, copySrcIfNoneMatch,
copySrcModifSince, copySrcUnmodifSince
copySourceSplit := strings.Split(copySource, "/")
srcBucket, srcObject := copySourceSplit[0], copySourceSplit[1:]
res, err := c.be.CopyObject(srcBucket, strings.Join(srcObject, "/"), bucket, keyStart)
return SendXMLResponse(ctx, res, err)
res, err := c.be.CopyObject(srcBucket, strings.Join(srcObject, "/"), dstBucket, dstKeyStart)
return responce(ctx, res, err)
}
metadata := utils.GetUserMetaData(&ctx.Request().Header)
etag, err := c.be.PutObject(&s3.PutObjectInput{
Bucket: &bucket,
Key: &keyStart,
ContentLength: contentLength,
Metadata: metadata,
Body: bytes.NewReader(ctx.Request().Body()),
})
ctx.Response().Header.Set("ETag", etag)
return SendResponse(ctx, err)
res, err := c.be.PutObject(dstBucket, dstKeyStart, bytes.NewReader(ctx.Request().Body()))
return responce(ctx, res, err)
}
func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
err := c.be.DeleteBucket(ctx.Params("bucket"))
return SendResponse(ctx, err)
return responce[any](ctx, nil, err)
}
func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
@@ -294,14 +243,11 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
}
err := c.be.DeleteObjects(ctx.Params("bucket"), &s3.DeleteObjectsInput{Delete: &dObj})
return SendResponse(ctx, err)
return responce[any](ctx, nil, err)
}
func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
bucket := ctx.Params("bucket")
key := ctx.Params("key")
keyEnd := ctx.Params("*1")
uploadId := ctx.Query("uploadId")
bucket, key, keyEnd, uploadId := ctx.Params("bucket"), ctx.Params("key"), ctx.Params("*1"), ctx.Query("uploadId")
if keyEnd != "" {
key = strings.Join([]string{key, keyEnd}, "/")
@@ -317,107 +263,60 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
ExpectedBucketOwner: &expectedBucketOwner,
RequestPayer: types.RequestPayer(requestPayer),
})
return SendResponse(ctx, err)
return responce[any](ctx, nil, err)
}
err := c.be.DeleteObject(bucket, key)
return SendResponse(ctx, err)
return responce[any](ctx, nil, err)
}
func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) error {
_, err := c.be.HeadBucket(ctx.Params("bucket"))
// TODO: set bucket response headers
return SendResponse(ctx, err)
res, err := c.be.HeadBucket(ctx.Params("bucket"))
return responce(ctx, res, err)
}
const (
timefmt = "Mon, 02 Jan 2006 15:04:05 GMT"
)
func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
bucket := ctx.Params("bucket")
key := ctx.Params("key")
keyEnd := ctx.Params("*1")
bucket, key, keyEnd := ctx.Params("bucket"), ctx.Params("key"), ctx.Params("*1")
if keyEnd != "" {
key = strings.Join([]string{key, keyEnd}, "/")
}
res, err := c.be.HeadObject(bucket, key)
if err != nil {
return SendResponse(ctx, err)
}
if res == nil {
return SendResponse(ctx, fmt.Errorf("head object nil response"))
}
utils.SetMetaHeaders(ctx, res.Metadata)
var lastmod string
if res.LastModified != nil {
lastmod = res.LastModified.Format(timefmt)
}
utils.SetResponseHeaders(ctx, []utils.CustomHeader{
{
Key: "Content-Length",
Value: fmt.Sprint(res.ContentLength),
},
{
Key: "Content-Type",
Value: getstring(res.ContentType),
},
{
Key: "Content-Encoding",
Value: getstring(res.ContentEncoding),
},
{
Key: "ETag",
Value: getstring(res.ETag),
},
{
Key: "Last-Modified",
Value: lastmod,
},
})
return SendResponse(ctx, nil)
res, err := c.be.HeadObject(bucket, key, "")
return responce(ctx, res, err)
}
func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
bucket := ctx.Params("bucket")
key := ctx.Params("key")
keyEnd := ctx.Params("*1")
uploadId := ctx.Query("uploadId")
bucket, key, keyEnd, uploadId := ctx.Params("bucket"), ctx.Params("key"), ctx.Params("*1"), ctx.Query("uploadId")
var restoreRequest s3.RestoreObjectInput
if keyEnd != "" {
key = strings.Join([]string{key, keyEnd}, "/")
}
var restoreRequest s3.RestoreObjectInput
if ctx.Request().URI().QueryArgs().Has("restore") {
xmlErr := xml.Unmarshal(ctx.Body(), &restoreRequest)
if xmlErr != nil {
return errors.New("wrong api call")
}
err := c.be.RestoreObject(bucket, key, &restoreRequest)
return SendResponse(ctx, err)
return responce[any](ctx, nil, err)
}
if uploadId != "" {
data := struct {
Parts []types.Part `xml:"Part"`
}{}
var parts []types.Part
if err := xml.Unmarshal(ctx.Body(), &data); err != nil {
if err := xml.Unmarshal(ctx.Body(), &parts); err != nil {
return errors.New("wrong api call")
}
res, err := c.be.CompleteMultipartUpload(bucket, key, uploadId, data.Parts)
return SendXMLResponse(ctx, res, err)
res, err := c.be.CompleteMultipartUpload(bucket, "", uploadId, parts)
return responce(ctx, res, err)
}
res, err := c.be.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: &bucket, Key: &key})
return SendXMLResponse(ctx, res, err)
return responce(ctx, res, err)
}
func SendResponse(ctx *fiber.Ctx, err error) error {
func responce[R comparable](ctx *fiber.Ctx, resp R, err error) error {
if err != nil {
serr, ok := err.(s3err.APIError)
if ok {
@@ -428,37 +327,9 @@ func SendResponse(ctx *fiber.Ctx, err error) error {
s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
}
// https://github.com/gofiber/fiber/issues/2080
// ctx.SendStatus() sets incorrect content length on HEAD request
ctx.Status(http.StatusOK)
return nil
}
func SendXMLResponse(ctx *fiber.Ctx, resp any, err error) error {
if err != nil {
serr, ok := err.(s3err.APIError)
if ok {
ctx.Status(serr.HTTPStatusCode)
return ctx.Send(s3err.GetAPIErrorResponse(serr, "", "", ""))
}
fmt.Fprintf(os.Stderr, "Internal Error, req:\n%v\nerr:\n%v\n",
ctx.Request(), err)
return ctx.Send(s3err.GetAPIErrorResponse(
s3err.GetAPIError(s3err.ErrInternalError), "", "", ""))
}
var b []byte
if resp != nil {
if b, err = xml.Marshal(resp); err != nil {
return err
}
if len(b) > 0 {
ctx.Response().Header.SetContentType(fiber.MIMEApplicationXML)
}
if b, err = xml.Marshal(resp); err != nil {
return err
}
return ctx.Send(b)

View File

@@ -1,17 +1,3 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package controllers
import (
@@ -21,15 +7,13 @@ import (
"reflect"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
"github.com/versity/scoutgw/backend"
"github.com/versity/scoutgw/s3err"
)
func TestNew(t *testing.T) {
@@ -129,8 +113,8 @@ func TestS3ApiController_GetActions(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{be: &BackendMock{
ListObjectPartsFunc: func(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
return s3response.ListPartsResponse{}, nil
ListObjectPartsFunc: func(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
return &s3.ListPartsOutput{}, nil
},
GetObjectAclFunc: func(bucket, object string) (*s3.GetObjectAclOutput, error) {
return &s3.GetObjectAclOutput{}, nil
@@ -138,7 +122,7 @@ func TestS3ApiController_GetActions(t *testing.T) {
GetObjectAttributesFunc: func(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error) {
return &s3.GetObjectAttributesOutput{}, nil
},
GetObjectFunc: func(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
GetObjectFunc: func(bucket, object string, startOffset, length int64, writer io.Writer, etag string) (*s3.GetObjectOutput, error) {
return &s3.GetObjectOutput{Metadata: nil}, nil
},
}}
@@ -170,16 +154,16 @@ func TestS3ApiController_GetActions(t *testing.T) {
req: httptest.NewRequest(http.MethodGet, "/my-bucket/key?uploadId=hello&max-parts=InvalidMaxParts", nil),
},
wantErr: false,
statusCode: 400,
statusCode: 500,
},
{
name: "Get-actions-invalid-part-number-marker",
name: "Get-actions-invalid-part-number",
app: app,
args: args{
req: httptest.NewRequest(http.MethodGet, "/my-bucket/key?uploadId=hello&max-parts=200&part-number-marker=InvalidPartNumber", nil),
},
wantErr: false,
statusCode: 400,
statusCode: 500,
},
{
name: "Get-actions-list-object-parts-success",
@@ -200,13 +184,22 @@ func TestS3ApiController_GetActions(t *testing.T) {
statusCode: 200,
},
{
name: "Get-actions-get-object-success",
name: "Get-actions-invalid-range-header",
app: app,
args: args{
req: getObjectReq,
},
wantErr: false,
statusCode: 500,
},
{
name: "Get-actions-get-object-error",
app: app,
args: args{
req: getObjectSuccessReq,
},
wantErr: false,
statusCode: 200,
statusCode: 500,
},
}
for _, tt := range tests {
@@ -234,21 +227,21 @@ func TestS3ApiController_ListActions(t *testing.T) {
GetBucketAclFunc: func(bucket string) (*s3.GetBucketAclOutput, error) {
return &s3.GetBucketAclOutput{}, nil
},
ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
return s3response.ListMultipartUploadsResponse{}, nil
ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
return &s3.ListMultipartUploadsOutput{}, nil
},
ListObjectsV2Func: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
return &s3.ListObjectsV2Output{}, nil
ListObjectsV2Func: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
return &s3.ListBucketsOutput{}, nil
},
ListObjectsFunc: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
return &s3.ListObjectsOutput{}, nil
ListObjectsFunc: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
return &s3.ListBucketsOutput{}, nil
},
}}
app.Get("/:bucket", s3ApiController.ListActions)
// Error case
s3ApiControllerError := S3ApiController{be: &BackendMock{
ListObjectsFunc: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
ListObjectsFunc: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListBucketsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
},
}}
@@ -313,11 +306,11 @@ func TestS3ApiController_ListActions(t *testing.T) {
resp, err := tt.app.Test(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("S3ApiController.GetActions() error = %v, wantErr %v", err, tt.wantErr)
t.Errorf("S3ApiController.ListActions() error = %v, wantErr %v", err, tt.wantErr)
}
if resp.StatusCode != tt.statusCode {
t.Errorf("S3ApiController.GetActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
t.Errorf("S3ApiController.ListActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
}
})
}
@@ -387,11 +380,11 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {
resp, err := tt.app.Test(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("S3ApiController.GetActions() error = %v, wantErr %v", err, tt.wantErr)
t.Errorf("S3ApiController.PutBucketActions() error = %v, wantErr %v", err, tt.wantErr)
}
if resp.StatusCode != tt.statusCode {
t.Errorf("S3ApiController.GetActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
t.Errorf("S3ApiController.PutBucketActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
}
}
}
@@ -415,8 +408,8 @@ func TestS3ApiController_PutActions(t *testing.T) {
CopyObjectFunc: func(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
return &s3.CopyObjectOutput{}, nil
},
PutObjectFunc: func(*s3.PutObjectInput) (string, error) {
return "Hey", nil
PutObjectFunc: func(bucket, object string, r io.Reader) (string, error) {
return "hello", nil
},
}}
app.Put("/:bucket/:key/*", s3ApiController.PutActions)
@@ -442,13 +435,13 @@ func TestS3ApiController_PutActions(t *testing.T) {
statusCode int
}{
{
name: "Upload-put-part-error-case",
name: "Upload-copy-part-error-case",
app: app,
args: args{
req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?uploadId=abc&partNumber=invalid", nil),
req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?partNumber=invalid", nil),
},
wantErr: false,
statusCode: 400,
statusCode: 500,
},
{
name: "Upload-copy-part-success",
@@ -518,13 +511,11 @@ func TestS3ApiController_PutActions(t *testing.T) {
resp, err := tt.app.Test(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("S3ApiController.GetActions() %v error = %v, wantErr %v",
tt.name, err, tt.wantErr)
t.Errorf("S3ApiController.PutActions() error = %v, wantErr %v", err, tt.wantErr)
}
if resp.StatusCode != tt.statusCode {
t.Errorf("S3ApiController.GetActions() %v statusCode = %v, wantStatusCode = %v",
tt.name, resp.StatusCode, tt.statusCode)
t.Errorf("S3ApiController.PutActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
}
}
}
@@ -798,22 +789,9 @@ func TestS3ApiController_HeadObject(t *testing.T) {
}
app := fiber.New()
// Mock values
contentEncoding := "gzip"
contentType := "application/xml"
eTag := "Valid etag"
lastModifie := time.Now()
s3ApiController := S3ApiController{be: &BackendMock{
HeadObjectFunc: func(bucket, object string) (*s3.HeadObjectOutput, error) {
return &s3.HeadObjectOutput{
ContentEncoding: &contentEncoding,
ContentLength: 64,
ContentType: &contentType,
LastModified: &lastModifie,
ETag: &eTag,
}, nil
HeadObjectFunc: func(bucket, object string, etag string) (*s3.HeadObjectOutput, error) {
return nil, nil
},
}}
@@ -823,7 +801,7 @@ func TestS3ApiController_HeadObject(t *testing.T) {
appErr := fiber.New()
s3ApiControllerErr := S3ApiController{be: &BackendMock{
HeadObjectFunc: func(bucket, object string) (*s3.HeadObjectOutput, error) {
HeadObjectFunc: func(bucket, object string, etag string) (*s3.HeadObjectOutput, error) {
return nil, s3err.GetAPIError(42)
},
}}
@@ -954,7 +932,7 @@ func TestS3ApiController_CreateActions(t *testing.T) {
}
}
func Test_XMLresponse(t *testing.T) {
func Test_responce(t *testing.T) {
type args struct {
ctx *fiber.Ctx
resp any
@@ -1011,74 +989,14 @@ func Test_XMLresponse(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := SendXMLResponse(tt.args.ctx, tt.args.resp, tt.args.err); (err != nil) != tt.wantErr {
t.Errorf("responce() %v error = %v, wantErr %v", tt.name, err, tt.wantErr)
if err := responce(tt.args.ctx, tt.args.resp, tt.args.err); (err != nil) != tt.wantErr {
t.Errorf("responce() error = %v, wantErr %v", err, tt.wantErr)
}
statusCode := tt.args.ctx.Response().StatusCode()
if statusCode != tt.statusCode {
t.Errorf("responce() %v code = %v, wantErr %v", tt.name, statusCode, tt.wantErr)
}
})
}
}
func Test_response(t *testing.T) {
type args struct {
ctx *fiber.Ctx
resp any
err error
}
app := fiber.New()
tests := []struct {
name string
args args
wantErr bool
statusCode int
}{
{
name: "Internal-server-error",
args: args{
ctx: app.AcquireCtx(&fasthttp.RequestCtx{}),
resp: nil,
err: s3err.GetAPIError(16),
},
wantErr: false,
statusCode: 500,
},
{
name: "Error-not-implemented",
args: args{
ctx: app.AcquireCtx(&fasthttp.RequestCtx{}),
resp: nil,
err: s3err.GetAPIError(50),
},
wantErr: false,
statusCode: 501,
},
{
name: "Successful-response",
args: args{
ctx: app.AcquireCtx(&fasthttp.RequestCtx{}),
resp: "Valid response",
err: nil,
},
wantErr: false,
statusCode: 200,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := SendResponse(tt.args.ctx, tt.args.err); (err != nil) != tt.wantErr {
t.Errorf("responce() %v error = %v, wantErr %v", tt.name, err, tt.wantErr)
}
statusCode := tt.args.ctx.Response().StatusCode()
if statusCode != tt.statusCode {
t.Errorf("responce() %v code = %v, wantErr %v", tt.name, statusCode, tt.wantErr)
t.Errorf("responce() code = %v, wantErr %v", statusCode, tt.wantErr)
}
})
}

View File

@@ -1,160 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"crypto/sha256"
"encoding/hex"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/logging"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/backend/auth"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
)
const (
iso8601Format = "20060102T150405Z"
)
type RootUserConfig struct {
Access string
Secret string
Region string
}
func VerifyV4Signature(root RootUserConfig, iam auth.IAMService, debug bool) fiber.Handler {
acct := accounts{root: root, iam: iam}
return func(ctx *fiber.Ctx) error {
authorization := ctx.Get("Authorization")
if authorization == "" {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrAuthHeaderEmpty))
}
// Check the signature version
authParts := strings.Split(authorization, " ")
if len(authParts) < 4 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields))
}
if authParts[0] != "AWS4-HMAC-SHA256" {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureVersionNotSupported))
}
credKv := strings.Split(authParts[1], "=")
if len(credKv) != 2 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
}
creds := strings.Split(credKv[1], "/")
if len(creds) < 4 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
}
signHdrKv := strings.Split(authParts[2], "=")
if len(signHdrKv) != 2 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrCredMalformed))
}
signedHdrs := strings.Split(signHdrKv[1], ";")
account := acct.getAccount(creds[0])
if account == nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidAccessKeyID))
}
// Check X-Amz-Date header
date := ctx.Get("X-Amz-Date")
if date == "" {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingDateHeader))
}
// Parse the date and check the date validity
tdate, err := time.Parse(iso8601Format, date)
if err != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMalformedDate))
}
// Calculate the hash of the request payload
hashedPayload := sha256.Sum256(ctx.Body())
hexPayload := hex.EncodeToString(hashedPayload[:])
hashPayloadHeader := ctx.Get("X-Amz-Content-Sha256")
// Compare the calculated hash with the hash provided
if hashPayloadHeader != hexPayload {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrContentSHA256Mismatch))
}
// Create a new http request instance from fasthttp request
req, err := utils.CreateHttpRequestFromCtx(ctx, signedHdrs)
if err != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError))
}
signer := v4.NewSigner()
signErr := signer.SignHTTP(req.Context(), aws.Credentials{
AccessKeyID: creds[0],
SecretAccessKey: account.Secret,
}, req, hexPayload, creds[3], account.Region, tdate, func(options *v4.SignerOptions) {
if debug {
options.LogSigning = true
options.Logger = logging.NewStandardLogger(os.Stderr)
}
})
if signErr != nil {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInternalError))
}
parts := strings.Split(req.Header.Get("Authorization"), " ")
if len(parts) < 4 {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrMissingFields))
}
calculatedSign := strings.Split(parts[3], "=")[1]
expectedSign := strings.Split(authParts[3], "=")[1]
if expectedSign != calculatedSign {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch))
}
ctx.Locals("role", account.Role)
return ctx.Next()
}
}
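
For readers less familiar with SigV4, an illustrative example of the Authorization header this handler tears apart; the values are made up in the style of the AWS documentation, not taken from the code:

// sketch: shape of the AWS SigV4 Authorization header the parsing above assumes
const exampleAuthorization = "AWS4-HMAC-SHA256 " +
	"Credential=AKIAIOSFODNN7EXAMPLE/20230515/us-east-1/s3/aws4_request, " +
	"SignedHeaders=host;x-amz-content-sha256;x-amz-date, " +
	"Signature=0a1b2c3d4e5f..."
// Splitting on spaces yields the algorithm, Credential, SignedHeaders and Signature parts;
// splitting the credential scope on "/" yields the access key (creds[0]), date, region and
// service (creds[3]) that the verifier feeds back into SignHTTP before comparing signatures.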
type accounts struct {
root RootUserConfig
iam auth.IAMService
}
func (a accounts) getAccount(access string) *auth.Account {
var account *auth.Account
if access == a.root.Access {
account = &auth.Account{
Secret: a.root.Secret,
Role: "admin",
Region: a.root.Region,
}
} else {
account = a.iam.GetUserAccount(access)
}
return account
}

View File

@@ -1,43 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middlewares
import (
"crypto/md5"
"encoding/base64"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/versitygw/s3err"
)
func VerifyMD5Body() fiber.Handler {
return func(ctx *fiber.Ctx) error {
incomingSum := ctx.Get("Content-Md5")
if incomingSum == "" {
return ctx.Next()
}
sum := md5.Sum(ctx.Body())
calculatedSum := base64.StdEncoding.EncodeToString(sum[:])
if incomingSum != calculatedSum {
return controllers.SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidDigest))
}
return ctx.Next()
}
}
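
A short companion sketch of how a client would produce the Content-Md5 value this middleware recomputes and compares; the endpoint URL and body are placeholders:

// sketch: computing the Content-Md5 header the middleware above checks
body := []byte(`{"hello":"world"}`)
sum := md5.Sum(body)
req, err := http.NewRequest(http.MethodPut, "http://localhost:7070/my-bucket/my-key", bytes.NewReader(body))
if err != nil {
	// handle error
}
req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sum[:]))

(Assumes bytes, crypto/md5, encoding/base64 and net/http are imported.)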

View File

@@ -1,34 +1,15 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3api
import (
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/auth"
"github.com/versity/versitygw/s3api/controllers"
"github.com/versity/scoutgw/backend"
"github.com/versity/scoutgw/s3api/controllers"
)
type S3ApiRouter struct{}
func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend, iam auth.IAMService) {
func (sa *S3ApiRouter) Init(app *fiber.App, be backend.Backend) {
s3ApiController := controllers.New(be)
adminController := controllers.AdminController{IAMService: iam}
// TODO: think of better routing system
app.Post("/create-user", adminController.CreateUser)
// ListBuckets action
app.Get("/", s3ApiController.ListBuckets)

View File

@@ -1,32 +1,16 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3api
import (
"testing"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/auth"
"github.com/versity/scoutgw/backend"
)
func TestS3ApiRouter_Init(t *testing.T) {
type args struct {
app *fiber.App
be backend.Backend
iam auth.IAMService
}
tests := []struct {
name string
@@ -39,13 +23,12 @@ func TestS3ApiRouter_Init(t *testing.T) {
args: args{
app: fiber.New(),
be: backend.BackendUnsupported{},
iam: auth.IAMServiceUnsupported{},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.sa.Init(tt.args.app, tt.args.be, tt.args.iam)
tt.sa.Init(tt.args.app, tt.args.be)
})
}
}

View File

@@ -1,27 +1,9 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3api
import (
"crypto/tls"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/logger"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/auth"
"github.com/versity/versitygw/s3api/middlewares"
"github.com/versity/scoutgw/backend"
)
type S3ApiServer struct {
@@ -29,45 +11,16 @@ type S3ApiServer struct {
backend backend.Backend
router *S3ApiRouter
port string
cert *tls.Certificate
debug bool
}
func New(app *fiber.App, be backend.Backend, root middlewares.RootUserConfig, port string, iam auth.IAMService, opts ...Option) (*S3ApiServer, error) {
server := &S3ApiServer{
app: app,
backend: be,
router: new(S3ApiRouter),
port: port,
}
func New(app *fiber.App, be backend.Backend, port string) (s3ApiServer *S3ApiServer, err error) {
s3ApiServer = &S3ApiServer{app, be, new(S3ApiRouter), port}
for _, opt := range opts {
opt(server)
}
app.Use(middlewares.VerifyV4Signature(root, iam, server.debug))
app.Use(logger.New())
app.Use(middlewares.VerifyMD5Body())
server.router.Init(app, be, iam)
return server, nil
}
// Option sets various options for New()
type Option func(*S3ApiServer)
// WithTLS sets TLS Credentials
func WithTLS(cert tls.Certificate) Option {
return func(s *S3ApiServer) { s.cert = &cert }
}
// WithDebug sets debug output
func WithDebug() Option {
return func(s *S3ApiServer) { s.debug = true }
s3ApiServer.router.Init(app, be)
return
}
func (sa *S3ApiServer) Serve() (err error) {
if sa.cert != nil {
return sa.app.ListenTLSWithCertificate(sa.port, *sa.cert)
}
return sa.app.Listen(sa.port)
}
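
For context, a hedged sketch of calling the options-based variant of New shown in this hunk from within the s3api package; the credentials, port and certificate paths are placeholders:

// sketch: wiring the server with functional options
cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
if err != nil {
	// handle error
}
srv, err := New(fiber.New(), backend.BackendUnsupported{}, middlewares.RootUserConfig{
	Access: "rootaccess", Secret: "rootsecret", Region: "us-east-1",
}, ":7070", auth.IAMServiceUnsupported{}, WithTLS(cert), WithDebug())
if err != nil {
	// handle error
}
_ = srv.Serve()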

View File

@@ -1,17 +1,3 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3api
import (
@@ -19,9 +5,7 @@ import (
"testing"
"github.com/gofiber/fiber/v2"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/auth"
"github.com/versity/versitygw/s3api/middlewares"
"github.com/versity/scoutgw/backend"
)
func TestNew(t *testing.T) {
@@ -29,7 +13,6 @@ func TestNew(t *testing.T) {
app *fiber.App
be backend.Backend
port string
root middlewares.RootUserConfig
}
app := fiber.New()
@@ -49,7 +32,6 @@ func TestNew(t *testing.T) {
app: app,
be: be,
port: port,
root: middlewares.RootUserConfig{},
},
wantS3ApiServer: &S3ApiServer{
app: app,
@@ -62,8 +44,7 @@ func TestNew(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotS3ApiServer, err := New(tt.args.app, tt.args.be, tt.args.root,
tt.args.port, auth.IAMServiceUnsupported{})
gotS3ApiServer, err := New(tt.args.app, tt.args.be, tt.args.port)
if (err != nil) != tt.wantErr {
t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
return

View File

@@ -1,93 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package utils
import (
"bytes"
"errors"
"fmt"
"net/http"
"strings"
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
)
func GetUserMetaData(headers *fasthttp.RequestHeader) (metadata map[string]string) {
metadata = make(map[string]string)
headers.VisitAll(func(key, value []byte) {
if strings.HasPrefix(string(key), "X-Amz-Meta-") {
trimmedKey := strings.TrimPrefix(string(key), "X-Amz-Meta-")
headerValue := string(value)
metadata[trimmedKey] = headerValue
}
})
return
}
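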
func CreateHttpRequestFromCtx(ctx *fiber.Ctx, signedHdrs []string) (*http.Request, error) {
req := ctx.Request()
httpReq, err := http.NewRequest(string(req.Header.Method()), req.URI().String(), bytes.NewReader(req.Body()))
if err != nil {
return nil, errors.New("error in creating an http request")
}
// Set the request headers
req.Header.VisitAll(func(key, value []byte) {
keyStr := string(key)
if includeHeader(keyStr, signedHdrs) {
httpReq.Header.Add(keyStr, string(value))
}
})
// Check whether Content-Length is among the signed headers;
// if it is not, zero it so it does not affect the signature
if !includeHeader("Content-Length", signedHdrs) {
httpReq.ContentLength = 0
}
// Set the Host header
httpReq.Host = string(req.Header.Host())
return httpReq, nil
}
func SetMetaHeaders(ctx *fiber.Ctx, meta map[string]string) {
for key, val := range meta {
ctx.Set(fmt.Sprintf("X-Amz-Meta-%s", key), val)
}
}
type CustomHeader struct {
Key string
Value string
}
func SetResponseHeaders(ctx *fiber.Ctx, headers []CustomHeader) {
for _, header := range headers {
ctx.Set(header.Key, header.Value)
}
}
func includeHeader(hdr string, signedHdrs []string) bool {
for _, shdr := range signedHdrs {
if strings.EqualFold(hdr, shdr) {
return true
}
}
return false
}

View File

@@ -1,119 +0,0 @@
package utils
import (
"bytes"
"net/http"
"reflect"
"testing"
"github.com/gofiber/fiber/v2"
"github.com/valyala/fasthttp"
)
func TestCreateHttpRequestFromCtx(t *testing.T) {
type args struct {
ctx *fiber.Ctx
}
app := fiber.New()
// Expected output, Case 1
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})
req := ctx.Request()
request, _ := http.NewRequest(string(req.Header.Method()), req.URI().String(), bytes.NewReader(req.Body()))
// Case 2
ctx2 := app.AcquireCtx(&fasthttp.RequestCtx{})
req2 := ctx2.Request()
req2.Header.Add("X-Amz-Mfa", "Some valid Mfa")
request2, _ := http.NewRequest(string(req2.Header.Method()), req2.URI().String(), bytes.NewReader(req2.Body()))
request2.Header.Add("X-Amz-Mfa", "Some valid Mfa")
tests := []struct {
name string
args args
want *http.Request
wantErr bool
}{
{
name: "Success-response",
args: args{
ctx: ctx,
},
want: request,
wantErr: false,
},
{
name: "Success-response-With-Headers",
args: args{
ctx: ctx2,
},
want: request2,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := CreateHttpRequestFromCtx(tt.args.ctx, []string{"X-Amz-Mfa"})
if (err != nil) != tt.wantErr {
t.Errorf("CreateHttpRequestFromCtx() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got.Header, tt.want.Header) {
t.Errorf("CreateHttpRequestFromCtx() got = %v, want %v", got, tt.want)
}
})
}
}
func TestGetUserMetaData(t *testing.T) {
type args struct {
headers *fasthttp.RequestHeader
}
app := fiber.New()
// Case 1
ctx := app.AcquireCtx(&fasthttp.RequestCtx{})
req := ctx.Request()
// Case 2
ctx2 := app.AcquireCtx(&fasthttp.RequestCtx{})
req2 := ctx2.Request()
req2.Header.Add("X-Amz-Meta-Name", "Nick")
req2.Header.Add("X-Amz-Meta-Age", "27")
tests := []struct {
name string
args args
wantMetadata map[string]string
}{
{
name: "Success-empty-response",
args: args{
headers: &req.Header,
},
wantMetadata: map[string]string{},
},
{
name: "Success-non-empty-response",
args: args{
headers: &req2.Header,
},
wantMetadata: map[string]string{
"Age": "27",
"Name": "Nick",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if gotMetadata := GetUserMetaData(tt.args.headers); !reflect.DeepEqual(gotMetadata, tt.wantMetadata) {
t.Errorf("GetUserMetaData() = %v, want %v", gotMetadata, tt.wantMetadata)
}
})
}
}

View File

@@ -1,17 +1,3 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3err
import (

View File

@@ -1,96 +0,0 @@
// Copyright 2023 Versity Software
// This file is licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package s3response
import (
"encoding/xml"
)
// Part describes part metadata.
type Part struct {
PartNumber int
LastModified string
ETag string
Size int64
}
// ListPartsResponse - s3 api list parts response.
type ListPartsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`
Bucket string
Key string
UploadID string `xml:"UploadId"`
Initiator Initiator
Owner Owner
// The class of storage used to store the object.
StorageClass string
PartNumberMarker int
NextPartNumberMarker int
MaxParts int
IsTruncated bool
// List of parts.
Parts []Part `xml:"Part"`
}
// ListMultipartUploadsResponse - s3 api list multipart uploads response.
type ListMultipartUploadsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"`
Bucket string
KeyMarker string
UploadIDMarker string `xml:"UploadIdMarker"`
NextKeyMarker string
NextUploadIDMarker string `xml:"NextUploadIdMarker"`
Delimiter string
Prefix string
EncodingType string `xml:"EncodingType,omitempty"`
MaxUploads int
IsTruncated bool
// List of pending uploads.
Uploads []Upload `xml:"Upload"`
// Delimited common prefixes.
CommonPrefixes []CommonPrefix
}
// Upload describes an in-progress multipart upload
type Upload struct {
Key string
UploadID string `xml:"UploadId"`
Initiator Initiator
Owner Owner
StorageClass string
Initiated string
}
// CommonPrefix ListObjectsResponse common prefixes (directory abstraction)
type CommonPrefix struct {
Prefix string
}
// Initiator same fields as Owner
type Initiator Owner
// Owner bucket ownership
type Owner struct {
ID string
DisplayName string
}
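
As a quick illustration of the XML these response types serialize to, a hedged sketch using encoding/xml; all field values below are invented:

// sketch: serializing ListPartsResponse to the S3 ListParts XML shape
resp := ListPartsResponse{
	Bucket:   "my-bucket",
	Key:      "my-key",
	UploadID: "upload-1",
	MaxParts: 1000,
	Parts: []Part{
		{PartNumber: 1, ETag: "\"etag-1\"", Size: 5242880, LastModified: "2023-05-15T10:00:00.000Z"},
	},
}
out, err := xml.MarshalIndent(resp, "", "  ")
if err != nil {
	// handle error
}
fmt.Println(string(out)) // <ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">...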

View File

@@ -1,31 +0,0 @@
%global debug_package %{nil}
%define pkg_version @@VERSION@@
Name: versitygw
Version: %{pkg_version}
Release: 1%{?dist}
Summary: Versity S3 Gateway
License: Apache-2.0
URL: https://github.com/versity/versitygw
Source0: %{name}-%{version}.tar.gz
%description
The S3 gateway is an S3 protocol translator that allows an S3 client
to access the supported backend storage as if it were a native S3 service.
BuildRequires: golang >= 1.20
%prep
%setup
%build
make
%install
mkdir -p %{buildroot}%{_bindir}
install -m 0755 %{name} %{buildroot}%{_bindir}/
%post
%files
%{_bindir}/%{name}