fix: get simple multipart upload tests passing

commit ef06d11d7c (parent 2697edd40a)
Author: Ben McClelland
Date: 2023-06-06 13:36:21 -07:00

11 changed files with 257 additions and 136 deletions

View File

@@ -21,6 +21,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/versity/versitygw/s3err"
+	"github.com/versity/versitygw/s3response"
 )
 //go:generate moq -out backend_moq_test.go . Backend
@@ -39,8 +40,8 @@ type Backend interface {
 	CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
 	CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error)
 	AbortMultipartUpload(*s3.AbortMultipartUploadInput) error
-	ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
-	ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error)
+	ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
+	ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
 	CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error)
 	PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error)
@@ -115,11 +116,11 @@ func (BackendUnsupported) CompleteMultipartUpload(bucket, object, uploadID strin
 func (BackendUnsupported) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) error {
 	return s3err.GetAPIError(s3err.ErrNotImplemented)
 }
-func (BackendUnsupported) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
-	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
+func (BackendUnsupported) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
+	return s3response.ListMultipartUploadsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
 }
-func (BackendUnsupported) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
-	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
+func (BackendUnsupported) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
+	return s3response.ListPartsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
 }
 func (BackendUnsupported) CopyPart(srcBucket, srcObject, DstBucket, uploadID, rangeHeader string, part int) (*types.CopyPartResult, error) {
 	return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
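The two list calls now return the gateway's own s3response types instead of SDK output structs. For orientation, a minimal sketch of the single-part flow that the simple multipart upload tests exercise against this interface; the helper name and package are ours, and types.Part is the value-field variant used by the SDK release this commit builds against:

package example

import (
	"bytes"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/versity/versitygw/backend"
)

// simpleMultipartUpload drives create -> upload one part -> complete against
// any Backend implementation.
func simpleMultipartUpload(be backend.Backend, bucket, key string, data []byte) error {
	mpu, err := be.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: &bucket,
		Key:    &key,
	})
	if err != nil {
		return err
	}

	etag, err := be.PutObjectPart(bucket, key, *mpu.UploadId, 1,
		int64(len(data)), bytes.NewReader(data))
	if err != nil {
		return err
	}

	_, err = be.CompleteMultipartUpload(bucket, key, *mpu.UploadId,
		[]types.Part{{PartNumber: 1, ETag: &etag}})
	return err
}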

View File

@@ -6,6 +6,7 @@ package backend
 import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/versity/versitygw/s3response"
 	"io"
 	"sync"
 )
@@ -68,10 +69,10 @@ var _ Backend = &BackendMock{}
 //			ListBucketsFunc: func() (*s3.ListBucketsOutput, error) {
 //				panic("mock out the ListBuckets method")
 //			},
-//			ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
+//			ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
 //				panic("mock out the ListMultipartUploads method")
 //			},
-//			ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
+//			ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
 //				panic("mock out the ListObjectParts method")
 //			},
 //			ListObjectsFunc: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
@@ -172,10 +173,10 @@ type BackendMock struct {
 	ListBucketsFunc func() (*s3.ListBucketsOutput, error)
 	// ListMultipartUploadsFunc mocks the ListMultipartUploads method.
-	ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
+	ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
 	// ListObjectPartsFunc mocks the ListObjectParts method.
-	ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error)
+	ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
 	// ListObjectsFunc mocks the ListObjects method.
 	ListObjectsFunc func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error)
@@ -1094,7 +1095,7 @@ func (mock *BackendMock) ListBucketsCalls() []struct {
 }
 // ListMultipartUploads calls ListMultipartUploadsFunc.
-func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
+func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
 	if mock.ListMultipartUploadsFunc == nil {
 		panic("BackendMock.ListMultipartUploadsFunc: method is nil but Backend.ListMultipartUploads was just called")
 	}
@@ -1126,7 +1127,7 @@ func (mock *BackendMock) ListMultipartUploadsCalls() []struct {
 }
 // ListObjectParts calls ListObjectPartsFunc.
-func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
+func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
 	if mock.ListObjectPartsFunc == nil {
 		panic("BackendMock.ListObjectPartsFunc: method is nil but Backend.ListObjectParts was just called")
 	}

View File

@@ -24,6 +24,11 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 )
+var (
+	// RFC3339TimeFormat RFC3339 time format
+	RFC3339TimeFormat = "2006-01-02T15:04:05.999Z"
+)
 func IsValidBucketName(name string) bool { return true }
 type ByBucketName []types.Bucket
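RFC3339TimeFormat is a Go reference-time layout: the ".999" fraction drops trailing zeros and the trailing "Z" is a literal character, so values should be converted to UTC before formatting. A small illustrative sketch (the timestamp value is made up):

package main

import (
	"fmt"
	"time"

	"github.com/versity/versitygw/backend"
)

func main() {
	ts := time.Date(2023, time.June, 6, 13, 36, 21, 123000000, time.UTC)
	// Prints 2023-06-06T13:36:21.123Z
	fmt.Println(ts.Format(backend.RFC3339TimeFormat))
}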

View File

@@ -36,6 +36,7 @@ import (
 	"github.com/pkg/xattr"
 	"github.com/versity/versitygw/backend"
 	"github.com/versity/versitygw/s3err"
+	"github.com/versity/versitygw/s3response"
 )
 type Posix struct {
@@ -231,8 +232,10 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
 	// check all parts ok
 	last := len(parts) - 1
 	partsize := int64(0)
+	var totalsize int64
 	for i, p := range parts {
-		fi, err := os.Lstat(filepath.Join(objdir, uploadID, fmt.Sprintf("%v", p.PartNumber)))
+		partPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", p.PartNumber))
+		fi, err := os.Lstat(partPath)
 		if err != nil {
 			return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
 		}
@@ -240,13 +243,21 @@
 		if i == 0 {
 			partsize = fi.Size()
 		}
+		totalsize += fi.Size()
 		// all parts except the last need to be the same size
 		if i < last && partsize != fi.Size() {
 			return nil, s3err.GetAPIError(s3err.ErrInvalidPart)
 		}
+		b, err := xattr.Get(partPath, "user.etag")
+		etag := string(b)
+		if err != nil {
+			etag = ""
+		}
+		parts[i].ETag = &etag
 	}
-	f, err := openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, 0)
+	f, err := openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, totalsize)
 	if err != nil {
 		return nil, fmt.Errorf("open temp file: %w", err)
 	}
@@ -272,11 +283,8 @@ func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts [
 	dir := filepath.Dir(objname)
 	if dir != "" {
 		if err = mkdirAll(dir, os.FileMode(0755), bucket, object); err != nil {
-			if err != nil && os.IsExist(err) {
-				return nil, s3err.GetAPIError(s3err.ErrObjectParentIsFile)
-			}
 			if err != nil {
-				return nil, fmt.Errorf("make object parent directories: %w", err)
+				return nil, s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)
 			}
 		}
 	}
@@ -479,24 +487,40 @@ func (p *Posix) AbortMultipartUpload(mpu *s3.AbortMultipartUploadInput) error {
 	return nil
 }
-func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
+func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
 	bucket := *mpu.Bucket
+	var delimiter string
+	if mpu.Delimiter != nil {
+		delimiter = *mpu.Delimiter
+	}
+	var prefix string
+	if mpu.Prefix != nil {
+		prefix = *mpu.Prefix
+	}
+	var lmu s3response.ListMultipartUploadsResponse
 	_, err := os.Stat(bucket)
 	if errors.Is(err, fs.ErrNotExist) {
-		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
+		return lmu, s3err.GetAPIError(s3err.ErrNoSuchBucket)
 	}
 	if err != nil {
-		return nil, fmt.Errorf("stat bucket: %w", err)
+		return lmu, fmt.Errorf("stat bucket: %w", err)
 	}
 	// ignore readdir error and use the empty list returned
 	objs, _ := os.ReadDir(filepath.Join(bucket, metaTmpMultipartDir))
-	var uploads []types.MultipartUpload
-	keyMarker := *mpu.KeyMarker
-	uploadIDMarker := *mpu.UploadIdMarker
+	var uploads []s3response.Upload
+	var keyMarker string
+	if mpu.KeyMarker != nil {
+		keyMarker = *mpu.KeyMarker
+	}
+	var uploadIDMarker string
+	if mpu.UploadIdMarker != nil {
+		uploadIDMarker = *mpu.UploadIdMarker
+	}
 	var pastMarker bool
 	if keyMarker == "" && uploadIDMarker == "" {
 		pastMarker = true
@@ -512,7 +536,7 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (*s3.Lis
 			continue
 		}
 		objectName := string(b)
-		if !strings.HasPrefix(objectName, *mpu.Prefix) {
+		if mpu.Prefix != nil && !strings.HasPrefix(objectName, *mpu.Prefix) {
 			continue
 		}
@@ -538,64 +562,71 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (*s3.Lis
 			upiddir := filepath.Join(bucket, metaTmpMultipartDir, obj.Name(), upid.Name())
 			loadUserMetaData(upiddir, userMetaData)
+			fi, err := upid.Info()
+			if err != nil {
+				return lmu, fmt.Errorf("stat %q: %w", upid.Name(), err)
+			}
 			uploadID := upid.Name()
-			uploads = append(uploads, types.MultipartUpload{
-				Key:      &objectName,
-				UploadId: &uploadID,
+			uploads = append(uploads, s3response.Upload{
+				Key:       objectName,
+				UploadID:  uploadID,
+				Initiated: fi.ModTime().Format(backend.RFC3339TimeFormat),
 			})
 			if len(uploads) == int(mpu.MaxUploads) {
-				return &s3.ListMultipartUploadsOutput{
-					Bucket:             &bucket,
-					Delimiter:          mpu.Delimiter,
+				return s3response.ListMultipartUploadsResponse{
+					Bucket:             bucket,
+					Delimiter:          delimiter,
 					IsTruncated:        i != len(objs) || j != len(upids),
-					KeyMarker:          &keyMarker,
-					MaxUploads:         mpu.MaxUploads,
-					NextKeyMarker:      &objectName,
-					NextUploadIdMarker: &uploadID,
-					Prefix:             mpu.Prefix,
-					UploadIdMarker:     mpu.UploadIdMarker,
+					KeyMarker:          keyMarker,
+					MaxUploads:         int(mpu.MaxUploads),
+					NextKeyMarker:      objectName,
+					NextUploadIDMarker: uploadID,
+					Prefix:             prefix,
+					UploadIDMarker:     uploadIDMarker,
 					Uploads:            uploads,
 				}, nil
 			}
 		}
 	}
-	return &s3.ListMultipartUploadsOutput{
-		Bucket:         &bucket,
-		Delimiter:      mpu.Delimiter,
-		KeyMarker:      &keyMarker,
-		MaxUploads:     mpu.MaxUploads,
-		Prefix:         mpu.Prefix,
-		UploadIdMarker: mpu.UploadIdMarker,
+	return s3response.ListMultipartUploadsResponse{
+		Bucket:         bucket,
+		Delimiter:      delimiter,
+		KeyMarker:      keyMarker,
+		MaxUploads:     int(mpu.MaxUploads),
+		Prefix:         prefix,
+		UploadIDMarker: uploadIDMarker,
 		Uploads:        uploads,
 	}, nil
 }
-func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
+func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
+	var lpr s3response.ListPartsResponse
 	_, err := os.Stat(bucket)
 	if errors.Is(err, fs.ErrNotExist) {
-		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
+		return lpr, s3err.GetAPIError(s3err.ErrNoSuchBucket)
 	}
 	if err != nil {
-		return nil, fmt.Errorf("stat bucket: %w", err)
+		return lpr, fmt.Errorf("stat bucket: %w", err)
 	}
 	sum, err := p.checkUploadIDExists(bucket, object, uploadID)
 	if err != nil {
-		return nil, err
+		return lpr, err
 	}
 	objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
 	ents, err := os.ReadDir(filepath.Join(objdir, uploadID))
 	if errors.Is(err, fs.ErrNotExist) {
-		return nil, s3err.GetAPIError(s3err.ErrNoSuchUpload)
+		return lpr, s3err.GetAPIError(s3err.ErrNoSuchUpload)
 	}
 	if err != nil {
-		return nil, fmt.Errorf("readdir upload: %w", err)
+		return lpr, fmt.Errorf("readdir upload: %w", err)
 	}
-	var parts []types.Part
+	var parts []s3response.Part
 	for _, e := range ents {
 		pn, _ := strconv.Atoi(e.Name())
 		if pn <= partNumberMarker {
@@ -614,10 +645,10 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
 			continue
 		}
-		parts = append(parts, types.Part{
-			PartNumber:   int32(pn),
-			ETag:         &etag,
-			LastModified: backend.GetTimePtr(fi.ModTime()),
+		parts = append(parts, s3response.Part{
+			PartNumber:   pn,
+			ETag:         etag,
+			LastModified: fi.ModTime().Format(backend.RFC3339TimeFormat),
 			Size:         fi.Size(),
 		})
 	}
@@ -626,12 +657,12 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
 		func(i int, j int) bool { return parts[i].PartNumber < parts[j].PartNumber })
 	oldLen := len(parts)
-	if len(parts) > maxParts {
+	if maxParts > 0 && len(parts) > maxParts {
 		parts = parts[:maxParts]
 	}
 	newLen := len(parts)
-	nextpart := int32(0)
+	nextpart := 0
 	if len(parts) != 0 {
 		nextpart = parts[len(parts)-1].PartNumber
 	}
@@ -640,15 +671,15 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
 	upiddir := filepath.Join(objdir, uploadID)
 	loadUserMetaData(upiddir, userMetaData)
-	return &s3.ListPartsOutput{
-		Bucket:               &bucket,
+	return s3response.ListPartsResponse{
+		Bucket:               bucket,
 		IsTruncated:          oldLen != newLen,
-		Key:                  &object,
-		MaxParts:             int32(maxParts),
-		NextPartNumberMarker: backend.GetStringPtr(fmt.Sprintf("%v", nextpart)),
-		PartNumberMarker:     backend.GetStringPtr(fmt.Sprintf("%v", partNumberMarker)),
+		Key:                  object,
+		MaxParts:             maxParts,
+		NextPartNumberMarker: nextpart,
+		PartNumberMarker:     partNumberMarker,
 		Parts:                parts,
-		UploadId:             &uploadID,
+		UploadID:             uploadID,
 	}, nil
 }
@@ -689,7 +720,7 @@ func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length
 	}
 	dataSum := hash.Sum(nil)
-	etag := hex.EncodeToString(dataSum[:])
+	etag := hex.EncodeToString(dataSum)
 	xattr.Set(partPath, "user.etag", []byte(etag))
 	return etag, nil
@@ -741,7 +772,7 @@ func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
 	if dir != "" {
 		err = mkdirAll(dir, os.FileMode(0755), *po.Bucket, *po.Key)
 		if err != nil {
-			return "", fmt.Errorf("make object parent directories: %w", err)
+			return "", s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)
 		}
 	}
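ListMultipartUploads above now nil-guards every optional SDK pointer (Delimiter, Prefix, KeyMarker, UploadIdMarker) before dereferencing it. A hypothetical helper, not part of this commit, could fold those guards into one place:

package backend

// StrOrEmpty returns the pointed-to string, or "" when the pointer is nil.
// Hypothetical helper; the commit writes the nil checks out inline instead.
func StrOrEmpty(s *string) string {
	if s == nil {
		return ""
	}
	return *s
}

With it, the guards above would collapse to one-liners such as delimiter := backend.StrOrEmpty(mpu.Delimiter).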

View File

@@ -76,7 +76,7 @@ func (tmp *tmpfile) link() error {
 func (tmp *tmpfile) Write(b []byte) (int, error) {
 	if int64(len(b)) > tmp.size {
-		return 0, fmt.Errorf("write exceeds content length")
+		return 0, fmt.Errorf("write exceeds content length %v", tmp.size)
 	}
 	n, err := tmp.f.Write(b)

View File

@@ -150,7 +150,7 @@ func (tmp *tmpfile) fallbackLink() error {
 func (tmp *tmpfile) Write(b []byte) (int, error) {
 	if int64(len(b)) > tmp.size {
-		return 0, fmt.Errorf("write exceeds content length")
+		return 0, fmt.Errorf("write exceeds content length %v", tmp.size)
 	}
 	n, err := tmp.f.Write(b)

View File

@@ -136,6 +136,7 @@ func runGateway(be backend.Backend) error {
 	app := fiber.New(fiber.Config{
 		AppName:      "versitygw",
 		ServerHeader: "VERSITYGW",
+		BodyLimit:    5 * 1024 * 1024 * 1024,
 	})
 	var opts []s3api.Option
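fiber's default body limit (4 MiB) would reject large part uploads before they ever reach the backend; 5 * 1024 * 1024 * 1024 bytes is 5 GiB, the S3 maximum size of a single uploaded part. A sketch with the limit given a name (the constant and function names are ours):

package main

import "github.com/gofiber/fiber/v2"

// maxBodySize is 5 GiB, the S3 limit for a single uploaded part, so the
// gateway must accept request bodies at least that large (needs 64-bit int).
const maxBodySize = 5 * 1024 * 1024 * 1024

func newApp() *fiber.App {
	return fiber.New(fiber.Config{
		AppName:      "versitygw",
		ServerHeader: "VERSITYGW",
		BodyLimit:    maxBodySize,
	})
}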

View File

@@ -7,6 +7,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/versity/versitygw/backend"
+	"github.com/versity/versitygw/s3response"
 	"io"
 	"sync"
 )
@@ -69,10 +70,10 @@ var _ backend.Backend = &BackendMock{}
 //			ListBucketsFunc: func() (*s3.ListBucketsOutput, error) {
 //				panic("mock out the ListBuckets method")
 //			},
-//			ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
+//			ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
 //				panic("mock out the ListMultipartUploads method")
 //			},
-//			ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
+//			ListObjectPartsFunc: func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
 //				panic("mock out the ListObjectParts method")
 //			},
 //			ListObjectsFunc: func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
@@ -173,10 +174,10 @@ type BackendMock struct {
 	ListBucketsFunc func() (*s3.ListBucketsOutput, error)
 	// ListMultipartUploadsFunc mocks the ListMultipartUploads method.
-	ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
+	ListMultipartUploadsFunc func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
 	// ListObjectPartsFunc mocks the ListObjectParts method.
-	ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error)
+	ListObjectPartsFunc func(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
 	// ListObjectsFunc mocks the ListObjects method.
 	ListObjectsFunc func(bucket string, prefix string, marker string, delim string, maxkeys int) (*s3.ListObjectsOutput, error)
@@ -1095,7 +1096,7 @@ func (mock *BackendMock) ListBucketsCalls() []struct {
 }
 // ListMultipartUploads calls ListMultipartUploadsFunc.
-func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
+func (mock *BackendMock) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
 	if mock.ListMultipartUploadsFunc == nil {
 		panic("BackendMock.ListMultipartUploadsFunc: method is nil but Backend.ListMultipartUploads was just called")
 	}
@@ -1127,7 +1128,7 @@ func (mock *BackendMock) ListMultipartUploadsCalls() []struct {
 }
 // ListObjectParts calls ListObjectPartsFunc.
-func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
+func (mock *BackendMock) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
 	if mock.ListObjectPartsFunc == nil {
 		panic("BackendMock.ListObjectPartsFunc: method is nil but Backend.ListObjectParts was just called")
 	}

View File

@@ -24,7 +24,6 @@ import (
 	"os"
 	"strconv"
 	"strings"
-	"time"
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/s3"
@@ -53,25 +52,21 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
 	key := ctx.Params("key")
 	keyEnd := ctx.Params("*1")
 	uploadId := ctx.Query("uploadId")
-	maxPartsStr := ctx.Query("max-parts")
-	partNumberMarkerStr := ctx.Query("part-number-marker")
+	maxParts := ctx.QueryInt("max-parts", 0)
+	partNumberMarker := ctx.QueryInt("part-number-marker", 0)
 	acceptRange := ctx.Get("Range")
 	if keyEnd != "" {
 		key = strings.Join([]string{key, keyEnd}, "/")
 	}
 	if uploadId != "" {
-		maxParts, err := strconv.Atoi(maxPartsStr)
-		if err != nil && maxPartsStr != "" {
-			return errors.New("wrong api call")
+		if maxParts < 0 || (maxParts == 0 && ctx.Query("max-parts") != "") {
+			return ErrorResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidMaxParts))
 		}
-		partNumberMarker, err := strconv.Atoi(partNumberMarkerStr)
-		if err != nil && partNumberMarkerStr != "" {
-			return errors.New("wrong api call")
+		if partNumberMarker < 0 || (partNumberMarker == 0 && ctx.Query("part-number-marker") != "") {
+			return ErrorResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPartNumberMarker))
 		}
-		res, err := c.be.ListObjectParts(bucket, "", uploadId, partNumberMarker, maxParts)
+		res, err := c.be.ListObjectParts(bucket, key, uploadId, partNumberMarker, maxParts)
 		return Responce(ctx, res, err)
 	}
@@ -186,40 +181,24 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
 		keyStart = keyStart + "/"
 	}
-	if partNumberStr != "" {
-		copySrcModifSinceDate, err := time.Parse(time.RFC3339, copySrcModifSince)
-		if err != nil && copySrcModifSince != "" {
-			return errors.New("wrong api call")
-		}
-		copySrcUnmodifSinceDate, err := time.Parse(time.RFC3339, copySrcUnmodifSince)
-		if err != nil && copySrcUnmodifSince != "" {
-			return errors.New("wrong api call")
-		}
-		partNumber, err := strconv.ParseInt(partNumberStr, 10, 64)
+	var contentLength int64
+	if contentLengthStr != "" {
+		var err error
+		contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
 		if err != nil {
-			return errors.New("wrong api call")
+			return ErrorResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest))
 		}
-		res, err := c.be.UploadPartCopy(&s3.UploadPartCopyInput{
-			Bucket:                      &bucket,
-			Key:                         &keyStart,
-			PartNumber:                  int32(partNumber),
-			UploadId:                    &uploadId,
-			CopySource:                  &copySource,
-			CopySourceIfMatch:           &copySrcIfMatch,
-			CopySourceIfNoneMatch:       &copySrcIfNoneMatch,
-			CopySourceIfModifiedSince:   &copySrcModifSinceDate,
-			CopySourceIfUnmodifiedSince: &copySrcUnmodifSinceDate,
-		})
-		return Responce(ctx, res, err)
 	}
-	if uploadId != "" {
+	if uploadId != "" && partNumberStr != "" {
+		partNumber := ctx.QueryInt("partNumber", -1)
+		if partNumber < 1 {
+			return ErrorResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPart))
+		}
 		body := io.ReadSeeker(bytes.NewReader([]byte(ctx.Body())))
-		res, err := c.be.UploadPart(bucket, keyStart, uploadId, body)
+		res, err := c.be.PutObjectPart(bucket, keyStart, uploadId,
+			partNumber, contentLength, body)
 		return Responce(ctx, res, err)
 	}
@@ -242,6 +221,8 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
 	}
 	if copySource != "" {
+		_, _, _, _ = copySrcIfMatch, copySrcIfNoneMatch,
+			copySrcModifSince, copySrcUnmodifSince
 		copySourceSplit := strings.Split(copySource, "/")
 		srcBucket, srcObject := copySourceSplit[0], copySourceSplit[1:]
@@ -249,11 +230,6 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
 		return Responce(ctx, res, err)
 	}
-	contentLength, err := strconv.ParseInt(contentLengthStr, 10, 64)
-	if err != nil {
-		return errors.New("wrong api call")
-	}
 	metadata := utils.GetUserMetaData(&ctx.Request().Header)
 	res, err := c.be.PutObject(&s3.PutObjectInput{
@@ -381,20 +357,22 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
 	}
 	if uploadId != "" {
-		var parts []types.Part
+		data := struct {
+			Parts []types.Part `xml:"Part"`
+		}{}
-		if err := xml.Unmarshal(ctx.Body(), &parts); err != nil {
+		if err := xml.Unmarshal(ctx.Body(), &data); err != nil {
 			return errors.New("wrong api call")
 		}
-		res, err := c.be.CompleteMultipartUpload(bucket, "", uploadId, parts)
+		res, err := c.be.CompleteMultipartUpload(bucket, key, uploadId, data.Parts)
 		return Responce(ctx, res, err)
 	}
 	res, err := c.be.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: &bucket, Key: &key})
 	return Responce(ctx, res, err)
 }
-func Responce[R comparable](ctx *fiber.Ctx, resp R, err error) error {
+func Responce[R any](ctx *fiber.Ctx, resp R, err error) error {
 	if err != nil {
 		serr, ok := err.(s3err.APIError)
 		if ok {
@@ -414,6 +392,10 @@ func Responce[R comparable](ctx *fiber.Ctx, resp R, err error) error {
 		return err
 	}
+	if len(b) > 0 {
+		ctx.Response().Header.SetContentType(fiber.MIMEApplicationXML)
+	}
 	return ctx.Send(b)
 }
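The anonymous wrapper struct above exists because a CompleteMultipartUpload body is a <CompleteMultipartUpload> element containing repeated <Part> children; decoding the body straight into a []types.Part does not collect those children. A standalone sketch of the parsing (payload contents are illustrative):

package main

import (
	"encoding/xml"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Typical CompleteMultipartUpload request body.
	body := []byte(`<CompleteMultipartUpload>
  <Part><PartNumber>1</PartNumber><ETag>"etag-1"</ETag></Part>
  <Part><PartNumber>2</PartNumber><ETag>"etag-2"</ETag></Part>
</CompleteMultipartUpload>`)

	// The xml:"Part" tag collects the repeated <Part> elements, as in the
	// controller change above.
	data := struct {
		Parts []types.Part `xml:"Part"`
	}{}
	if err := xml.Unmarshal(body, &data); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(data.Parts), *data.Parts[0].ETag)
}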

View File

@@ -29,6 +29,7 @@ import (
 	"github.com/valyala/fasthttp"
 	"github.com/versity/versitygw/backend"
 	"github.com/versity/versitygw/s3err"
+	"github.com/versity/versitygw/s3response"
 )
 func TestNew(t *testing.T) {
@@ -128,8 +129,8 @@ func TestS3ApiController_GetActions(t *testing.T) {
 	app := fiber.New()
 	s3ApiController := S3ApiController{be: &BackendMock{
-		ListObjectPartsFunc: func(bucket, object, uploadID string, partNumberMarker int, maxParts int) (*s3.ListPartsOutput, error) {
-			return &s3.ListPartsOutput{}, nil
+		ListObjectPartsFunc: func(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
+			return s3response.ListPartsResponse{}, nil
 		},
 		GetObjectAclFunc: func(bucket, object string) (*s3.GetObjectAclOutput, error) {
 			return &s3.GetObjectAclOutput{}, nil
@@ -169,16 +170,16 @@
 				req: httptest.NewRequest(http.MethodGet, "/my-bucket/key?uploadId=hello&max-parts=InvalidMaxParts", nil),
 			},
 			wantErr:    false,
-			statusCode: 500,
+			statusCode: 400,
 		},
 		{
-			name: "Get-actions-invalid-part-number",
+			name: "Get-actions-invalid-part-number-marker",
 			app:  app,
 			args: args{
 				req: httptest.NewRequest(http.MethodGet, "/my-bucket/key?uploadId=hello&max-parts=200&part-number-marker=InvalidPartNumber", nil),
 			},
 			wantErr:    false,
-			statusCode: 500,
+			statusCode: 400,
 		},
 		{
 			name: "Get-actions-list-object-parts-success",
@@ -233,8 +234,8 @@ func TestS3ApiController_ListActions(t *testing.T) {
 		GetBucketAclFunc: func(bucket string) (*s3.GetBucketAclOutput, error) {
 			return &s3.GetBucketAclOutput{}, nil
 		},
-		ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
-			return &s3.ListMultipartUploadsOutput{}, nil
+		ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
+			return s3response.ListMultipartUploadsResponse{}, nil
 		},
 		ListObjectsV2Func: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
 			return &s3.ListObjectsV2Output{}, nil
@@ -441,13 +442,13 @@ func TestS3ApiController_PutActions(t *testing.T) {
 		statusCode int
 	}{
 		{
-			name: "Upload-copy-part-error-case",
+			name: "Upload-put-part-error-case",
 			app:  app,
 			args: args{
-				req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?partNumber=invalid", nil),
+				req: httptest.NewRequest(http.MethodPut, "/my-bucket/my-key?uploadId=abc&partNumber=invalid", nil),
 			},
 			wantErr:    false,
-			statusCode: 500,
+			statusCode: 400,
 		},
 		{
 			name: "Upload-copy-part-success",
@@ -517,11 +518,13 @@
 		resp, err := tt.app.Test(tt.args.req)
 		if (err != nil) != tt.wantErr {
-			t.Errorf("S3ApiController.GetActions() error = %v, wantErr %v", err, tt.wantErr)
+			t.Errorf("S3ApiController.GetActions() %v error = %v, wantErr %v",
+				tt.name, err, tt.wantErr)
 		}
 		if resp.StatusCode != tt.statusCode {
-			t.Errorf("S3ApiController.GetActions() statusCode = %v, wantStatusCode = %v", resp.StatusCode, tt.statusCode)
+			t.Errorf("S3ApiController.GetActions() %v statusCode = %v, wantStatusCode = %v",
+				tt.name, resp.StatusCode, tt.statusCode)
 		}
 	}
 }

s3response/s3response.go (new file, 96 lines)
View File

@@ -0,0 +1,96 @@
+// Copyright 2023 Versity Software
+// This file is licensed under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package s3response
+
+import (
+	"encoding/xml"
+)
+
+// Part describes part metadata.
+type Part struct {
+	PartNumber   int
+	LastModified string
+	ETag         string
+	Size         int64
+}
+
+// ListPartsResponse - s3 api list parts response.
+type ListPartsResponse struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`
+
+	Bucket    string
+	Key       string
+	UploadID  string `xml:"UploadId"`
+	Initiator Initiator
+	Owner     Owner
+
+	// The class of storage used to store the object.
+	StorageClass string
+
+	PartNumberMarker     int
+	NextPartNumberMarker int
+	MaxParts             int
+	IsTruncated          bool
+
+	// List of parts.
+	Parts []Part `xml:"Part"`
+}
+
+// ListMultipartUploadsResponse - s3 api list multipart uploads response.
+type ListMultipartUploadsResponse struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"`
+
+	Bucket             string
+	KeyMarker          string
+	UploadIDMarker     string `xml:"UploadIdMarker"`
+	NextKeyMarker      string
+	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
+	Delimiter          string
+	Prefix             string
+	EncodingType       string `xml:"EncodingType,omitempty"`
+	MaxUploads         int
+	IsTruncated        bool
+
+	// List of pending uploads.
+	Uploads []Upload `xml:"Upload"`
+
+	// Delimited common prefixes.
+	CommonPrefixes []CommonPrefix
+}
+
+// Upload describes an in-progress multipart upload.
+type Upload struct {
+	Key          string
+	UploadID     string `xml:"UploadId"`
+	Initiator    Initiator
+	Owner        Owner
+	StorageClass string
+	Initiated    string
+}
+
+// CommonPrefix ListObjectsResponse common prefixes (directory abstraction)
+type CommonPrefix struct {
+	Prefix string
+}
+
+// Initiator same fields as Owner
+type Initiator Owner
+
+// Owner bucket ownership
+type Owner struct {
+	ID          string
+	DisplayName string
+}
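These are the structs the Responce helper marshals into the XML body. A small sketch (field values are illustrative) showing the element names the struct tags produce: a ListPartsResult root in the S3 namespace, an UploadId element, and repeated Part children:

package main

import (
	"encoding/xml"
	"fmt"

	"github.com/versity/versitygw/s3response"
)

func main() {
	res := s3response.ListPartsResponse{
		Bucket:   "my-bucket",
		Key:      "my-key",
		UploadID: "upload-1",
		MaxParts: 1000,
		Parts: []s3response.Part{
			{PartNumber: 1, ETag: "etag-1", Size: 11, LastModified: "2023-06-06T20:36:21.000Z"},
		},
	}
	b, err := xml.MarshalIndent(res, "", "  ")
	if err != nil {
		panic(err)
	}
	// Output begins with:
	//   <ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
	fmt.Println(string(b))
}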