Mirror of https://github.com/versity/versitygw.git (synced 2026-01-03 10:35:15 +00:00)
fix: standardize Backend interface args for s3 types
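This commit moves every Backend method from gateway-specific positional arguments to the corresponding aws-sdk-go-v2 request structs. As a rough illustration of what that means for callers (the helper name and variables below are illustrative, not taken from the commit), a handler that previously passed bucket and key strings now builds the SDK input and the backend dereferences the fields it needs:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/backend"
)

// deleteViaBackend is a hypothetical helper showing the new calling
// convention: the handler builds the SDK input struct and the backend
// dereferences the fields it needs.
func deleteViaBackend(be backend.Backend, bucket, key string) error {
	// Old interface (removed in this commit): be.DeleteObject(bucket, key)
	err := be.DeleteObject(&s3.DeleteObjectInput{
		Bucket: &bucket,
		Key:    &key,
	})
	if err != nil {
		return fmt.Errorf("delete %v/%v: %w", bucket, key, err)
	}
	return nil
}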
@@ -19,7 +19,6 @@ import (
"io"

"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)

@@ -29,33 +28,33 @@ type Backend interface {
fmt.Stringer
Shutdown()

ListBuckets() (s3response.ListAllMyBucketsResult, error)
HeadBucket(bucket string) (*s3.HeadBucketOutput, error)
GetBucketAcl(bucket string) ([]byte, error)
PutBucket(bucket, owner string) error
ListBuckets(owner string, isRoot bool) (s3response.ListAllMyBucketsResult, error)
HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
GetBucketAcl(*s3.GetBucketAclInput) ([]byte, error)
CreateBucket(*s3.CreateBucketInput) error
PutBucketAcl(bucket string, data []byte) error
DeleteBucket(bucket string) error
DeleteBucket(*s3.DeleteBucketInput) error

CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error)
CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(*s3.AbortMultipartUploadInput) error
ListMultipartUploads(*s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error)
PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error)
ListParts(*s3.ListPartsInput) (s3response.ListPartsResponse, error)
UploadPart(*s3.UploadPartInput) (etag string, err error)
UploadPartCopy(*s3.UploadPartCopyInput) (s3response.CopyObjectResult, error)

PutObject(*s3.PutObjectInput) (string, error)
HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error)
GetObjectAcl(bucket, object string) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error)
CopyObject(srcBucket, srcObject, dstBucket, dstObject string) (*s3.CopyObjectOutput, error)
ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error)
ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error)
DeleteObject(bucket, object string) error
DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error
HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
GetObject(*s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error)
GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAttributes(*s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error)
CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
DeleteObject(*s3.DeleteObjectInput) error
DeleteObjects(*s3.DeleteObjectsInput) error
PutObjectAcl(*s3.PutObjectAclInput) error
RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error
RestoreObject(*s3.RestoreObjectInput) error

GetTags(bucket, object string) (map[string]string, error)
SetTags(bucket, object string, tags map[string]string) error
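Under the new interface, a custom backend can embed BackendUnsupported and override only the calls it supports, dereferencing the SDK pointer fields much like the Posix implementation further down in this diff. A minimal sketch (the DirBackend type is hypothetical, for illustration only):

package example

import (
	"os"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/s3err"
)

// DirBackend is a hypothetical backend that supports HeadBucket and leaves
// everything else to the BackendUnsupported defaults.
type DirBackend struct {
	backend.BackendUnsupported
}

func (DirBackend) HeadBucket(input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
	// Input fields arrive as SDK pointers, so the implementation
	// dereferences what it needs (sketch only; real backends distinguish
	// not-found from other stat errors).
	if _, err := os.Stat(*input.Bucket); err != nil {
		return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
	}
	return &s3.HeadBucketOutput{}, nil
}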
@@ -73,7 +72,7 @@ func (BackendUnsupported) Shutdown() {}
func (BackendUnsupported) String() string {
return "Unsupported"
}
func (BackendUnsupported) ListBuckets() (s3response.ListAllMyBucketsResult, error) {
func (BackendUnsupported) ListBuckets(string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucketAcl(bucket string, data []byte) error {
@@ -82,72 +81,72 @@ func (BackendUnsupported) PutBucketAcl(bucket string, data []byte) error {
func (BackendUnsupported) PutObjectAcl(*s3.PutObjectAclInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error {
func (BackendUnsupported) RestoreObject(*s3.RestoreObjectInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) UploadPartCopy(*s3.UploadPartCopyInput) (s3response.CopyObjectResult, error) {
return s3response.CopyObjectResult{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetBucketAcl(bucket string) ([]byte, error) {
func (BackendUnsupported) GetBucketAcl(*s3.GetBucketAclInput) ([]byte, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
func (BackendUnsupported) HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutBucket(bucket, owner string) error {
func (BackendUnsupported) CreateBucket(*s3.CreateBucketInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteBucket(bucket string) error {
func (BackendUnsupported) DeleteBucket(*s3.DeleteBucketInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) CreateMultipartUpload(input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
func (BackendUnsupported) CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
func (BackendUnsupported) CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) error {
func (BackendUnsupported) AbortMultipartUpload(*s3.AbortMultipartUploadInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListMultipartUploads(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
func (BackendUnsupported) ListMultipartUploads(*s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
return s3response.ListMultipartUploadsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
func (BackendUnsupported) ListParts(*s3.ListPartsInput) (s3response.ListPartsResponse, error) {
return s3response.ListPartsResponse{}, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (etag string, err error) {
func (BackendUnsupported) UploadPart(*s3.UploadPartInput) (etag string, err error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}

func (BackendUnsupported) PutObject(*s3.PutObjectInput) (string, error) {
return "", s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObject(bucket, object string) error {
func (BackendUnsupported) DeleteObject(*s3.DeleteObjectInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error {
func (BackendUnsupported) DeleteObjects(*s3.DeleteObjectsInput) error {
return s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
func (BackendUnsupported) GetObject(*s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
func (BackendUnsupported) HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAcl(bucket, object string) (*s3.GetObjectAclOutput, error) {
func (BackendUnsupported) GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) GetObjectAttributes(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error) {
func (BackendUnsupported) GetObjectAttributes(*s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) CopyObject(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
func (BackendUnsupported) CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
func (BackendUnsupported) ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}
func (BackendUnsupported) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
func (BackendUnsupported) ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
}

@@ -99,7 +99,7 @@ func ParseRange(file fs.FileInfo, acceptRange string) (int64, int64, error) {
return startOffset, endOffset - startOffset + 1, nil
}

func GetMultipartMD5(parts []types.Part) string {
func GetMultipartMD5(parts []types.CompletedPart) string {
var partsEtagBytes []byte
for _, part := range parts {
partsEtagBytes = append(partsEtagBytes, getEtagBytes(*part.ETag)...)
@@ -96,7 +96,7 @@ func (p *Posix) String() string {
return "Posix Gateway"
}

func (p *Posix) ListBuckets() (s3response.ListAllMyBucketsResult, error) {
func (p *Posix) ListBuckets(owner string, isRoot bool) (s3response.ListAllMyBucketsResult, error) {
entries, err := os.ReadDir(".")
if err != nil {
return s3response.ListAllMyBucketsResult{},
@@ -131,8 +131,8 @@ func (p *Posix) ListBuckets() (s3response.ListAllMyBucketsResult, error) {
}, nil
}

func (p *Posix) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
_, err := os.Lstat(bucket)
func (p *Posix) HeadBucket(input *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
_, err := os.Lstat(*input.Bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
@@ -143,7 +143,10 @@ func (p *Posix) HeadBucket(bucket string) (*s3.HeadBucketOutput, error) {
return &s3.HeadBucketOutput{}, nil
}

func (p *Posix) PutBucket(bucket string, owner string) error {
func (p *Posix) CreateBucket(input *s3.CreateBucketInput) error {
bucket := *input.Bucket
owner := string(input.ObjectOwnership)

err := os.Mkdir(bucket, 0777)
if err != nil && os.IsExist(err) {
return s3err.GetAPIError(s3err.ErrBucketAlreadyExists)
@@ -165,8 +168,8 @@ func (p *Posix) PutBucket(bucket string, owner string) error {
return nil
}

func (p *Posix) DeleteBucket(bucket string) error {
names, err := os.ReadDir(bucket)
func (p *Posix) DeleteBucket(input *s3.DeleteBucketInput) error {
names, err := os.ReadDir(*input.Bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
@@ -177,13 +180,13 @@ func (p *Posix) DeleteBucket(bucket string) error {
if len(names) == 1 && names[0].Name() == metaTmpDir {
// if .sgwtmp is only item in directory
// then clean this up before trying to remove the bucket
err = os.RemoveAll(filepath.Join(bucket, metaTmpDir))
err = os.RemoveAll(filepath.Join(*input.Bucket, metaTmpDir))
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove temp dir: %w", err)
}
}

err = os.Remove(bucket)
err = os.Remove(*input.Bucket)
if err != nil && err.(*os.PathError).Err == syscall.ENOTEMPTY {
return s3err.GetAPIError(s3err.ErrBucketNotEmpty)
}
@@ -245,7 +248,12 @@ func (p *Posix) CreateMultipartUpload(mpu *s3.CreateMultipartUploadInput) (*s3.C
}, nil
}

func (p *Posix) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
func (p *Posix) CompleteMultipartUpload(input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
bucket := *input.Bucket
object := *input.Key
uploadID := *input.UploadId
parts := input.MultipartUpload.Parts

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -611,8 +619,24 @@ func (p *Posix) ListMultipartUploads(mpu *s3.ListMultipartUploadsInput) (s3respo
}, nil
}

func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
func (p *Posix) ListParts(input *s3.ListPartsInput) (s3response.ListPartsResponse, error) {
bucket := *input.Bucket
object := *input.Key
uploadID := *input.UploadId
stringMarker := *input.PartNumberMarker
maxParts := int(input.MaxParts)

var lpr s3response.ListPartsResponse

var partNumberMarker int
if stringMarker != "" {
var err error
partNumberMarker, err = strconv.Atoi(stringMarker)
if err != nil {
return lpr, s3err.GetAPIError(s3err.ErrInvalidPartNumberMarker)
}
}

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return lpr, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -693,7 +717,14 @@ func (p *Posix) ListObjectParts(bucket, object, uploadID string, partNumberMarke
}, nil
}

func (p *Posix) PutObjectPart(bucket, object, uploadID string, part int, length int64, r io.Reader) (string, error) {
func (p *Posix) UploadPart(input *s3.UploadPartInput) (string, error) {
bucket := *input.Bucket
object := *input.Key
uploadID := *input.UploadId
part := input.PartNumber
length := input.ContentLength
r := input.Body

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -912,7 +943,10 @@ func (p *Posix) PutObject(po *s3.PutObjectInput) (string, error) {
return etag, nil
}

func (p *Posix) DeleteObject(bucket, object string) error {
func (p *Posix) DeleteObject(input *s3.DeleteObjectInput) error {
bucket := *input.Bucket
object := *input.Key

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -966,10 +1000,13 @@ func (p *Posix) removeParents(bucket, object string) error {
return nil
}

func (p *Posix) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) error {
func (p *Posix) DeleteObjects(input *s3.DeleteObjectsInput) error {
// delete object already checks bucket
for _, obj := range objects.Delete.Objects {
err := p.DeleteObject(bucket, *obj.Key)
for _, obj := range input.Delete.Objects {
err := p.DeleteObject(&s3.DeleteObjectInput{
Bucket: input.Bucket,
Key: obj.Key,
})
if err != nil {
return err
}
@@ -978,7 +1015,11 @@ func (p *Posix) DeleteObjects(bucket string, objects *s3.DeleteObjectsInput) err
return nil
}

func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
func (p *Posix) GetObject(input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
bucket := *input.Bucket
object := *input.Key
acceptRange := *input.Range

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1051,7 +1092,10 @@ func (p *Posix) GetObject(bucket, object, acceptRange string, writer io.Writer)
}, nil
}

func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
func (p *Posix) HeadObject(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
bucket := *input.Bucket
object := *input.Key

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1088,7 +1132,14 @@ func (p *Posix) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error)
}, nil
}

func (p *Posix) CopyObject(srcBucket, srcObject, dstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
func (p *Posix) CopyObject(input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
srcBucket, srcObject, ok := strings.Cut(*input.CopySource, "/")
if !ok {
return nil, s3err.GetAPIError(s3err.ErrInvalidCopySource)
}
dstBucket := *input.Bucket
dstObject := *input.Key

_, err := os.Stat(srcBucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1138,7 +1189,13 @@ func (p *Posix) CopyObject(srcBucket, srcObject, dstBucket, dstObject string) (*
}, nil
}

func (p *Posix) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
func (p *Posix) ListObjects(input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
bucket := *input.Bucket
prefix := *input.Prefix
marker := *input.Marker
delim := *input.Delimiter
maxkeys := input.MaxKeys

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1160,7 +1217,7 @@ func (p *Posix) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (
Delimiter: &delim,
IsTruncated: results.Truncated,
Marker: &marker,
MaxKeys: int32(maxkeys),
MaxKeys: maxkeys,
Name: &bucket,
NextMarker: &results.NextMarker,
Prefix: &prefix,
@@ -1228,7 +1285,13 @@ func fileToObj(bucket string) backend.GetObjFunc {
}
}

func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
func (p *Posix) ListObjectsV2(input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
bucket := *input.Bucket
prefix := *input.Prefix
marker := *input.ContinuationToken
delim := *input.Delimiter
maxkeys := input.MaxKeys

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -1238,7 +1301,7 @@ func (p *Posix) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int)
}

fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
results, err := backend.Walk(fileSystem, prefix, delim, marker, int32(maxkeys),
fileToObj(bucket), []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
@@ -1273,8 +1336,8 @@ func (p *Posix) PutBucketAcl(bucket string, data []byte) error {
return nil
}

func (p *Posix) GetBucketAcl(bucket string) ([]byte, error) {
_, err := os.Stat(bucket)
func (p *Posix) GetBucketAcl(input *s3.GetBucketAclInput) ([]byte, error) {
_, err := os.Stat(*input.Bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
@@ -1282,7 +1345,7 @@ func (p *Posix) GetBucketAcl(bucket string) ([]byte, error) {
return nil, fmt.Errorf("stat bucket: %w", err)
}

b, err := xattr.Get(bucket, aclkey)
b, err := xattr.Get(*input.Bucket, aclkey)
if isNoAttr(err) {
return []byte{}, nil
}
@@ -114,7 +114,12 @@ func (*ScoutFS) String() string {
// CompleteMultipartUpload scoutfs complete upload uses scoutfs move blocks
// ioctl to not have to read and copy the part data to the final object. This
// saves a read and write cycle for all mutlipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
func (s *ScoutFS) CompleteMultipartUpload(input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
bucket := *input.Bucket
object := *input.Key
uploadID := *input.UploadId
parts := input.MultipartUpload.Parts

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -347,7 +352,10 @@ func mkdirAll(path string, perm os.FileMode, bucket, object string) error {
return nil
}

func (s *ScoutFS) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error) {
func (s *ScoutFS) HeadObject(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
bucket := *input.Bucket
object := *input.Key

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -417,7 +425,11 @@ func (s *ScoutFS) HeadObject(bucket, object string) (*s3.HeadObjectOutput, error
}, nil
}

func (s *ScoutFS) GetObject(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
func (s *ScoutFS) GetObject(input *s3.GetObjectInput, writer io.Writer) (*s3.GetObjectOutput, error) {
bucket := *input.Bucket
object := *input.Key
acceptRange := *input.Range

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -527,7 +539,13 @@ func (s *ScoutFS) getXattrTags(bucket, object string) (map[string]string, error)
return tags, nil
}

func (s *ScoutFS) ListObjects(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
func (s *ScoutFS) ListObjects(input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
bucket := *input.Bucket
prefix := *input.Prefix
marker := *input.Marker
delim := *input.Delimiter
maxkeys := input.MaxKeys

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -549,14 +567,20 @@ func (s *ScoutFS) ListObjects(bucket, prefix, marker, delim string, maxkeys int)
Delimiter: &delim,
IsTruncated: results.Truncated,
Marker: &marker,
MaxKeys: int32(maxkeys),
MaxKeys: maxkeys,
Name: &bucket,
NextMarker: &results.NextMarker,
Prefix: &prefix,
}, nil
}

func (s *ScoutFS) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
func (s *ScoutFS) ListObjectsV2(input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
bucket := *input.Bucket
prefix := *input.Prefix
marker := *input.ContinuationToken
delim := *input.Delimiter
maxkeys := input.MaxKeys

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return nil, s3err.GetAPIError(s3err.ErrNoSuchBucket)
@@ -566,7 +590,7 @@ func (s *ScoutFS) ListObjectsV2(bucket, prefix, marker, delim string, maxkeys in
}

fileSystem := os.DirFS(bucket)
results, err := backend.Walk(fileSystem, prefix, delim, marker, maxkeys,
results, err := backend.Walk(fileSystem, prefix, delim, marker, int32(maxkeys),
s.fileToObj(bucket), []string{metaTmpDir})
if err != nil {
return nil, fmt.Errorf("walk %v: %w", bucket, err)
@@ -663,7 +687,10 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {

// RestoreObject will set stage request on file if offline and do nothing if
// file is online
func (s *ScoutFS) RestoreObject(bucket, object string, restoreRequest *s3.RestoreObjectInput) error {
func (s *ScoutFS) RestoreObject(input *s3.RestoreObjectInput) error {
bucket := *input.Bucket
object := *input.Key

_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return s3err.GetAPIError(s3err.ErrNoSuchBucket)

@@ -38,7 +38,7 @@ var ErrSkipObj = errors.New("skip this object")

// Walk walks the supplied fs.FS and returns results compatible with list
// objects responses
func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int32, getObj GetObjFunc, skipdirs []string) (WalkResults, error) {
cpmap := make(map[string]struct{})
var objects []types.Object

@@ -129,7 +129,7 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
}
objects = append(objects, obj)

if max > 0 && (len(objects)+len(cpmap)) == max {
if max > 0 && (len(objects)+len(cpmap)) == int(max) {
pastMax = true
}

@@ -168,7 +168,7 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
return fmt.Errorf("file to object %q: %w", path, err)
}
objects = append(objects, obj)
if (len(objects) + len(cpmap)) == max {
if (len(objects) + len(cpmap)) == int(max) {
pastMax = true
}
return nil
@@ -178,7 +178,7 @@ func Walk(fileSystem fs.FS, prefix, delimiter, marker string, max int, getObj Ge
// These are abstractly a "directory", so need to include the
// delimiter at the end.
cpmap[prefix+before+delimiter] = struct{}{}
if (len(objects) + len(cpmap)) == int(max) {
pastMax = true
}
File diff suppressed because it is too large
@@ -23,6 +23,7 @@ import (
"net/http"
"strconv"
"strings"
"time"

"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
@@ -44,6 +45,10 @@ type S3ApiController struct {
evSender s3event.S3EventSender
}

const (
iso8601Format = "20060102T150405Z"
)

func New(be backend.Backend, iam auth.IAMService, logger s3log.AuditLogger, evs s3event.S3EventSender) S3ApiController {
return S3ApiController{be: be, iam: iam, logger: logger, evSender: evs}
}
@@ -53,7 +58,7 @@ func (c S3ApiController) ListBuckets(ctx *fiber.Ctx) error {
if err := auth.IsAdmin(access, isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "ListBucket"})
}
res, err := c.be.ListBuckets()
res, err := c.be.ListBuckets(access, isRoot)
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "ListBucket"})
}

@@ -63,7 +68,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
keyEnd := ctx.Params("*1")
uploadId := ctx.Query("uploadId")
maxParts := ctx.QueryInt("max-parts", 0)
partNumberMarker := ctx.QueryInt("part-number-marker", 0)
partNumberMarker := ctx.Query("part-number-marker")
acceptRange := ctx.Get("Range")
access := ctx.Locals("access").(string)
isRoot := ctx.Locals("isRoot").(bool)
@@ -71,7 +76,7 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
key = strings.Join([]string{key, keyEnd}, "/")
}

data, err := c.be.GetBucketAcl(bucket)
data, err := c.be.GetBucketAcl(&s3.GetBucketAclInput{Bucket: &bucket})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger})
}
@@ -101,25 +106,37 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {

if uploadId != "" {
if maxParts < 0 || (maxParts == 0 && ctx.Query("max-parts") != "") {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidMaxParts), &MetaOpts{Logger: c.logger, Action: "ListObjectParts", BucketOwner: parsedAcl.Owner})
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidMaxParts), &MetaOpts{Logger: c.logger, Action: "ListParts", BucketOwner: parsedAcl.Owner})
}
if partNumberMarker < 0 || (partNumberMarker == 0 && ctx.Query("part-number-marker") != "") {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPartNumberMarker), &MetaOpts{Logger: c.logger, Action: "ListObjectParts", BucketOwner: parsedAcl.Owner})
if partNumberMarker != "" {
n, err := strconv.Atoi(partNumberMarker)
if err != nil || n < 0 {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPartNumberMarker), &MetaOpts{Logger: c.logger, Action: "ListParts", BucketOwner: parsedAcl.Owner})
}
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "ListObjectParts", BucketOwner: parsedAcl.Owner})
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "ListParts", BucketOwner: parsedAcl.Owner})
}

res, err := c.be.ListObjectParts(bucket, key, uploadId, partNumberMarker, maxParts)
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "ListObjectParts", BucketOwner: parsedAcl.Owner})
res, err := c.be.ListParts(&s3.ListPartsInput{
Bucket: &bucket,
Key: &key,
UploadId: &uploadId,
PartNumberMarker: &partNumberMarker,
MaxParts: int32(maxParts),
})
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "ListParts", BucketOwner: parsedAcl.Owner})
}

if ctx.Request().URI().QueryArgs().Has("acl") {
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ_ACP", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "GetObjectAcl", BucketOwner: parsedAcl.Owner})
}
res, err := c.be.GetObjectAcl(bucket, key)
res, err := c.be.GetObjectAcl(&s3.GetObjectAclInput{
Bucket: &bucket,
Key: &key,
})
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "GetObjectAcl", BucketOwner: parsedAcl.Owner})
}

@@ -127,7 +144,15 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "GetObjectAttributes", BucketOwner: parsedAcl.Owner})
}
res, err := c.be.GetObjectAttributes(bucket, key, strings.Split(attrs, ","))
var oattrs []types.ObjectAttributes
for _, a := range strings.Split(attrs, ",") {
oattrs = append(oattrs, types.ObjectAttributes(a))
}
res, err := c.be.GetObjectAttributes(&s3.GetObjectAttributesInput{
Bucket: &bucket,
Key: &key,
ObjectAttributes: oattrs,
})
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "GetObjectAttributes", BucketOwner: parsedAcl.Owner})
}

@@ -136,7 +161,11 @@ func (c S3ApiController) GetActions(ctx *fiber.Ctx) error {
}

ctx.Locals("logResBody", false)
res, err := c.be.GetObject(bucket, key, acceptRange, ctx.Response().BodyWriter())
res, err := c.be.GetObject(&s3.GetObjectInput{
Bucket: &bucket,
Key: &key,
Range: &acceptRange,
}, ctx.Response().BodyWriter())
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "GetObject", BucketOwner: parsedAcl.Owner})
}
@@ -194,7 +223,7 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
access := ctx.Locals("access").(string)
isRoot := ctx.Locals("isRoot").(bool)

data, err := c.be.GetBucketAcl(bucket)
data, err := c.be.GetBucketAcl(&s3.GetBucketAclInput{Bucket: &bucket})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger})
}
@@ -225,7 +254,13 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
if err := auth.VerifyACL(parsedAcl, bucket, access, "READ", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "ListObjectsV2", BucketOwner: parsedAcl.Owner})
}
res, err := c.be.ListObjectsV2(bucket, prefix, marker, delimiter, maxkeys)
res, err := c.be.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: &bucket,
Prefix: &prefix,
ContinuationToken: &marker,
Delimiter: &delimiter,
MaxKeys: int32(maxkeys),
})
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "ListObjectsV2", BucketOwner: parsedAcl.Owner})
}

@@ -233,7 +268,13 @@ func (c S3ApiController) ListActions(ctx *fiber.Ctx) error {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "ListObjects", BucketOwner: parsedAcl.Owner})
}

res, err := c.be.ListObjects(bucket, prefix, marker, delimiter, maxkeys)
res, err := c.be.ListObjects(&s3.ListObjectsInput{
Bucket: &bucket,
Prefix: &prefix,
Marker: &marker,
Delimiter: &delimiter,
MaxKeys: int32(maxkeys),
})
return SendXMLResponse(ctx, res, err, &MetaOpts{Logger: c.logger, Action: "ListObjects", BucketOwner: parsedAcl.Owner})
}

@@ -255,7 +296,7 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
if ctx.Request().URI().QueryArgs().Has("acl") {
var input *s3.PutBucketAclInput

data, err := c.be.GetBucketAcl(bucket)
data, err := c.be.GetBucketAcl(&s3.GetBucketAclInput{Bucket: &bucket})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "PutBucketAcl"})
}
@@ -322,7 +363,10 @@ func (c S3ApiController) PutBucketActions(ctx *fiber.Ctx) error {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "PutBucketAcl", BucketOwner: parsedAcl.Owner})
}

err := c.be.PutBucket(bucket, access)
err := c.be.CreateBucket(&s3.CreateBucketInput{
Bucket: &bucket,
ObjectOwnership: types.ObjectOwnership(access),
})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "PutBucket", BucketOwner: ctx.Locals("access").(string)})
}

@@ -364,7 +408,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
keyStart = keyStart + "/"
}

data, err := c.be.GetBucketAcl(bucket)
data, err := c.be.GetBucketAcl(&s3.GetBucketAclInput{Bucket: &bucket})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger})
}
@@ -422,24 +466,30 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
if ctx.Request().URI().QueryArgs().Has("uploadId") && ctx.Request().URI().QueryArgs().Has("partNumber") {
partNumber := ctx.QueryInt("partNumber", -1)
if partNumber < 1 || partNumber > 10000 {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPart), &MetaOpts{Logger: c.logger, Action: "PutObjectPart", BucketOwner: parsedAcl.Owner})
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidPart), &MetaOpts{Logger: c.logger, Action: "UploadPart", BucketOwner: parsedAcl.Owner})
}

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "PutObjectPart", BucketOwner: parsedAcl.Owner})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "UploadPart", BucketOwner: parsedAcl.Owner})
}

contentLength, err := strconv.ParseInt(contentLengthStr, 10, 64)
if err != nil {
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), &MetaOpts{Logger: c.logger, Action: "PutObjectPart", BucketOwner: parsedAcl.Owner})
return SendResponse(ctx, s3err.GetAPIError(s3err.ErrInvalidRequest), &MetaOpts{Logger: c.logger, Action: "UploadPart", BucketOwner: parsedAcl.Owner})
}

body := io.ReadSeeker(bytes.NewReader([]byte(ctx.Body())))
ctx.Locals("logReqBody", false)
etag, err := c.be.PutObjectPart(bucket, keyStart, uploadId,
partNumber, contentLength, body)
etag, err := c.be.UploadPart(&s3.UploadPartInput{
Bucket: &bucket,
Key: &keyStart,
UploadId: &uploadId,
PartNumber: int32(partNumber),
ContentLength: contentLength,
Body: body,
})
ctx.Response().Header.Set("Etag", etag)
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "PutObjectPart", BucketOwner: parsedAcl.Owner})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "UploadPart", BucketOwner: parsedAcl.Owner})
}

if ctx.Request().URI().QueryArgs().Has("acl") {
@@ -503,16 +553,33 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
}

if copySource != "" {
_, _, _, _ = copySrcIfMatch, copySrcIfNoneMatch,
copySrcModifSince, copySrcUnmodifSince
copySourceSplit := strings.Split(copySource, "/")
srcBucket, srcObject := copySourceSplit[0], copySourceSplit[1:]

if err := auth.VerifyACL(parsedAcl, bucket, access, "WRITE", isRoot); err != nil {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "CopyObject", BucketOwner: parsedAcl.Owner})
}

res, err := c.be.CopyObject(srcBucket, strings.Join(srcObject, "/"), bucket, keyStart)
var mtime time.Time
if copySrcModifSince != "" {
mtime, err = time.Parse(iso8601Format, copySrcModifSince)
if err != nil {
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidCopySource), &MetaOpts{Logger: c.logger, Action: "CopyObject", BucketOwner: parsedAcl.Owner})
}
}
var umtime time.Time
if copySrcModifSince != "" {
mtime, err = time.Parse(iso8601Format, copySrcUnmodifSince)
if err != nil {
return SendXMLResponse(ctx, nil, s3err.GetAPIError(s3err.ErrInvalidCopySource), &MetaOpts{Logger: c.logger, Action: "CopyObject", BucketOwner: parsedAcl.Owner})
}
}
res, err := c.be.CopyObject(&s3.CopyObjectInput{
Bucket: &bucket,
Key: &keyStart,
CopySource: &copySource,
CopySourceIfMatch: &copySrcIfMatch,
CopySourceIfNoneMatch: &copySrcIfNoneMatch,
CopySourceIfModifiedSince: &mtime,
CopySourceIfUnmodifiedSince: &umtime,
})
if err == nil {
return SendXMLResponse(ctx, res, err, &MetaOpts{
Logger: c.logger,
@@ -566,7 +633,7 @@ func (c S3ApiController) PutActions(ctx *fiber.Ctx) error {
func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
bucket, access, isRoot := ctx.Params("bucket"), ctx.Locals("access").(string), ctx.Locals("isRoot").(bool)

data, err := c.be.GetBucketAcl(bucket)
data, err := c.be.GetBucketAcl(&s3.GetBucketAclInput{Bucket: &bucket})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBuckets"})
}
@@ -580,7 +647,9 @@ func (c S3ApiController) DeleteBucket(ctx *fiber.Ctx) error {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBucket", BucketOwner: parsedAcl.Owner})
}

err = c.be.DeleteBucket(bucket)
err = c.be.DeleteBucket(&s3.DeleteBucketInput{
Bucket: &bucket,
})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteBucket", BucketOwner: parsedAcl.Owner})
}

@@ -588,7 +657,7 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
bucket, access, isRoot := ctx.Params("bucket"), ctx.Locals("access").(string), ctx.Locals("isRoot").(bool)
var dObj types.Delete

data, err := c.be.GetBucketAcl(bucket)
data, err := c.be.GetBucketAcl(&s3.GetBucketAclInput{Bucket: &bucket})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteObjects"})
}
@@ -606,7 +675,10 @@ func (c S3ApiController) DeleteObjects(ctx *fiber.Ctx) error {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteObjects", BucketOwner: parsedAcl.Owner})
}

err = c.be.DeleteObjects(bucket, &s3.DeleteObjectsInput{Delete: &dObj})
err = c.be.DeleteObjects(&s3.DeleteObjectsInput{
Bucket: &bucket,
Delete: &dObj,
})
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteObjects", BucketOwner: parsedAcl.Owner})
}

@@ -622,7 +694,7 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
key = strings.Join([]string{key, keyEnd}, "/")
}

data, err := c.be.GetBucketAcl(bucket)
data, err := c.be.GetBucketAcl(&s3.GetBucketAclInput{Bucket: &bucket})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger})
}
@@ -668,7 +740,10 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "DeleteObject", BucketOwner: parsedAcl.Owner})
}

err = c.be.DeleteObject(bucket, key)
err = c.be.DeleteObject(&s3.DeleteObjectInput{
Bucket: &bucket,
Key: &key,
})
return SendResponse(ctx, err, &MetaOpts{
Logger: c.logger,
EvSender: c.evSender,
@@ -681,7 +756,7 @@ func (c S3ApiController) DeleteActions(ctx *fiber.Ctx) error {
func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) error {
bucket, access, isRoot := ctx.Params("bucket"), ctx.Locals("access").(string), ctx.Locals("isRoot").(bool)

data, err := c.be.GetBucketAcl(bucket)
data, err := c.be.GetBucketAcl(&s3.GetBucketAclInput{Bucket: &bucket})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "HeadBucket"})
}
@@ -695,7 +770,9 @@ func (c S3ApiController) HeadBucket(ctx *fiber.Ctx) error {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "HeadBucket", BucketOwner: parsedAcl.Owner})
}

_, err = c.be.HeadBucket(bucket)
_, err = c.be.HeadBucket(&s3.HeadBucketInput{
Bucket: &bucket,
})
// TODO: set bucket response headers
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "HeadBucket", BucketOwner: parsedAcl.Owner})
}
@@ -712,7 +789,9 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
key = strings.Join([]string{key, keyEnd}, "/")
}

data, err := c.be.GetBucketAcl(bucket)
data, err := c.be.GetBucketAcl(&s3.GetBucketAclInput{
Bucket: &bucket,
})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "HeadObject"})
}
@@ -726,7 +805,10 @@ func (c S3ApiController) HeadObject(ctx *fiber.Ctx) error {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "HeadObject", BucketOwner: parsedAcl.Owner})
}

res, err := c.be.HeadObject(bucket, key)
res, err := c.be.HeadObject(&s3.HeadObjectInput{
Bucket: &bucket,
Key: &key,
})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "HeadObject", BucketOwner: parsedAcl.Owner})
}
@@ -785,7 +867,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
key = strings.Join([]string{key, keyEnd}, "/")
}

data, err := c.be.GetBucketAcl(bucket)
data, err := c.be.GetBucketAcl(&s3.GetBucketAclInput{Bucket: &bucket})
if err != nil {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger})
}
@@ -806,7 +888,10 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
return SendResponse(ctx, err, &MetaOpts{Logger: c.logger, Action: "RestoreObject", BucketOwner: parsedAcl.Owner})
}

err = c.be.RestoreObject(bucket, key, &restoreRequest)
restoreRequest.Bucket = &bucket
restoreRequest.Key = &key

err = c.be.RestoreObject(&restoreRequest)
return SendResponse(ctx, err, &MetaOpts{
Logger: c.logger,
EvSender: c.evSender,
@@ -818,7 +903,7 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {

if uploadId != "" {
data := struct {
Parts []types.Part `xml:"Part"`
Parts []types.CompletedPart `xml:"Part"`
}{}

if err := xml.Unmarshal(ctx.Body(), &data); err != nil {
@@ -829,7 +914,14 @@ func (c S3ApiController) CreateActions(ctx *fiber.Ctx) error {
return SendXMLResponse(ctx, nil, err, &MetaOpts{Logger: c.logger, Action: "CompleteMultipartUpload", BucketOwner: parsedAcl.Owner})
}

res, err := c.be.CompleteMultipartUpload(bucket, key, uploadId, data.Parts)
res, err := c.be.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: &bucket,
Key: &key,
UploadId: &uploadId,
MultipartUpload: &types.CompletedMultipartUpload{
Parts: data.Parts,
},
})
if err == nil {
return SendXMLResponse(ctx, res, err, &MetaOpts{
Logger: c.logger,
@@ -89,10 +89,10 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
ListBucketsFunc: func() (s3response.ListAllMyBucketsResult, error) {
ListBucketsFunc: func(string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, nil
},
},
@@ -110,10 +110,10 @@ func TestS3ApiController_ListBuckets(t *testing.T) {
appErr := fiber.New()
s3ApiControllerErr := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
ListBucketsFunc: func() (s3response.ListAllMyBucketsResult, error) {
ListBucketsFunc: func(string, bool) (s3response.ListAllMyBucketsResult, error) {
return s3response.ListAllMyBucketsResult{}, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
},
},
@@ -201,19 +201,19 @@ func TestS3ApiController_GetActions(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
ListObjectPartsFunc: func(bucket, object, uploadID string, partNumberMarker int, maxParts int) (s3response.ListPartsResponse, error) {
ListPartsFunc: func(*s3.ListPartsInput) (s3response.ListPartsResponse, error) {
return s3response.ListPartsResponse{}, nil
},
GetObjectAclFunc: func(bucket, object string) (*s3.GetObjectAclOutput, error) {
GetObjectAclFunc: func(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) {
return &s3.GetObjectAclOutput{}, nil
},
GetObjectAttributesFunc: func(bucket, object string, attributes []string) (*s3.GetObjectAttributesOutput, error) {
GetObjectAttributesFunc: func(*s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) {
return &s3.GetObjectAttributesOutput{}, nil
},
GetObjectFunc: func(bucket, object, acceptRange string, writer io.Writer) (*s3.GetObjectOutput, error) {
GetObjectFunc: func(*s3.GetObjectInput, io.Writer) (*s3.GetObjectOutput, error) {
return &s3.GetObjectOutput{
Metadata: map[string]string{"hello": "world"},
ContentType: getPtr("application/xml"),
@@ -353,16 +353,16 @@ func TestS3ApiController_ListActions(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
ListMultipartUploadsFunc: func(output *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResponse, error) {
return s3response.ListMultipartUploadsResponse{}, nil
},
ListObjectsV2Func: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsV2Output, error) {
ListObjectsV2Func: func(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
return &s3.ListObjectsV2Output{}, nil
},
ListObjectsFunc: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
ListObjectsFunc: func(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
return &s3.ListObjectsOutput{}, nil
},
},
@@ -380,10 +380,10 @@ func TestS3ApiController_ListActions(t *testing.T) {
//Error case
s3ApiControllerError := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
ListObjectsFunc: func(bucket, prefix, marker, delim string, maxkeys int) (*s3.ListObjectsOutput, error) {
ListObjectsFunc: func(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
return nil, s3err.GetAPIError(s3err.ErrNotImplemented)
},
},
@@ -498,13 +498,13 @@ func TestS3ApiController_PutBucketActions(t *testing.T) {

s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
PutBucketAclFunc: func(string, []byte) error {
return nil
},
PutBucketFunc: func(bucket, owner string) error {
CreateBucketFunc: func(*s3.CreateBucketInput) error {
return nil
},
},
@@ -650,13 +650,13 @@ func TestS3ApiController_PutActions(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
PutObjectAclFunc: func(*s3.PutObjectAclInput) error {
return nil
},
CopyObjectFunc: func(srcBucket, srcObject, DstBucket, dstObject string) (*s3.CopyObjectOutput, error) {
CopyObjectFunc: func(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error) {
return &s3.CopyObjectOutput{
CopyObjectResult: &types.CopyObjectResult{},
}, nil
@@ -664,7 +664,7 @@ func TestS3ApiController_PutActions(t *testing.T) {
PutObjectFunc: func(*s3.PutObjectInput) (string, error) {
return "ETag", nil
},
PutObjectPartFunc: func(bucket, object, uploadID string, part int, length int64, r io.Reader) (string, error) {
UploadPartFunc: func(*s3.UploadPartInput) (string, error) {
return "hello", nil
},
SetTagsFunc: func(bucket, object string, tags map[string]string) error {
@@ -864,10 +864,10 @@ func TestS3ApiController_DeleteBucket(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
DeleteBucketFunc: func(bucket string) error {
DeleteBucketFunc: func(*s3.DeleteBucketInput) error {
return nil
},
},
@@ -920,10 +920,10 @@ func TestS3ApiController_DeleteObjects(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
DeleteObjectsFunc: func(bucket string, objects *s3.DeleteObjectsInput) error {
DeleteObjectsFunc: func(*s3.DeleteObjectsInput) error {
return nil
},
},
@@ -990,10 +990,10 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
DeleteObjectFunc: func(bucket, object string) error {
DeleteObjectFunc: func(*s3.DeleteObjectInput) error {
return nil
},
AbortMultipartUploadFunc: func(*s3.AbortMultipartUploadInput) error {
@@ -1017,10 +1017,10 @@ func TestS3ApiController_DeleteActions(t *testing.T) {
appErr := fiber.New()

s3ApiControllerErr := S3ApiController{be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
DeleteObjectFunc: func(bucket, object string) error {
DeleteObjectFunc: func(*s3.DeleteObjectInput) error {
return s3err.GetAPIError(7)
},
}}
@@ -1098,10 +1098,10 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
HeadBucketFunc: func(bucket string) (*s3.HeadBucketOutput, error) {
HeadBucketFunc: func(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
return &s3.HeadBucketOutput{}, nil
},
},
@@ -1120,10 +1120,10 @@ func TestS3ApiController_HeadBucket(t *testing.T) {
appErr := fiber.New()

s3ApiControllerErr := S3ApiController{be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
HeadBucketFunc: func(bucket string) (*s3.HeadBucketOutput, error) {
HeadBucketFunc: func(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
return nil, s3err.GetAPIError(3)
},
},
@@ -1192,10 +1192,10 @@ func TestS3ApiController_HeadObject(t *testing.T) {

s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
HeadObjectFunc: func(bucket, object string) (*s3.HeadObjectOutput, error) {
HeadObjectFunc: func(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return &s3.HeadObjectOutput{
ContentEncoding: &contentEncoding,
ContentLength: 64,
@@ -1220,10 +1220,10 @@ func TestS3ApiController_HeadObject(t *testing.T) {

s3ApiControllerErr := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
HeadObjectFunc: func(bucket, object string) (*s3.HeadObjectOutput, error) {
HeadObjectFunc: func(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return nil, s3err.GetAPIError(42)
},
},
@@ -1283,13 +1283,13 @@ func TestS3ApiController_CreateActions(t *testing.T) {
app := fiber.New()
s3ApiController := S3ApiController{
be: &BackendMock{
GetBucketAclFunc: func(bucket string) ([]byte, error) {
GetBucketAclFunc: func(*s3.GetBucketAclInput) ([]byte, error) {
return acldata, nil
},
RestoreObjectFunc: func(bucket, object string, restoreRequest *s3.RestoreObjectInput) error {
RestoreObjectFunc: func(restoreRequest *s3.RestoreObjectInput) error {
return nil
},
CompleteMultipartUploadFunc: func(bucket, object, uploadID string, parts []types.Part) (*s3.CompleteMultipartUploadOutput, error) {
CompleteMultipartUploadFunc: func(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
return &s3.CompleteMultipartUploadOutput{}, nil
},
CreateMultipartUploadFunc: func(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {