Merged scoutfs and posix ListObjects and ListObjectsV2
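This change removes the duplicated object-listing code from the scoutfs backend and routes both backends through a single implementation in the posix backend of versitygw:

- posix: ListObjects and ListObjectsV2 become thin wrappers around the new ListObjectsParametrized and ListObjectsV2Parametrized functions, which take the file-to-object conversion callback (func(string, bool) backend.GetObjFunc) as a parameter. The posix converter fileToObj is exported as FileToObj so other backends can reuse or wrap it.
- scoutfs: ListObjects and ListObjectsV2 now delegate to the posix parametrized functions, passing a scoutfs fileToObj that wraps Posix.FileToObj and, in glacier mode, overrides the storage class to GLACIER for files with offline extents. The duplicated listing code, the scoutfs-local meta store field, the etagkey and checksumsKey constants, and the retrieveChecksums helper are no longer needed here and are removed.
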
@@ -4206,6 +4206,10 @@ func (p *Posix) CopyObject(ctx context.Context, input s3response.CopyObjectInput
 }
 
 func (p *Posix) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
+	return p.ListObjectsParametrized(ctx, input, p.FileToObj)
+}
+
+func (p *Posix) ListObjectsParametrized(ctx context.Context, input *s3.ListObjectsInput, customFileToObj func(string, bool) backend.GetObjFunc) (s3response.ListObjectsResult, error) {
 	bucket := *input.Bucket
 	prefix := ""
 	if input.Prefix != nil {
@@ -4234,7 +4238,7 @@ func (p *Posix) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3
 
 	fileSystem := os.DirFS(bucket)
 	results, err := backend.Walk(ctx, fileSystem, prefix, delim, marker, maxkeys,
-		p.fileToObj(bucket, true), []string{metaTmpDir})
+		customFileToObj(bucket, true), []string{metaTmpDir})
 	if err != nil {
 		return s3response.ListObjectsResult{}, fmt.Errorf("walk %v: %w", bucket, err)
 	}
@@ -4252,7 +4256,7 @@ func (p *Posix) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3
 	}, nil
 }
 
-func (p *Posix) fileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
+func (p *Posix) FileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
 	return func(path string, d fs.DirEntry) (s3response.Object, error) {
 		var owner *types.Owner
 		// Retreive the object owner data from bucket ACL, if fetchOwner is true
@@ -4355,6 +4359,10 @@ func (p *Posix) fileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
 }
 
 func (p *Posix) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
+	return p.ListObjectsV2Parametrized(ctx, input, p.FileToObj)
+}
+
+func (p *Posix) ListObjectsV2Parametrized(ctx context.Context, input *s3.ListObjectsV2Input, customFileToObj func(string, bool) backend.GetObjFunc) (s3response.ListObjectsV2Result, error) {
 	bucket := *input.Bucket
 	prefix := ""
 	if input.Prefix != nil {
@@ -4391,7 +4399,7 @@ func (p *Posix) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input)
 
 	fileSystem := os.DirFS(bucket)
 	results, err := backend.Walk(ctx, fileSystem, prefix, delim, marker, maxkeys,
-		p.fileToObj(bucket, fetchOwner), []string{metaTmpDir})
+		customFileToObj(bucket, fetchOwner), []string{metaTmpDir})
 	if err != nil {
 		return s3response.ListObjectsV2Result{}, fmt.Errorf("walk %v: %w", bucket, err)
 	}
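
With the posix changes above, a backend that embeds posix.Posix can reuse the shared listing logic and only supply its own object-conversion callback. The following is a minimal sketch of that pattern, not code from this commit; the MyFS type and the package name are hypothetical, while the signatures are taken from the diff above.

package example

import (
	"context"
	"io/fs"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/backend"
	"github.com/versity/versitygw/backend/posix"
	"github.com/versity/versitygw/s3response"
)

// MyFS is a hypothetical backend that embeds the posix implementation.
type MyFS struct {
	*posix.Posix
}

// fileToObj wraps the exported posix converter and can post-process each object.
func (m *MyFS) fileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
	posixFileToObj := m.Posix.FileToObj(bucket, fetchOwner)
	return func(path string, d fs.DirEntry) (s3response.Object, error) {
		obj, err := posixFileToObj(path, d)
		if err != nil {
			return obj, err
		}
		// backend-specific adjustments to obj would go here
		return obj, nil
	}
}

// ListObjects delegates to the shared parametrized implementation,
// passing the custom converter instead of the posix default.
func (m *MyFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
	return m.Posix.ListObjectsParametrized(ctx, input, m.fileToObj)
}
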
@@ -30,7 +30,6 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/pkg/xattr"
 	"github.com/versity/versitygw/backend"
-	"github.com/versity/versitygw/backend/meta"
 	"github.com/versity/versitygw/backend/posix"
 	"github.com/versity/versitygw/s3err"
 	"github.com/versity/versitygw/s3response"
@@ -50,9 +49,6 @@ type ScoutFS struct {
 	rootfd  *os.File
 	rootdir string
 
-	// bucket/object metadata storage facility
-	meta meta.MetadataStorer
-
 	// glaciermode enables the following behavior:
 	// GET object: if file offline, return invalid object state
 	// HEAD object: if file offline, set obj storage class to GLACIER
@@ -75,8 +71,6 @@ var _ backend.Backend = &ScoutFS{}
 const (
 	metaTmpDir          = ".sgwtmp"
 	metaTmpMultipartDir = metaTmpDir + "/multipart"
-	etagkey             = "etag"
-	checksumsKey        = "checksums"
 )
 
 var (
@@ -245,173 +239,25 @@ func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.
 }
 
 func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
-	bucket := *input.Bucket
-	prefix := ""
-	if input.Prefix != nil {
-		prefix = *input.Prefix
-	}
-	marker := ""
-	if input.Marker != nil {
-		marker = *input.Marker
-	}
-	delim := ""
-	if input.Delimiter != nil {
-		delim = *input.Delimiter
-	}
-	maxkeys := int32(0)
-	if input.MaxKeys != nil {
-		maxkeys = *input.MaxKeys
-	}
-
-	_, err := os.Stat(bucket)
-	if errors.Is(err, fs.ErrNotExist) {
-		return s3response.ListObjectsResult{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
-	}
-	if err != nil {
-		return s3response.ListObjectsResult{}, fmt.Errorf("stat bucket: %w", err)
-	}
-
-	fileSystem := os.DirFS(bucket)
-	results, err := backend.Walk(ctx, fileSystem, prefix, delim, marker, maxkeys,
-		s.fileToObj(bucket), []string{metaTmpDir})
-	if err != nil {
-		return s3response.ListObjectsResult{}, fmt.Errorf("walk %v: %w", bucket, err)
-	}
-
-	return s3response.ListObjectsResult{
-		CommonPrefixes: results.CommonPrefixes,
-		Contents:       results.Objects,
-		Delimiter:      backend.GetPtrFromString(delim),
-		Marker:         backend.GetPtrFromString(marker),
-		NextMarker:     backend.GetPtrFromString(results.NextMarker),
-		Prefix:         backend.GetPtrFromString(prefix),
-		IsTruncated:    &results.Truncated,
-		MaxKeys:        &maxkeys,
-		Name:           &bucket,
-	}, nil
+	return s.Posix.ListObjectsParametrized(ctx, input, s.fileToObj)
 }
 
 func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
-	bucket := *input.Bucket
-	prefix := ""
-	if input.Prefix != nil {
-		prefix = *input.Prefix
-	}
-	marker := ""
-	if input.ContinuationToken != nil {
-		if input.StartAfter != nil {
-			marker = max(*input.StartAfter, *input.ContinuationToken)
-		} else {
-			marker = *input.ContinuationToken
-		}
-	}
-	delim := ""
-	if input.Delimiter != nil {
-		delim = *input.Delimiter
-	}
-	maxkeys := int32(0)
-	if input.MaxKeys != nil {
-		maxkeys = *input.MaxKeys
-	}
-
-	_, err := os.Stat(bucket)
-	if errors.Is(err, fs.ErrNotExist) {
-		return s3response.ListObjectsV2Result{}, s3err.GetAPIError(s3err.ErrNoSuchBucket)
-	}
-	if err != nil {
-		return s3response.ListObjectsV2Result{}, fmt.Errorf("stat bucket: %w", err)
-	}
-
-	fileSystem := os.DirFS(bucket)
-	results, err := backend.Walk(ctx, fileSystem, prefix, delim, marker, int32(maxkeys),
-		s.fileToObj(bucket), []string{metaTmpDir})
-	if err != nil {
-		return s3response.ListObjectsV2Result{}, fmt.Errorf("walk %v: %w", bucket, err)
-	}
-
-	count := int32(len(results.Objects))
-
-	return s3response.ListObjectsV2Result{
-		CommonPrefixes:        results.CommonPrefixes,
-		Contents:              results.Objects,
-		IsTruncated:           &results.Truncated,
-		MaxKeys:               &maxkeys,
-		Name:                  &bucket,
-		KeyCount:              &count,
-		Delimiter:             backend.GetPtrFromString(delim),
-		ContinuationToken:     backend.GetPtrFromString(marker),
-		NextContinuationToken: backend.GetPtrFromString(results.NextMarker),
-		Prefix:                backend.GetPtrFromString(prefix),
-		StartAfter:            backend.GetPtrFromString(*input.StartAfter),
-	}, nil
+	return s.Posix.ListObjectsV2Parametrized(ctx, input, s.fileToObj)
 }
 
-func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
+func (s *ScoutFS) fileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
+	posixFileToObj := s.Posix.FileToObj(bucket, fetchOwner)
+
 	return func(path string, d fs.DirEntry) (s3response.Object, error) {
+		res, err := posixFileToObj(path, d)
+		if err != nil || d.IsDir() {
+			return res, err
+		}
 		objPath := filepath.Join(bucket, path)
-		if d.IsDir() {
-			// directory object only happens if directory empty
-			// check to see if this is a directory object by checking etag
-			etagBytes, err := s.meta.RetrieveAttribute(nil, bucket, path, etagkey)
-			if errors.Is(err, meta.ErrNoSuchKey) || errors.Is(err, fs.ErrNotExist) {
-				return s3response.Object{}, backend.ErrSkipObj
-			}
-			if err != nil {
-				return s3response.Object{}, fmt.Errorf("get etag: %w", err)
-			}
-			etag := string(etagBytes)
-
-			fi, err := d.Info()
-			if errors.Is(err, fs.ErrNotExist) {
-				return s3response.Object{}, backend.ErrSkipObj
-			}
-			if err != nil {
-				return s3response.Object{}, fmt.Errorf("get fileinfo: %w", err)
-			}
-
-			size := int64(0)
-			mtime := fi.ModTime()
-
-			return s3response.Object{
-				ETag:         &etag,
-				Key:          &path,
-				LastModified: &mtime,
-				Size:         &size,
-				StorageClass: types.ObjectStorageClassStandard,
-			}, nil
-		}
-
-		// Retreive the object checksum algorithm
-		checksums, err := s.retrieveChecksums(nil, bucket, path)
-		if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
-			return s3response.Object{}, backend.ErrSkipObj
-		}
-
-		// file object, get object info and fill out object data
-		b, err := s.meta.RetrieveAttribute(nil, bucket, path, etagkey)
-		if errors.Is(err, fs.ErrNotExist) {
-			return s3response.Object{}, backend.ErrSkipObj
-		}
-		if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
-			return s3response.Object{}, fmt.Errorf("get etag: %w", err)
-		}
-		// note: meta.ErrNoSuchKey will return etagBytes = []byte{}
-		// so this will just set etag to "" if its not already set
-
-		etag := string(b)
-
-		fi, err := d.Info()
-		if errors.Is(err, fs.ErrNotExist) {
-			return s3response.Object{}, backend.ErrSkipObj
-		}
-		if err != nil {
-			return s3response.Object{}, fmt.Errorf("get fileinfo: %w", err)
-		}
-
-		sc := types.ObjectStorageClassStandard
 		if s.glaciermode {
 			// Check if there are any offline exents associated with this file.
-			// If so, we will return the InvalidObjectState error.
+			// If so, we will return the Glacier storage class
 			st, err := statMore(objPath)
 			if errors.Is(err, fs.ErrNotExist) {
 				return s3response.Object{}, backend.ErrSkipObj
@@ -420,35 +266,13 @@ func (s *ScoutFS) fileToObj(bucket string) backend.GetObjFunc {
 				return s3response.Object{}, fmt.Errorf("stat more: %w", err)
 			}
 			if st.Offline_blocks != 0 {
-				sc = types.ObjectStorageClassGlacier
+				res.StorageClass = types.ObjectStorageClassGlacier
 			}
 		}
-
-		size := fi.Size()
-		mtime := fi.ModTime()
-
-		return s3response.Object{
-			ETag:              &etag,
-			Key:               &path,
-			LastModified:      &mtime,
-			Size:              &size,
-			StorageClass:      sc,
-			ChecksumAlgorithm: []types.ChecksumAlgorithm{checksums.Algorithm},
-			ChecksumType:      checksums.Type,
-		}, nil
+		return res, nil
 	}
 }
 
-func (s *ScoutFS) retrieveChecksums(f *os.File, bucket, object string) (checksums s3response.Checksum, err error) {
-	checksumsAtr, err := s.meta.RetrieveAttribute(f, bucket, object, checksumsKey)
-	if err != nil {
-		return checksums, err
-	}
-
-	err = json.Unmarshal(checksumsAtr, &checksums)
-	return checksums, err
-}
-
 // RestoreObject will set stage request on file if offline and do nothing if
 // file is online
 func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput) error {
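
The new scoutfs fileToObj is effectively a decorator over backend.GetObjFunc: it calls the posix converter and only patches the storage class of file objects whose extents are offline. The same idea can be written as a generic wrapper; this is an illustrative sketch only, not part of this commit (the withStorageClassOverride helper and the isOffline predicate are hypothetical, the types come from the diff above).

// Assumes the imports already used in this file: io/fs,
// github.com/aws/aws-sdk-go-v2/service/s3/types,
// github.com/versity/versitygw/backend, and github.com/versity/versitygw/s3response.
func withStorageClassOverride(inner backend.GetObjFunc, isOffline func(path string) bool) backend.GetObjFunc {
	return func(path string, d fs.DirEntry) (s3response.Object, error) {
		obj, err := inner(path, d)
		if err != nil || d.IsDir() {
			// propagate errors unchanged and leave directory objects alone
			return obj, err
		}
		if isOffline(path) {
			// report offline files with the GLACIER storage class
			obj.StorageClass = types.ObjectStorageClassGlacier
		}
		return obj, nil
	}
}
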
@@ -48,7 +48,6 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
 		Posix:            p,
 		rootfd:           f,
 		rootdir:          rootdir,
-		meta:             metastore,
 		glaciermode:      opts.GlacierMode,
 		disableNoArchive: opts.DisableNoArchive,
 	}, nil