mirror of https://github.com/versity/versitygw.git
Small cleanups.

@@ -83,8 +83,8 @@ type Posix struct {
 var _ backend.Backend = &Posix{}
 
 const (
-	metaTmpDir          = ".sgwtmp"
-	metaTmpMultipartDir = metaTmpDir + "/multipart"
+	MetaTmpDir          = ".sgwtmp"
+	MetaTmpMultipartDir = MetaTmpDir + "/multipart"
 	onameAttr           = "objname"
 	tagHdr              = "X-Amz-Tagging"
 	metaHdr             = "X-Amz-Meta"
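
The constants above are now exported, so code outside the posix package can build the same temp-directory paths (the ScoutFS hunks below do exactly that via posix.MetaTmpMultipartDir). A minimal sketch of such a use; the import path is assumed from the repository layout and the helper name is purely illustrative:

	package scoutfs

	import (
		"fmt"
		"path/filepath"

		"github.com/versity/versitygw/backend/posix" // assumed import path
	)

	// hashedMultipartDir builds <bucket>/.sgwtmp/multipart/<sha256(key)> from
	// the constant now exported by the posix backend.
	func hashedMultipartDir(bucket string, keySum [32]byte) string {
		return filepath.Join(bucket, posix.MetaTmpMultipartDir, fmt.Sprintf("%x", keySum))
	}
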
@@ -433,7 +433,7 @@ func (p *Posix) isBucketEmpty(bucket string) error {
 		return fmt.Errorf("readdir bucket: %w", err)
 	}
 	if err == nil {
-		if len(ents) == 1 && ents[0].Name() != metaTmpDir {
+		if len(ents) == 1 && ents[0].Name() != MetaTmpDir {
 			return s3err.GetAPIError(s3err.ErrVersionedBucketNotEmpty)
 		} else if len(ents) > 1 {
 			return s3err.GetAPIError(s3err.ErrVersionedBucketNotEmpty)
@@ -448,7 +448,7 @@ func (p *Posix) isBucketEmpty(bucket string) error {
 	if errors.Is(err, fs.ErrNotExist) {
 		return s3err.GetAPIError(s3err.ErrNoSuchBucket)
 	}
-	if len(ents) == 1 && ents[0].Name() != metaTmpDir {
+	if len(ents) == 1 && ents[0].Name() != MetaTmpDir {
 		return s3err.GetAPIError(s3err.ErrBucketNotEmpty)
 	} else if len(ents) > 1 {
 		return s3err.GetAPIError(s3err.ErrBucketNotEmpty)
@@ -694,7 +694,7 @@ func (p *Posix) createObjVersion(bucket, key string, size int64, acc auth.Accoun
 
 	versionBucketPath := filepath.Join(p.versioningDir, bucket)
 	versioningKey := filepath.Join(genObjVersionKey(key), versionId)
-	versionTmpPath := filepath.Join(versionBucketPath, metaTmpDir)
+	versionTmpPath := filepath.Join(versionBucketPath, MetaTmpDir)
 	f, err := p.openTmpFile(versionTmpPath, versionBucketPath, versioningKey,
 		size, acc, doFalloc, p.forceNoTmpFile)
 	if err != nil {
@@ -765,7 +765,7 @@ func (p *Posix) ListObjectVersions(ctx context.Context, input *s3.ListObjectVers
 
 	fileSystem := os.DirFS(bucket)
 	results, err := backend.WalkVersions(ctx, fileSystem, prefix, delim, keyMarker, versionIdMarker, max,
-		p.fileToObjVersions(bucket), []string{metaTmpDir})
+		p.fileToObjVersions(bucket), []string{MetaTmpDir})
 	if err != nil {
 		return s3response.ListVersionsResult{}, fmt.Errorf("walk %v: %w", bucket, err)
 	}
@@ -1211,7 +1211,7 @@ func (p *Posix) CreateMultipartUpload(ctx context.Context, mpu s3response.Create
 	objNameSum := sha256.Sum256([]byte(*mpu.Key))
 	// multiple uploads for same object name allowed,
 	// they will all go into the same hashed name directory
-	objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", objNameSum))
+	objdir := filepath.Join(MetaTmpMultipartDir, fmt.Sprintf("%x", objNameSum))
 	tmppath := filepath.Join(bucket, objdir)
 	// the unique upload id is a directory for all of the parts
 	// associated with this specific multipart upload
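
The comments in the hunk above describe the temp layout for in-flight multipart uploads: every upload for a given key shares one hashed-name directory, each upload id gets its own subdirectory under it, and each part is written as a file named by its part number. A self-contained sketch of that layout, with the helper name hypothetical and the path shape taken from the joins in this diff:

	package main

	import (
		"crypto/sha256"
		"fmt"
		"path/filepath"
	)

	// metaTmpMultipartDir mirrors the exported posix.MetaTmpMultipartDir value.
	const metaTmpMultipartDir = ".sgwtmp/multipart"

	// partPathFor shows where a part for a multipart upload lands:
	// <bucket>/.sgwtmp/multipart/<sha256(object key)>/<upload id>/<part number>
	func partPathFor(bucket, key, uploadID string, partNumber int32) string {
		sum := sha256.Sum256([]byte(key))
		return filepath.Join(bucket, metaTmpMultipartDir,
			fmt.Sprintf("%x", sum), uploadID, fmt.Sprintf("%v", partNumber))
	}

	func main() {
		fmt.Println(partPathFor("mybucket", "photos/cat.jpg", "upload-123", 1))
	}
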
@@ -1400,7 +1400,7 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
 		return res, "", err
 	}
 
-	objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
+	objdir := filepath.Join(MetaTmpMultipartDir, fmt.Sprintf("%x", sum))
 
 	checksums, err := p.retrieveChecksums(nil, bucket, filepath.Join(objdir, uploadID))
 	if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
@@ -1502,7 +1502,7 @@ func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.C
 		}
 	}
 
-	f, err := p.openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object,
+	f, err := p.openTmpFile(filepath.Join(bucket, MetaTmpDir), bucket, object,
 		totalsize, acct, skipFalloc, p.forceNoTmpFile)
 	if err != nil {
 		if errors.Is(err, syscall.EDQUOT) {
@@ -1838,7 +1838,7 @@ func numberOfChecksums(part types.CompletedPart) int {
 
 func (p *Posix) checkUploadIDExists(bucket, object, uploadID string) ([32]byte, error) {
 	sum := sha256.Sum256([]byte(object))
-	objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
+	objdir := filepath.Join(bucket, MetaTmpMultipartDir, fmt.Sprintf("%x", sum))
 
 	_, err := os.Stat(filepath.Join(objdir, uploadID))
 	if errors.Is(err, fs.ErrNotExist) {
@@ -1852,7 +1852,7 @@ func (p *Posix) checkUploadIDExists(bucket, object, uploadID string) ([32]byte,
 
 func (p *Posix) retrieveUploadId(bucket, object string) (string, [32]byte, error) {
 	sum := sha256.Sum256([]byte(object))
-	objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
+	objdir := filepath.Join(bucket, MetaTmpMultipartDir, fmt.Sprintf("%x", sum))
 
 	entries, err := os.ReadDir(objdir)
 	if err != nil || len(entries) == 0 {
@@ -2004,7 +2004,7 @@ func (p *Posix) AbortMultipartUpload(_ context.Context, mpu *s3.AbortMultipartUp
 	}
 
 	sum := sha256.Sum256([]byte(object))
-	objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
+	objdir := filepath.Join(bucket, MetaTmpMultipartDir, fmt.Sprintf("%x", sum))
 
 	_, err = os.Stat(filepath.Join(objdir, uploadID))
 	if err != nil {
@@ -2042,7 +2042,7 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl
 	}
 
 	// ignore readdir error and use the empty list returned
-	objs, _ := os.ReadDir(filepath.Join(bucket, metaTmpMultipartDir))
+	objs, _ := os.ReadDir(filepath.Join(bucket, MetaTmpMultipartDir))
 
 	var uploads []s3response.Upload
 	var resultUpds []s3response.Upload
@@ -2062,7 +2062,7 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl
 			continue
 		}
 
-		b, err := p.meta.RetrieveAttribute(nil, bucket, filepath.Join(metaTmpMultipartDir, obj.Name()), onameAttr)
+		b, err := p.meta.RetrieveAttribute(nil, bucket, filepath.Join(MetaTmpMultipartDir, obj.Name()), onameAttr)
 		if err != nil {
 			continue
 		}
@@ -2071,7 +2071,7 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl
 			continue
 		}
 
-		upids, err := os.ReadDir(filepath.Join(bucket, metaTmpMultipartDir, obj.Name()))
+		upids, err := os.ReadDir(filepath.Join(bucket, MetaTmpMultipartDir, obj.Name()))
 		if err != nil {
 			continue
 		}
@@ -2098,7 +2098,7 @@ func (p *Posix) ListMultipartUploads(_ context.Context, mpu *s3.ListMultipartUpl
 				keyMarkerInd = len(uploads)
 			}
 
-			checksum, err := p.retrieveChecksums(nil, bucket, filepath.Join(metaTmpMultipartDir, obj.Name(), uploadID))
+			checksum, err := p.retrieveChecksums(nil, bucket, filepath.Join(MetaTmpMultipartDir, obj.Name(), uploadID))
			if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
 				return lmu, fmt.Errorf("get mp checksum: %w", err)
 			}
@@ -2214,7 +2214,7 @@ func (p *Posix) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3resp
 		return lpr, err
 	}
 
-	objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
+	objdir := filepath.Join(MetaTmpMultipartDir, fmt.Sprintf("%x", sum))
 	tmpdir := filepath.Join(bucket, objdir)
 
 	ents, err := os.ReadDir(filepath.Join(tmpdir, uploadID))
@@ -2351,7 +2351,7 @@ func (p *Posix) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.
 	}
 
 	sum := sha256.Sum256([]byte(object))
-	objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
+	objdir := filepath.Join(MetaTmpMultipartDir, fmt.Sprintf("%x", sum))
 	mpPath := filepath.Join(objdir, uploadID)
 
 	_, err = os.Stat(filepath.Join(bucket, mpPath))
@@ -2525,7 +2525,7 @@ func (p *Posix) UploadPartCopy(ctx context.Context, upi *s3.UploadPartCopyInput)
 	}
 
 	sum := sha256.Sum256([]byte(*upi.Key))
-	objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
+	objdir := filepath.Join(MetaTmpMultipartDir, fmt.Sprintf("%x", sum))
 
 	_, err = os.Stat(filepath.Join(*upi.Bucket, objdir, *upi.UploadId))
 	if errors.Is(err, fs.ErrNotExist) {
@@ -2835,7 +2835,7 @@ func (p *Posix) PutObject(ctx context.Context, po s3response.PutObjectInput) (s3
 		return s3response.PutObjectOutput{}, fmt.Errorf("stat object: %w", err)
 	}
 
-	f, err := p.openTmpFile(filepath.Join(*po.Bucket, metaTmpDir),
+	f, err := p.openTmpFile(filepath.Join(*po.Bucket, MetaTmpDir),
 		*po.Bucket, *po.Key, contentLength, acct, doFalloc, p.forceNoTmpFile)
 	if err != nil {
 		if errors.Is(err, syscall.EDQUOT) {
@@ -3189,7 +3189,7 @@ func (p *Posix) DeleteObject(ctx context.Context, input *s3.DeleteObjectInput) (
 		acct = auth.Account{}
 	}
 
-	f, err := p.openTmpFile(filepath.Join(bucket, metaTmpDir),
+	f, err := p.openTmpFile(filepath.Join(bucket, MetaTmpDir),
 		bucket, object, srcObjVersion.Size(), acct, doFalloc,
 		p.forceNoTmpFile)
 	if err != nil {
@@ -3646,7 +3646,7 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
 			return nil, err
 		}
 
-		ents, err := os.ReadDir(filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum), uploadId))
+		ents, err := os.ReadDir(filepath.Join(bucket, MetaTmpMultipartDir, fmt.Sprintf("%x", sum), uploadId))
 		if errors.Is(err, fs.ErrNotExist) {
 			return nil, s3err.GetAPIError(s3err.ErrNoSuchKey)
 		}
@@ -3654,7 +3654,7 @@ func (p *Posix) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.
 			return nil, fmt.Errorf("read parts: %w", err)
 		}
 
-		partPath := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum), uploadId, fmt.Sprintf("%v", *input.PartNumber))
+		partPath := filepath.Join(MetaTmpMultipartDir, fmt.Sprintf("%x", sum), uploadId, fmt.Sprintf("%v", *input.PartNumber))
 
 		part, err := os.Stat(filepath.Join(bucket, partPath))
 		if errors.Is(err, fs.ErrNotExist) {
@@ -4238,7 +4238,7 @@ func (p *Posix) ListObjectsParametrized(ctx context.Context, input *s3.ListObjec
 
 	fileSystem := os.DirFS(bucket)
 	results, err := backend.Walk(ctx, fileSystem, prefix, delim, marker, maxkeys,
-		customFileToObj(bucket, true), []string{metaTmpDir})
+		customFileToObj(bucket, true), []string{MetaTmpDir})
 	if err != nil {
 		return s3response.ListObjectsResult{}, fmt.Errorf("walk %v: %w", bucket, err)
 	}
@@ -4399,7 +4399,7 @@ func (p *Posix) ListObjectsV2Parametrized(ctx context.Context, input *s3.ListObj
 
 	fileSystem := os.DirFS(bucket)
 	results, err := backend.Walk(ctx, fileSystem, prefix, delim, marker, maxkeys,
-		customFileToObj(bucket, fetchOwner), []string{metaTmpDir})
+		customFileToObj(bucket, fetchOwner), []string{MetaTmpDir})
 	if err != nil {
 		return s3response.ListObjectsV2Result{}, fmt.Errorf("walk %v: %w", bucket, err)
 	}

@@ -69,11 +69,6 @@ type ScoutFS struct {
 var _ backend.Backend = &ScoutFS{}
 
-const (
-	metaTmpDir          = ".sgwtmp"
-	metaTmpMultipartDir = metaTmpDir + "/multipart"
-)
-
 var (
 	stageComplete      = "ongoing-request=\"false\", expiry-date=\"Fri, 2 Dec 2050 00:00:00 GMT\""
 	stageInProgress    = "true"
 	stageNotInProgress = "false"
@@ -123,7 +118,7 @@ func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s
 	sum := sha256.Sum256([]byte(*input.Key))
 	partPath := filepath.Join(
 		*input.Bucket,                        // bucket
-		metaTmpMultipartDir,                  // temp multipart dir
+		posix.MetaTmpMultipartDir,            // temp multipart dir
 		fmt.Sprintf("%x", sum),               // hashed objname
 		*input.UploadId,                      // upload id
 		fmt.Sprintf("%v", *input.PartNumber), // part number
@@ -239,14 +234,23 @@ func (s *ScoutFS) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.
 }
 
 func (s *ScoutFS) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) {
-	return s.Posix.ListObjectsParametrized(ctx, input, s.fileToObj)
+	if s.glaciermode {
+		return s.Posix.ListObjectsParametrized(ctx, input, s.glacierFileToObj)
+	} else {
+		return s.Posix.ListObjects(ctx, input)
+	}
 }
 
 func (s *ScoutFS) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input) (s3response.ListObjectsV2Result, error) {
-	return s.Posix.ListObjectsV2Parametrized(ctx, input, s.fileToObj)
+	if s.glaciermode {
+		return s.Posix.ListObjectsV2Parametrized(ctx, input, s.glacierFileToObj)
+	} else {
+		return s.Posix.ListObjectsV2(ctx, input)
+	}
 }
 
-func (s *ScoutFS) fileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
+// FileToObj function for ListObject calls that adds a Glacier storage class if the file is offline
+func (s *ScoutFS) glacierFileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
 	posixFileToObj := s.Posix.FileToObj(bucket, fetchOwner)
 
 	return func(path string, d fs.DirEntry) (s3response.Object, error) {
@@ -255,19 +259,17 @@ func (s *ScoutFS) fileToObj(bucket string, fetchOwner bool) backend.GetObjFunc {
 			return res, err
 		}
 		objPath := filepath.Join(bucket, path)
-		if s.glaciermode {
-			// Check if there are any offline exents associated with this file.
-			// If so, we will return the Glacier storage class
-			st, err := statMore(objPath)
-			if errors.Is(err, fs.ErrNotExist) {
-				return s3response.Object{}, backend.ErrSkipObj
-			}
-			if err != nil {
-				return s3response.Object{}, fmt.Errorf("stat more: %w", err)
-			}
-			if st.Offline_blocks != 0 {
-				res.StorageClass = types.ObjectStorageClassGlacier
-			}
-		}
+		// Check if there are any offline exents associated with this file.
+		// If so, we will return the Glacier storage class
+		st, err := statMore(objPath)
+		if errors.Is(err, fs.ErrNotExist) {
+			return s3response.Object{}, backend.ErrSkipObj
+		}
+		if err != nil {
+			return s3response.Object{}, fmt.Errorf("stat more: %w", err)
+		}
+		if st.Offline_blocks != 0 {
+			res.StorageClass = types.ObjectStorageClassGlacier
+		}
 		return res, nil
 	}
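
The new glacierFileToObj keeps a simple wrapper pattern: delegate to the posix converter, then post-process the returned object. When glaciermode is off, ListObjects and ListObjectsV2 skip the wrapper entirely and fall back to the plain posix listing, so the extra statMore call is only paid when Glacier reporting is wanted. A generic sketch of that wrapper shape; the GetObjFunc signature is taken from the diff, while the function names and import paths here are assumptions for illustration only:

	package scoutfs

	import (
		"io/fs"

		"github.com/versity/versitygw/backend"    // assumed import path
		"github.com/versity/versitygw/s3response" // assumed import path
	)

	// wrapObjFunc runs an existing per-entry converter and then lets the caller
	// decorate the result (e.g. overriding StorageClass for offline files).
	func wrapObjFunc(inner backend.GetObjFunc, decorate func(*s3response.Object)) backend.GetObjFunc {
		return func(path string, d fs.DirEntry) (s3response.Object, error) {
			obj, err := inner(path, d)
			if err != nil {
				return obj, err
			}
			decorate(&obj)
			return obj, nil
		}
	}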