DRY of scoutfs integration, alignment testing for scoutfs.MoveData

This commit is contained in:
Ondrej Palkovsky
2025-08-07 18:20:44 +02:00
parent e62337f055
commit 936239b619
4 changed files with 36 additions and 664 deletions

View File

@@ -39,6 +39,7 @@ import (
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/s3api/debuglogger"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
@@ -1360,6 +1361,10 @@ func getPartChecksum(algo types.ChecksumAlgorithm, part types.CompletedPart) str
}
func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
return p.CompleteMultipartUploadWithCopy(ctx, input, nil)
}
func (p *Posix) CompleteMultipartUploadWithCopy(ctx context.Context, input *s3.CompleteMultipartUploadInput, customMove func(from *os.File, to *os.File) error) (s3response.CompleteMultipartUploadResult, string, error) {
acct, ok := ctx.Value("account").(auth.Account)
if !ok {
acct = auth.Account{}
@@ -1550,7 +1555,16 @@ func (p *Posix) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteM
}
}
_, err = io.Copy(f.File(), rdr)
if customMove != nil {
err = customMove(pf, f.File())
if err != nil {
// Fall back to the standard copy
debuglogger.Logf("Custom data block move failed (%v), falling back to io.Copy()", err)
_, err = io.Copy(f.File(), rdr)
}
} else {
_, err = io.Copy(f.File(), rdr)
}
pf.Close()
if err != nil {
if errors.Is(err, syscall.EDQUOT) {
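
The customMove hook added here lets any backend that embeds Posix substitute its own data-move primitive during multipart completion, with io.Copy kept as the fallback when the hook errors. A minimal sketch of a caller, assuming a hypothetical MyBackend type and fastMove primitive (neither is part of this commit):

package mybackend

import (
	"context"
	"errors"
	"os"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/versity/versitygw/backend/posix"
	"github.com/versity/versitygw/s3response"
)

// MyBackend embeds Posix and supplies a custom move for completed parts.
type MyBackend struct {
	*posix.Posix
}

// fastMove stands in for a filesystem-specific zero-copy primitive. Returning
// an error is safe: posix logs it and falls back to io.Copy of the part data.
func fastMove(from, to *os.File) error {
	return errors.New("fastMove: not implemented")
}

// CompleteMultipartUpload overrides the embedded posix method, routing part
// data through fastMove via the new hook.
func (b *MyBackend) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
	return b.Posix.CompleteMultipartUploadWithCopy(ctx, input, fastMove)
}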

View File

@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"io/fs"
"net/http"
"os"
"path/filepath"
"strings"
@@ -30,11 +29,9 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/pkg/xattr"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
"github.com/versity/versitygw/s3api/utils"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3response"
)
@@ -67,19 +64,6 @@ type ScoutFS struct {
// RestoreObject: add batch stage request to file
glaciermode bool
// chownuid/gid enable chowning of files to the account uid/gid
// when objects are uploaded
chownuid bool
chowngid bool
// euid/egid are the effective uid/gid of the running versitygw process
// used to determine if chowning is needed
euid int
egid int
// newDirPerm is the permissions to use when creating new directories
newDirPerm fs.FileMode
// disableNoArchive is used to disable setting scoutam noarchive flag
// on multipart parts. This is enabled by default to prevent archive
// copies of temporary multipart parts.
@@ -91,19 +75,8 @@ var _ backend.Backend = &ScoutFS{}
const (
metaTmpDir = ".sgwtmp"
metaTmpMultipartDir = metaTmpDir + "/multipart"
tagHdr = "X-Amz-Tagging"
metaHdr = "X-Amz-Meta"
contentTypeHdr = "content-type"
contentEncHdr = "content-encoding"
contentLangHdr = "content-language"
contentDispHdr = "content-disposition"
cacheCtrlHdr = "cache-control"
expiresHdr = "expires"
emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e"
etagkey = "etag"
checksumsKey = "checksums"
objectRetentionKey = "object-retention"
objectLegalHoldKey = "object-legal-hold"
)
var (
@@ -146,25 +119,6 @@ func (*ScoutFS) String() string {
return "ScoutFS Gateway"
}
// getChownIDs returns the uid and gid that should be used for chowning
// the object to the account uid/gid. It also returns a boolean indicating
// if chowning is needed.
func (s *ScoutFS) getChownIDs(acct auth.Account) (int, int, bool) {
uid := s.euid
gid := s.egid
var needsChown bool
if s.chownuid && acct.UserID != s.euid {
uid = acct.UserID
needsChown = true
}
if s.chowngid && acct.GroupID != s.egid {
gid = acct.GroupID
needsChown = true
}
return uid, gid, needsChown
}
func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
out, err := s.Posix.UploadPart(ctx, input)
if err != nil {
@@ -194,443 +148,7 @@ func (s *ScoutFS) UploadPart(ctx context.Context, input *s3.UploadPartInput) (*s
// ioctl to not have to read and copy the part data to the final object. This
// saves a read and write cycle for all multipart uploads.
func (s *ScoutFS) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (s3response.CompleteMultipartUploadResult, string, error) {
acct, ok := ctx.Value("account").(auth.Account)
if !ok {
acct = auth.Account{}
}
var res s3response.CompleteMultipartUploadResult
if input.Key == nil {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchKey)
}
if input.UploadId == nil {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchUpload)
}
if input.MultipartUpload == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidRequest)
}
bucket := *input.Bucket
object := *input.Key
uploadID := *input.UploadId
parts := input.MultipartUpload.Parts
_, err := os.Stat(bucket)
if errors.Is(err, fs.ErrNotExist) {
return res, "", s3err.GetAPIError(s3err.ErrNoSuchBucket)
}
if err != nil {
return res, "", fmt.Errorf("stat bucket: %w", err)
}
sum, err := s.checkUploadIDExists(bucket, object, uploadID)
if err != nil {
return res, "", err
}
objdir := filepath.Join(metaTmpMultipartDir, fmt.Sprintf("%x", sum))
checksums, err := s.retrieveChecksums(nil, bucket, filepath.Join(objdir, uploadID))
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get mp checksums: %w", err)
}
// ChecksumType should be the same as specified on CreateMultipartUpload
if input.ChecksumType != "" && checksums.Type != input.ChecksumType {
checksumType := checksums.Type
if checksumType == "" {
checksumType = types.ChecksumType("null")
}
return res, "", s3err.GetChecksumTypeMismatchOnMpErr(checksumType)
}
// check all parts ok
last := len(parts) - 1
var totalsize int64
// The initial value is the lower limit of partNumber: 0
var partNumber int32
for i, part := range parts {
if part.PartNumber == nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
}
if *part.PartNumber < 1 {
return res, "", s3err.GetAPIError(s3err.ErrInvalidCompleteMpPartNumber)
}
if *part.PartNumber <= partNumber {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPartOrder)
}
partNumber = *part.PartNumber
partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
fullPartPath := filepath.Join(bucket, partObjPath)
fi, err := os.Lstat(fullPartPath)
if err != nil {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
}
totalsize += fi.Size()
// all parts except the last need to be greater than
// the minimum allowed size (5 MiB)
if i < last && fi.Size() < backend.MinPartSize {
return res, "", s3err.GetAPIError(s3err.ErrEntityTooSmall)
}
b, err := s.meta.RetrieveAttribute(nil, bucket, partObjPath, etagkey)
etag := string(b)
if err != nil {
etag = ""
}
if parts[i].ETag == nil || !backend.AreEtagsSame(etag, *parts[i].ETag) {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
}
partChecksum, err := s.retrieveChecksums(nil, bucket, partObjPath)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get part checksum: %w", err)
}
// If a checksum has been provided on mp initialization
err = validatePartChecksum(partChecksum, part)
if err != nil {
return res, "", err
}
}
if input.MpuObjectSize != nil && totalsize != *input.MpuObjectSize {
return res, "", s3err.GetIncorrectMpObjectSizeErr(totalsize, *input.MpuObjectSize)
}
// use totalsize=0 because we won't be writing to the file, only moving
// extents around, so we don't want to fallocate this.
f, err := s.openTmpFile(filepath.Join(bucket, metaTmpDir), bucket, object, 0, acct)
if err != nil {
if errors.Is(err, syscall.EDQUOT) {
return res, "", s3err.GetAPIError(s3err.ErrQuotaExceeded)
}
return res, "", fmt.Errorf("open temp file: %w", err)
}
defer f.cleanup()
for _, part := range parts {
if part.PartNumber == nil || *part.PartNumber < 1 {
return res, "", s3err.GetAPIError(s3err.ErrInvalidPart)
}
partObjPath := filepath.Join(objdir, uploadID, fmt.Sprintf("%v", *part.PartNumber))
fullPartPath := filepath.Join(bucket, partObjPath)
pf, err := os.Open(fullPartPath)
if err != nil {
return res, "", fmt.Errorf("open part %v: %v", *part.PartNumber, err)
}
// scoutfs move data is a metadata-only operation that moves the data
// extent references from the source, appending to the destination.
// this needs to be 4K aligned.
err = moveData(pf, f.File())
pf.Close()
if err != nil {
return res, "", fmt.Errorf("move blocks part %v: %v", *part.PartNumber, err)
}
}
userMetaData := make(map[string]string)
upiddir := filepath.Join(objdir, uploadID)
objMeta := s.loadUserMetaData(bucket, upiddir, userMetaData)
err = s.storeObjectMetadata(f.File(), bucket, object, objMeta)
if err != nil {
return res, "", err
}
objname := filepath.Join(bucket, object)
dir := filepath.Dir(objname)
if dir != "" {
uid, gid, doChown := s.getChownIDs(acct)
err = backend.MkdirAll(dir, uid, gid, doChown, s.newDirPerm)
if err != nil {
return res, "", err
}
}
for k, v := range userMetaData {
err = s.meta.StoreAttribute(f.File(), bucket, object, fmt.Sprintf("%v.%v", metaHdr, k), []byte(v))
if err != nil {
return res, "", fmt.Errorf("set user attr %q: %w", k, err)
}
}
// load and set tagging
tagging, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, tagHdr)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object tagging: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, tagHdr, tagging)
if err != nil {
return res, "", fmt.Errorf("set object tagging: %w", err)
}
}
// load and set legal hold
lHold, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, objectLegalHoldKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object legal hold: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, objectLegalHoldKey, lHold)
if err != nil {
return res, "", fmt.Errorf("set object legal hold: %w", err)
}
}
// load and set retention
ret, err := s.meta.RetrieveAttribute(nil, bucket, upiddir, objectRetentionKey)
if err != nil && !errors.Is(err, meta.ErrNoSuchKey) {
return res, "", fmt.Errorf("get object retention: %w", err)
}
if err == nil {
err := s.meta.StoreAttribute(f.File(), bucket, object, objectRetentionKey, ret)
if err != nil {
return res, "", fmt.Errorf("set object retention: %w", err)
}
}
// Calculate s3 compatible md5sum for complete multipart.
s3MD5 := backend.GetMultipartMD5(parts)
err = s.meta.StoreAttribute(f.File(), bucket, object, etagkey, []byte(s3MD5))
if err != nil {
return res, "", fmt.Errorf("set etag attr: %w", err)
}
err = f.link()
if err != nil {
return res, "", fmt.Errorf("link object in namespace: %w", err)
}
// cleanup tmp dirs
os.RemoveAll(filepath.Join(bucket, upiddir))
// use Remove for objdir in case there are still other uploads
// for same object name outstanding
os.Remove(filepath.Join(bucket, objdir))
return s3response.CompleteMultipartUploadResult{
Bucket: &bucket,
ETag: &s3MD5,
Key: &object,
}, "", nil
}
func (s *ScoutFS) storeObjectMetadata(f *os.File, bucket, object string, m objectMetadata) error {
if getString(m.ContentType) != "" {
err := s.meta.StoreAttribute(f, bucket, object, contentTypeHdr, []byte(*m.ContentType))
if err != nil {
return fmt.Errorf("set content-type: %w", err)
}
}
if getString(m.ContentEncoding) != "" {
err := s.meta.StoreAttribute(f, bucket, object, contentEncHdr, []byte(*m.ContentEncoding))
if err != nil {
return fmt.Errorf("set content-encoding: %w", err)
}
}
if getString(m.ContentDisposition) != "" {
err := s.meta.StoreAttribute(f, bucket, object, contentDispHdr, []byte(*m.ContentDisposition))
if err != nil {
return fmt.Errorf("set content-disposition: %w", err)
}
}
if getString(m.ContentLanguage) != "" {
err := s.meta.StoreAttribute(f, bucket, object, contentLangHdr, []byte(*m.ContentLanguage))
if err != nil {
return fmt.Errorf("set content-language: %w", err)
}
}
if getString(m.CacheControl) != "" {
err := s.meta.StoreAttribute(f, bucket, object, cacheCtrlHdr, []byte(*m.CacheControl))
if err != nil {
return fmt.Errorf("set cache-control: %w", err)
}
}
if getString(m.Expires) != "" {
err := s.meta.StoreAttribute(f, bucket, object, expiresHdr, []byte(*m.Expires))
if err != nil {
return fmt.Errorf("set cache-control: %w", err)
}
}
return nil
}
func validatePartChecksum(checksum s3response.Checksum, part types.CompletedPart) error {
n := numberOfChecksums(part)
if n > 1 {
return s3err.GetAPIError(s3err.ErrInvalidChecksumPart)
}
if checksum.Algorithm == "" {
if n != 0 {
return s3err.GetAPIError(s3err.ErrInvalidPart)
}
return nil
}
algo := checksum.Algorithm
if n == 0 {
return s3err.APIError{
Code: "InvalidRequest",
Description: fmt.Sprintf("The upload was created using a %v checksum. The complete request must include the checksum for each part. It was missing for part %v in the request.", strings.ToLower(string(algo)), *part.PartNumber),
HTTPStatusCode: http.StatusBadRequest,
}
}
for _, cs := range []struct {
checksum *string
expectedChecksum string
algo types.ChecksumAlgorithm
}{
{part.ChecksumCRC32, getString(checksum.CRC32), types.ChecksumAlgorithmCrc32},
{part.ChecksumCRC32C, getString(checksum.CRC32C), types.ChecksumAlgorithmCrc32c},
{part.ChecksumSHA1, getString(checksum.SHA1), types.ChecksumAlgorithmSha1},
{part.ChecksumSHA256, getString(checksum.SHA256), types.ChecksumAlgorithmSha256},
{part.ChecksumCRC64NVME, getString(checksum.CRC64NVME), types.ChecksumAlgorithmCrc64nvme},
} {
if cs.checksum == nil {
continue
}
if !utils.IsValidChecksum(*cs.checksum, cs.algo) {
return s3err.GetAPIError(s3err.ErrInvalidChecksumPart)
}
if *cs.checksum != cs.expectedChecksum {
if algo == cs.algo {
return s3err.GetAPIError(s3err.ErrInvalidPart)
}
return s3err.APIError{
Code: "BadDigest",
Description: fmt.Sprintf("The %v you specified for part %v did not match what we received.", strings.ToLower(string(cs.algo)), *part.PartNumber),
HTTPStatusCode: http.StatusBadRequest,
}
}
}
return nil
}
func numberOfChecksums(part types.CompletedPart) int {
counter := 0
if getString(part.ChecksumCRC32) != "" {
counter++
}
if getString(part.ChecksumCRC32C) != "" {
counter++
}
if getString(part.ChecksumSHA1) != "" {
counter++
}
if getString(part.ChecksumSHA256) != "" {
counter++
}
if getString(part.ChecksumCRC64NVME) != "" {
counter++
}
return counter
}
func (s *ScoutFS) checkUploadIDExists(bucket, object, uploadID string) ([32]byte, error) {
sum := sha256.Sum256([]byte(object))
objdir := filepath.Join(bucket, metaTmpMultipartDir, fmt.Sprintf("%x", sum))
_, err := os.Stat(filepath.Join(objdir, uploadID))
if errors.Is(err, fs.ErrNotExist) {
return [32]byte{}, s3err.GetAPIError(s3err.ErrNoSuchUpload)
}
if err != nil {
return [32]byte{}, fmt.Errorf("stat upload: %w", err)
}
return sum, nil
}
type objectMetadata struct {
ContentType *string
ContentEncoding *string
ContentDisposition *string
ContentLanguage *string
CacheControl *string
Expires *string
}
// fill out the user metadata map with the metadata for the object
// and return the content type and encoding
func (s *ScoutFS) loadUserMetaData(bucket, object string, m map[string]string) objectMetadata {
ents, err := s.meta.ListAttributes(bucket, object)
if err != nil || len(ents) == 0 {
return objectMetadata{}
}
for _, e := range ents {
if !isValidMeta(e) {
continue
}
b, err := s.meta.RetrieveAttribute(nil, bucket, object, e)
if err != nil {
continue
}
if b == nil {
m[strings.TrimPrefix(e, fmt.Sprintf("%v.", metaHdr))] = ""
continue
}
m[strings.TrimPrefix(e, fmt.Sprintf("%v.", metaHdr))] = string(b)
}
var result objectMetadata
b, err := s.meta.RetrieveAttribute(nil, bucket, object, contentTypeHdr)
if err == nil {
result.ContentType = backend.GetPtrFromString(string(b))
}
b, err = s.meta.RetrieveAttribute(nil, bucket, object, contentEncHdr)
if err == nil {
result.ContentEncoding = backend.GetPtrFromString(string(b))
}
b, err = s.meta.RetrieveAttribute(nil, bucket, object, contentDispHdr)
if err == nil {
result.ContentDisposition = backend.GetPtrFromString(string(b))
}
b, err = s.meta.RetrieveAttribute(nil, bucket, object, contentLangHdr)
if err == nil {
result.ContentLanguage = backend.GetPtrFromString(string(b))
}
b, err = s.meta.RetrieveAttribute(nil, bucket, object, cacheCtrlHdr)
if err == nil {
result.CacheControl = backend.GetPtrFromString(string(b))
}
b, err = s.meta.RetrieveAttribute(nil, bucket, object, expiresHdr)
if err == nil {
result.Expires = backend.GetPtrFromString(string(b))
}
return result
}
func isValidMeta(val string) bool {
if strings.HasPrefix(val, metaHdr) {
return true
}
if strings.EqualFold(val, "Expires") {
return true
}
return false
return s.Posix.CompleteMultipartUploadWithCopy(ctx, input, moveData)
}
func (s *ScoutFS) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
@@ -956,13 +474,6 @@ func (s *ScoutFS) RestoreObject(_ context.Context, input *s3.RestoreObjectInput)
return nil
}
func getString(str *string) string {
if str == nil {
return ""
}
return *str
}
func isStaging(objname string) (bool, error) {
b, err := xattr.Get(objname, flagskey)
if err != nil && !isNoAttr(err) {

View File

@@ -17,22 +17,13 @@
package scoutfs
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"strconv"
"syscall"
"golang.org/x/sys/unix"
"github.com/versity/scoutfs-go"
"github.com/versity/versitygw/auth"
"github.com/versity/versitygw/backend"
"github.com/versity/versitygw/backend/meta"
"github.com/versity/versitygw/backend/posix"
"github.com/versity/versitygw/s3err"
"github.com/versity/versitygw/s3api/debuglogger"
)
func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
@@ -58,147 +49,31 @@ func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
rootfd: f,
rootdir: rootdir,
meta: metastore,
chownuid: opts.ChownUID,
chowngid: opts.ChownGID,
glaciermode: opts.GlacierMode,
newDirPerm: opts.NewDirPerm,
disableNoArchive: opts.DisableNoArchive,
}, nil
}
const procfddir = "/proc/self/fd"
type tmpfile struct {
f *os.File
bucket string
objname string
size int64
needsChown bool
uid int
gid int
newDirPerm fs.FileMode
}
var (
defaultFilePerm uint32 = 0644
)
func (s *ScoutFS) openTmpFile(dir, bucket, obj string, size int64, acct auth.Account) (*tmpfile, error) {
uid, gid, doChown := s.getChownIDs(acct)
// O_TMPFILE allows for a file handle to an unnamed file in the filesystem.
// This can help reduce contention within the namespace (parent directories),
// and the kernel automatically cleans up the inode on close if we never
// link this file descriptor into the namespace.
fd, err := unix.Open(dir, unix.O_RDWR|unix.O_TMPFILE|unix.O_CLOEXEC, defaultFilePerm)
if err != nil {
if errors.Is(err, syscall.EROFS) {
return nil, s3err.GetAPIError(s3err.ErrMethodNotAllowed)
}
return nil, err
}
// for O_TMPFILE, the filename is /proc/self/fd/<fd>, to be used
// later to link the file into the namespace
f := os.NewFile(uintptr(fd), filepath.Join(procfddir, strconv.Itoa(fd)))
tmp := &tmpfile{
f: f,
bucket: bucket,
objname: obj,
size: size,
needsChown: doChown,
uid: uid,
gid: gid,
newDirPerm: s.newDirPerm,
}
if doChown {
err := f.Chown(uid, gid)
if err != nil {
return nil, fmt.Errorf("set temp file ownership: %w", err)
}
}
return tmp, nil
}
func (tmp *tmpfile) link() error {
// We use Linkat/Rename as the atomic operation for object puts. The
// upload is written to a temp (or unnamed/O_TMPFILE) file so as not to
// conflict with any other simultaneous uploads. The final operation is to
// move the temp file into place for the object. This ensures last-completed-
// upload-wins object semantics, not some combination of writes from
// simultaneous uploads.
objPath := filepath.Join(tmp.bucket, tmp.objname)
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
dir := filepath.Dir(objPath)
err = backend.MkdirAll(dir, tmp.uid, tmp.gid, tmp.needsChown, tmp.newDirPerm)
if err != nil {
return fmt.Errorf("make parent dir: %w", err)
}
procdir, err := os.Open(procfddir)
if err != nil {
return fmt.Errorf("open proc dir: %w", err)
}
defer procdir.Close()
dirf, err := os.Open(dir)
if err != nil {
return fmt.Errorf("open parent dir: %w", err)
}
defer dirf.Close()
for {
err = unix.Linkat(int(procdir.Fd()), filepath.Base(tmp.f.Name()),
int(dirf.Fd()), filepath.Base(objPath), unix.AT_SYMLINK_FOLLOW)
if errors.Is(err, fs.ErrExist) {
err := os.Remove(objPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("remove stale path: %w", err)
}
continue
}
if err != nil {
return fmt.Errorf("link tmpfile: %w", err)
}
break
}
err = tmp.f.Close()
if err != nil {
return fmt.Errorf("close tmpfile: %w", err)
}
return nil
}
func (tmp *tmpfile) Write(b []byte) (int, error) {
if int64(len(b)) > tmp.size {
return 0, fmt.Errorf("write exceeds content length %v", tmp.size)
}
n, err := tmp.f.Write(b)
tmp.size -= int64(n)
return n, err
}
func (tmp *tmpfile) cleanup() {
tmp.f.Close()
}
func (tmp *tmpfile) File() *os.File {
return tmp.f
}
func moveData(from *os.File, to *os.File) error {
return scoutfs.MoveData(from, to)
// May fail if the files are not 4K aligned; check for alignment
ffi, err := from.Stat()
if err != nil {
return fmt.Errorf("stat from: %v", err)
}
tfi, err := to.Stat()
if err != nil {
return fmt.Errorf("stat to: %v", err)
}
if ffi.Size()%4096 != 0 || tfi.Size()%4096 != 0 {
return os.ErrInvalid
}
err = scoutfs.MoveData(from, to)
if err != nil {
debuglogger.Logf("ScoutFs MoveData failed: %v", err)
}
return err
}
func statMore(path string) (stat, error) {
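
The alignment guard above makes moveData fail fast with os.ErrInvalid whenever either file size is not a multiple of 4096 bytes, so the posix side falls back to io.Copy instead of issuing a doomed scoutfs ioctl. A sketch of a unit test covering just the unaligned path, runnable without a scoutfs mount (mkFile is a hypothetical helper; the test belongs in the linux build of the package):

package scoutfs

import (
	"errors"
	"os"
	"path/filepath"
	"testing"
)

// mkFile creates a file of n bytes in dir and returns it opened
// (hypothetical test helper, not part of this commit).
func mkFile(t *testing.T, dir, name string, n int64) *os.File {
	t.Helper()
	f, err := os.Create(filepath.Join(dir, name))
	if err != nil {
		t.Fatal(err)
	}
	if err := f.Truncate(n); err != nil {
		t.Fatal(err)
	}
	return f
}

func TestMoveDataRejectsUnaligned(t *testing.T) {
	dir := t.TempDir()
	from := mkFile(t, dir, "from", 4096+1) // not a 4 KiB multiple
	defer from.Close()
	to := mkFile(t, dir, "to", 0) // size 0 is trivially aligned
	defer to.Close()

	// the alignment check runs before the scoutfs ioctl, so os.ErrInvalid
	// is expected even on a plain local filesystem
	if err := moveData(from, to); !errors.Is(err, os.ErrInvalid) {
		t.Fatalf("expected os.ErrInvalid for unaligned source, got %v", err)
	}
}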

View File

@@ -20,44 +20,16 @@ import (
"errors"
"fmt"
"os"
"github.com/versity/versitygw/auth"
)
func New(rootdir string, opts ScoutfsOpts) (*ScoutFS, error) {
return nil, fmt.Errorf("scoutfs only available on linux")
}
type tmpfile struct{}
var (
errNotSupported = errors.New("not supported")
)
func (s *ScoutFS) openTmpFile(_, _, _ string, _ int64, _ auth.Account) (*tmpfile, error) {
// make these look used for static check
_ = s.chownuid
_ = s.chowngid
_ = s.euid
_ = s.egid
return nil, errNotSupported
}
func (tmp *tmpfile) link() error {
return errNotSupported
}
func (tmp *tmpfile) Write(b []byte) (int, error) {
return 0, errNotSupported
}
func (tmp *tmpfile) cleanup() {
}
func (tmp *tmpfile) File() *os.File {
return nil
}
func moveData(_, _ *os.File) error {
return errNotSupported
}
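
The remaining stub keeps the package compiling on non-Linux platforms via Go build constraints; a minimal sketch of the pattern, assuming the conventional //go:build file split (the build line and file layout are not shown in this diff):

//go:build !linux

package scoutfs

import (
	"errors"
	"os"
)

var errNotSupported = errors.New("not supported")

// moveData is a stub so shared code compiles everywhere; the real
// extent-move implementation lives in the linux build of this package.
func moveData(_, _ *os.File) error {
	return errNotSupported
}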