feat: Make STFS API public

Also: Happy new year :)
This commit is contained in:
Felicitas Pojtinger
2021-12-31 23:57:21 +01:00
parent d52c45c603
commit 5089333d13
32 changed files with 214 additions and 222 deletions

View File

@@ -1,19 +0,0 @@
package cache
import (
"github.com/pojntfx/stfs/pkg/config"
)
const (
	// Read-cache backends for the on-tape filesystem
	FileSystemCacheTypeMemory = "memory" // Cache read file contents in memory
	FileSystemCacheTypeDir    = "dir"    // Cache read file contents in a directory on disk

	// Write-buffer backends used while archiving
	WriteCacheTypeMemory = "memory" // Buffer writes in memory
	WriteCacheTypeFile   = "file"   // Buffer writes in a temporary file
)

var (
	// KnownFileSystemCacheTypes lists every accepted file system cache type,
	// including config.NoneKey to disable caching.
	KnownFileSystemCacheTypes = []string{config.NoneKey, FileSystemCacheTypeMemory, FileSystemCacheTypeDir}

	// KnownWriteCacheTypes lists every accepted write cache type.
	KnownWriteCacheTypes = []string{WriteCacheTypeMemory, WriteCacheTypeFile}
)

View File

@@ -1,11 +0,0 @@
package cache
import "errors"
var (
	// ErrFileSystemCacheTypeUnsupported is returned when a file system cache
	// type is recognized nowhere in this implementation.
	ErrFileSystemCacheTypeUnsupported = errors.New("file system cache type unsupported")
	// ErrFileSystemCacheTypeUnknown is returned when a file system cache type
	// is not in the list of known types.
	ErrFileSystemCacheTypeUnknown = errors.New("file system cache type unknown")

	// ErrWriteCacheTypeUnsupported is returned when a write cache type is not
	// supported by this implementation.
	ErrWriteCacheTypeUnsupported = errors.New("write cache type unsupported")
	// ErrWriteCacheTypeUnknown is returned when a write cache type is not in
	// the list of known types.
	ErrWriteCacheTypeUnknown = errors.New("write cache type unknown")
)

View File

@@ -1,49 +0,0 @@
package cache
import (
"os"
"time"
"github.com/pojntfx/stfs/internal/pathext"
"github.com/pojntfx/stfs/pkg/config"
"github.com/spf13/afero"
)
// NewCacheFilesystem wraps base (scoped to root unless root is the
// filesystem root) in a read cache of the requested type. For
// FileSystemCacheTypeDir the cache directory is wiped and re-created; for
// config.NoneKey no cache is added. Unsupported types yield
// ErrFileSystemCacheTypeUnsupported.
func NewCacheFilesystem(
	base afero.Fs,
	root string,
	cacheType string,
	ttl time.Duration,
	cacheDir string,
) (afero.Fs, error) {
	// Scope the base filesystem to root first, unless root is the FS root
	scoped := base
	if !pathext.IsRoot(root) {
		scoped = afero.NewBasePathFs(base, root)
	}

	switch cacheType {
	case FileSystemCacheTypeMemory:
		return afero.NewCacheOnReadFs(scoped, afero.NewMemMapFs(), ttl), nil
	case FileSystemCacheTypeDir:
		// Start with an empty on-disk cache directory
		if err := os.RemoveAll(cacheDir); err != nil {
			return nil, err
		}

		if err := os.MkdirAll(cacheDir, os.ModePerm); err != nil {
			return nil, err
		}

		return afero.NewCacheOnReadFs(scoped, afero.NewBasePathFs(afero.NewOsFs(), cacheDir), ttl), nil
	case config.NoneKey:
		return scoped, nil
	default:
		return nil, ErrFileSystemCacheTypeUnsupported
	}
}

View File

@@ -1,76 +0,0 @@
package cache
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/mattetti/filebuffer"
"github.com/pojntfx/stfs/internal/fs"
"github.com/spf13/afero"
)
// fileWithSize adapts an afero.File to the write-cache interface by adding a
// Size method backed by Stat.
type fileWithSize struct {
	afero.File
}

// Size returns the current length of the underlying file as reported by
// Stat, or -1 together with the error if Stat fails.
func (f fileWithSize) Size() (int64, error) {
	info, err := f.Stat()
	if err != nil {
		return -1, err
	}

	return info.Size(), nil
}
// filebufferWithSize adapts an in-memory filebuffer.Buffer to the
// write-cache interface by adding Size, Sync and Truncate methods.
type filebufferWithSize struct {
	*filebuffer.Buffer
}

// Size returns the number of bytes currently stored in the buffer.
func (f filebufferWithSize) Size() (int64, error) {
	return int64(f.Buff.Len()), nil
}

// Sync is a no-op: an in-memory buffer has nothing to flush.
func (f filebufferWithSize) Sync() error {
	// No need to sync a in-memory buffer
	return nil
}

// Truncate resizes the buffer to size bytes. Unlike bytes.Buffer.Truncate —
// which panics when size exceeds the buffered length — this grows the buffer
// with zero bytes in that case, mirroring os.File.Truncate semantics.
func (f filebufferWithSize) Truncate(size int64) error {
	if cur := int64(f.Buff.Len()); size > cur {
		// Fixed: previously this always called Buff.Truncate, which panics
		// for size > len; grow with zeros instead
		if _, err := f.Buff.Write(make([]byte, size-cur)); err != nil {
			return err
		}

		return nil
	}

	f.Buff.Truncate(int(size))

	return nil
}
// NewCacheWrite returns a write cache of the given type together with a
// cleanup function that releases its resources.
//
// For WriteCacheTypeMemory the cache is an in-memory buffer; for
// WriteCacheTypeFile it is a temporary file created under <root>/io.
// Unsupported types yield ErrWriteCacheTypeUnsupported.
func NewCacheWrite(
	root string,
	cacheType string,
) (cache fs.WriteCache, cleanup func() error, err error) {
	switch cacheType {
	case WriteCacheTypeMemory:
		buff := &filebufferWithSize{filebuffer.New([]byte{})}

		return buff, func() error {
			// NOTE(review): this only clears the closure's copy of the
			// pointer; the buffer itself is reclaimed by the garbage
			// collector once the caller drops its reference
			buff = nil

			return nil
		}, nil
	case WriteCacheTypeFile:
		// Keep temporary write buffers below <root>/io
		tmpdir := filepath.Join(root, "io")
		if err := os.MkdirAll(tmpdir, os.ModePerm); err != nil {
			return nil, nil, err
		}

		f, err := ioutil.TempFile(tmpdir, "*")
		if err != nil {
			return nil, nil, err
		}

		// Cleanup removes the temporary file from disk
		return fileWithSize{f}, func() error {
			return os.Remove(f.Name())
		}, nil
	default:
		return nil, nil, ErrWriteCacheTypeUnsupported
	}
}

View File

@@ -1,4 +1,4 @@
package compression
package check
import (
"github.com/pojntfx/stfs/pkg/config"

View File

@@ -1,4 +1,4 @@
package encryption
package check
import "github.com/pojntfx/stfs/pkg/config"

View File

@@ -1,16 +1,18 @@
package cache
package check
import "github.com/pojntfx/stfs/pkg/config"
func CheckFileSystemCacheType(cacheType string) error {
cacheTypeIsKnown := false
for _, candidate := range KnownFileSystemCacheTypes {
for _, candidate := range config.KnownFileSystemCacheTypes {
if cacheType == candidate {
cacheTypeIsKnown = true
}
}
if !cacheTypeIsKnown {
return ErrFileSystemCacheTypeUnknown
return config.ErrFileSystemCacheTypeUnknown
}
return nil
@@ -19,14 +21,14 @@ func CheckFileSystemCacheType(cacheType string) error {
func CheckWriteCacheType(cacheType string) error {
cacheTypeIsKnown := false
for _, candidate := range KnownWriteCacheTypes {
for _, candidate := range config.KnownWriteCacheTypes {
if cacheType == candidate {
cacheTypeIsKnown = true
}
}
if !cacheTypeIsKnown {
return ErrWriteCacheTypeUnknown
return config.ErrWriteCacheTypeUnknown
}
return nil

View File

@@ -1,4 +1,4 @@
package keys
package check
import (
"errors"

View File

@@ -1,4 +1,4 @@
package signature
package check
import "github.com/pojntfx/stfs/pkg/config"

View File

@@ -3,7 +3,6 @@ package fs
import (
"bytes"
"database/sql"
"errors"
"io"
"io/fs"
"log"
@@ -13,33 +12,19 @@ import (
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/pkg/cache"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/inventory"
"github.com/pojntfx/stfs/pkg/operations"
"github.com/spf13/afero"
)
var (
ErrIsDirectory = errors.New("is a directory")
)
type WriteCache interface {
io.Closer
io.Reader
io.Seeker
io.Writer
Truncate(size int64) error
Size() (int64, error)
Sync() error
}
type FileFlags struct {
read bool
write bool
Read bool
Write bool
append bool
truncate bool
Append bool
Truncate bool
}
type File struct {
@@ -55,7 +40,7 @@ type File struct {
flags *FileFlags
compressionLevel string
getFileBuffer func() (WriteCache, func() error, error)
getFileBuffer func() (cache.WriteCache, func() error, error)
name string
info os.FileInfo
@@ -65,7 +50,7 @@ type File struct {
readOpReader *ioext.CounterReadCloser
readOpWriter io.WriteCloser
writeBuf WriteCache
writeBuf cache.WriteCache
cleanWriteBuf func() error
onHeader func(hdr *models.Header)
@@ -83,7 +68,7 @@ func NewFile(
flags *FileFlags,
compressionLevel string,
getFileBuffer func() (WriteCache, func() error, error),
getFileBuffer func() (cache.WriteCache, func() error, error),
name string,
info os.FileInfo,
@@ -118,7 +103,7 @@ func (f *File) syncWithoutLocking() error {
})
if f.info.IsDir() {
return ErrIsDirectory
return config.ErrIsDirectory
}
if f.writeBuf != nil {
@@ -261,13 +246,13 @@ func (f *File) enterWriteMode() error {
}
}
if f.flags.truncate {
if f.flags.Truncate {
if err := f.writeBuf.Truncate(0); err != nil {
return err
}
}
if !f.flags.append {
if !f.flags.Append {
if _, err := f.writeBuf.Seek(0, io.SeekStart); err != nil {
return err
}
@@ -285,7 +270,7 @@ func (f *File) seekWithoutLocking(offset int64, whence int) (int64, error) {
})
if f.info.IsDir() {
return -1, ErrIsDirectory
return -1, config.ErrIsDirectory
}
if f.writeBuf != nil {
@@ -305,7 +290,7 @@ func (f *File) seekWithoutLocking(offset int64, whence int) (int64, error) {
case io.SeekEnd:
dst = f.info.Size() - offset
default:
return -1, ErrNotImplemented
return -1, config.ErrNotImplemented
}
if f.readOpReader == nil || f.readOpWriter == nil || dst < int64(f.readOpReader.BytesRead) { // We have to re-open as we can't seek backwards
@@ -426,10 +411,10 @@ func (f *File) Read(p []byte) (n int, err error) {
})
if f.info.IsDir() {
return -1, ErrIsDirectory
return -1, config.ErrIsDirectory
}
if !f.flags.read {
if !f.flags.Read {
return -1, os.ErrPermission
}
@@ -495,10 +480,10 @@ func (f *File) ReadAt(p []byte, off int64) (n int, err error) {
})
if f.info.IsDir() {
return -1, ErrIsDirectory
return -1, config.ErrIsDirectory
}
if !f.flags.read {
if !f.flags.Read {
return -1, os.ErrPermission
}
@@ -531,10 +516,10 @@ func (f *File) Write(p []byte) (n int, err error) {
})
if f.info.IsDir() {
return -1, ErrIsDirectory
return -1, config.ErrIsDirectory
}
if !f.flags.write {
if !f.flags.Write {
return -1, os.ErrPermission
}
@@ -563,10 +548,10 @@ func (f *File) WriteAt(p []byte, off int64) (n int, err error) {
})
if f.info.IsDir() {
return -1, ErrIsDirectory
return -1, config.ErrIsDirectory
}
if !f.flags.write {
if !f.flags.Write {
return -1, os.ErrPermission
}
@@ -591,10 +576,10 @@ func (f *File) WriteString(s string) (ret int, err error) {
})
if f.info.IsDir() {
return -1, ErrIsDirectory
return -1, config.ErrIsDirectory
}
if !f.flags.write {
if !f.flags.Write {
return -1, os.ErrPermission
}
@@ -615,10 +600,10 @@ func (f *File) Truncate(size int64) error {
})
if f.info.IsDir() {
return ErrIsDirectory
return config.ErrIsDirectory
}
if !f.flags.write {
if !f.flags.Write {
return os.ErrPermission
}

View File

@@ -1,474 +0,0 @@
package fs
import (
"archive/tar"
"database/sql"
"errors"
"io"
"os"
"os/user"
"path"
"path/filepath"
"strconv"
"time"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/inventory"
"github.com/pojntfx/stfs/pkg/operations"
"github.com/spf13/afero"
)
var (
	// ErrNotImplemented is returned by operations the STFS filesystem does
	// not support.
	ErrNotImplemented = errors.New("not implemented")
)

const (
	// FileSystemNameSTFS is the name reported by FileSystem.Name.
	FileSystemNameSTFS = "STFS"
)
// FileSystem is an afero.Fs implementation backed by tape operations and a
// metadata database.
type FileSystem struct {
	readOps  *operations.Operations // Operations used for read access
	writeOps *operations.Operations // Operations used for write access

	metadata config.MetadataConfig // Metadata database configuration

	compressionLevel string // Compression level for newly written entries
	// Factory producing a per-file write buffer plus its cleanup function
	getFileBuffer              func() (WriteCache, func() error, error)
	ignoreReadWritePermissions bool // If set, every open grants read and write

	onHeader func(hdr *models.Header) // Optional callback invoked for each header read

	log *logging.JSONLogger
}
// NewFileSystem creates an afero.Fs backed by STFS tape operations.
//
// readOps and writeOps perform the actual tape access, metadata points at
// the metadata database, getFileBuffer supplies per-file write buffers and
// ignorePermissionFlags forces read+write access on every open. onHeader, if
// non-nil, is invoked for every header that is read.
func NewFileSystem(
	readOps *operations.Operations,
	writeOps *operations.Operations,

	metadata config.MetadataConfig,

	compressionLevel string,
	getFileBuffer func() (WriteCache, func() error, error),
	ignorePermissionFlags bool,

	onHeader func(hdr *models.Header),

	log *logging.JSONLogger,
) afero.Fs {
	return &FileSystem{
		readOps:  readOps,
		writeOps: writeOps,

		metadata: metadata,

		compressionLevel:           compressionLevel,
		getFileBuffer:              getFileBuffer,
		ignoreReadWritePermissions: ignorePermissionFlags,

		onHeader: onHeader,

		log: log,
	}
}
// Name returns the name of this filesystem implementation ("STFS").
func (f *FileSystem) Name() string {
	f.log.Debug("FileSystem.Name", map[string]interface{}{
		"name": FileSystemNameSTFS,
	})

	return FileSystemNameSTFS
}
// Create creates the named file with mode 0666.
func (f *FileSystem) Create(name string) (afero.File, error) {
	// Fixed: the debug label previously read "FileSystem.Name" (copy-paste)
	f.log.Debug("FileSystem.Create", map[string]interface{}{
		"name": name,
	})

	// NOTE(review): this opens the file on the host OS filesystem, not via
	// the tape-backed OpenFile above — confirm this is intentional
	return os.OpenFile(name, os.O_CREATE, 0666)
}
// mknode archives a new tar header entry for a file (dir == false) or
// directory (dir == true) named name with the given permissions. Ownership
// is taken from the current OS user; no file contents are written.
func (f *FileSystem) mknode(dir bool, name string, perm os.FileMode) error {
	f.log.Trace("FileSystem.mknode", map[string]interface{}{
		"name": name,
		"perm": perm,
	})

	usr, err := user.Current()
	if err != nil {
		return err
	}

	uid, err := strconv.Atoi(usr.Uid)
	if err != nil {
		// Some OSes like i.e. Windows don't support numeric UIDs, so use 0 instead
		uid = 0
	}

	gid, err := strconv.Atoi(usr.Gid)
	if err != nil {
		// Some OSes like i.e. Windows don't support numeric GIDs, so use 0 instead
		gid = 0
	}

	groups, err := usr.GroupIds()
	if err != nil {
		return err
	}

	// Use the user's first group (if any) as the group name
	gname := ""
	if len(groups) >= 1 {
		gname = groups[0]
	}

	typeflag := tar.TypeReg
	if dir {
		typeflag = tar.TypeDir
	}

	hdr := &tar.Header{
		Typeflag: byte(typeflag),

		Name: name,

		Mode: int64(perm),
		Uid:  uid,
		Gid:  gid,

		Uname: usr.Username,
		Gname: gname,

		ModTime: time.Now(),
	}

	// Archive exactly one entry: the callback yields the header once, then io.EOF
	done := false
	if _, err := f.writeOps.Archive(
		func() (config.FileConfig, error) {
			// Exit after the first write
			if done {
				return config.FileConfig{}, io.EOF
			}
			done = true

			return config.FileConfig{
				GetFile: nil, // Not required as we never replace
				Info:    hdr.FileInfo(),
				Path:    filepath.ToSlash(name),
				Link:    filepath.ToSlash(hdr.Linkname),
			}, nil
		},
		f.compressionLevel,
		false,
	); err != nil {
		return err
	}

	return nil
}
// Mkdir creates a single directory entry named name with the given
// permissions by archiving a directory header.
func (f *FileSystem) Mkdir(name string, perm os.FileMode) error {
	fields := map[string]interface{}{
		"name": name,
		"perm": perm,
	}
	f.log.Debug("FileSystem.Mkdir", fields)

	return f.mknode(true, name, perm)
}
// MkdirAll creates the directory at path together with all of its missing
// ancestors, each with the given permissions.
func (f *FileSystem) MkdirAll(path string, perm os.FileMode) error {
	f.log.Debug("FileSystem.MkdirAll", map[string]interface{}{
		"path": path,
		"perm": perm,
	})

	// Fixed: this previously used filepath.SplitList, which splits on the OS
	// path *list* separator (':' or ';'), not on '/', so intermediate
	// directories were never created. Walk up via filepath.Dir instead and
	// create the ancestors root-first.
	dirs := []string{}
	for current := filepath.Clean(path); ; {
		dirs = append(dirs, current)

		parent := filepath.Dir(current)
		if parent == current || parent == "." || parent == string(filepath.Separator) {
			break
		}

		current = parent
	}

	// Create from the outermost ancestor down to path itself
	for i := len(dirs) - 1; i >= 0; i-- {
		if err := f.mknode(true, dirs[i], perm); err != nil {
			return err
		}
	}

	return nil
}
// Open opens the named file via OpenFile.
//
// NOTE(review): afero's Open is conventionally read-only (os.O_RDONLY), but
// this implementation requests os.O_RDWR — confirm this is intentional.
func (f *FileSystem) Open(name string) (afero.File, error) {
	f.log.Debug("FileSystem.Open", map[string]interface{}{
		"name": name,
	})

	return f.OpenFile(name, os.O_RDWR, os.ModePerm)
}
// OpenFile opens the named file with POSIX-style flags, creating it with the
// given permissions when os.O_CREATE is set and it does not exist. Returns
// os.ErrNotExist when the file is missing and may not be created.
func (f *FileSystem) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
	f.log.Debug("FileSystem.OpenFile", map[string]interface{}{
		"name": name,
		"flag": flag,
		"perm": perm,
	})

	flags := &FileFlags{}

	// Fixed: os.O_RDONLY is 0, so the previous check `flag&os.O_RDONLY != 0`
	// could never be true and read-only opens received no read permission.
	// Decode the access mode by masking the three mutually exclusive values.
	switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) {
	case os.O_WRONLY:
		flags.write = true
	case os.O_RDWR:
		flags.read = true
		flags.write = true
	default: // os.O_RDONLY == 0
		flags.read = true
	}

	if f.ignoreReadWritePermissions {
		flags.read = true
		flags.write = true
	}

	if flag&os.O_APPEND != 0 {
		flags.append = true
	}

	if flag&os.O_TRUNC != 0 {
		flags.truncate = true
	}

	hdr, err := inventory.Stat(
		f.metadata,

		name,

		f.onHeader,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			// Create the file if allowed, then stat it again
			if flag&os.O_CREATE != 0 && flag&os.O_EXCL == 0 {
				if err := f.mknode(false, name, perm); err != nil {
					return nil, err
				}

				hdr, err = inventory.Stat(
					f.metadata,

					name,

					f.onHeader,
				)
				if err != nil {
					return nil, err
				}
			} else {
				return nil, os.ErrNotExist
			}
		} else {
			return nil, err
		}
	}

	return NewFile(
		f.readOps,
		f.writeOps,

		f.metadata,

		hdr.Name,
		hdr.Linkname,
		flags,

		f.compressionLevel,
		f.getFileBuffer,

		path.Base(hdr.Name),
		NewFileInfoFromTarHeader(hdr, f.log),

		f.onHeader,

		f.log,
	), nil
}
// Remove deletes the named entry via the write operations' Delete.
func (f *FileSystem) Remove(name string) error {
	fields := map[string]interface{}{
		"name": name,
	}
	f.log.Debug("FileSystem.Remove", fields)

	return f.writeOps.Delete(name)
}
// RemoveAll deletes path via the write operations' Delete, the same
// underlying call Remove uses.
func (f *FileSystem) RemoveAll(path string) error {
	fields := map[string]interface{}{
		"path": path,
	}
	f.log.Debug("FileSystem.RemoveAll", fields)

	return f.writeOps.Delete(path)
}
// Rename moves oldname to newname via the write operations' Move.
func (f *FileSystem) Rename(oldname, newname string) error {
	fields := map[string]interface{}{
		"oldname": oldname,
		"newname": newname,
	}
	f.log.Debug("FileSystem.Rename", fields)

	return f.writeOps.Move(oldname, newname)
}
// Stat looks the named entry up in the metadata database and returns its
// file info, or os.ErrNotExist if it is not indexed.
func (f *FileSystem) Stat(name string) (os.FileInfo, error) {
	f.log.Debug("FileSystem.Stat", map[string]interface{}{
		"name": name,
	})

	hdr, err := inventory.Stat(f.metadata, name, f.onHeader)
	switch {
	case err == sql.ErrNoRows:
		// A missing row means the entry does not exist
		return nil, os.ErrNotExist
	case err != nil:
		return nil, err
	}

	return NewFileInfoFromTarHeader(hdr, f.log), nil
}
// updateMetadata persists the metadata of hdr (ownership, mode, times) via a
// single-entry update operation; file contents are never replaced.
func (f *FileSystem) updateMetadata(hdr *tar.Header) error {
	// Update exactly one entry: the callback yields the header once, then io.EOF
	done := false
	if _, err := f.writeOps.Update(
		func() (config.FileConfig, error) {
			// Exit after the first update
			if done {
				return config.FileConfig{}, io.EOF
			}
			done = true

			return config.FileConfig{
				GetFile: nil, // Not required as we never replace
				Info:    hdr.FileInfo(),
				Path:    filepath.ToSlash(hdr.Name),
				Link:    filepath.ToSlash(hdr.Linkname),
			}, nil
		},
		f.compressionLevel,
		false,
		false,
	); err != nil {
		return err
	}

	return nil
}
// Chmod changes the mode of the named entry in the metadata database.
func (f *FileSystem) Chmod(name string, mode os.FileMode) error {
	// Fixed: the mode was previously logged under the "name" key and the
	// actual file name was dropped from the log entry
	f.log.Debug("FileSystem.Chmod", map[string]interface{}{
		"name": name,
		"mode": mode,
	})

	hdr, err := inventory.Stat(
		f.metadata,

		name,

		f.onHeader,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return os.ErrNotExist
		}

		return err
	}

	hdr.Mode = int64(mode)

	return f.updateMetadata(hdr)
}
// Chown changes the numeric owner and group of the named entry in the
// metadata database.
func (f *FileSystem) Chown(name string, uid, gid int) error {
	f.log.Debug("FileSystem.Chown", map[string]interface{}{
		"name": name,
		"uid":  uid,
		"gid":  gid,
	})

	hdr, err := inventory.Stat(
		f.metadata,

		name,

		f.onHeader,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return os.ErrNotExist
		}

		return err
	}

	hdr.Uid = uid
	hdr.Gid = gid

	return f.updateMetadata(hdr)
}
// Chtimes changes the access and modification times of the named entry in
// the metadata database.
func (f *FileSystem) Chtimes(name string, atime time.Time, mtime time.Time) error {
	f.log.Debug("FileSystem.Chtimes", map[string]interface{}{
		"name":  name,
		"atime": atime,
		"mtime": mtime,
	})

	hdr, err := inventory.Stat(
		f.metadata,

		name,

		f.onHeader,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return os.ErrNotExist
		}

		return err
	}

	hdr.AccessTime = atime
	hdr.ModTime = mtime

	return f.updateMetadata(hdr)
}
// LstatIfPossible is not supported by this filesystem; it always returns
// ErrNotImplemented.
func (f *FileSystem) LstatIfPossible(name string) (os.FileInfo, bool, error) {
	f.log.Debug("FileSystem.LstatIfPossible", map[string]interface{}{
		"name": name,
	})

	return nil, false, ErrNotImplemented
}
// SymlinkIfPossible is not supported by this filesystem; it always returns
// ErrNotImplemented.
func (f *FileSystem) SymlinkIfPossible(oldname, newname string) error {
	f.log.Debug("FileSystem.SymlinkIfPossible", map[string]interface{}{
		"oldname": oldname,
		"newname": newname,
	})

	return ErrNotImplemented
}
// ReadlinkIfPossible is not supported by this filesystem; it always returns
// ErrNotImplemented.
func (f *FileSystem) ReadlinkIfPossible(name string) (string, error) {
	f.log.Debug("FileSystem.ReadlinkIfPossible", map[string]interface{}{
		"name": name,
	})

	return "", ErrNotImplemented
}

View File

@@ -1,445 +0,0 @@
package persisters
//go:generate sqlboiler sqlite3 -o ../db/sqlite/models/metadata -c ../../configs/sqlboiler/metadata.yaml
//go:generate go-bindata -pkg metadata -o ../db/sqlite/migrations/metadata/migrations.go ../../db/sqlite/migrations/metadata
import (
"context"
"database/sql"
"fmt"
"path"
"path/filepath"
"strings"
"github.com/pojntfx/stfs/internal/db/sqlite/migrations/metadata"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/pathext"
migrate "github.com/rubenv/sql-migrate"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/sqlboiler/v4/queries"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
)
// depth captures the result of the "depth" aggregate queries used below.
type depth struct {
	Depth int64 `boil:"depth" json:"depth" toml:"depth" yaml:"depth"`
}

// MetadataPersister provides access to the tape metadata index stored in a
// SQLite database.
type MetadataPersister struct {
	*SQLite
}

// NewMetadataPersister creates a metadata persister backed by the SQLite
// database at dbPath; embedded migrations are configured here and applied
// when the database is opened.
func NewMetadataPersister(dbPath string) *MetadataPersister {
	return &MetadataPersister{
		&SQLite{
			DBPath: dbPath,
			Migrations: migrate.AssetMigrationSource{
				Asset:    metadata.Asset,
				AssetDir: metadata.AssetDir,
				Dir:      "../../db/sqlite/migrations/metadata",
			},
		},
	}
}
// UpsertHeader inserts dbhdr if no header with its name exists, and updates
// the existing row otherwise. If the name only matches after applying the
// relative root prefix, the prefixed row is updated instead.
func (p *MetadataPersister) UpsertHeader(ctx context.Context, dbhdr *models.Header) error {
	// Work on a copy so the caller's header is not mutated
	hdr := *dbhdr

	if _, err := models.FindHeader(ctx, p.db, hdr.Name, models.HeaderColumns.Name); err != nil {
		if err == sql.ErrNoRows {
			if _, err := models.FindHeader(ctx, p.db, p.withRelativeRoot(ctx, hdr.Name), models.HeaderColumns.Name); err == nil {
				// The row exists under the relative-root-prefixed name; update that one
				hdr.Name = p.withRelativeRoot(ctx, hdr.Name)
			} else {
				// The header does not exist at all; insert it
				if err := hdr.Insert(ctx, p.db, boil.Infer()); err != nil {
					return err
				}

				return nil
			}
		} else {
			return err
		}
	}

	if _, err := hdr.Update(ctx, p.db, boil.Infer()); err != nil {
		return err
	}

	return nil
}
// UpdateHeaderMetadata updates the row for dbhdr; if the update reports
// sql.ErrNoRows, it retries once with the relative root prefix applied to
// the name.
func (p *MetadataPersister) UpdateHeaderMetadata(ctx context.Context, dbhdr *models.Header) error {
	if _, err := dbhdr.Update(ctx, p.db, boil.Infer()); err != nil {
		if err == sql.ErrNoRows {
			// Retry with the relative root prefix applied to the name
			hdr := *dbhdr
			hdr.Name = p.withRelativeRoot(ctx, dbhdr.Name)

			if _, err := hdr.Update(ctx, p.db, boil.Infer()); err != nil {
				return err
			}
		} else {
			return err
		}
	}

	return nil
}
// MoveHeader renames the header oldName to newName and records the last
// known record and block. A raw query is used because the name is the
// primary key and cannot be changed through the ORM's Update.
func (p *MetadataPersister) MoveHeader(ctx context.Context, oldName string, newName string, lastknownrecord, lastknownblock int64) error {
	// We can't do this with `dbhdr.Update` because we are renaming the primary key
	n, err := queries.Raw(
		fmt.Sprintf(
			`update %v set %v = ?, %v = ?, %v = ? where %v = ?;`,
			models.TableNames.Headers,
			models.HeaderColumns.Name,
			models.HeaderColumns.Lastknownrecord,
			models.HeaderColumns.Lastknownblock,
			models.HeaderColumns.Name,
		),
		newName,
		lastknownrecord,
		lastknownblock,
		oldName,
	).ExecContext(ctx, p.db)
	if err != nil {
		return err
	}

	written, err := n.RowsAffected()
	if err != nil {
		return err
	}

	if written < 1 {
		// Nothing matched; retry assuming the stored name carries the
		// relative root prefix.
		//
		// NOTE(review): withRelativeRoot is applied to the *column
		// identifier* (models.HeaderColumns.Name) here rather than to the
		// oldName value bound as a parameter — this looks like it should be
		// `p.withRelativeRoot(ctx, oldName)` on the bound value; confirm the
		// intended SQL.
		if _, err := queries.Raw(
			fmt.Sprintf(
				`update %v set %v = ?, %v = ?, %v = ? where %v = ?;`,
				models.TableNames.Headers,
				p.withRelativeRoot(ctx, models.HeaderColumns.Name),
				models.HeaderColumns.Lastknownrecord,
				models.HeaderColumns.Lastknownblock,
				p.withRelativeRoot(ctx, models.HeaderColumns.Name),
			),
			newName,
			lastknownrecord,
			lastknownblock,
			oldName,
		).ExecContext(ctx, p.db); err != nil {
			return err
		}
	}

	return nil
}
// GetHeaders returns every header that is not marked as deleted.
func (p *MetadataPersister) GetHeaders(ctx context.Context) (models.HeaderSlice, error) {
	query := models.Headers(
		qm.Where(models.HeaderColumns.Deleted + " != 1"),
	)

	return query.All(ctx, p.db)
}
// GetHeader returns the non-deleted header with the given name, falling back
// to the relative-root-prefixed name if the plain name is not found.
func (p *MetadataPersister) GetHeader(ctx context.Context, name string) (*models.Header, error) {
	hdr, err := models.Headers(
		qm.Where(models.HeaderColumns.Name+" = ?", name),
		qm.Where(models.HeaderColumns.Deleted+" != 1"),
	).One(ctx, p.db)
	if err != nil {
		if err == sql.ErrNoRows {
			// Retry with the relative root prefix applied
			hdr, err = models.Headers(
				qm.Where(models.HeaderColumns.Name+" = ?", p.withRelativeRoot(ctx, name)),
				qm.Where(models.HeaderColumns.Deleted+" != 1"),
			).One(ctx, p.db)
			if err != nil {
				return nil, err
			}
		} else {
			return nil, err
		}
	}

	return hdr, nil
}
// GetHeaderChildren returns every non-deleted header below the directory
// name (at any depth), excluding the directory itself. If nothing matches
// under the plain name, the relative-root-prefixed name is tried.
func (p *MetadataPersister) GetHeaderChildren(ctx context.Context, name string) (models.HeaderSlice, error) {
	headers, err := models.Headers(
		qm.Where(models.HeaderColumns.Name+" like ?", strings.TrimSuffix(name, "/")+"/%"), // Prevent double trailing slashes
		qm.Where(models.HeaderColumns.Deleted+" != 1"),
	).All(ctx, p.db)
	if err != nil {
		return nil, err
	}

	if len(headers) < 1 {
		// Retry with the relative root prefix applied
		headers, err = models.Headers(
			qm.Where(models.HeaderColumns.Name+" like ?", p.withRelativeRoot(ctx, strings.TrimSuffix(name, "/")+"/%")), // Prevent double trailing slashes
			qm.Where(models.HeaderColumns.Deleted+" != 1"),
		).All(ctx, p.db)
		if err != nil {
			return nil, err
		}
	}

	// Filter out the parent directory itself
	outhdrs := models.HeaderSlice{}
	for _, hdr := range headers {
		prefix := strings.TrimSuffix(hdr.Name, "/")

		if name != prefix && name != prefix+"/" {
			outhdrs = append(outhdrs, hdr)
		}
	}

	return outhdrs, nil
}
// GetRootPath returns the name of the shallowest (fewest "/" separators)
// non-deleted header, i.e. the root of the indexed tree.
//
// This relies on SQLite's bare-column behavior: with min() in the select
// list, the other selected columns are taken from the row that holds the
// minimum.
func (p *MetadataPersister) GetRootPath(ctx context.Context) (string, error) {
	root := models.Header{}
	if err := queries.Raw(
		fmt.Sprintf(
			`select min(length(%v) - length(replace(%v, "/", ""))) as depth, name from %v where %v != 1`,
			models.HeaderColumns.Name,
			models.HeaderColumns.Name,
			models.TableNames.Headers,
			models.HeaderColumns.Deleted,
		),
	).Bind(ctx, p.db, &root); err != nil {
		return "", err
	}

	return root.Name, nil
}
// GetHeaderDirectChildren returns the non-deleted headers that are direct
// (depth-one) children of the directory name. For the root, the minimum
// depth present in the table is used as the base depth. If limit is
// non-negative, at most limit-1 entries are returned.
//
// NOTE(review): the `if limit < 0` guard below looks inverted — the limited
// query runs only when limit is negative (i.e. "no limit") and its result is
// then overwritten by the unconditional unlimited query; confirm whether the
// condition should be `limit >= 0` with an early return.
func (p *MetadataPersister) GetHeaderDirectChildren(ctx context.Context, name string, limit int) (models.HeaderSlice, error) {
	// Direct children share this prefix
	prefix := strings.TrimSuffix(name, "/") + "/"
	rootDepth := 0
	headers := models.HeaderSlice{}

	// Root node
	if pathext.IsRoot(name) {
		prefix = ""

		// Determine the shallowest depth present in the table; entries at
		// that depth count as direct children of the root
		depth := depth{}
		if err := queries.Raw(
			fmt.Sprintf(
				`select min(length(%v) - length(replace(%v, "/", ""))) as depth from %v where %v != 1`,
				models.HeaderColumns.Name,
				models.HeaderColumns.Name,
				models.TableNames.Headers,
				models.HeaderColumns.Deleted,
			),
		).Bind(ctx, p.db, &depth); err != nil {
			if err == sql.ErrNoRows {
				return headers, nil
			}

			return nil, err
		}

		rootDepth = int(depth.Depth)
	}

	// getHeaders selects entries under prefix whose computed depth marks
	// them as direct children (directory names carry a trailing slash and
	// thus one extra separator)
	getHeaders := func(prefix string) (models.HeaderSlice, error) {
		query := fmt.Sprintf(
			`select %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v,
length(replace(%v, ?, '')) - length(replace(replace(%v, ?, ''), '/', '')) as depth
from %v
where %v like ?
and (
depth = ?
or (
%v like '%%/'
and depth = ?
)
)
and %v != 1
and not %v in ('', '.', '/', './')`,
			models.HeaderColumns.Record,
			models.HeaderColumns.Lastknownrecord,
			models.HeaderColumns.Block,
			models.HeaderColumns.Lastknownblock,
			models.HeaderColumns.Deleted,
			models.HeaderColumns.Typeflag,
			models.HeaderColumns.Name,
			models.HeaderColumns.Linkname,
			models.HeaderColumns.Size,
			models.HeaderColumns.Mode,
			models.HeaderColumns.UID,
			models.HeaderColumns.Gid,
			models.HeaderColumns.Uname,
			models.HeaderColumns.Gname,
			models.HeaderColumns.Modtime,
			models.HeaderColumns.Accesstime,
			models.HeaderColumns.Changetime,
			models.HeaderColumns.Devmajor,
			models.HeaderColumns.Devminor,
			models.HeaderColumns.Paxrecords,
			models.HeaderColumns.Format,
			models.HeaderColumns.Name,
			models.HeaderColumns.Name,
			models.TableNames.Headers,
			models.HeaderColumns.Name,
			models.HeaderColumns.Name,
			models.HeaderColumns.Deleted,
			models.HeaderColumns.Name,
		)

		if limit < 0 {
			// NOTE(review): see the function comment — this branch's result
			// is overwritten by the unconditional query below
			if err := queries.Raw(
				query+`limit ?`,
				prefix,
				prefix,
				prefix+"%",
				rootDepth,
				rootDepth+1,
				limit+1, // +1 to accomodate the parent directory if it exists
			).Bind(ctx, p.db, &headers); err != nil {
				if err == sql.ErrNoRows {
					return headers, nil
				}

				return nil, err
			}
		}

		if err := queries.Raw(
			query,
			prefix,
			prefix,
			prefix+"%",
			rootDepth,
			rootDepth+1,
		).Bind(ctx, p.db, &headers); err != nil {
			if err == sql.ErrNoRows {
				return headers, nil
			}

			return nil, err
		}

		return headers, nil
	}

	headers, err := getHeaders(prefix)
	if err != nil {
		// Retry with the relative root prefix applied
		headers, err = getHeaders(p.withRelativeRoot(ctx, prefix))
		if err == sql.ErrNoRows {
			return headers, nil
		}

		if err != nil {
			return nil, err
		}
	}

	// Drop the parent directory itself from the result
	outhdrs := models.HeaderSlice{}
	for _, hdr := range headers {
		prefix := strings.TrimSuffix(hdr.Name, "/")

		if name != prefix && name != prefix+"/" {
			outhdrs = append(outhdrs, hdr)
		}
	}

	if limit < 0 || len(outhdrs) < limit {
		return outhdrs, nil
	}

	// NOTE(review): limit-1 drops one more entry than limit asks for —
	// possible off-by-one; confirm the intended semantics
	return outhdrs[:limit-1], nil
}
// DeleteHeader soft-deletes the header named name by setting its Deleted
// flag and recording the last known record and block; the row stays in the
// database because the entry is still physically on tape. Falls back to the
// relative-root-prefixed name when the plain name is not found.
func (p *MetadataPersister) DeleteHeader(ctx context.Context, name string, lastknownrecord, lastknownblock int64) (*models.Header, error) {
	hdr, err := models.FindHeader(ctx, p.db, name)
	if err == sql.ErrNoRows {
		// Fall back to the name with the relative root prefix applied
		hdr, err = models.FindHeader(ctx, p.db, p.withRelativeRoot(ctx, name))
	}
	if err != nil {
		return nil, err
	}

	// Mark as deleted instead of removing the row
	hdr.Deleted = 1
	hdr.Lastknownrecord = lastknownrecord
	hdr.Lastknownblock = lastknownblock

	if _, err := hdr.Update(ctx, p.db, boil.Infer()); err != nil {
		return nil, err
	}

	return hdr, nil
}
// GetLastIndexedRecordAndBlock returns the last known record and block of
// the header lying physically last on the tape, ordered by the absolute
// location record*recordSize+block. Deleted headers are included since they
// still occupy tape space. Returns 0, 0 for an empty index.
func (p *MetadataPersister) GetLastIndexedRecordAndBlock(ctx context.Context, recordSize int) (int64, int64, error) {
	var header models.Header
	if err := queries.Raw(
		fmt.Sprintf(
			`select %v, %v, ((%v*$1)+%v) as location from %v order by location desc limit 1`, // We include deleted headers here as they are still physically on the tape and have to be considered when re-indexing
			models.HeaderColumns.Lastknownrecord,
			models.HeaderColumns.Lastknownblock,
			models.HeaderColumns.Lastknownrecord,
			models.HeaderColumns.Lastknownblock,
			models.TableNames.Headers,
		),
		recordSize,
	).Bind(ctx, p.db, &header); err != nil {
		if err == sql.ErrNoRows {
			return 0, 0, nil
		}

		return 0, 0, err
	}

	return header.Lastknownrecord, header.Lastknownblock, nil
}
// PurgeAllHeaders permanently removes every header row from the database.
func (p *MetadataPersister) PurgeAllHeaders(ctx context.Context) error {
	_, err := models.Headers().DeleteAll(ctx, p.db)

	return err
}
// headerExistsExact reports whether a non-deleted header with exactly the
// given name exists; it returns sql.ErrNoRows when it does not.
func (p *MetadataPersister) headerExistsExact(ctx context.Context, name string) error {
	found, err := models.Headers(
		qm.Where(models.HeaderColumns.Name+" = ?", name),
		qm.Where(models.HeaderColumns.Deleted+" != 1"),
	).Exists(ctx, p.db)
	switch {
	case err != nil:
		return err
	case !found:
		return sql.ErrNoRows
	default:
		return nil
	}
}
// withRelativeRoot rewrites root to use whichever root representation the
// indexed archive actually stores ("", "." or "/"), determined by probing
// which of these exists as a header. When none exists, the archive contains
// only `./`-prefixed file entries and "./" is used as the prefix.
func (p *MetadataPersister) withRelativeRoot(ctx context.Context, root string) string {
	// Probe which root representation the index uses
	prefix := ""
	if err := p.headerExistsExact(ctx, ""); err == nil {
		prefix = ""
	} else if err := p.headerExistsExact(ctx, "."); err == nil {
		prefix = "."
	} else if err := p.headerExistsExact(ctx, "/"); err == nil {
		prefix = "/"
	} else {
		prefix = "./" // Special case: There is no root directory, only files, and the files start with `./`
	}

	if pathext.IsRoot(root) {
		return prefix
	}

	if prefix == "./" {
		// Special case: There is no root directory, only files, and the files start with `./`; we can't do path.Join, as `./asdf.txt` would be shortened to `asdf.txt`
		return prefix + filepath.Clean(strings.TrimPrefix(root, "/"))
	}

	return path.Join(prefix, filepath.Clean(strings.TrimPrefix(root, "/")))
}

View File

@@ -13,7 +13,7 @@ type SQLite struct {
DBPath string
Migrations migrate.MigrationSource
db *sql.DB
DB *sql.DB
}
func (s *SQLite) Open() error {
@@ -31,11 +31,11 @@ func (s *SQLite) Open() error {
// Configure the db
db.SetMaxOpenConns(1) // Prevent "database locked" errors
s.db = db
s.DB = db
// Run migrations if set
if s.Migrations != nil {
if _, err := migrate.Exec(s.db, "sqlite3", s.Migrations, migrate.Up); err != nil {
if _, err := migrate.Exec(s.DB, "sqlite3", s.Migrations, migrate.Up); err != nil {
return err
}
}