refactor: Normalize internal and pkg packages

Felicitas Pojtinger
2021-12-08 21:44:57 +01:00
parent 6c5c932b2d
commit f6b1ab0320
36 changed files with 258 additions and 219 deletions

View File

@@ -1,11 +0,0 @@
//go:build js || windows
package adapters
import (
"archive/tar"
)
func EnhanceHeader(path string, hdr *tar.Header) error {
return nil
}

View File

@@ -10,8 +10,8 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
"github.com/pierrec/lz4/v4"
"github.com/pojntfx/stfs/internal/controllers"
"github.com/pojntfx/stfs/internal/noop"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/pkg/config"
)
@@ -21,14 +21,14 @@ func Compress(
compressionLevel string,
isRegular bool,
recordSize int,
) (noop.Flusher, error) {
) (ioext.Flusher, error) {
switch compressionFormat {
case config.CompressionFormatGZipKey:
fallthrough
case config.CompressionFormatParallelGZipKey:
if compressionFormat == config.CompressionFormatGZipKey {
if !isRegular {
maxSize := getNearestPowerOf2Lower(controllers.BlockSize * recordSize)
maxSize := getNearestPowerOf2Lower(mtio.BlockSize * recordSize)
if maxSize < 65535 { // See https://www.daylight.com/meetings/mug00/Sayle/gzip.html#:~:text=Stored%20blocks%20are%20allowed%20to,size%20of%20the%20gzip%20header.
return nil, config.ErrCompressionFormatRequiresLargerRecordSize
@@ -82,7 +82,7 @@ func Compress(
opts := []lz4.Option{lz4.CompressionLevelOption(l), lz4.ConcurrencyOption(-1)}
if !isRegular {
maxSize := getNearestPowerOf2Lower(controllers.BlockSize * recordSize)
maxSize := getNearestPowerOf2Lower(mtio.BlockSize * recordSize)
if uint32(maxSize) < uint32(lz4.Block64Kb) {
return nil, config.ErrCompressionFormatRequiresLargerRecordSize
@@ -104,7 +104,7 @@ func Compress(
return nil, err
}
return noop.AddFlush(lz), nil
return ioext.AddFlush(lz), nil
case config.CompressionFormatZStandardKey:
l := zstd.SpeedDefault
switch compressionLevel {
@@ -120,7 +120,7 @@ func Compress(
opts := []zstd.EOption{zstd.WithEncoderLevel(l)}
if !isRegular {
opts = append(opts, zstd.WithWindowSize(getNearestPowerOf2Lower(controllers.BlockSize*recordSize)))
opts = append(opts, zstd.WithWindowSize(getNearestPowerOf2Lower(mtio.BlockSize*recordSize)))
}
zz, err := zstd.NewWriter(dst, opts...)
@@ -171,9 +171,9 @@ func Compress(
return nil, err
}
return noop.AddFlush(bz), nil
return ioext.AddFlush(bz), nil
case config.NoneKey:
return noop.AddFlush(noop.AddClose(dst)), nil
return ioext.AddFlush(ioext.AddClose(dst)), nil
default:
return nil, config.ErrCompressionFormatUnsupported
}

View File

@@ -3,6 +3,8 @@ package converters
import (
"archive/tar"
"encoding/json"
"fmt"
"time"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
)
@@ -66,3 +68,9 @@ func TarHeaderToDBHeader(record, lastKnownRecord, block, lastKnownBlock int64, t
return &hdr, nil
}
func TARHeaderToCSV(record, lastKnownRecord, block, lastKnownBlock int64, hdr *tar.Header) []string {
return []string{
fmt.Sprintf("%v", record), fmt.Sprintf("%v", lastKnownRecord), fmt.Sprintf("%v", block), fmt.Sprintf("%v", lastKnownBlock), fmt.Sprintf("%v", hdr.Typeflag), hdr.Name, hdr.Linkname, fmt.Sprintf("%v", hdr.Size), fmt.Sprintf("%v", hdr.Mode), fmt.Sprintf("%v", hdr.Uid), fmt.Sprintf("%v", hdr.Gid), fmt.Sprintf("%v", hdr.Uname), fmt.Sprintf("%v", hdr.Gname), hdr.ModTime.Format(time.RFC3339), hdr.AccessTime.Format(time.RFC3339), hdr.ChangeTime.Format(time.RFC3339), fmt.Sprintf("%v", hdr.Devmajor), fmt.Sprintf("%v", hdr.Devminor), fmt.Sprintf("%v", hdr.PAXRecords), fmt.Sprintf("%v", hdr.Format),
}
}
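
TARHeaderToCSV absorbs the helper that is removed from formatting below; a minimal sketch of a hypothetical caller, pairing it with formatting.PrintCSV the way the pkg/operations and pkg/recovery diffs further down do (the -1 placeholders mark record/block positions that are not known yet):

package main

import (
	"archive/tar"
	"time"

	"github.com/pojntfx/stfs/internal/converters"
	"github.com/pojntfx/stfs/internal/formatting"
)

func main() {
	hdr := &tar.Header{
		Typeflag: tar.TypeReg,
		Name:     "example.txt",
		Size:     42,
		Mode:     0600,
		ModTime:  time.Now(),
	}

	// -1 marks a record/block position that is not known yet, as in the Archive/Update paths
	if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
		panic(err)
	}
}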

View File

@@ -9,7 +9,7 @@ import (
"filippo.io/age"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/pkg/config"
)
@@ -63,7 +63,7 @@ func DecryptHeader(
return config.ErrEmbeddedHeaderMissing
}
encryptedEmbeddedHeader, ok := hdr.PAXRecords[pax.STFSRecordEmbeddedHeader]
encryptedEmbeddedHeader, ok := hdr.PAXRecords[records.STFSRecordEmbeddedHeader]
if !ok {
return config.ErrEmbeddedHeaderMissing
}

View File

@@ -9,8 +9,8 @@ import (
"filippo.io/age"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/pojntfx/stfs/internal/noop"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/pkg/config"
)
@@ -35,7 +35,7 @@ func Encrypt(
return openpgp.Encrypt(dst, recipient, nil, nil, nil)
case config.NoneKey:
return noop.AddClose(dst), nil
return ioext.AddClose(dst), nil
default:
return nil, config.ErrEncryptionFormatUnsupported
}
@@ -61,7 +61,7 @@ func EncryptHeader(
return err
}
newHdr.PAXRecords[pax.STFSRecordEmbeddedHeader], err = EncryptString(string(wrappedHeader), encryptionFormat, recipient)
newHdr.PAXRecords[records.STFSRecordEmbeddedHeader], err = EncryptString(string(wrappedHeader), encryptionFormat, recipient)
if err != nil {
return err
}

View File

@@ -1,11 +1,8 @@
package formatting
import (
"archive/tar"
"encoding/csv"
"fmt"
"os"
"time"
)
var (
@@ -19,9 +16,3 @@ func PrintCSV(input []string) error {
return w.WriteAll([][]string{input})
}
func GetTARHeaderAsCSV(record, lastKnownRecord, block, lastKnownBlock int64, hdr *tar.Header) []string {
return []string{
fmt.Sprintf("%v", record), fmt.Sprintf("%v", lastKnownRecord), fmt.Sprintf("%v", block), fmt.Sprintf("%v", lastKnownBlock), fmt.Sprintf("%v", hdr.Typeflag), hdr.Name, hdr.Linkname, fmt.Sprintf("%v", hdr.Size), fmt.Sprintf("%v", hdr.Mode), fmt.Sprintf("%v", hdr.Uid), fmt.Sprintf("%v", hdr.Gid), fmt.Sprintf("%v", hdr.Uname), fmt.Sprintf("%v", hdr.Gname), hdr.ModTime.Format(time.RFC3339), hdr.AccessTime.Format(time.RFC3339), hdr.ChangeTime.Format(time.RFC3339), fmt.Sprintf("%v", hdr.Devmajor), fmt.Sprintf("%v", hdr.Devminor), fmt.Sprintf("%v", hdr.PAXRecords), fmt.Sprintf("%v", hdr.Format),
}
}

View File

@@ -1,4 +1,4 @@
package counters
package ioext
import "io"

internal/ioext/flusher.go Normal file
View File

@@ -0,0 +1,19 @@
package ioext
import "io"
type Flusher interface {
io.WriteCloser
Flush() error
}
type NopFlusher struct {
io.WriteCloser
}
func (NopFlusher) Flush() error { return nil }
func AddFlush(w io.WriteCloser) NopFlusher {
return NopFlusher{w}
}

internal/ioext/write.go Normal file
View File

@@ -0,0 +1,13 @@
package ioext
import "io"
type NopCloser struct {
io.Writer
}
func (NopCloser) Close() error { return nil }
func AddClose(w io.Writer) NopCloser {
return NopCloser{w}
}
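
A minimal sketch of how the two new wrappers compose (hypothetical caller; the import path is the internal one introduced here, so this only compiles inside the module), mirroring the config.NoneKey branch of Compress above:

package main

import (
	"bytes"
	"io"

	"github.com/pojntfx/stfs/internal/ioext"
)

// writePassthrough hands out an ioext.Flusher even when nothing needs
// flushing or closing, just like Compress does for config.NoneKey.
func writePassthrough(dst io.Writer, payload []byte) (int, error) {
	var w ioext.Flusher = ioext.AddFlush(ioext.AddClose(dst)) // no-op Close, then no-op Flush

	n, err := w.Write(payload)
	if err != nil {
		return n, err
	}

	if err := w.Flush(); err != nil {
		return n, err
	}

	return n, w.Close()
}

func main() {
	buf := &bytes.Buffer{}
	if _, err := writePassthrough(buf, []byte("hello")); err != nil {
		panic(err)
	}
}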

View File

@@ -0,0 +1,5 @@
package mtio
const (
BlockSize = 512
)

View File

@@ -1,4 +1,6 @@
package controllers
//go:build linux
package mtio
import (
"os"
@@ -15,8 +17,6 @@ const (
mtOffl = 7 // Rewind and put the drive offline (eject?)
mtEom = 12 // Goto end of recorded media (for appending files)
mtSeek = 22 // Seek to block
BlockSize = 512
)
// position is struct for MTIOCPOS
@@ -31,6 +31,20 @@ type operation struct {
count int32 // Operation count
}
func GetCurrentRecordFromTape(f *os.File) (int64, error) {
pos := &position{}
if _, _, err := syscall.Syscall(
syscall.SYS_IOCTL,
f.Fd(),
mtioCpos,
uintptr(unsafe.Pointer(pos)),
); err != 0 {
return 0, err
}
return pos.blkNo, nil
}
func GoToEndOfTape(f *os.File) error {
if _, _, err := syscall.Syscall(
syscall.SYS_IOCTL,
@@ -66,18 +80,21 @@ func GoToNextFileOnTape(f *os.File) error {
return nil
}
func GetCurrentRecordFromTape(f *os.File) (int64, error) {
pos := &position{}
func EjectTape(f *os.File) error {
if _, _, err := syscall.Syscall(
syscall.SYS_IOCTL,
f.Fd(),
mtioCpos,
uintptr(unsafe.Pointer(pos)),
mtioCtop,
uintptr(unsafe.Pointer(
&operation{
op: mtOffl,
},
)),
); err != 0 {
return 0, err
return err
}
return pos.blkNo, nil
return nil
}
func SeekToRecordOnTape(f *os.File, record int32) error {
@@ -97,20 +114,3 @@ func SeekToRecordOnTape(f *os.File, record int32) error {
return nil
}
func EjectTape(f *os.File) error {
if _, _, err := syscall.Syscall(
syscall.SYS_IOCTL,
f.Fd(),
mtioCtop,
uintptr(unsafe.Pointer(
&operation{
op: mtOffl,
},
)),
); err != 0 {
return err
}
return nil
}

View File

@@ -0,0 +1,29 @@
//go:build !linux
package mtio
import (
"os"
"github.com/pojntfx/stfs/pkg/config"
)
func GetCurrentRecordFromTape(f *os.File) (int64, error) {
return -1, config.ErrDrivesUnsupported
}
func GoToEndOfTape(f *os.File) error {
return config.ErrDrivesUnsupported
}
func GoToNextFileOnTape(f *os.File) error {
return config.ErrDrivesUnsupported
}
func EjectTape(f *os.File) error {
return config.ErrDrivesUnsupported
}
func SeekToRecordOnTape(f *os.File, record int32) error {
return config.ErrDrivesUnsupported
}
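
Callers never carry build tags themselves; they call the mtio functions unconditionally and handle the sentinel error. A minimal sketch of that pattern (hypothetical device path and open flags; pkg/hardware.Eject below does the same without the sentinel check):

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/pojntfx/stfs/internal/mtio"
	"github.com/pojntfx/stfs/pkg/config"
)

func main() {
	// Hypothetical tape device path for illustration
	f, err := os.OpenFile("/dev/nst0", os.O_RDONLY, os.ModeCharDevice)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := mtio.EjectTape(f); err != nil {
		if errors.Is(err, config.ErrDrivesUnsupported) {
			// On !linux builds this is the stub above, which always returns the sentinel
			fmt.Println("tape drives are not supported on this platform")

			return
		}

		panic(err)
	}
}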

View File

@@ -1,19 +0,0 @@
package noop
import "io"
type Flusher interface {
io.WriteCloser
Flush() error
}
type NoOpFlusher struct {
io.WriteCloser
}
func (NoOpFlusher) Flush() error { return nil }
func AddFlush(w io.WriteCloser) NoOpFlusher {
return NoOpFlusher{w}
}

View File

@@ -1,13 +0,0 @@
package noop
import "io"
type NoOpCloser struct {
io.Writer
}
func (NoOpCloser) Close() error { return nil }
func AddClose(w io.Writer) NoOpCloser {
return NoOpCloser{w}
}

View File

@@ -1,4 +1,4 @@
package pax
package records
import "errors"

View File

@@ -10,7 +10,7 @@ import (
"aead.dev/minisign"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/ProtonMail/go-crypto/openpgp/packet"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/pkg/config"
)
@@ -106,8 +106,8 @@ func SignHeader(
return err
}
newHdr.PAXRecords[pax.STFSRecordEmbeddedHeader] = string(wrappedHeader)
newHdr.PAXRecords[pax.STFSRecordSignature], err = SignString(newHdr.PAXRecords[pax.STFSRecordEmbeddedHeader], isRegular, signatureFormat, identity)
newHdr.PAXRecords[records.STFSRecordEmbeddedHeader] = string(wrappedHeader)
newHdr.PAXRecords[records.STFSRecordSignature], err = SignString(newHdr.PAXRecords[records.STFSRecordEmbeddedHeader], isRegular, signatureFormat, identity)
if err != nil {
return err
}

View File

@@ -10,7 +10,7 @@ import (
"aead.dev/minisign"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/ProtonMail/go-crypto/openpgp/packet"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/pkg/config"
)
@@ -102,12 +102,12 @@ func VerifyHeader(
return config.ErrEmbeddedHeaderMissing
}
embeddedHeader, ok := hdr.PAXRecords[pax.STFSRecordEmbeddedHeader]
embeddedHeader, ok := hdr.PAXRecords[records.STFSRecordEmbeddedHeader]
if !ok {
return config.ErrEmbeddedHeaderMissing
}
signature, ok := hdr.PAXRecords[pax.STFSRecordSignature]
signature, ok := hdr.PAXRecords[records.STFSRecordSignature]
if !ok {
return config.ErrSignatureMissing
}

View File

@@ -1,6 +1,6 @@
//go:build darwin
package adapters
package statext
import (
"archive/tar"

View File

@@ -1,6 +1,6 @@
//go:build dragonfly || openbsd || (linux && mips64) || (linux && mips) || (linux && mipsle) || (linux && mips64le)
//go:build dragonfly
package adapters
package statext
import (
"archive/tar"

View File

@@ -1,6 +1,6 @@
//go:build freebsd || netbsd
//go:build freebsd
package adapters
package statext
import (
"archive/tar"

View File

@@ -1,6 +1,6 @@
//go:build !(freebsd || darwin || dragonfly || js || netbsd || openbsd || windows || (linux && mips64) || (linux && mips) || (linux && mipsle) || (linux && mips64le))
//go:build (linux && amd64) || (linux && 386) || (linux && arm) || (linux && arm64) || (linux && ppc64) || (linux && ppc64le) || (linux && riscv64) || (linux && s390x)
package adapters
package statext
import (
"archive/tar"

View File

@@ -0,0 +1,11 @@
//go:build (linux && mips64) || (linux && mips) || (linux && mipsle) || (linux && mips64le) || !(darwin || dragonfly || freebsd || linux)
package statext
import (
"archive/tar"
)
func EnhanceHeader(path string, hdr *tar.Header) error {
return nil
}

View File

@@ -5,8 +5,8 @@ import (
"bufio"
"os"
"github.com/pojntfx/stfs/internal/controllers"
"github.com/pojntfx/stfs/internal/counters"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/mtio"
)
func OpenTapeWriteOnly(drive string, recordSize int, overwrite bool) (tw *tar.Writer, isRegular bool, cleanup func(dirty *bool) error, err error) {
@@ -37,19 +37,19 @@ func OpenTapeWriteOnly(drive string, recordSize int, overwrite bool) (tw *tar.Wr
if !overwrite {
// Go to end of tape
if err := controllers.GoToEndOfTape(f); err != nil {
if err := mtio.GoToEndOfTape(f); err != nil {
return nil, false, nil, err
}
}
}
var bw *bufio.Writer
var counter *counters.CounterWriter
var counter *ioext.CounterWriter
if isRegular {
tw = tar.NewWriter(f)
} else {
bw = bufio.NewWriterSize(f, controllers.BlockSize*recordSize)
counter = &counters.CounterWriter{Writer: bw, BytesRead: 0}
bw = bufio.NewWriterSize(f, mtio.BlockSize*recordSize)
counter = &ioext.CounterWriter{Writer: bw, BytesRead: 0}
tw = tar.NewWriter(counter)
}
@@ -61,9 +61,9 @@ func OpenTapeWriteOnly(drive string, recordSize int, overwrite bool) (tw *tar.Wr
}
if !isRegular {
if controllers.BlockSize*recordSize-counter.BytesRead > 0 {
if mtio.BlockSize*recordSize-counter.BytesRead > 0 {
// Fill the rest of the record with zeros
if _, err := bw.Write(make([]byte, controllers.BlockSize*recordSize-counter.BytesRead)); err != nil {
if _, err := bw.Write(make([]byte, mtio.BlockSize*recordSize-counter.BytesRead)); err != nil {
return err
}
}

View File

@@ -27,4 +27,6 @@ var (
ErrCompressionLevelUnknown = errors.New("unknown compression level")
ErrMissingTarHeader = errors.New("tar header is missing")
ErrDrivesUnsupported = errors.New("this system does not support tape drives")
)

View File

@@ -3,7 +3,7 @@ package hardware
import (
"os"
"github.com/pojntfx/stfs/internal/controllers"
"github.com/pojntfx/stfs/internal/mtio"
)
func Eject(
@@ -15,5 +15,5 @@ func Eject(
}
defer f.Close()
return controllers.EjectTape(f)
return mtio.EjectTape(f)
}

View File

@@ -3,7 +3,7 @@ package hardware
import (
"os"
"github.com/pojntfx/stfs/internal/controllers"
"github.com/pojntfx/stfs/internal/mtio"
)
func Tell(
@@ -15,5 +15,5 @@ func Tell(
}
defer f.Close()
return controllers.GetCurrentRecordFromTape(f)
return mtio.GetCurrentRecordFromTape(f)
}

View File

@@ -42,7 +42,7 @@ func Find(
return []*tar.Header{}, err
}
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(dbhdr.Record, dbhdr.Lastknownrecord, dbhdr.Block, dbhdr.Lastknownblock, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(dbhdr.Record, dbhdr.Lastknownrecord, dbhdr.Block, dbhdr.Lastknownblock, hdr)); err != nil {
return []*tar.Header{}, err
}

View File

@@ -37,7 +37,7 @@ func List(
return []*tar.Header{}, err
}
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(dbhdr.Record, dbhdr.Lastknownrecord, dbhdr.Block, dbhdr.Lastknownblock, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(dbhdr.Record, dbhdr.Lastknownrecord, dbhdr.Block, dbhdr.Lastknownblock, hdr)); err != nil {
return []*tar.Header{}, err
}

View File

@@ -8,14 +8,15 @@ import (
"path/filepath"
"strconv"
"github.com/pojntfx/stfs/internal/adapters"
"github.com/pojntfx/stfs/internal/compression"
"github.com/pojntfx/stfs/internal/controllers"
"github.com/pojntfx/stfs/internal/counters"
"github.com/pojntfx/stfs/internal/converters"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/internal/signature"
"github.com/pojntfx/stfs/internal/statext"
"github.com/pojntfx/stfs/internal/suffix"
"github.com/pojntfx/stfs/internal/tape"
"github.com/pojntfx/stfs/pkg/config"
@@ -72,7 +73,7 @@ func Archive(
}
// Seek to the start of the tape
if err := controllers.SeekToRecordOnTape(f, 0); err != nil {
if err := mtio.SeekToRecordOnTape(f, 0); err != nil {
return []*tar.Header{}, err
}
@@ -108,7 +109,7 @@ func Archive(
return err
}
if err := adapters.EnhanceHeader(path, hdr); err != nil {
if err := statext.EnhanceHeader(path, hdr); err != nil {
return err
}
@@ -117,7 +118,7 @@ func Archive(
if info.Mode().IsRegular() {
// Get the compressed size for the header
fileSizeCounter := &counters.CounterWriter{
fileSizeCounter := &ioext.CounterWriter{
Writer: io.Discard,
}
@@ -152,7 +153,7 @@ func Archive(
return err
}
} else {
buf := make([]byte, controllers.BlockSize*recordSize)
buf := make([]byte, mtio.BlockSize*recordSize)
if _, err := io.CopyBuffer(compressor, signer, buf); err != nil {
return err
}
@@ -177,14 +178,14 @@ func Archive(
if hdr.PAXRecords == nil {
hdr.PAXRecords = map[string]string{}
}
hdr.PAXRecords[pax.STFSRecordUncompressedSize] = strconv.Itoa(int(hdr.Size))
hdr.PAXRecords[records.STFSRecordUncompressedSize] = strconv.Itoa(int(hdr.Size))
signature, err := sign()
if err != nil {
return err
}
if signature != "" {
hdr.PAXRecords[pax.STFSRecordSignature] = signature
hdr.PAXRecords[records.STFSRecordSignature] = signature
}
hdr.Size = int64(fileSizeCounter.BytesRead)
@@ -202,7 +203,7 @@ func Archive(
first = false
}
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(-1, -1, -1, -1, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
return err
}
@@ -252,7 +253,7 @@ func Archive(
return err
}
} else {
buf := make([]byte, controllers.BlockSize*recordSize)
buf := make([]byte, mtio.BlockSize*recordSize)
if _, err := io.CopyBuffer(compressor, file, buf); err != nil {
return err
}

View File

@@ -8,8 +8,8 @@ import (
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/persisters"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/internal/signature"
"github.com/pojntfx/stfs/internal/tape"
"github.com/pojntfx/stfs/pkg/config"
@@ -71,8 +71,8 @@ func Delete(
}
hdr.Size = 0 // Don't try to seek after the record
hdr.PAXRecords[pax.STFSRecordVersion] = pax.STFSRecordVersion1
hdr.PAXRecords[pax.STFSRecordAction] = pax.STFSRecordActionDelete
hdr.PAXRecords[records.STFSRecordVersion] = records.STFSRecordVersion1
hdr.PAXRecords[records.STFSRecordAction] = records.STFSRecordActionDelete
if err := signature.SignHeader(hdr, isRegular, pipes.Signature, crypto.Identity); err != nil {
return err
@@ -88,7 +88,7 @@ func Delete(
dirty = true
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(-1, -1, -1, -1, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
return err
}

View File

@@ -9,8 +9,8 @@ import (
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/persisters"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/internal/signature"
"github.com/pojntfx/stfs/internal/tape"
"github.com/pojntfx/stfs/pkg/config"
@@ -74,9 +74,9 @@ func Move(
hdr.Size = 0 // Don't try to seek after the record
hdr.Name = strings.TrimSuffix(to, "/") + strings.TrimPrefix(hdr.Name, strings.TrimSuffix(from, "/"))
hdr.PAXRecords[pax.STFSRecordVersion] = pax.STFSRecordVersion1
hdr.PAXRecords[pax.STFSRecordAction] = pax.STFSRecordActionUpdate
hdr.PAXRecords[pax.STFSRecordReplacesName] = dbhdr.Name
hdr.PAXRecords[records.STFSRecordVersion] = records.STFSRecordVersion1
hdr.PAXRecords[records.STFSRecordAction] = records.STFSRecordActionUpdate
hdr.PAXRecords[records.STFSRecordReplacesName] = dbhdr.Name
if err := signature.SignHeader(hdr, isRegular, pipes.Signature, crypto.Identity); err != nil {
return err
@@ -92,7 +92,7 @@ func Move(
dirty = true
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(-1, -1, -1, -1, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
return err
}

View File

@@ -71,7 +71,7 @@ func Restore(
return err
}
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(dbhdr.Record, dbhdr.Lastknownrecord, dbhdr.Block, dbhdr.Lastknownblock, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(dbhdr.Record, dbhdr.Lastknownrecord, dbhdr.Block, dbhdr.Lastknownblock, hdr)); err != nil {
return err
}

View File

@@ -8,14 +8,15 @@ import (
"path/filepath"
"strconv"
"github.com/pojntfx/stfs/internal/adapters"
"github.com/pojntfx/stfs/internal/compression"
"github.com/pojntfx/stfs/internal/controllers"
"github.com/pojntfx/stfs/internal/counters"
"github.com/pojntfx/stfs/internal/converters"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/internal/signature"
"github.com/pojntfx/stfs/internal/statext"
"github.com/pojntfx/stfs/internal/suffix"
"github.com/pojntfx/stfs/internal/tape"
"github.com/pojntfx/stfs/pkg/config"
@@ -57,7 +58,7 @@ func Update(
return err
}
if err := adapters.EnhanceHeader(path, hdr); err != nil {
if err := statext.EnhanceHeader(path, hdr); err != nil {
return err
}
@@ -66,12 +67,12 @@ func Update(
if hdr.PAXRecords == nil {
hdr.PAXRecords = map[string]string{}
}
hdr.PAXRecords[pax.STFSRecordVersion] = pax.STFSRecordVersion1
hdr.PAXRecords[pax.STFSRecordAction] = pax.STFSRecordActionUpdate
hdr.PAXRecords[records.STFSRecordVersion] = records.STFSRecordVersion1
hdr.PAXRecords[records.STFSRecordAction] = records.STFSRecordActionUpdate
if info.Mode().IsRegular() && overwrite {
// Get the compressed size for the header
fileSizeCounter := &counters.CounterWriter{
fileSizeCounter := &ioext.CounterWriter{
Writer: io.Discard,
}
@@ -106,7 +107,7 @@ func Update(
return err
}
} else {
buf := make([]byte, controllers.BlockSize*recordSize)
buf := make([]byte, mtio.BlockSize*recordSize)
if _, err := io.CopyBuffer(compressor, signer, buf); err != nil {
return err
}
@@ -131,14 +132,14 @@ func Update(
if hdr.PAXRecords == nil {
hdr.PAXRecords = map[string]string{}
}
hdr.PAXRecords[pax.STFSRecordUncompressedSize] = strconv.Itoa(int(hdr.Size))
hdr.PAXRecords[records.STFSRecordUncompressedSize] = strconv.Itoa(int(hdr.Size))
signature, err := sign()
if err != nil {
return err
}
if signature != "" {
hdr.PAXRecords[pax.STFSRecordSignature] = signature
hdr.PAXRecords[records.STFSRecordSignature] = signature
}
hdr.Size = int64(fileSizeCounter.BytesRead)
@@ -157,9 +158,9 @@ func Update(
}
if overwrite {
hdr.PAXRecords[pax.STFSRecordReplacesContent] = pax.STFSRecordReplacesContentTrue
hdr.PAXRecords[records.STFSRecordReplacesContent] = records.STFSRecordReplacesContentTrue
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(-1, -1, -1, -1, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
return err
}
@@ -209,7 +210,7 @@ func Update(
return err
}
} else {
buf := make([]byte, controllers.BlockSize*recordSize)
buf := make([]byte, mtio.BlockSize*recordSize)
if _, err := io.CopyBuffer(compressor, file, buf); err != nil {
return err
}
@@ -233,7 +234,7 @@ func Update(
} else {
hdr.Size = 0 // Don't try to seek after the record
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(-1, -1, -1, -1, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
return err
}

View File

@@ -8,10 +8,11 @@ import (
"path/filepath"
"github.com/pojntfx/stfs/internal/compression"
"github.com/pojntfx/stfs/internal/controllers"
"github.com/pojntfx/stfs/internal/converters"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/internal/signature"
"github.com/pojntfx/stfs/internal/tape"
"github.com/pojntfx/stfs/pkg/config"
@@ -40,20 +41,20 @@ func Fetch(
var tr *tar.Reader
if isRegular {
// Seek to record and block
if _, err := f.Seek(int64((recordSize*controllers.BlockSize*record)+block*controllers.BlockSize), io.SeekStart); err != nil {
if _, err := f.Seek(int64((recordSize*mtio.BlockSize*record)+block*mtio.BlockSize), io.SeekStart); err != nil {
return err
}
tr = tar.NewReader(f)
} else {
// Seek to record
if err := controllers.SeekToRecordOnTape(f, int32(record)); err != nil {
if err := mtio.SeekToRecordOnTape(f, int32(record)); err != nil {
return err
}
// Seek to block
br := bufio.NewReaderSize(f, controllers.BlockSize*recordSize)
if _, err := br.Read(make([]byte, block*controllers.BlockSize)); err != nil {
br := bufio.NewReaderSize(f, mtio.BlockSize*recordSize)
if _, err := br.Read(make([]byte, block*mtio.BlockSize)); err != nil {
return err
}
@@ -78,7 +79,7 @@ func Fetch(
return err
}
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(int64(record), -1, int64(block), -1, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(int64(record), -1, int64(block), -1, hdr)); err != nil {
return err
}
}
@@ -122,7 +123,7 @@ func Fetch(
sig := ""
if hdr.PAXRecords != nil {
if s, ok := hdr.PAXRecords[pax.STFSRecordSignature]; ok {
if s, ok := hdr.PAXRecords[records.STFSRecordSignature]; ok {
sig = s
}
}

View File

@@ -11,13 +11,13 @@ import (
"os"
"strconv"
"github.com/pojntfx/stfs/internal/controllers"
"github.com/pojntfx/stfs/internal/converters"
"github.com/pojntfx/stfs/internal/counters"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/internal/persisters"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/internal/suffix"
"github.com/pojntfx/stfs/internal/tape"
"github.com/pojntfx/stfs/pkg/config"
@@ -70,7 +70,7 @@ func Index(
if isRegular {
// Seek to record and block
if _, err := f.Seek(int64((recordSize*controllers.BlockSize*record)+block*controllers.BlockSize), 0); err != nil {
if _, err := f.Seek(int64((recordSize*mtio.BlockSize*record)+block*mtio.BlockSize), 0); err != nil {
return err
}
@@ -89,7 +89,7 @@ func Index(
return err
}
nextTotalBlocks := math.Ceil(float64((curr)) / float64(controllers.BlockSize))
nextTotalBlocks := math.Ceil(float64((curr)) / float64(mtio.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
@@ -102,7 +102,7 @@ func Index(
}
// Seek to record and block
if _, err := f.Seek(int64((recordSize*controllers.BlockSize*int(record))+int(block)*controllers.BlockSize), io.SeekStart); err != nil {
if _, err := f.Seek(int64((recordSize*mtio.BlockSize*int(record))+int(block)*mtio.BlockSize), io.SeekStart); err != nil {
return err
}
@@ -154,7 +154,7 @@ func Index(
return err
}
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(controllers.BlockSize))
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(mtio.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
@@ -167,21 +167,21 @@ func Index(
}
} else {
// Seek to record
if err := controllers.SeekToRecordOnTape(f, int32(record)); err != nil {
if err := mtio.SeekToRecordOnTape(f, int32(record)); err != nil {
return err
}
// Seek to block
br := bufio.NewReaderSize(f, controllers.BlockSize*recordSize)
if _, err := br.Read(make([]byte, block*controllers.BlockSize)); err != nil {
br := bufio.NewReaderSize(f, mtio.BlockSize*recordSize)
if _, err := br.Read(make([]byte, block*mtio.BlockSize)); err != nil {
return err
}
record := int64(record)
block := int64(block)
curr := int64((recordSize * controllers.BlockSize * int(record)) + (int(block) * controllers.BlockSize))
counter := &counters.CounterReader{Reader: br, BytesRead: int(curr)}
curr := int64((recordSize * mtio.BlockSize * int(record)) + (int(block) * mtio.BlockSize))
counter := &ioext.CounterReader{Reader: br, BytesRead: int(curr)}
i := 0
tr := tar.NewReader(counter)
@@ -189,21 +189,21 @@ func Index(
hdr, err := tr.Next()
if err != nil {
if err == io.EOF {
if err := controllers.GoToNextFileOnTape(f); err != nil {
if err := mtio.GoToNextFileOnTape(f); err != nil {
// EOD
break
}
record, err = controllers.GetCurrentRecordFromTape(f)
record, err = mtio.GetCurrentRecordFromTape(f)
if err != nil {
return err
}
block = 0
br = bufio.NewReaderSize(f, controllers.BlockSize*recordSize)
curr = int64(int64(recordSize) * controllers.BlockSize * record)
counter = &counters.CounterReader{Reader: br, BytesRead: int(curr)}
br = bufio.NewReaderSize(f, mtio.BlockSize*recordSize)
curr = int64(int64(recordSize) * mtio.BlockSize * record)
counter = &ioext.CounterReader{Reader: br, BytesRead: int(curr)}
tr = tar.NewReader(counter)
continue
@@ -232,7 +232,7 @@ func Index(
currAndSize := int64(counter.BytesRead)
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(controllers.BlockSize))
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(mtio.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
@@ -261,7 +261,7 @@ func indexHeader(
}
}
uncompressedSize, ok := hdr.PAXRecords[pax.STFSRecordUncompressedSize]
uncompressedSize, ok := hdr.PAXRecords[records.STFSRecordUncompressedSize]
if ok {
size, err := strconv.Atoi(uncompressedSize)
if err != nil {
@@ -279,24 +279,24 @@ func indexHeader(
hdr.Name = newName
}
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(record, -1, block, -1, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(record, -1, block, -1, hdr)); err != nil {
return err
}
stfsVersion, ok := hdr.PAXRecords[pax.STFSRecordVersion]
stfsVersion, ok := hdr.PAXRecords[records.STFSRecordVersion]
if !ok {
stfsVersion = pax.STFSRecordVersion1
stfsVersion = records.STFSRecordVersion1
}
switch stfsVersion {
case pax.STFSRecordVersion1:
stfsAction, ok := hdr.PAXRecords[pax.STFSRecordAction]
case records.STFSRecordVersion1:
stfsAction, ok := hdr.PAXRecords[records.STFSRecordAction]
if !ok {
stfsAction = pax.STFSRecordActionCreate
stfsAction = records.STFSRecordActionCreate
}
switch stfsAction {
case pax.STFSRecordActionCreate:
case records.STFSRecordActionCreate:
dbhdr, err := converters.TarHeaderToDBHeader(record, record, block, block, hdr)
if err != nil {
return err
@@ -305,20 +305,20 @@ func indexHeader(
if err := metadataPersister.UpsertHeader(context.Background(), dbhdr); err != nil {
return err
}
case pax.STFSRecordActionDelete:
case records.STFSRecordActionDelete:
if _, err := metadataPersister.DeleteHeader(context.Background(), hdr.Name, record, block); err != nil {
return err
}
case pax.STFSRecordActionUpdate:
case records.STFSRecordActionUpdate:
moveAfterEdits := false
oldName := hdr.Name
if _, ok := hdr.PAXRecords[pax.STFSRecordReplacesName]; ok {
if _, ok := hdr.PAXRecords[records.STFSRecordReplacesName]; ok {
moveAfterEdits = true
oldName = hdr.PAXRecords[pax.STFSRecordReplacesName]
oldName = hdr.PAXRecords[records.STFSRecordReplacesName]
}
var newHdr *models.Header
if replacesContent, ok := hdr.PAXRecords[pax.STFSRecordReplacesContent]; ok && replacesContent == pax.STFSRecordReplacesContentTrue {
if replacesContent, ok := hdr.PAXRecords[records.STFSRecordReplacesContent]; ok && replacesContent == records.STFSRecordReplacesContentTrue {
// Content & metadata update; use the new record & block
h, err := converters.TarHeaderToDBHeader(record, record, block, block, hdr)
if err != nil {
@@ -360,10 +360,10 @@ func indexHeader(
}
default:
return pax.ErrUnsupportedAction
return records.ErrUnsupportedAction
}
default:
return pax.ErrUnsupportedVersion
return records.ErrUnsupportedVersion
}
return nil

View File

@@ -7,10 +7,11 @@ import (
"io/ioutil"
"math"
"github.com/pojntfx/stfs/internal/controllers"
"github.com/pojntfx/stfs/internal/counters"
"github.com/pojntfx/stfs/internal/converters"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/internal/signature"
"github.com/pojntfx/stfs/internal/tape"
"github.com/pojntfx/stfs/pkg/config"
@@ -36,7 +37,7 @@ func Query(
if isRegular {
// Seek to record and block
if _, err := f.Seek(int64((recordSize*controllers.BlockSize*record)+block*controllers.BlockSize), 0); err != nil {
if _, err := f.Seek(int64((recordSize*mtio.BlockSize*record)+block*mtio.BlockSize), 0); err != nil {
return []*tar.Header{}, err
}
@@ -54,7 +55,7 @@ func Query(
return []*tar.Header{}, err
}
nextTotalBlocks := math.Ceil(float64((curr)) / float64(controllers.BlockSize))
nextTotalBlocks := math.Ceil(float64((curr)) / float64(mtio.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
@@ -67,7 +68,7 @@ func Query(
}
// Seek to record and block
if _, err := f.Seek(int64((recordSize*controllers.BlockSize*int(record))+int(block)*controllers.BlockSize), io.SeekStart); err != nil {
if _, err := f.Seek(int64((recordSize*mtio.BlockSize*int(record))+int(block)*mtio.BlockSize), io.SeekStart); err != nil {
return []*tar.Header{}, err
}
@@ -108,7 +109,7 @@ func Query(
}
}
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(record, -1, block, -1, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(record, -1, block, -1, hdr)); err != nil {
return []*tar.Header{}, err
}
@@ -128,7 +129,7 @@ func Query(
return []*tar.Header{}, err
}
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(controllers.BlockSize))
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(mtio.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
@@ -139,42 +140,42 @@ func Query(
}
} else {
// Seek to record
if err := controllers.SeekToRecordOnTape(f, int32(record)); err != nil {
if err := mtio.SeekToRecordOnTape(f, int32(record)); err != nil {
return []*tar.Header{}, err
}
// Seek to block
br := bufio.NewReaderSize(f, controllers.BlockSize*recordSize)
if _, err := br.Read(make([]byte, block*controllers.BlockSize)); err != nil {
br := bufio.NewReaderSize(f, mtio.BlockSize*recordSize)
if _, err := br.Read(make([]byte, block*mtio.BlockSize)); err != nil {
return []*tar.Header{}, err
}
record := int64(record)
block := int64(block)
curr := int64((recordSize * controllers.BlockSize * int(record)) + (int(block) * controllers.BlockSize))
counter := &counters.CounterReader{Reader: br, BytesRead: int(curr)}
curr := int64((recordSize * mtio.BlockSize * int(record)) + (int(block) * mtio.BlockSize))
counter := &ioext.CounterReader{Reader: br, BytesRead: int(curr)}
tr := tar.NewReader(counter)
for {
hdr, err := tr.Next()
if err != nil {
if err == io.EOF {
if err := controllers.GoToNextFileOnTape(f); err != nil {
if err := mtio.GoToNextFileOnTape(f); err != nil {
// EOD
break
}
record, err = controllers.GetCurrentRecordFromTape(f)
record, err = mtio.GetCurrentRecordFromTape(f)
if err != nil {
return []*tar.Header{}, err
}
block = 0
br = bufio.NewReaderSize(f, controllers.BlockSize*recordSize)
curr := int64(int64(recordSize) * controllers.BlockSize * record)
counter := &counters.CounterReader{Reader: br, BytesRead: int(curr)}
br = bufio.NewReaderSize(f, mtio.BlockSize*recordSize)
curr := int64(int64(recordSize) * mtio.BlockSize * record)
counter := &ioext.CounterReader{Reader: br, BytesRead: int(curr)}
tr = tar.NewReader(counter)
continue
@@ -197,7 +198,7 @@ func Query(
}
}
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(record, -1, block, -1, hdr)); err != nil {
if err := formatting.PrintCSV(converters.TARHeaderToCSV(record, -1, block, -1, hdr)); err != nil {
return []*tar.Header{}, err
}
@@ -209,7 +210,7 @@ func Query(
currAndSize := int64(counter.BytesRead)
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(controllers.BlockSize))
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(mtio.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))