refactor: Decompose index func

Author: Felicitas Pojtinger
Date:   2021-12-06 21:27:59 +01:00
Parent: 4f5d298c8e
Commit: 25fe4ddb04
14 changed files with 579 additions and 479 deletions

View File

@@ -33,6 +33,8 @@ import (
	"github.com/pojntfx/stfs/internal/noop"
	"github.com/pojntfx/stfs/internal/pax"
	"github.com/pojntfx/stfs/internal/persisters"
+	"github.com/pojntfx/stfs/pkg/config"
+	"github.com/pojntfx/stfs/pkg/recovery"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/volatiletech/sqlboiler/v4/boil"
@@ -149,15 +151,28 @@ var archiveCmd = &cobra.Command{
	return err
}

-return index(
-	viper.GetString(driveFlag),
-	viper.GetString(metadataFlag),
+return recovery.Index(
+	config.StateConfig{
+		Drive:    viper.GetString(driveFlag),
+		Metadata: viper.GetString(metadataFlag),
+	},
+	config.PipeConfig{
+		Compression: viper.GetString(compressionFlag),
+		Encryption:  viper.GetString(encryptionFlag),
+		Signature:   viper.GetString(signatureFlag),
+	},
+	config.CryptoConfig{
+		Recipient: recipient,
+		Identity:  identity,
+		Password:  viper.GetString(passwordFlag),
+	},
	viper.GetInt(recordSizeFlag),
	int(lastIndexedRecord),
	int(lastIndexedBlock),
	viper.GetBool(overwriteFlag),
-	viper.GetString(compressionFlag),
-	viper.GetString(encryptionFlag),
+	0,
	func(hdr *tar.Header, i int) error {
		if len(hdrs) <= i {
			return errMissingTarHeader
@@ -167,7 +182,6 @@ var archiveCmd = &cobra.Command{
		return nil
	},
-	0,
	func(hdr *tar.Header, isRegular bool) error {
		return nil // We sign above, no need to verify
	},
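The three structs that recovery.Index now takes are defined in pkg/config, which this commit does not touch. A rough sketch of what they presumably look like, inferred from the fields the call sites in this diff set; the field names come from the diff, while the exact types (strings, interface{} for the key material) are assumptions:

package config

// Sketch only; the actual definitions in pkg/config may differ.

// StateConfig describes where the data and its index live.
type StateConfig struct {
	Drive    string // tape drive or tar file to operate on
	Metadata string // path to the metadata (SQLite) database
}

// PipeConfig selects the formats applied to the data stream.
type PipeConfig struct {
	Compression string // e.g. CompressionFormatGZipKey or NoneKey
	Encryption  string // e.g. EncryptionFormatAgeKey or NoneKey
	Signature   string // e.g. SignatureFormatMinisignKey or NoneKey
}

// CryptoConfig carries the key material for the formats above.
type CryptoConfig struct {
	Recipient interface{} // public key (encryption/verification side)
	Identity  interface{} // private key (decryption/signing side)
	Password  string      // passphrase protecting the identity
}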

View File

@@ -26,6 +26,7 @@ import (
	"github.com/pojntfx/stfs/internal/controllers"
	"github.com/pojntfx/stfs/internal/formatting"
	"github.com/pojntfx/stfs/internal/pax"
+	"github.com/pojntfx/stfs/internal/tape"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/volatiletech/sqlboiler/v4/boil"
@@ -109,7 +110,7 @@ var recoveryFetchCmd = &cobra.Command{
}

func restoreFromRecordAndBlock(
-	tape string,
+	drive string,
	recordSize int,
	record int,
	block int,
@@ -122,7 +123,7 @@ func restoreFromRecordAndBlock(
	signatureFormat string,
	recipient interface{},
) error {
-	f, isRegular, err := openTapeReadOnly(tape)
+	f, isRegular, err := tape.OpenTapeReadOnly(drive)
	if err != nil {
		return err
	}

View File

@@ -2,22 +2,9 @@ package cmd

import (
	"archive/tar"
-	"bufio"
-	"context"
-	"io"
-	"io/ioutil"
-	"math"
-	"os"
-	"strconv"
-	"strings"

-	"github.com/pojntfx/stfs/internal/controllers"
-	"github.com/pojntfx/stfs/internal/converters"
-	"github.com/pojntfx/stfs/internal/counters"
-	models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
-	"github.com/pojntfx/stfs/internal/formatting"
-	"github.com/pojntfx/stfs/internal/pax"
-	"github.com/pojntfx/stfs/internal/persisters"
+	"github.com/pojntfx/stfs/pkg/config"
+	"github.com/pojntfx/stfs/pkg/recovery"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/volatiletech/sqlboiler/v4/boil"
@@ -66,19 +53,31 @@ var recoveryIndexCmd = &cobra.Command{
	return err
}

-return index(
-	viper.GetString(driveFlag),
-	viper.GetString(metadataFlag),
+return recovery.Index(
+	config.StateConfig{
+		Drive:    viper.GetString(driveFlag),
+		Metadata: viper.GetString(metadataFlag),
+	},
+	config.PipeConfig{
+		Compression: viper.GetString(compressionFlag),
+		Encryption:  viper.GetString(encryptionFlag),
+		Signature:   viper.GetString(signatureFlag),
+	},
+	config.CryptoConfig{
+		Recipient: recipient,
+		Identity:  identity,
+		Password:  viper.GetString(passwordFlag),
+	},
	viper.GetInt(recordSizeFlag),
	viper.GetInt(recordFlag),
	viper.GetInt(blockFlag),
	viper.GetBool(overwriteFlag),
-	viper.GetString(compressionFlag),
-	viper.GetString(encryptionFlag),
+	0,
	func(hdr *tar.Header, i int) error {
		return decryptHeader(hdr, viper.GetString(encryptionFlag), identity)
	},
-	0,
	func(hdr *tar.Header, isRegular bool) error {
		return verifyHeader(hdr, isRegular, viper.GetString(signatureFlag), recipient)
	},
@@ -86,408 +85,6 @@ var recoveryIndexCmd = &cobra.Command{
	},
}
func index(
tape string,
metadata string,
recordSize int,
record int,
block int,
overwrite bool,
compressionFormat string,
encryptionFormat string,
decryptHeader func(
hdr *tar.Header,
i int,
) error,
offset int,
verifyHeader func(
hdr *tar.Header,
isRegular bool,
) error,
) error {
if overwrite {
f, err := os.OpenFile(metadata, os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return err
}
if err := f.Truncate(0); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
}
metadataPersister := persisters.NewMetadataPersister(metadata)
if err := metadataPersister.Open(); err != nil {
return err
}
f, isRegular, err := openTapeReadOnly(tape)
if err != nil {
return err
}
defer f.Close()
if isRegular {
// Seek to record and block
if _, err := f.Seek(int64((recordSize*controllers.BlockSize*record)+block*controllers.BlockSize), 0); err != nil {
return err
}
tr := tar.NewReader(f)
record := int64(record)
block := int64(block)
i := 0
for {
hdr, err := tr.Next()
if err != nil {
for {
curr, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
nextTotalBlocks := math.Ceil(float64((curr)) / float64(controllers.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
if block < 0 {
record--
block = int64(recordSize) - 1
} else if block >= int64(recordSize) {
record++
block = 0
}
// Seek to record and block
if _, err := f.Seek(int64((recordSize*controllers.BlockSize*int(record))+int(block)*controllers.BlockSize), io.SeekStart); err != nil {
return err
}
tr = tar.NewReader(f)
hdr, err = tr.Next()
if err != nil {
if err == io.EOF {
// EOF
break
}
continue
}
break
}
}
if hdr == nil {
// EOF
break
}
if i >= offset {
if err := decryptHeader(hdr, i-offset); err != nil {
return err
}
if err := verifyHeader(hdr, isRegular); err != nil {
return err
}
if err := indexHeader(record, block, hdr, metadataPersister, compressionFormat, encryptionFormat); err != nil {
return nil
}
}
curr, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if _, err := io.Copy(ioutil.Discard, tr); err != nil {
return err
}
currAndSize, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(controllers.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
if block > int64(recordSize) {
record++
block = 0
}
i++
}
} else {
// Seek to record
if err := controllers.SeekToRecordOnTape(f, int32(record)); err != nil {
return err
}
// Seek to block
br := bufio.NewReaderSize(f, controllers.BlockSize*recordSize)
if _, err := br.Read(make([]byte, block*controllers.BlockSize)); err != nil {
return err
}
record := int64(record)
block := int64(block)
curr := int64((recordSize * controllers.BlockSize * int(record)) + (int(block) * controllers.BlockSize))
counter := &counters.CounterReader{Reader: br, BytesRead: int(curr)}
i := 0
tr := tar.NewReader(counter)
for {
hdr, err := tr.Next()
if err != nil {
if err == io.EOF {
if err := controllers.GoToNextFileOnTape(f); err != nil {
// EOD
break
}
record, err = controllers.GetCurrentRecordFromTape(f)
if err != nil {
return err
}
block = 0
br = bufio.NewReaderSize(f, controllers.BlockSize*recordSize)
curr = int64(int64(recordSize) * controllers.BlockSize * record)
counter = &counters.CounterReader{Reader: br, BytesRead: int(curr)}
tr = tar.NewReader(counter)
continue
} else {
return err
}
}
if i >= offset {
if err := decryptHeader(hdr, i-offset); err != nil {
return err
}
if err := verifyHeader(hdr, isRegular); err != nil {
return err
}
if err := indexHeader(record, block, hdr, metadataPersister, compressionFormat, encryptionFormat); err != nil {
return nil
}
}
curr = int64(counter.BytesRead)
if _, err := io.Copy(ioutil.Discard, tr); err != nil {
return err
}
currAndSize := int64(counter.BytesRead)
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(controllers.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
if block > int64(recordSize) {
record++
block = 0
}
i++
}
}
return nil
}
func indexHeader(
record, block int64,
hdr *tar.Header,
metadataPersister *persisters.MetadataPersister,
compressionFormat string,
encryptionFormat string,
) error {
if record == 0 && block == 0 {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
return err
}
}
uncompressedSize, ok := hdr.PAXRecords[pax.STFSRecordUncompressedSize]
if ok {
size, err := strconv.Atoi(uncompressedSize)
if err != nil {
return err
}
hdr.Size = int64(size)
}
if hdr.FileInfo().Mode().IsRegular() {
newName, err := removeSuffix(hdr.Name, compressionFormat, encryptionFormat)
if err != nil {
return err
}
hdr.Name = newName
}
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(record, block, hdr)); err != nil {
return err
}
stfsVersion, ok := hdr.PAXRecords[pax.STFSRecordVersion]
if !ok {
stfsVersion = pax.STFSRecordVersion1
}
switch stfsVersion {
case pax.STFSRecordVersion1:
stfsAction, ok := hdr.PAXRecords[pax.STFSRecordAction]
if !ok {
stfsAction = pax.STFSRecordActionCreate
}
switch stfsAction {
case pax.STFSRecordActionCreate:
dbhdr, err := converters.TarHeaderToDBHeader(record, block, hdr)
if err != nil {
return err
}
if err := metadataPersister.UpsertHeader(context.Background(), dbhdr); err != nil {
return err
}
case pax.STFSRecordActionDelete:
if _, err := metadataPersister.DeleteHeader(context.Background(), hdr.Name, true); err != nil {
return err
}
case pax.STFSRecordActionUpdate:
moveAfterEdits := false
oldName := hdr.Name
if _, ok := hdr.PAXRecords[pax.STFSRecordReplacesName]; ok {
moveAfterEdits = true
oldName = hdr.PAXRecords[pax.STFSRecordReplacesName]
}
var newHdr *models.Header
if replacesContent, ok := hdr.PAXRecords[pax.STFSRecordReplacesContent]; ok && replacesContent == pax.STFSRecordReplacesContentTrue {
// Content & metadata update; use the new record & block
h, err := converters.TarHeaderToDBHeader(record, block, hdr)
if err != nil {
return err
}
newHdr = h
} else {
// Metadata-only update; use the old record & block
oldHdr, err := metadataPersister.GetHeader(context.Background(), oldName)
if err != nil {
return err
}
h, err := converters.TarHeaderToDBHeader(oldHdr.Record, oldHdr.Block, hdr)
if err != nil {
return err
}
newHdr = h
}
if err := metadataPersister.UpdateHeaderMetadata(context.Background(), newHdr); err != nil {
return err
}
if moveAfterEdits {
// Move header
if err := metadataPersister.MoveHeader(context.Background(), oldName, hdr.Name); err != nil {
return err
}
}
default:
return pax.ErrUnsupportedAction
}
default:
return pax.ErrUnsupportedVersion
}
return nil
}
func removeSuffix(name string, compressionFormat string, encryptionFormat string) (string, error) {
switch encryptionFormat {
case encryptionFormatAgeKey:
name = strings.TrimSuffix(name, encryptionFormatAgeSuffix)
case encryptionFormatPGPKey:
name = strings.TrimSuffix(name, encryptionFormatPGPSuffix)
case noneKey:
default:
return "", errUnsupportedEncryptionFormat
}
switch compressionFormat {
case compressionFormatGZipKey:
fallthrough
case compressionFormatParallelGZipKey:
name = strings.TrimSuffix(name, compressionFormatGZipSuffix)
case compressionFormatLZ4Key:
name = strings.TrimSuffix(name, compressionFormatLZ4Suffix)
case compressionFormatZStandardKey:
name = strings.TrimSuffix(name, compressionFormatZStandardSuffix)
case compressionFormatBrotliKey:
name = strings.TrimSuffix(name, compressionFormatBrotliSuffix)
case compressionFormatBzip2Key:
fallthrough
case compressionFormatBzip2ParallelKey:
name = strings.TrimSuffix(name, compressionFormatBzip2Suffix)
case noneKey:
default:
return "", errUnsupportedCompressionFormat
}
return name, nil
}
func openTapeReadOnly(tape string) (f *os.File, isRegular bool, err error) {
fileDescription, err := os.Stat(tape)
if err != nil {
return nil, false, err
}
isRegular = fileDescription.Mode().IsRegular()
if isRegular {
f, err = os.Open(tape)
if err != nil {
return f, isRegular, err
}
return f, isRegular, nil
}
f, err = os.OpenFile(tape, os.O_RDONLY, os.ModeCharDevice)
if err != nil {
return f, isRegular, err
}
return f, isRegular, nil
}
func init() {
	recoveryIndexCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-bit blocks per record")
	recoveryIndexCmd.PersistentFlags().IntP(recordFlag, "k", 0, "Record to seek too before counting")

View File

@@ -10,6 +10,7 @@ import (
	"github.com/pojntfx/stfs/internal/controllers"
	"github.com/pojntfx/stfs/internal/counters"
	"github.com/pojntfx/stfs/internal/formatting"
+	"github.com/pojntfx/stfs/internal/tape"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/volatiletech/sqlboiler/v4/boil"
@@ -72,7 +73,7 @@ var recoveryQueryCmd = &cobra.Command{
}

func query(
-	tape string,
+	drive string,
	record int,
	block int,
	recordSize int,
@@ -81,7 +82,7 @@ func query(
	signatureFormat string,
	recipient interface{},
) error {
-	f, isRegular, err := openTapeReadOnly(tape)
+	f, isRegular, err := tape.OpenTapeReadOnly(drive)
	if err != nil {
		return err
	}

View File

@@ -16,6 +16,8 @@ import (
	"github.com/pojntfx/stfs/internal/formatting"
	"github.com/pojntfx/stfs/internal/pax"
	"github.com/pojntfx/stfs/internal/persisters"
+	"github.com/pojntfx/stfs/pkg/config"
+	"github.com/pojntfx/stfs/pkg/recovery"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/volatiletech/sqlboiler/v4/boil"
@@ -95,15 +97,28 @@ var updateCmd = &cobra.Command{
	return err
}

-return index(
-	viper.GetString(driveFlag),
-	viper.GetString(metadataFlag),
+return recovery.Index(
+	config.StateConfig{
+		Drive:    viper.GetString(driveFlag),
+		Metadata: viper.GetString(metadataFlag),
+	},
+	config.PipeConfig{
+		Compression: viper.GetString(compressionFlag),
+		Encryption:  viper.GetString(encryptionFlag),
+		Signature:   viper.GetString(signatureFlag),
+	},
+	config.CryptoConfig{
+		Recipient: recipient,
+		Identity:  identity,
+		Password:  viper.GetString(passwordFlag),
+	},
	viper.GetInt(recordSizeFlag),
	int(lastIndexedRecord),
	int(lastIndexedBlock),
	false,
-	viper.GetString(compressionFlag),
-	viper.GetString(encryptionFlag),
+	1,
	func(hdr *tar.Header, i int) error {
		if len(hdrs) <= i {
			return errMissingTarHeader
@@ -113,7 +128,6 @@ var updateCmd = &cobra.Command{
		return nil
	},
-	1,
	func(hdr *tar.Header, isRegular bool) error {
		return nil // We sign above, no need to verify
	},

internal/suffix/config.go (new file, 12 lines added)

@@ -0,0 +1,12 @@
package suffix
const (
CompressionFormatGZipSuffix = ".gz"
CompressionFormatLZ4Suffix = ".lz4"
CompressionFormatZStandardSuffix = ".zst"
CompressionFormatBrotliSuffix = ".br"
CompressionFormatBzip2Suffix = ".bz2"
EncryptionFormatAgeSuffix = ".age"
EncryptionFormatPGPSuffix = ".pgp"
)

internal/suffix/remove.go (new file, 41 lines added)

@@ -0,0 +1,41 @@
package suffix
import (
"strings"
"github.com/pojntfx/stfs/pkg/config"
)
func RemoveSuffix(name string, compressionFormat string, encryptionFormat string) (string, error) {
switch encryptionFormat {
case config.EncryptionFormatAgeKey:
name = strings.TrimSuffix(name, EncryptionFormatAgeSuffix)
case config.EncryptionFormatPGPKey:
name = strings.TrimSuffix(name, EncryptionFormatPGPSuffix)
case config.NoneKey:
default:
return "", config.ErrUnsupportedEncryptionFormat
}
switch compressionFormat {
case config.CompressionFormatGZipKey:
fallthrough
case config.CompressionFormatParallelGZipKey:
name = strings.TrimSuffix(name, CompressionFormatGZipSuffix)
case config.CompressionFormatLZ4Key:
name = strings.TrimSuffix(name, CompressionFormatLZ4Suffix)
case config.CompressionFormatZStandardKey:
name = strings.TrimSuffix(name, CompressionFormatZStandardSuffix)
case config.CompressionFormatBrotliKey:
name = strings.TrimSuffix(name, CompressionFormatBrotliSuffix)
case config.CompressionFormatBzip2Key:
fallthrough
case config.CompressionFormatBzip2ParallelKey:
name = strings.TrimSuffix(name, CompressionFormatBzip2Suffix)
case config.NoneKey:
default:
return "", config.ErrUnsupportedCompressionFormat
}
return name, nil
}
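For illustration, this is how RemoveSuffix peels the layered extensions off a member name. The file name is hypothetical, the keys come from pkg/config/constants.go, and the internal/suffix package is importable only from within the stfs module:

package main

import (
	"fmt"

	"github.com/pojntfx/stfs/internal/suffix"
	"github.com/pojntfx/stfs/pkg/config"
)

func main() {
	// The encryption suffix (".age") is trimmed first, then the
	// compression suffix (".gz").
	name, err := suffix.RemoveSuffix(
		"backup/notes.txt.gz.age",
		config.CompressionFormatGZipKey,
		config.EncryptionFormatAgeKey,
	)
	if err != nil {
		panic(err)
	}

	fmt.Println(name) // backup/notes.txt
}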

internal/tape/read.go (new file, 27 lines added)

@@ -0,0 +1,27 @@
package tape
import "os"
func OpenTapeReadOnly(tape string) (f *os.File, isRegular bool, err error) {
fileDescription, err := os.Stat(tape)
if err != nil {
return nil, false, err
}
isRegular = fileDescription.Mode().IsRegular()
if isRegular {
f, err = os.Open(tape)
if err != nil {
return f, isRegular, err
}
return f, isRegular, nil
}
f, err = os.OpenFile(tape, os.O_RDONLY, os.ModeCharDevice)
if err != nil {
return f, isRegular, err
}
return f, isRegular, nil
}
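OpenTapeReadOnly reports whether the path is a regular file so callers can choose between plain byte seeking (tar files) and the record-oriented tape controls in internal/controllers, which is how recovery.Index branches. A minimal sketch of a caller; the device path is only an example, and the internal/tape package is importable only from within the stfs module:

package main

import (
	"fmt"

	"github.com/pojntfx/stfs/internal/tape"
)

func main() {
	f, isRegular, err := tape.OpenTapeReadOnly("/dev/nst0")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if isRegular {
		fmt.Println("regular file: position with f.Seek, as recovery.Index does")
	} else {
		fmt.Println("character device: position via internal/controllers (e.g. SeekToRecordOnTape)")
	}
}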

pkg/config/constants.go (new file, 19 lines added)

@@ -0,0 +1,19 @@
package config
const (
NoneKey = "none"
CompressionFormatGZipKey = "gzip"
CompressionFormatParallelGZipKey = "parallelgzip"
CompressionFormatLZ4Key = "lz4"
CompressionFormatZStandardKey = "zstandard"
CompressionFormatBrotliKey = "brotli"
CompressionFormatBzip2Key = "bzip2"
CompressionFormatBzip2ParallelKey = "parallelbzip2"
EncryptionFormatAgeKey = "age"
EncryptionFormatPGPKey = "pgp"
SignatureFormatMinisignKey = "minisign"
SignatureFormatPGPKey = "pgp"
)

pkg/config/error.go (new file, 8 lines added)

@@ -0,0 +1,8 @@
package config
import "errors"
var (
ErrUnsupportedEncryptionFormat = errors.New("unsupported encryption format")
ErrUnsupportedCompressionFormat = errors.New("unsupported compression format")
)
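Because these are plain sentinel errors, callers can detect them with errors.Is. A small sketch, again assuming use from within the stfs module; the "mystery" format is deliberately invalid:

package main

import (
	"errors"
	"fmt"

	"github.com/pojntfx/stfs/internal/suffix"
	"github.com/pojntfx/stfs/pkg/config"
)

func main() {
	// "mystery" is not a known encryption key, so RemoveSuffix rejects it
	_, err := suffix.RemoveSuffix("file.bin", config.NoneKey, "mystery")

	fmt.Println(errors.Is(err, config.ErrUnsupportedEncryptionFormat)) // true
}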

pkg/recovery/fetch.go (new file, 19 lines added)

@@ -0,0 +1,19 @@
package recovery
import (
"github.com/pojntfx/stfs/pkg/config"
)
func Fetch(
state config.StateConfig,
pipes config.PipeConfig,
crypto config.CryptoConfig,
recordSize int,
record int,
block int,
to string,
preview string,
) error {
return nil
}

pkg/recovery/index.go (new file, 368 lines added)

@@ -0,0 +1,368 @@
package recovery
import (
"archive/tar"
"bufio"
"context"
"io"
"io/ioutil"
"math"
"os"
"strconv"
"github.com/pojntfx/stfs/internal/controllers"
"github.com/pojntfx/stfs/internal/converters"
"github.com/pojntfx/stfs/internal/counters"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/pax"
"github.com/pojntfx/stfs/internal/persisters"
"github.com/pojntfx/stfs/internal/suffix"
"github.com/pojntfx/stfs/internal/tape"
"github.com/pojntfx/stfs/pkg/config"
)
func Index(
state config.StateConfig,
pipes config.PipeConfig,
crypto config.CryptoConfig,
recordSize int,
record int,
block int,
overwrite bool,
offset int,
decryptHeader func(
hdr *tar.Header,
i int,
) error,
verifyHeader func(
hdr *tar.Header,
isRegular bool,
) error,
) error {
if overwrite {
f, err := os.OpenFile(state.Metadata, os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return err
}
if err := f.Truncate(0); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
}
metadataPersister := persisters.NewMetadataPersister(state.Metadata)
if err := metadataPersister.Open(); err != nil {
return err
}
f, isRegular, err := tape.OpenTapeReadOnly(state.Drive)
if err != nil {
return err
}
defer f.Close()
if isRegular {
// Seek to record and block
if _, err := f.Seek(int64((recordSize*controllers.BlockSize*record)+block*controllers.BlockSize), 0); err != nil {
return err
}
tr := tar.NewReader(f)
record := int64(record)
block := int64(block)
i := 0
for {
hdr, err := tr.Next()
if err != nil {
for {
curr, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
nextTotalBlocks := math.Ceil(float64((curr)) / float64(controllers.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
if block < 0 {
record--
block = int64(recordSize) - 1
} else if block >= int64(recordSize) {
record++
block = 0
}
// Seek to record and block
if _, err := f.Seek(int64((recordSize*controllers.BlockSize*int(record))+int(block)*controllers.BlockSize), io.SeekStart); err != nil {
return err
}
tr = tar.NewReader(f)
hdr, err = tr.Next()
if err != nil {
if err == io.EOF {
// EOF
break
}
continue
}
break
}
}
if hdr == nil {
// EOF
break
}
if i >= offset {
if err := decryptHeader(hdr, i-offset); err != nil {
return err
}
if err := verifyHeader(hdr, isRegular); err != nil {
return err
}
if err := indexHeader(record, block, hdr, metadataPersister, pipes.Compression, pipes.Encryption); err != nil {
return nil
}
}
curr, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if _, err := io.Copy(ioutil.Discard, tr); err != nil {
return err
}
currAndSize, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(controllers.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
if block > int64(recordSize) {
record++
block = 0
}
i++
}
} else {
// Seek to record
if err := controllers.SeekToRecordOnTape(f, int32(record)); err != nil {
return err
}
// Seek to block
br := bufio.NewReaderSize(f, controllers.BlockSize*recordSize)
if _, err := br.Read(make([]byte, block*controllers.BlockSize)); err != nil {
return err
}
record := int64(record)
block := int64(block)
curr := int64((recordSize * controllers.BlockSize * int(record)) + (int(block) * controllers.BlockSize))
counter := &counters.CounterReader{Reader: br, BytesRead: int(curr)}
i := 0
tr := tar.NewReader(counter)
for {
hdr, err := tr.Next()
if err != nil {
if err == io.EOF {
if err := controllers.GoToNextFileOnTape(f); err != nil {
// EOD
break
}
record, err = controllers.GetCurrentRecordFromTape(f)
if err != nil {
return err
}
block = 0
br = bufio.NewReaderSize(f, controllers.BlockSize*recordSize)
curr = int64(int64(recordSize) * controllers.BlockSize * record)
counter = &counters.CounterReader{Reader: br, BytesRead: int(curr)}
tr = tar.NewReader(counter)
continue
} else {
return err
}
}
if i >= offset {
if err := decryptHeader(hdr, i-offset); err != nil {
return err
}
if err := verifyHeader(hdr, isRegular); err != nil {
return err
}
if err := indexHeader(record, block, hdr, metadataPersister, pipes.Compression, pipes.Encryption); err != nil {
return nil
}
}
curr = int64(counter.BytesRead)
if _, err := io.Copy(ioutil.Discard, tr); err != nil {
return err
}
currAndSize := int64(counter.BytesRead)
nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(controllers.BlockSize))
record = int64(nextTotalBlocks) / int64(recordSize)
block = int64(nextTotalBlocks) - (record * int64(recordSize))
if block > int64(recordSize) {
record++
block = 0
}
i++
}
}
return nil
}
func indexHeader(
record, block int64,
hdr *tar.Header,
metadataPersister *persisters.MetadataPersister,
compressionFormat string,
encryptionFormat string,
) error {
if record == 0 && block == 0 {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
return err
}
}
uncompressedSize, ok := hdr.PAXRecords[pax.STFSRecordUncompressedSize]
if ok {
size, err := strconv.Atoi(uncompressedSize)
if err != nil {
return err
}
hdr.Size = int64(size)
}
if hdr.FileInfo().Mode().IsRegular() {
newName, err := suffix.RemoveSuffix(hdr.Name, compressionFormat, encryptionFormat)
if err != nil {
return err
}
hdr.Name = newName
}
if err := formatting.PrintCSV(formatting.GetTARHeaderAsCSV(record, block, hdr)); err != nil {
return err
}
stfsVersion, ok := hdr.PAXRecords[pax.STFSRecordVersion]
if !ok {
stfsVersion = pax.STFSRecordVersion1
}
switch stfsVersion {
case pax.STFSRecordVersion1:
stfsAction, ok := hdr.PAXRecords[pax.STFSRecordAction]
if !ok {
stfsAction = pax.STFSRecordActionCreate
}
switch stfsAction {
case pax.STFSRecordActionCreate:
dbhdr, err := converters.TarHeaderToDBHeader(record, block, hdr)
if err != nil {
return err
}
if err := metadataPersister.UpsertHeader(context.Background(), dbhdr); err != nil {
return err
}
case pax.STFSRecordActionDelete:
if _, err := metadataPersister.DeleteHeader(context.Background(), hdr.Name, true); err != nil {
return err
}
case pax.STFSRecordActionUpdate:
moveAfterEdits := false
oldName := hdr.Name
if _, ok := hdr.PAXRecords[pax.STFSRecordReplacesName]; ok {
moveAfterEdits = true
oldName = hdr.PAXRecords[pax.STFSRecordReplacesName]
}
var newHdr *models.Header
if replacesContent, ok := hdr.PAXRecords[pax.STFSRecordReplacesContent]; ok && replacesContent == pax.STFSRecordReplacesContentTrue {
// Content & metadata update; use the new record & block
h, err := converters.TarHeaderToDBHeader(record, block, hdr)
if err != nil {
return err
}
newHdr = h
} else {
// Metadata-only update; use the old record & block
oldHdr, err := metadataPersister.GetHeader(context.Background(), oldName)
if err != nil {
return err
}
h, err := converters.TarHeaderToDBHeader(oldHdr.Record, oldHdr.Block, hdr)
if err != nil {
return err
}
newHdr = h
}
if err := metadataPersister.UpdateHeaderMetadata(context.Background(), newHdr); err != nil {
return err
}
if moveAfterEdits {
// Move header
if err := metadataPersister.MoveHeader(context.Background(), oldName, hdr.Name); err != nil {
return err
}
}
default:
return pax.ErrUnsupportedAction
}
default:
return pax.ErrUnsupportedVersion
}
return nil
}
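Index repeatedly converts the reader's absolute byte offset back into a (record, block) pair so each header can be indexed with its on-tape location. A worked example of that arithmetic, assuming controllers.BlockSize is the standard 512-byte tar block and the default record size of 20 blocks:

package main

import (
	"fmt"
	"math"
)

func main() {
	const blockSize = 512 // assumed value of controllers.BlockSize
	const recordSize = 20 // blocks per record (the recordsize flag default)

	curr := int64(15872) // example absolute offset after reading an entry

	// Same arithmetic as recovery.Index:
	nextTotalBlocks := math.Ceil(float64(curr) / float64(blockSize)) // 31
	record := int64(nextTotalBlocks) / int64(recordSize)             // 1
	block := int64(nextTotalBlocks) - record*int64(recordSize)       // 11

	fmt.Println(record, block) // 1 11
}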

pkg/recovery/query.go (new file, 19 lines added)

@@ -0,0 +1,19 @@
package recovery
import (
"archive/tar"
"github.com/pojntfx/stfs/pkg/config"
)
func Query(
state config.StateConfig,
pipes config.PipeConfig,
crypto config.CryptoConfig,
recordSize int,
record int,
block int,
) ([]*tar.Header, error) {
return nil, nil
}

(deleted file, 40 lines removed)

@@ -1,40 +0,0 @@
package recovery
import (
"archive/tar"
"github.com/pojntfx/stfs/pkg/config"
)
func Fetch(
state config.StateConfig,
pipes config.PipeConfig,
crypto config.CryptoConfig,
recordSize int,
record int,
block int,
to string,
preview string,
) error
func Index(
state config.StateConfig,
pipes config.PipeConfig,
crypto config.CryptoConfig,
recordSize int,
record int,
block int,
overwrite bool,
) error
func Query(
state config.StateConfig,
pipes config.PipeConfig,
crypto config.CryptoConfig,
recordSize int,
record int,
block int,
) ([]*tar.Header, error)