refactor: Decompose logger completely
@@ -7,6 +7,7 @@ import (

"github.com/pojntfx/stfs/internal/compression"
"github.com/pojntfx/stfs/internal/keys"
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/internal/persisters"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/operations"

@@ -102,6 +103,8 @@ var archiveCmd = &cobra.Command{
viper.GetString(fromFlag),
viper.GetBool(overwriteFlag),
viper.GetString(compressionLevelFlag),

logging.NewLogger().PrintHeader,
)
if err != nil {
return nil

@@ -145,6 +148,8 @@ var archiveCmd = &cobra.Command{
func(hdr *tar.Header, isRegular bool) error {
return nil // We sign above, no need to verify
},

logging.NewLogger().PrintHeader,
)
},
}

@@ -2,6 +2,7 @@ package cmd

import (
"github.com/pojntfx/stfs/internal/keys"
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/operations"
"github.com/spf13/cobra"

@@ -66,6 +67,8 @@ var deleteCmd = &cobra.Command{

viper.GetInt(recordSizeFlag),
viper.GetString(nameFlag),

logging.NewLogger().PrintHeader,
)
},
}

@@ -1,6 +1,7 @@
package cmd

import (
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/pkg/inventory"
"github.com/spf13/cobra"
"github.com/spf13/viper"

@@ -25,6 +26,8 @@ var findCmd = &cobra.Command{
},

viper.GetString(expressionFlag),

logging.NewLogger().PrintHeader,
); err != nil {
return err
}

@@ -1,6 +1,7 @@
package cmd

import (
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/pkg/inventory"
"github.com/spf13/cobra"
"github.com/spf13/viper"

@@ -21,6 +22,8 @@ var listCmd = &cobra.Command{
},

viper.GetString(nameFlag),

logging.NewLogger().PrintHeader,
); err != nil {
return err
}

@@ -2,6 +2,7 @@ package cmd

import (
"github.com/pojntfx/stfs/internal/keys"
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/operations"
"github.com/spf13/cobra"

@@ -63,6 +64,8 @@ var moveCmd = &cobra.Command{
viper.GetInt(recordSizeFlag),
viper.GetString(fromFlag),
viper.GetString(toFlag),

logging.NewLogger().PrintHeader,
)
},
}

@@ -2,6 +2,7 @@ package cmd

import (
"github.com/pojntfx/stfs/internal/keys"
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/hardware"
"github.com/pojntfx/stfs/pkg/recovery"

@@ -72,7 +73,7 @@ var recoveryFetchCmd = &cobra.Command{
viper.GetString(toFlag),
viper.GetBool(previewFlag),

true,
logging.NewLogger().PrintHeader,
)
},
}

@@ -5,6 +5,7 @@ import (

"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/keys"
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/internal/signature"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/recovery"

@@ -74,6 +75,8 @@ var recoveryIndexCmd = &cobra.Command{
func(hdr *tar.Header, isRegular bool) error {
return signature.VerifyHeader(hdr, isRegular, viper.GetString(signatureFlag), recipient)
},

logging.NewLogger().PrintHeader,
)
},
}

@@ -2,6 +2,7 @@ package cmd

import (
"github.com/pojntfx/stfs/internal/keys"
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/hardware"
"github.com/pojntfx/stfs/pkg/recovery"

@@ -62,6 +63,8 @@ var recoveryQueryCmd = &cobra.Command{
viper.GetInt(recordSizeFlag),
viper.GetInt(recordFlag),
viper.GetInt(blockFlag),

logging.NewLogger().PrintHeader,
); err != nil {
return err
}

@@ -2,6 +2,7 @@ package cmd

import (
"github.com/pojntfx/stfs/internal/keys"
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/operations"
"github.com/spf13/cobra"

@@ -68,6 +69,8 @@ var restoreCmd = &cobra.Command{
viper.GetString(fromFlag),
viper.GetString(toFlag),
viper.GetBool(flattenFlag),

logging.NewLogger().PrintHeader,
)
},
}

@@ -7,6 +7,7 @@ import (

"github.com/pojntfx/stfs/internal/compression"
"github.com/pojntfx/stfs/internal/keys"
"github.com/pojntfx/stfs/internal/logging"
"github.com/pojntfx/stfs/internal/persisters"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/operations"

@@ -85,6 +86,8 @@ var updateCmd = &cobra.Command{
viper.GetString(fromFlag),
viper.GetBool(overwriteFlag),
viper.GetString(compressionLevelFlag),

logging.NewLogger().PrintHeader,
)
if err != nil {
return err

@@ -128,6 +131,8 @@ var updateCmd = &cobra.Command{
func(hdr *tar.Header, isRegular bool) error {
return nil // We sign above, no need to verify
},

logging.NewLogger().PrintHeader,
)
},
}

@@ -3,8 +3,6 @@ package converters
import (
"archive/tar"
"encoding/json"
"fmt"
"time"

models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
)

@@ -68,9 +66,3 @@ func TarHeaderToDBHeader(record, lastKnownRecord, block, lastKnownBlock int64, t

return &hdr, nil
}

func TARHeaderToCSV(record, lastKnownRecord, block, lastKnownBlock int64, hdr *tar.Header) []string {
return []string{
fmt.Sprintf("%v", record), fmt.Sprintf("%v", lastKnownRecord), fmt.Sprintf("%v", block), fmt.Sprintf("%v", lastKnownBlock), fmt.Sprintf("%v", hdr.Typeflag), hdr.Name, hdr.Linkname, fmt.Sprintf("%v", hdr.Size), fmt.Sprintf("%v", hdr.Mode), fmt.Sprintf("%v", hdr.Uid), fmt.Sprintf("%v", hdr.Gid), fmt.Sprintf("%v", hdr.Uname), fmt.Sprintf("%v", hdr.Gname), hdr.ModTime.Format(time.RFC3339), hdr.AccessTime.Format(time.RFC3339), hdr.ChangeTime.Format(time.RFC3339), fmt.Sprintf("%v", hdr.Devmajor), fmt.Sprintf("%v", hdr.Devminor), fmt.Sprintf("%v", hdr.PAXRecords), fmt.Sprintf("%v", hdr.Format),
}
}

@@ -1,18 +0,0 @@
package formatting

import (
"encoding/csv"
"os"
)

var (
TARHeaderCSV = []string{
"record", "lastknownrecord", "block", "lastknownblock", "typeflag", "name", "linkname", "size", "mode", "uid", "gid", "uname", "gname", "modtime", "accesstime", "changetime", "devmajor", "devminor", "paxrecords", "format",
}
)

func PrintCSV(input []string) error {
w := csv.NewWriter(os.Stdout)

return w.WriteAll([][]string{input})
}

internal/logging/csv.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package logging

import (
	"encoding/csv"
	"fmt"
	"os"
	"time"

	models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
)

var (
	tarHeaderCSV = []string{
		"record", "lastknownrecord", "block", "lastknownblock", "typeflag", "name", "linkname", "size", "mode", "uid", "gid", "uname", "gname", "modtime", "accesstime", "changetime", "devmajor", "devminor", "paxrecords", "format",
	}
)

func headerToCSV(hdr *models.Header) []string {
	return []string{
		fmt.Sprintf("%v", hdr.Record), fmt.Sprintf("%v", hdr.Lastknownrecord), fmt.Sprintf("%v", hdr.Block), fmt.Sprintf("%v", hdr.Lastknownblock), fmt.Sprintf("%v", hdr.Typeflag), hdr.Name, hdr.Linkname, fmt.Sprintf("%v", hdr.Size), fmt.Sprintf("%v", hdr.Mode), fmt.Sprintf("%v", hdr.UID), fmt.Sprintf("%v", hdr.Gid), fmt.Sprintf("%v", hdr.Uname), fmt.Sprintf("%v", hdr.Gname), hdr.Modtime.Format(time.RFC3339), hdr.Accesstime.Format(time.RFC3339), hdr.Changetime.Format(time.RFC3339), fmt.Sprintf("%v", hdr.Devmajor), fmt.Sprintf("%v", hdr.Devminor), fmt.Sprintf("%v", hdr.Paxrecords), fmt.Sprintf("%v", hdr.Format),
	}
}

type Logger struct {
	n int
}

func NewLogger() *Logger {
	return &Logger{}
}

func (l *Logger) PrintHeader(hdr *models.Header) {
	w := csv.NewWriter(os.Stdout)

	if l.n <= 0 {
		_ = w.Write(tarHeaderCSV) // Errors are ignored for compatibility with traditional logging APIs
	}

	_ = w.Write(headerToCSV(hdr)) // Errors are ignored for compatibility with traditional logging APIs

	w.Flush()

	l.n++
}

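The file above is the whole of the new logger: a stateful Logger that writes one CSV row per header to stdout and prepends the column row on its first call. A minimal usage sketch from inside this repository (the packages are internal, so it only compiles within the module), mirroring how the commands in this commit wire it up; the header value below is hypothetical and only for illustration:

package main

import (
	models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
	"github.com/pojntfx/stfs/internal/logging"
)

func main() {
	// One logger per command invocation; its PrintHeader method satisfies the
	// onHeader func(hdr *models.Header) callback that the operations now accept.
	logger := logging.NewLogger()

	// Hypothetical header for illustration; real headers come from the
	// converters package (tar headers or database rows).
	hdr := &models.Header{Name: "/example.txt"}

	logger.PrintHeader(hdr) // first call: prints the CSV column row, then the record
	logger.PrintHeader(hdr) // later calls: print only the record
}
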
@@ -7,6 +7,7 @@ import (
"context"
"database/sql"
"fmt"
"log"
"strings"

"github.com/pojntfx/stfs/internal/db/sqlite/migrations/metadata"

@@ -137,6 +138,8 @@ func (p *MetadataPersister) GetHeaderDirectChildren(ctx context.Context, name st
rootDepth = int(depth.Depth)
}

log.Println(rootDepth)

if err := queries.Raw(
fmt.Sprintf(
`select %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v,

@@ -6,7 +6,7 @@ import (
"regexp"

"github.com/pojntfx/stfs/internal/converters"
"github.com/pojntfx/stfs/internal/formatting"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/persisters"
)

@@ -14,6 +14,8 @@ func Find(
state MetadataConfig,

expression string,

onHeader func(hdr *models.Header),
) ([]*tar.Header, error) {
metadataPersister := persisters.NewMetadataPersister(state.Metadata)
if err := metadataPersister.Open(); err != nil {

@@ -26,24 +28,15 @@ func Find(
}

headers := []*tar.Header{}
first := true
for _, dbhdr := range dbHdrs {
if regexp.MustCompile(expression).Match([]byte(dbhdr.Name)) {
if first {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
return []*tar.Header{}, err
}

first = false
}

hdr, err := converters.DBHeaderToTarHeader(dbhdr)
if err != nil {
return []*tar.Header{}, err
}

if err := formatting.PrintCSV(converters.TARHeaderToCSV(dbhdr.Record, dbhdr.Lastknownrecord, dbhdr.Block, dbhdr.Lastknownblock, hdr)); err != nil {
return []*tar.Header{}, err
if onHeader != nil {
onHeader(dbhdr)
}

headers = append(headers, hdr)

@@ -5,7 +5,7 @@ import (
"context"

"github.com/pojntfx/stfs/internal/converters"
"github.com/pojntfx/stfs/internal/formatting"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/persisters"
)

@@ -13,6 +13,8 @@ func List(
state MetadataConfig,

name string,

onHeader func(hdr *models.Header),
) ([]*tar.Header, error) {
metadataPersister := persisters.NewMetadataPersister(state.Metadata)
if err := metadataPersister.Open(); err != nil {

@@ -25,20 +27,14 @@ func List(
}

headers := []*tar.Header{}
for i, dbhdr := range dbHdrs {
if i == 0 {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
return []*tar.Header{}, err
}
}

for _, dbhdr := range dbHdrs {
hdr, err := converters.DBHeaderToTarHeader(dbhdr)
if err != nil {
return []*tar.Header{}, err
}

if err := formatting.PrintCSV(converters.TARHeaderToCSV(dbhdr.Record, dbhdr.Lastknownrecord, dbhdr.Block, dbhdr.Lastknownblock, hdr)); err != nil {
return []*tar.Header{}, err
if onHeader != nil {
onHeader(dbhdr)
}

headers = append(headers, hdr)

@@ -10,8 +10,8 @@ import (

"github.com/pojntfx/stfs/internal/compression"
"github.com/pojntfx/stfs/internal/converters"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/internal/records"

@@ -31,6 +31,8 @@ func Archive(
from string,
overwrite bool,
compressionLevel string,

onHeader func(hdr *models.Header),
) ([]*tar.Header, error) {
dirty := false
tw, isRegular, cleanup, err := tape.OpenTapeWriteOnly(state.Drive, recordSize, overwrite)

@@ -91,7 +93,6 @@ func Archive(
defer cleanup(&dirty)

headers := []*tar.Header{}
first := true
return headers, filepath.Walk(from, func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err

@@ -195,16 +196,13 @@ func Archive(
}
}

if first {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
if onHeader != nil {
dbhdr, err := converters.TarHeaderToDBHeader(-1, -1, -1, -1, hdr)
if err != nil {
return err
}

first = false
}

if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
return err
onHeader(dbhdr)
}

hdrToAppend := *hdr

@@ -7,7 +7,6 @@ import (
"github.com/pojntfx/stfs/internal/converters"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/persisters"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/internal/signature"

@@ -23,6 +22,8 @@ func Delete(

recordSize int,
name string,

onHeader func(hdr *models.Header),
) error {
dirty := false
tw, isRegular, cleanup, err := tape.OpenTapeWriteOnly(state.Drive, recordSize, false)

@@ -58,10 +59,6 @@ func Delete(
headersToDelete = append(headersToDelete, dbhdrs...)
}

if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
return err
}

// Append deletion hdrs to the tape or tar file
hdrs := []*tar.Header{}
for _, dbhdr := range headersToDelete {

@@ -88,8 +85,13 @@ func Delete(

dirty = true

if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
return err
if onHeader != nil {
dbhdr, err := converters.TarHeaderToDBHeader(-1, -1, -1, -1, hdr)
if err != nil {
return err
}

onHeader(dbhdr)
}

hdrs = append(hdrs, hdr)

@@ -122,5 +124,7 @@ func Delete(
func(hdr *tar.Header, isRegular bool) error {
return nil // We sign above, no need to verify
},

onHeader,
)
}

@@ -8,7 +8,6 @@ import (
"github.com/pojntfx/stfs/internal/converters"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/persisters"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/internal/signature"

@@ -25,6 +24,8 @@ func Move(
recordSize int,
from string,
to string,

onHeader func(hdr *models.Header),
) error {
dirty := false
tw, isRegular, cleanup, err := tape.OpenTapeWriteOnly(state.Drive, recordSize, false)

@@ -60,10 +61,6 @@ func Move(
headersToMove = append(headersToMove, dbhdrs...)
}

if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
return err
}

// Append move headers to the tape or tar file
hdrs := []*tar.Header{}
for _, dbhdr := range headersToMove {

@@ -92,8 +89,13 @@ func Move(

dirty = true

if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
return err
if onHeader != nil {
dbhdr, err := converters.TarHeaderToDBHeader(-1, -1, -1, -1, hdr)
if err != nil {
return err
}

onHeader(dbhdr)
}

hdrs = append(hdrs, hdr)

@@ -126,5 +128,7 @@ func Move(
func(hdr *tar.Header, isRegular bool) error {
return nil // We sign above, no need to verify
},

onHeader,
)
}

@@ -8,9 +8,7 @@ import (
"path/filepath"
"strings"

"github.com/pojntfx/stfs/internal/converters"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/persisters"
"github.com/pojntfx/stfs/pkg/config"
"github.com/pojntfx/stfs/pkg/hardware"

@@ -26,6 +24,8 @@ func Restore(
from string,
to string,
flatten bool,

onHeader func(hdr *models.Header),
) error {
metadataPersister := persisters.NewMetadataPersister(state.Metadata)
if err := metadataPersister.Open(); err != nil {

@@ -59,20 +59,9 @@ func Restore(
headersToRestore = append(headersToRestore, dbhdrs...)
}

for i, dbhdr := range headersToRestore {
if i == 0 {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
return err
}
}

hdr, err := converters.DBHeaderToTarHeader(dbhdr)
if err != nil {
return err
}

if err := formatting.PrintCSV(converters.TARHeaderToCSV(dbhdr.Record, dbhdr.Lastknownrecord, dbhdr.Block, dbhdr.Lastknownblock, hdr)); err != nil {
return err
for _, dbhdr := range headersToRestore {
if onHeader != nil {
onHeader(dbhdr)
}

dst := dbhdr.Name

@@ -101,7 +90,7 @@ func Restore(
dst,
false,

false,
nil,
); err != nil {
return err
}

@@ -10,8 +10,8 @@ import (

"github.com/pojntfx/stfs/internal/compression"
"github.com/pojntfx/stfs/internal/converters"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/internal/records"

@@ -31,6 +31,8 @@ func Update(
from string,
overwrite bool,
compressionLevel string,

onHeader func(hdr *models.Header),
) ([]*tar.Header, error) {
dirty := false
tw, isRegular, cleanup, err := tape.OpenTapeWriteOnly(state.Drive, recordSize, false)

@@ -40,7 +42,6 @@ func Update(
defer cleanup(&dirty)

headers := []*tar.Header{}
first := true
return headers, filepath.Walk(from, func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err

@@ -149,19 +150,16 @@ func Update(
}
}

if first {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
return err
}

first = false
}

if overwrite {
hdr.PAXRecords[records.STFSRecordReplacesContent] = records.STFSRecordReplacesContentTrue

if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
return err
if onHeader != nil {
dbhdr, err := converters.TarHeaderToDBHeader(-1, -1, -1, -1, hdr)
if err != nil {
return err
}

onHeader(dbhdr)
}

hdrToAppend := *hdr

@@ -234,8 +232,13 @@ func Update(
} else {
hdr.Size = 0 // Don't try to seek after the record

if err := formatting.PrintCSV(converters.TARHeaderToCSV(-1, -1, -1, -1, hdr)); err != nil {
return err
if onHeader != nil {
dbhdr, err := converters.TarHeaderToDBHeader(-1, -1, -1, -1, hdr)
if err != nil {
return err
}

onHeader(dbhdr)
}

hdrToAppend := *hdr

@@ -9,8 +9,8 @@ import (

"github.com/pojntfx/stfs/internal/compression"
"github.com/pojntfx/stfs/internal/converters"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/internal/records"
"github.com/pojntfx/stfs/internal/signature"

@@ -30,7 +30,7 @@ func Fetch(
to string,
preview bool,

showHeader bool,
onHeader func(hdr *models.Header),
) error {
f, isRegular, err := tape.OpenTapeReadOnly(state.Drive)
if err != nil {

@@ -74,14 +74,13 @@ func Fetch(
return err
}

if showHeader {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
if onHeader != nil {
dbhdr, err := converters.TarHeaderToDBHeader(int64(record), -1, int64(block), -1, hdr)
if err != nil {
return err
}

if err := formatting.PrintCSV(converters.TARHeaderToCSV(int64(record), -1, int64(block), -1, hdr)); err != nil {
return err
}
onHeader(dbhdr)
}

if !preview {

@@ -13,7 +13,6 @@ import (

"github.com/pojntfx/stfs/internal/converters"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/internal/persisters"

@@ -41,6 +40,8 @@ func Index(
hdr *tar.Header,
isRegular bool,
) error,

onHeader func(hdr *models.Header),
) error {
if overwrite {
f, err := os.OpenFile(state.Metadata, os.O_WRONLY|os.O_CREATE, 0600)

@@ -136,7 +137,7 @@ func Index(
return err
}

if err := indexHeader(record, block, hdr, metadataPersister, pipes.Compression, pipes.Encryption); err != nil {
if err := indexHeader(record, block, hdr, metadataPersister, pipes.Compression, pipes.Encryption, onHeader); err != nil {
return err
}

@@ -220,7 +221,7 @@ func Index(
return err
}

if err := indexHeader(record, block, hdr, metadataPersister, pipes.Compression, pipes.Encryption); err != nil {
if err := indexHeader(record, block, hdr, metadataPersister, pipes.Compression, pipes.Encryption, onHeader); err != nil {
return err
}

@@ -254,13 +255,8 @@ func indexHeader(
metadataPersister *persisters.MetadataPersister,
compressionFormat string,
encryptionFormat string,
onHeader func(hdr *models.Header),
) error {
if record == 0 && block == 0 {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
return err
}
}

uncompressedSize, ok := hdr.PAXRecords[records.STFSRecordUncompressedSize]
if ok {
size, err := strconv.Atoi(uncompressedSize)

@@ -279,8 +275,13 @@ func indexHeader(
hdr.Name = newName
}

if err := formatting.PrintCSV(converters.TARHeaderToCSV(record, -1, block, -1, hdr)); err != nil {
return err
if onHeader != nil {
dbhdr, err := converters.TarHeaderToDBHeader(record, -1, block, -1, hdr)
if err != nil {
return err
}

onHeader(dbhdr)
}

stfsVersion, ok := hdr.PAXRecords[records.STFSRecordVersion]

@@ -8,8 +8,8 @@ import (
"math"

"github.com/pojntfx/stfs/internal/converters"
models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
"github.com/pojntfx/stfs/internal/encryption"
"github.com/pojntfx/stfs/internal/formatting"
"github.com/pojntfx/stfs/internal/ioext"
"github.com/pojntfx/stfs/internal/mtio"
"github.com/pojntfx/stfs/internal/signature"

@@ -26,6 +26,8 @@ func Query(
recordSize int,
record int,
block int,

onHeader func(hdr *models.Header),
) ([]*tar.Header, error) {
f, isRegular, err := tape.OpenTapeReadOnly(state.Drive)
if err != nil {

@@ -103,14 +105,13 @@ func Query(
return []*tar.Header{}, err
}

if record == 0 && block == 0 {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
if onHeader != nil {
dbhdr, err := converters.TarHeaderToDBHeader(record, -1, block, -1, hdr)
if err != nil {
return []*tar.Header{}, err
}
}

if err := formatting.PrintCSV(converters.TARHeaderToCSV(record, -1, block, -1, hdr)); err != nil {
return []*tar.Header{}, err
onHeader(dbhdr)
}

headers = append(headers, hdr)

@@ -192,14 +193,13 @@ func Query(
return []*tar.Header{}, err
}

if record == 0 && block == 0 {
if err := formatting.PrintCSV(formatting.TARHeaderCSV); err != nil {
if onHeader != nil {
dbhdr, err := converters.TarHeaderToDBHeader(record, -1, block, -1, hdr)
if err != nil {
return []*tar.Header{}, err
}
}

if err := formatting.PrintCSV(converters.TARHeaderToCSV(record, -1, block, -1, hdr)); err != nil {
return []*tar.Header{}, err
onHeader(dbhdr)
}

headers = append(headers, hdr)

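With every operation now reporting headers through the onHeader callback instead of printing CSV itself, consumers other than the CLI can reuse the same stream. A small sketch of an assumed alternative consumer (not part of this commit, and only compilable inside this repository because the models package is internal) that buffers headers in memory rather than logging them:

package main

import (
	"fmt"

	models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata"
)

func main() {
	// Same signature as the onHeader parameters added above; this callback
	// could be passed wherever the commands pass logging.NewLogger().PrintHeader.
	collected := []*models.Header{}
	onHeader := func(hdr *models.Header) {
		collected = append(collected, hdr)
	}

	onHeader(&models.Header{Name: "/example.txt"}) // hypothetical header, for illustration

	fmt.Println(len(collected)) // 1
}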