fix: Use offset to prevent re-indexing encrypted headers
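In short: recovery.Index now takes an offset parameter; the first offset headers it reads back (the header that was already indexed on a previous run) are skipped inside Index itself, and the decryptHeader callback receives indices that are already shifted down by that offset. Callers that resume from the last indexed record/block pass 1 and can address their header slice with i directly, while a fresh recovery index passes 0. A minimal sketch of the mechanism (hypothetical helper `index` with a simplified signature and loop — not the actual stfs code):

    // index is a hypothetical stand-in for the header loop in recovery.Index;
    // the real function reads headers from the tape drive and takes many more
    // parameters. offset and decryptHeader are the parts this commit changes.
    func index(read []*tar.Header, offset int, decryptHeader func(hdr *tar.Header, i int) error) error {
    	for i, hdr := range read {
    		if i < offset {
    			// Already indexed on a previous run: neither decrypt nor re-index it.
    			continue
    		}

    		// The callback sees i == 0 for the first header that is actually new.
    		if err := decryptHeader(hdr, i-offset); err != nil {
    			return err
    		}

    		// ... decompress, verify and persist the header to the metadata index ...
    	}

    	return nil
    }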
@@ -107,7 +107,7 @@ var archiveCmd = &cobra.Command{
 			logging.NewLogger().PrintHeader,
 		)
 		if err != nil {
-			return nil
+			return err
 		}

 		return recovery.Index(
@@ -130,18 +130,14 @@ var archiveCmd = &cobra.Command{
 			int(lastIndexedRecord),
 			int(lastIndexedBlock),
 			viper.GetBool(overwriteFlag),
+			1, // Ignore the first header, which is the last header which we already indexed

 			func(hdr *tar.Header, i int) error {
-				// Ignore the first header, which is the last header which we already indexed
-				if i == 0 {
-					return nil
-				}
-
-				if len(hdrs) <= i-1 {
+				if len(hdrs) <= i {
 					return config.ErrTarHeaderMissing
 				}

-				*hdr = *hdrs[i-1]
+				*hdr = *hdrs[i]

 				return nil
 			},
@@ -68,6 +68,7 @@ var recoveryIndexCmd = &cobra.Command{
 			viper.GetInt(recordFlag),
 			viper.GetInt(blockFlag),
 			viper.GetBool(overwriteFlag),
+			0,

 			func(hdr *tar.Header, i int) error {
 				return encryption.DecryptHeader(hdr, viper.GetString(encryptionFlag), identity)
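The recovery index command passes 0 here because it builds the index from scratch: there is no previously indexed header on the tape to skip, so every header that is read should be decrypted and indexed. The resuming paths — archive above, and update, delete and move below — pass 1 instead.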
@@ -113,18 +113,14 @@ var updateCmd = &cobra.Command{
 			int(lastIndexedRecord),
 			int(lastIndexedBlock),
 			false,
+			1, // Ignore the first header, which is the last header which we already indexed

 			func(hdr *tar.Header, i int) error {
-				// Ignore the first header, which is the last header which we already indexed
-				if i == 0 {
-					return nil
-				}
-
-				if len(hdrs) <= i-1 {
+				if len(hdrs) <= i {
 					return config.ErrTarHeaderMissing
 				}

-				*hdr = *hdrs[i-1]
+				*hdr = *hdrs[i]

 				return nil
 			},
@@ -7,7 +7,6 @@ import (
 	"context"
 	"database/sql"
 	"fmt"
-	"log"
 	"strings"

 	"github.com/pojntfx/stfs/internal/db/sqlite/migrations/metadata"
@@ -138,8 +137,6 @@ func (p *MetadataPersister) GetHeaderDirectChildren(ctx context.Context, name st
 		rootDepth = int(depth.Depth)
 	}

-	log.Println(rootDepth)
-
 	if err := queries.Raw(
 		fmt.Sprintf(
 			`select %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v, %v,
@@ -2,11 +2,13 @@ package operations

 import (
 	"archive/tar"
+	"errors"
 	"io"
 	"io/fs"
 	"os"
 	"path/filepath"
 	"strconv"
+	"strings"

 	"github.com/pojntfx/stfs/internal/compression"
 	"github.com/pojntfx/stfs/internal/converters"
@@ -22,6 +24,10 @@ import (
 	"github.com/pojntfx/stfs/pkg/config"
 )

+var (
+	errSocketsNotSupported = errors.New("archive/tar: sockets not supported")
+)
+
 func Archive(
 	state config.StateConfig,
 	pipes config.PipeConfig,
@@ -107,6 +113,11 @@ func Archive(

 		hdr, err := tar.FileInfoHeader(info, link)
 		if err != nil {
+			// Skip sockets
+			if strings.Contains(err.Error(), errSocketsNotSupported.Error()) {
+				return nil
+			}
+
 			return err
 		}

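tar.FileInfoHeader cannot represent sockets and returns an error for them, so Archive now skips such entries instead of aborting the whole walk. A standalone sketch of the same skip logic in a filepath.Walk callback (hypothetical directory and logging, simplified from the real Archive code):

    package main

    import (
    	"archive/tar"
    	"errors"
    	"log"
    	"os"
    	"path/filepath"
    	"strings"
    )

    var errSocketsNotSupported = errors.New("archive/tar: sockets not supported")

    func main() {
    	// Walk a directory and build tar headers, silently skipping sockets.
    	if err := filepath.Walk("/tmp", func(path string, info os.FileInfo, err error) error {
    		if err != nil {
    			return err
    		}

    		hdr, err := tar.FileInfoHeader(info, "")
    		if err != nil {
    			// archive/tar rejects sockets; match the error by message and
    			// skip the entry instead of failing the archive operation.
    			if strings.Contains(err.Error(), errSocketsNotSupported.Error()) {
    				return nil
    			}

    			return err
    		}

    		log.Println(hdr.Name)

    		return nil
    	}); err != nil {
    		log.Fatal(err)
    	}
    }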
@@ -220,6 +231,8 @@ func Archive(
 			return err
 		}

+		dirty = true
+
 		if !info.Mode().IsRegular() {
 			return nil
 		}
@@ -273,8 +286,6 @@ func Archive(
 			return err
 		}

-		dirty = true
-
 		return nil
 	})
 }
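With dirty = true now set right after the header is written rather than only after a regular file's contents have been copied, entries without content — directories, symlinks and other non-regular files, which return early at the !info.Mode().IsRegular() check — also mark the archive as dirty.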
@@ -106,18 +106,14 @@ func Delete(
 		int(lastIndexedRecord),
 		int(lastIndexedBlock),
 		false,
+		1, // Ignore the first header, which is the last header which we already indexed

 		func(hdr *tar.Header, i int) error {
-			// Ignore the first header, which is the last header which we already indexed
-			if i == 0 {
-				return nil
-			}
-
-			if len(hdrs) <= i-1 {
+			if len(hdrs) <= i {
 				return config.ErrTarHeaderMissing
 			}

-			*hdr = *hdrs[i-1]
+			*hdr = *hdrs[i]

 			return nil
 		},
@@ -110,18 +110,14 @@ func Move(
 		int(lastIndexedRecord),
 		int(lastIndexedBlock),
 		false,
+		1, // Ignore the first header, which is the last header which we already indexed

 		func(hdr *tar.Header, i int) error {
-			// Ignore the first header, which is the last header which we already indexed
-			if i == 0 {
-				return nil
-			}
-
-			if len(hdrs) <= i-1 {
+			if len(hdrs) <= i {
 				return config.ErrTarHeaderMissing
 			}

-			*hdr = *hdrs[i-1]
+			*hdr = *hdrs[i]

 			return nil
 		},
@@ -31,6 +31,7 @@ func Index(
 	record int,
 	block int,
 	overwrite bool,
+	offset int,

 	decryptHeader func(
 		hdr *tar.Header,
@@ -129,7 +130,8 @@ func Index(
 			break
 		}

-		if err := decryptHeader(hdr, i); err != nil {
+		if i >= offset {
+			if err := decryptHeader(hdr, i-offset); err != nil {
 			return err
 		}

@@ -140,6 +142,7 @@ func Index(
 		if err := indexHeader(record, block, hdr, metadataPersister, pipes.Compression, pipes.Encryption, onHeader); err != nil {
 			return err
 		}
+		}

 		curr, err := f.Seek(0, io.SeekCurrent)
 		if err != nil {
@@ -213,7 +216,8 @@ func Index(
 			}
 		}

-		if err := decryptHeader(hdr, i); err != nil {
+		if i >= offset {
+			if err := decryptHeader(hdr, i-offset); err != nil {
 			return err
 		}

@@ -224,6 +228,7 @@ func Index(
 		if err := indexHeader(record, block, hdr, metadataPersister, pipes.Compression, pipes.Encryption, onHeader); err != nil {
 			return err
 		}
+		}

 		curr = int64(counter.BytesRead)

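Both read loops in Index — the initial one that tracks its position with f.Seek and the later one that uses a byte counter (counter.BytesRead) — apply the same guard, so headers before the offset are neither decrypted nor re-indexed. Condensed, the shape of the change in each loop looks roughly like this (intermediate decompression and verification steps elided):

    if i >= offset {
    	if err := decryptHeader(hdr, i-offset); err != nil {
    		return err
    	}

    	// ... decompression/verification elided ...

    	if err := indexHeader(record, block, hdr, metadataPersister, pipes.Compression, pipes.Encryption, onHeader); err != nil {
    		return err
    	}
    }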