pin kopia to 0.18.2

Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
This commit is contained in:
Lyndon-Li
2024-12-02 19:17:41 +08:00
parent b89270f2c1
commit 3cd85f5b43
12 changed files with 566 additions and 263 deletions

View File

@@ -23,6 +23,8 @@ import (
"github.com/sirupsen/logrus"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/kopia/kopia/snapshot/snapshotfs"
)
// Throttle controls the interval of output result
@@ -41,11 +43,6 @@ func (t *Throttle) ShouldOutput() bool {
return false
}
func (p *Progress) InitThrottle(interval time.Duration) {
p.outputThrottle.throttle = 0
p.outputThrottle.interval = interval
}
// Progress represents a backup or restore counters.
type Progress struct {
// all int64 must precede all int32 due to alignment requirements on ARM
@@ -59,13 +56,29 @@ type Progress struct {
ignoredErrorCount int32 //the total errors has ignored
// +checkatomic
fatalErrorCount int32 //the total errors has occurred
estimatedFileCount int32 // +checklocksignore the total count of files to be processed
estimatedFileCount int64 // +checklocksignore the total count of files to be processed
estimatedTotalBytes int64 // +checklocksignore the total size of files to be processed
// +checkatomic
processedBytes int64 // which statistic all bytes has been processed currently
outputThrottle Throttle // which control the frequency of update progress
Updater uploader.ProgressUpdater //which kopia progress will call the UpdateProgress interface, the third party will implement the interface to do the progress update
Log logrus.FieldLogger // output info into log when backup
processedBytes int64 // which statistic all bytes has been processed currently
outputThrottle Throttle // which control the frequency of update progress
updater uploader.ProgressUpdater //which kopia progress will call the UpdateProgress interface, the third party will implement the interface to do the progress update
log logrus.FieldLogger // output info into log when backup
estimationParam snapshotfs.EstimationParameters
}
func NewProgress(updater uploader.ProgressUpdater, interval time.Duration, log logrus.FieldLogger) *Progress {
return &Progress{
outputThrottle: Throttle{
throttle: 0,
interval: interval,
},
updater: updater,
estimationParam: snapshotfs.EstimationParameters{
Type: snapshotfs.EstimationTypeClassic,
AdaptiveThreshold: 300000,
},
log: log,
}
}
// UploadedBytes the total bytes has uploaded currently
@@ -80,17 +93,17 @@ func (p *Progress) UploadedBytes(numBytes int64) {
func (p *Progress) Error(path string, err error, isIgnored bool) {
if isIgnored {
atomic.AddInt32(&p.ignoredErrorCount, 1)
p.Log.Warnf("Ignored error when processing %v: %v", path, err)
p.log.Warnf("Ignored error when processing %v: %v", path, err)
} else {
atomic.AddInt32(&p.fatalErrorCount, 1)
p.Log.Errorf("Error when processing %v: %v", path, err)
p.log.Errorf("Error when processing %v: %v", path, err)
}
}
// EstimatedDataSize statistic the total size of files to be processed and total files to be processed
func (p *Progress) EstimatedDataSize(fileCount int, totalBytes int64) {
func (p *Progress) EstimatedDataSize(fileCount int64, totalBytes int64) {
atomic.StoreInt64(&p.estimatedTotalBytes, totalBytes)
atomic.StoreInt32(&p.estimatedFileCount, int32(fileCount))
atomic.StoreInt64(&p.estimatedFileCount, fileCount)
p.UpdateProgress()
}
@@ -98,7 +111,7 @@ func (p *Progress) EstimatedDataSize(fileCount int, totalBytes int64) {
// UpdateProgress which calls Updater UpdateProgress interface, update progress by third-party implementation
func (p *Progress) UpdateProgress() {
if p.outputThrottle.ShouldOutput() {
p.Updater.UpdateProgress(&uploader.Progress{TotalBytes: p.estimatedTotalBytes, BytesDone: p.processedBytes})
p.updater.UpdateProgress(&uploader.Progress{TotalBytes: p.estimatedTotalBytes, BytesDone: p.processedBytes})
}
}
@@ -153,3 +166,11 @@ func (p *Progress) ProgressBytes(processedBytes int64, totalBytes int64) {
}
func (p *Progress) FinishedFile(fname string, err error) {}
func (p *Progress) EstimationParameters() snapshotfs.EstimationParameters {
return p.estimationParam
}
func (p *Progress) Enabled() bool {
return true
}

View File

@@ -39,11 +39,16 @@ func TestThrottle_ShouldOutput(t *testing.T) {
{interval: time.Second, expectedOutput: true},
{interval: time.Second, throttle: time.Now().UnixNano() + int64(time.Nanosecond*100000000), expectedOutput: false},
}
p := new(Progress)
for _, tc := range testCases {
// Setup
p.InitThrottle(tc.interval)
p.outputThrottle.throttle = tc.throttle
p := &Progress{
outputThrottle: Throttle{
interval: tc.interval,
throttle: tc.throttle,
},
}
// Perform the test
output := p.outputThrottle.ShouldOutput()
@@ -65,14 +70,18 @@ func TestProgress(t *testing.T) {
{interval: time.Second},
{interval: time.Second, throttle: time.Now().UnixNano() + int64(time.Nanosecond*10000)},
}
p := new(Progress)
p.Log = logrus.New()
p.Updater = &fakeProgressUpdater{}
for _, tc := range testCases {
// Setup
p.InitThrottle(tc.interval)
p.outputThrottle.throttle = tc.throttle
p.InitThrottle(time.Second)
p := &Progress{
outputThrottle: Throttle{
interval: tc.interval,
throttle: tc.throttle,
},
updater: &fakeProgressUpdater{},
log: logrus.New(),
}
// All below calls put together for the implementation are empty or just very simple and just want to cover testing
// If wanting to write unit tests for some functions could remove it and with writing new function alone
p.UpdateProgress()

View File

@@ -237,7 +237,7 @@ func (sr *shimRepository) Flush(ctx context.Context) error {
return sr.udmRepo.Flush(ctx)
}
func (sr *shimRepository) ConcatenateObjects(ctx context.Context, objectIDs []object.ID) (object.ID, error) {
func (sr *shimRepository) ConcatenateObjects(ctx context.Context, objectIDs []object.ID, opt repo.ConcatenateOptions) (object.ID, error) {
if len(objectIDs) == 0 {
return object.EmptyID, errors.New("object list is empty")
}

View File

@@ -338,7 +338,7 @@ func TestConcatenateObjects(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
_, err := NewShimRepo(tc.backupRepo).ConcatenateObjects(ctx, tc.objectIDs)
_, err := NewShimRepo(tc.backupRepo).ConcatenateObjects(ctx, tc.objectIDs, repo.ConcatenateOptions{})
if tc.expectedError != "" {
assert.EqualError(t, err, tc.expectedError)

View File

@@ -136,11 +136,7 @@ func (kp *kopiaProvider) RunBackup(
})
repoWriter := kopia.NewShimRepo(kp.bkRepo)
kpUploader := snapshotfs.NewUploader(repoWriter)
progress := new(kopia.Progress)
progress.InitThrottle(backupProgressCheckInterval)
progress.Updater = updater
progress.Log = log
kpUploader.Progress = progress
kpUploader.Progress = kopia.NewProgress(updater, backupProgressCheckInterval, log)
kpUploader.FailFast = true
quit := make(chan struct{})
log.Info("Starting backup")
@@ -222,9 +218,7 @@ func (kp *kopiaProvider) RunRestore(
})
repoWriter := kopia.NewShimRepo(kp.bkRepo)
progress := new(kopia.Progress)
progress.InitThrottle(restoreProgressCheckInterval)
progress.Updater = updater
progress := kopia.NewProgress(updater, restoreProgressCheckInterval, log)
restoreCancel := make(chan struct{})
quit := make(chan struct{})