From da0f5d585088ef58af788c0fea514716f209393e Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Sun, 16 Apr 2023 12:34:28 -0400 Subject: [PATCH 01/10] Field labels for Duration, Time, client.ObjectKey, KibishiiData, VeleroCLI2Version Signed-off-by: Tiger Kaovilai --- internal/hook/item_hook_handler_test.go | 72 +++++++++---------- internal/hook/wait_exec_hook_handler_test.go | 48 ++++++------- pkg/controller/backup_controller_test.go | 2 +- .../backup_repository_controller_test.go | 8 +-- pkg/repository/manager.go | 2 +- test/e2e/basic/namespace-mapping.go | 2 +- test/e2e/util/velero/velero_utils.go | 2 +- 7 files changed, 68 insertions(+), 68 deletions(-) diff --git a/internal/hook/item_hook_handler_test.go b/internal/hook/item_hook_handler_test.go index 4ff5ca44e..8ca9f5c9a 100644 --- a/internal/hook/item_hook_handler_test.go +++ b/internal/hook/item_hook_handler_test.go @@ -792,7 +792,7 @@ func TestGetPodExecRestoreHookFromAnnotations(t *testing.T) { expected: &velerov1api.ExecRestoreHook{ Command: []string{"/usr/bin/foo"}, Container: "my-app", - ExecTimeout: metav1.Duration{0}, + ExecTimeout: metav1.Duration{Duration: 0}, }, }, { @@ -805,7 +805,7 @@ func TestGetPodExecRestoreHookFromAnnotations(t *testing.T) { expected: &velerov1api.ExecRestoreHook{ Command: []string{"/usr/bin/foo"}, Container: "my-app", - ExecTimeout: metav1.Duration{0}, + ExecTimeout: metav1.Duration{Duration: 0}, }, }, } @@ -856,8 +856,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -886,8 +886,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -905,8 +905,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -926,8 +926,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -944,8 +944,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Exec: &velerov1api.ExecRestoreHook{ Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -965,8 +965,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - 
WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -984,8 +984,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container2", Command: []string{"/usr/bin/bar"}, OnError: velerov1api.HookErrorModeFail, - ExecTimeout: metav1.Duration{time.Hour}, - WaitTimeout: metav1.Duration{time.Hour}, + ExecTimeout: metav1.Duration{Duration: time.Hour}, + WaitTimeout: metav1.Duration{Duration: time.Hour}, }, }, }, @@ -1012,8 +1012,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -1053,8 +1053,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -1075,8 +1075,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeFail, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, { @@ -1084,8 +1084,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container2", Command: []string{"/usr/bin/baz"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second * 3}, - WaitTimeout: metav1.Duration{time.Second * 3}, + ExecTimeout: metav1.Duration{Duration: time.Second * 3}, + WaitTimeout: metav1.Duration{Duration: time.Second * 3}, }, }, { @@ -1093,8 +1093,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/bar"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second * 2}, - WaitTimeout: metav1.Duration{time.Minute * 2}, + ExecTimeout: metav1.Duration{Duration: time.Second * 2}, + WaitTimeout: metav1.Duration{Duration: time.Minute * 2}, }, }, }, @@ -1108,8 +1108,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/aaa"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second * 4}, - WaitTimeout: metav1.Duration{time.Minute * 4}, + ExecTimeout: metav1.Duration{Duration: time.Second * 4}, + WaitTimeout: metav1.Duration{Duration: time.Minute * 4}, }, }, }, @@ -1129,8 +1129,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeFail, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, { @@ -1140,8 +1140,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/bar"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second * 2}, - WaitTimeout: metav1.Duration{time.Minute * 2}, + ExecTimeout: metav1.Duration{Duration: 
time.Second * 2}, + WaitTimeout: metav1.Duration{Duration: time.Minute * 2}, }, }, { @@ -1151,8 +1151,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/aaa"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second * 4}, - WaitTimeout: metav1.Duration{time.Minute * 4}, + ExecTimeout: metav1.Duration{Duration: time.Second * 4}, + WaitTimeout: metav1.Duration{Duration: time.Minute * 4}, }, }, }, @@ -1164,8 +1164,8 @@ func TestGroupRestoreExecHooks(t *testing.T) { Container: "container2", Command: []string{"/usr/bin/baz"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second * 3}, - WaitTimeout: metav1.Duration{time.Second * 3}, + ExecTimeout: metav1.Duration{Duration: time.Second * 3}, + WaitTimeout: metav1.Duration{Duration: time.Second * 3}, }, }, }, diff --git a/internal/hook/wait_exec_hook_handler_test.go b/internal/hook/wait_exec_hook_handler_test.go index f12e7fef4..922df19db 100644 --- a/internal/hook/wait_exec_hook_handler_test.go +++ b/internal/hook/wait_exec_hook_handler_test.go @@ -102,8 +102,8 @@ func TestWaitExecHandleHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -115,7 +115,7 @@ func TestWaitExecHandleHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - Timeout: metav1.Duration{time.Second}, + Timeout: metav1.Duration{Duration: time.Second}, }, error: nil, pod: builder.ForPod("default", "my-pod"). @@ -171,8 +171,8 @@ func TestWaitExecHandleHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeFail, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -184,7 +184,7 @@ func TestWaitExecHandleHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeFail, - Timeout: metav1.Duration{time.Second}, + Timeout: metav1.Duration{Duration: time.Second}, }, error: errors.New("pod hook error"), pod: builder.ForPod("default", "my-pod"). @@ -240,8 +240,8 @@ func TestWaitExecHandleHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -253,7 +253,7 @@ func TestWaitExecHandleHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - Timeout: metav1.Duration{time.Second}, + Timeout: metav1.Duration{Duration: time.Second}, }, error: errors.New("pod hook error"), pod: builder.ForPod("default", "my-pod"). 
@@ -309,8 +309,8 @@ func TestWaitExecHandleHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Minute}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -322,7 +322,7 @@ func TestWaitExecHandleHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - Timeout: metav1.Duration{time.Second}, + Timeout: metav1.Duration{Duration: time.Second}, }, error: nil, pod: builder.ForPod("default", "my-pod"). @@ -444,7 +444,7 @@ func TestWaitExecHandleHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeContinue, - WaitTimeout: metav1.Duration{time.Millisecond}, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, }, }, }, @@ -475,7 +475,7 @@ func TestWaitExecHandleHooks(t *testing.T) { Container: "container1", Command: []string{"/usr/bin/foo"}, OnError: velerov1api.HookErrorModeFail, - WaitTimeout: metav1.Duration{time.Millisecond}, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, }, }, }, @@ -878,13 +878,13 @@ func TestMaxHookWait(t *testing.T) { "container1": { { Hook: velerov1api.ExecRestoreHook{ - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{0}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: 0}, }, }, { Hook: velerov1api.ExecRestoreHook{ - WaitTimeout: metav1.Duration{-1}, + WaitTimeout: metav1.Duration{Duration: -1}, }, }, }, @@ -897,24 +897,24 @@ func TestMaxHookWait(t *testing.T) { "container1": { { Hook: velerov1api.ExecRestoreHook{ - WaitTimeout: metav1.Duration{time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Second}, }, }, { Hook: velerov1api.ExecRestoreHook{ - WaitTimeout: metav1.Duration{time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Second}, }, }, }, "container2": { { Hook: velerov1api.ExecRestoreHook{ - WaitTimeout: metav1.Duration{time.Hour}, + WaitTimeout: metav1.Duration{Duration: time.Hour}, }, }, { Hook: velerov1api.ExecRestoreHook{ - WaitTimeout: metav1.Duration{time.Minute}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, }, }, }, @@ -927,13 +927,13 @@ func TestMaxHookWait(t *testing.T) { "container1": { { Hook: velerov1api.ExecRestoreHook{ - ExecTimeout: metav1.Duration{time.Second}, - WaitTimeout: metav1.Duration{time.Second}, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Second}, }, }, { Hook: velerov1api.ExecRestoreHook{ - WaitTimeout: metav1.Duration{0}, + WaitTimeout: metav1.Duration{Duration: 0}, }, }, }, diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index 95bfc8606..ce8251e20 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -725,7 +725,7 @@ func TestProcessBackupCompletions(t *testing.T) { Phase: velerov1api.BackupPhaseFinalizing, Version: 1, FormatVersion: "1.1.0", - Expiration: &metav1.Time{now.Add(10 * time.Minute)}, + Expiration: &metav1.Time{Time: now.Add(10 * time.Minute)}, StartTimestamp: ×tamp, }, }, diff --git a/pkg/controller/backup_repository_controller_test.go b/pkg/controller/backup_repository_controller_test.go index 382f40162..22c64b45e 100644 --- a/pkg/controller/backup_repository_controller_test.go +++ 
b/pkg/controller/backup_repository_controller_test.go @@ -54,7 +54,7 @@ func mockBackupRepositoryCR() *velerov1api.BackupRepository { Name: "repo", }, Spec: velerov1api.BackupRepositorySpec{ - MaintenanceFrequency: metav1.Duration{testMaintenanceFrequency}, + MaintenanceFrequency: metav1.Duration{Duration: testMaintenanceFrequency}, }, } @@ -153,7 +153,7 @@ func TestBackupRepoReconcile(t *testing.T) { Name: "unknown", }, Spec: velerov1api.BackupRepositorySpec{ - MaintenanceFrequency: metav1.Duration{testMaintenanceFrequency}, + MaintenanceFrequency: metav1.Duration{Duration: testMaintenanceFrequency}, }, }, expectNil: true, @@ -166,7 +166,7 @@ func TestBackupRepoReconcile(t *testing.T) { Name: "repo", }, Spec: velerov1api.BackupRepositorySpec{ - MaintenanceFrequency: metav1.Duration{testMaintenanceFrequency}, + MaintenanceFrequency: metav1.Duration{Duration: testMaintenanceFrequency}, }, }, expectNil: true, @@ -179,7 +179,7 @@ func TestBackupRepoReconcile(t *testing.T) { Name: "repo", }, Spec: velerov1api.BackupRepositorySpec{ - MaintenanceFrequency: metav1.Duration{testMaintenanceFrequency}, + MaintenanceFrequency: metav1.Duration{Duration: testMaintenanceFrequency}, }, Status: velerov1api.BackupRepositoryStatus{ Phase: velerov1api.BackupRepositoryPhaseNew, diff --git a/pkg/repository/manager.go b/pkg/repository/manager.go index c8cabdd81..0807844fe 100644 --- a/pkg/repository/manager.go +++ b/pkg/repository/manager.go @@ -247,7 +247,7 @@ func (m *manager) getRepositoryProvider(repo *velerov1api.BackupRepository) (pro func (m *manager) assembleRepoParam(repo *velerov1api.BackupRepository) (provider.RepoParam, error) { bsl := &velerov1api.BackupStorageLocation{} - if err := m.client.Get(context.Background(), client.ObjectKey{m.namespace, repo.Spec.BackupStorageLocation}, bsl); err != nil { + if err := m.client.Get(context.Background(), client.ObjectKey{Namespace: m.namespace, Name: repo.Spec.BackupStorageLocation}, bsl); err != nil { return provider.RepoParam{}, errors.WithStack(err) } return provider.RepoParam{ diff --git a/test/e2e/basic/namespace-mapping.go b/test/e2e/basic/namespace-mapping.go index e46eabce2..1f95556d8 100644 --- a/test/e2e/basic/namespace-mapping.go +++ b/test/e2e/basic/namespace-mapping.go @@ -33,7 +33,7 @@ func (n *NamespaceMapping) Init() error { n.Client = *n.VeleroCfg.ClientToInstallVelero n.VeleroCfg.UseVolumeSnapshots = n.UseVolumeSnapshots n.VeleroCfg.UseNodeAgent = !n.UseVolumeSnapshots - n.kibishiiData = &KibishiiData{2, 10, 10, 1024, 1024, 0, 2} + n.kibishiiData = &KibishiiData{Levels: 2, DirsPerLevel: 10, FilesPerLevel: 10, FileLength: 1024, BlockSize: 1024, PassNum: 0, ExpectedNodes: 2} backupType := "restic" if n.UseVolumeSnapshots { backupType = "snapshot" diff --git a/test/e2e/util/velero/velero_utils.go b/test/e2e/util/velero/velero_utils.go index b8d57c621..5e5929d72 100644 --- a/test/e2e/util/velero/velero_utils.go +++ b/test/e2e/util/velero/velero_utils.go @@ -1092,7 +1092,7 @@ func GetVersionList(veleroCli, veleroVersion string) []VeleroCLI2Version { for _, veleroVersion := range veleroVersionList { veleroCLI2VersionList = append(veleroCLI2VersionList, - VeleroCLI2Version{veleroVersion, ""}) + VeleroCLI2Version{VeleroVersion: veleroVersion, VeleroCLI: ""}) } for i, veleroCli := range veleroCliList { if i == len(veleroVersionList)-1 { From 3893c460867a5e555c19a98733857c06359d0c14 Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Sun, 16 Apr 2023 12:49:55 -0400 Subject: [PATCH 02/10] jsontag Signed-off-by: Tiger Kaovilai --- 
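Note (applies to this patch): Go struct tags are only honored when they are raw-string
literals in the conventional key:"value" form. The pre-patch interpreted-string form
("json:resourceIdentifier") compiles, but reflect.StructTag.Get("json") cannot parse it,
so encoding/json silently falls back to the exported Go field names. This is the same
class of issue that go vet reports via its structtag check, which the verify-vet.sh gate
added later in this series (patch 05) would catch. A minimal standalone sketch of the
difference -- the Bad/Good type names below are illustrative only, not from the Velero
codebase:

package main

import (
	"encoding/json"
	"fmt"
)

// Bad mirrors the pre-patch form: the tag is a legal string literal, but its
// content is not a key:"value" pair, so reflect.StructTag.Get("json") returns
// "" and encoding/json uses the Go field name instead.
type Bad struct {
	OperationID string "json:operationID"
}

// Good mirrors the post-patch form: a raw-string key:"value" tag that
// encoding/json recognizes.
type Good struct {
	OperationID string `json:"operationID"`
}

func main() {
	b, _ := json.Marshal(Bad{OperationID: "abc"})
	g, _ := json.Marshal(Good{OperationID: "abc"})
	fmt.Println(string(b)) // {"OperationID":"abc"} -- tag silently ignored
	fmt.Println(string(g)) // {"operationID":"abc"}
}

Marshaling Bad emits the field under "OperationID" (tag ignored), while Good emits
"operationID" as intended, which is why these tags changed on-disk JSON compatibility
for the itemoperation types.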
pkg/itemoperation/backup_operation.go | 6 +++--- pkg/itemoperation/restore_operation.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/itemoperation/backup_operation.go b/pkg/itemoperation/backup_operation.go index 520a25576..84b207a3b 100644 --- a/pkg/itemoperation/backup_operation.go +++ b/pkg/itemoperation/backup_operation.go @@ -56,13 +56,13 @@ type BackupOperationSpec struct { BackupItemAction string `json:"backupItemAction"` // Kubernetes resource identifier for the item - ResourceIdentifier velero.ResourceIdentifier "json:resourceIdentifier" + ResourceIdentifier velero.ResourceIdentifier `json:"resourceIdentifier"` // OperationID returned by the BIA plugin - OperationID string "json:operationID" + OperationID string `json:"operationID"` // Items needing to be added to the backup after all async operations have completed - PostOperationItems []velero.ResourceIdentifier "json:postOperationItems" + PostOperationItems []velero.ResourceIdentifier `json:"postOperationItems"` } func (in *BackupOperationSpec) DeepCopy() *BackupOperationSpec { diff --git a/pkg/itemoperation/restore_operation.go b/pkg/itemoperation/restore_operation.go index 47f396156..d639d4b23 100644 --- a/pkg/itemoperation/restore_operation.go +++ b/pkg/itemoperation/restore_operation.go @@ -56,10 +56,10 @@ type RestoreOperationSpec struct { RestoreItemAction string `json:"restoreItemAction"` // Kubernetes resource identifier for the item - ResourceIdentifier velero.ResourceIdentifier "json:resourceIdentifier" + ResourceIdentifier velero.ResourceIdentifier `json:"resourceIdentifier"` // OperationID returned by the RIA plugin - OperationID string "json:operationID" + OperationID string `json:"operationID"` } func (in *RestoreOperationSpec) DeepCopy() *RestoreOperationSpec { From 84daa36efe261f37ccdade9eb00450fba38e52f9 Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Sun, 16 Apr 2023 13:06:13 -0400 Subject: [PATCH 03/10] cancel ctx in nil, err returns Signed-off-by: Tiger Kaovilai --- pkg/cmd/cli/nodeagent/server.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index cd0d4030e..8fe25defb 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -115,6 +115,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, metri clientConfig, err := factory.ClientConfig() if err != nil { + cancelFunc() return nil, err } @@ -139,6 +140,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, metri NewCache: cache.BuilderWithOptions(cacheOption), }) if err != nil { + cancelFunc() return nil, err } From 3f4b258dee13feeab6d1eb3556dadfcd3accfe82 Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Sun, 16 Apr 2023 13:22:48 -0400 Subject: [PATCH 04/10] defer ctxCancel Signed-off-by: Tiger Kaovilai --- internal/hook/wait_exec_hook_handler_test.go | 4 +++- test/e2e/backups/deletion.go | 3 ++- test/e2e/backups/sync_backups.go | 5 ++++- test/e2e/backups/ttl.go | 4 +++- test/e2e/upgrade/upgrade.go | 3 ++- test/e2e/util/k8s/namespace.go | 3 ++- test/e2e/util/kibishii/kibishii_utils.go | 9 ++++++--- test/e2e/util/providers/common.go | 3 ++- 8 files changed, 24 insertions(+), 10 deletions(-) diff --git a/internal/hook/wait_exec_hook_handler_test.go b/internal/hook/wait_exec_hook_handler_test.go index 922df19db..933e5d34d 100644 --- a/internal/hook/wait_exec_hook_handler_test.go +++ b/internal/hook/wait_exec_hook_handler_test.go @@ -738,7 +738,9 @@ func 
TestWaitExecHandleHooks(t *testing.T) { ctx := context.Background() if test.sharedHooksContextTimeout > 0 { - ctx, _ = context.WithTimeout(ctx, test.sharedHooksContextTimeout) + var ctxCancel context.CancelFunc + ctx, ctxCancel = context.WithTimeout(ctx, test.sharedHooksContextTimeout) + defer ctxCancel() } errs := h.HandleHooks(ctx, velerotest.NewLogger(), test.initialPod, test.byContainer) diff --git a/test/e2e/backups/deletion.go b/test/e2e/backups/deletion.go index 3d70948d8..aaef02652 100644 --- a/test/e2e/backups/deletion.go +++ b/test/e2e/backups/deletion.go @@ -91,7 +91,8 @@ func backup_deletion_test(useVolumeSnapshots bool) { // runUpgradeTests runs upgrade test on the provider by kibishii. func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupName, backupLocation string, useVolumeSnapshots bool, kibishiiDirectory string) error { - oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60) + oneHourTimeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*60) + defer ctxCancel() veleroCLI := veleroCfg.VeleroCLI providerName := veleroCfg.CloudProvider veleroNamespace := veleroCfg.VeleroNamespace diff --git a/test/e2e/backups/sync_backups.go b/test/e2e/backups/sync_backups.go index 1de243414..d381e38f8 100644 --- a/test/e2e/backups/sync_backups.go +++ b/test/e2e/backups/sync_backups.go @@ -40,6 +40,7 @@ type SyncBackups struct { testNS string backupName string ctx context.Context + ctxCancel context.CancelFunc } func (b *SyncBackups) Init() { @@ -47,7 +48,7 @@ func (b *SyncBackups) Init() { UUIDgen, _ = uuid.NewRandom() b.testNS = "sync-bsl-test-" + UUIDgen.String() b.backupName = "sync-bsl-test-" + UUIDgen.String() - b.ctx, _ = context.WithTimeout(context.Background(), time.Minute*10) + b.ctx, b.ctxCancel = context.WithTimeout(context.Background(), time.Minute*10) } func BackupsSyncTest() { @@ -79,6 +80,7 @@ func BackupsSyncTest() { It("Backups in object storage should be synced to a new Velero successfully", func() { test.Init() + defer test.ctxCancel() By(fmt.Sprintf("Prepare workload as target to backup by creating namespace %s namespace", test.testNS)) Expect(CreateNamespace(test.ctx, *VeleroCfg.ClientToInstallVelero, test.testNS)).To(Succeed(), fmt.Sprintf("Failed to create %s namespace", test.testNS)) @@ -119,6 +121,7 @@ func BackupsSyncTest() { It("Deleted backups in object storage are synced to be deleted in Velero", func() { test.Init() + defer test.ctxCancel() By(fmt.Sprintf("Prepare workload as target to backup by creating namespace in %s namespace", test.testNS), func() { Expect(CreateNamespace(test.ctx, *VeleroCfg.ClientToInstallVelero, test.testNS)).To(Succeed(), fmt.Sprintf("Failed to create %s namespace", test.testNS)) diff --git a/test/e2e/backups/ttl.go b/test/e2e/backups/ttl.go index a513d22ca..322201a33 100644 --- a/test/e2e/backups/ttl.go +++ b/test/e2e/backups/ttl.go @@ -43,6 +43,7 @@ type TTL struct { backupName string restoreName string ctx context.Context + ctxCancel context.CancelFunc ttl time.Duration } @@ -52,7 +53,7 @@ func (b *TTL) Init() { b.testNS = "backup-ttl-test-" + UUIDgen.String() b.backupName = "backup-ttl-test-" + UUIDgen.String() b.restoreName = "restore-ttl-test-" + UUIDgen.String() - b.ctx, _ = context.WithTimeout(context.Background(), 2*time.Hour) + b.ctx, b.ctxCancel = context.WithTimeout(context.Background(), 2*time.Hour) b.ttl = 20 * time.Minute } @@ -93,6 +94,7 @@ func TTLTest() { It("Backups in object storage should be synced to a new Velero successfully", func() { 
test.Init() + defer test.ctxCancel() By(fmt.Sprintf("Prepare workload as target to backup by creating namespace %s namespace", test.testNS), func() { Expect(CreateNamespace(test.ctx, client, test.testNS)).To(Succeed(), fmt.Sprintf("Failed to create %s namespace", test.testNS)) diff --git a/test/e2e/upgrade/upgrade.go b/test/e2e/upgrade/upgrade.go index 0bf0ccc96..c9f0c00ab 100644 --- a/test/e2e/upgrade/upgrade.go +++ b/test/e2e/upgrade/upgrade.go @@ -96,7 +96,8 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC flag.Parse() UUIDgen, err = uuid.NewRandom() Expect(err).To(Succeed()) - oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60) + oneHourTimeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*60) + defer ctxCancel() supportUploaderType, err := IsSupportUploaderType(veleroCLI2Version.VeleroVersion) Expect(err).To(Succeed()) if veleroCLI2Version.VeleroCLI == "" { diff --git a/test/e2e/util/k8s/namespace.go b/test/e2e/util/k8s/namespace.go index 3135549a3..e19582dd7 100644 --- a/test/e2e/util/k8s/namespace.go +++ b/test/e2e/util/k8s/namespace.go @@ -67,7 +67,8 @@ func GetNamespace(ctx context.Context, client TestClient, namespace string) (*co } func DeleteNamespace(ctx context.Context, client TestClient, namespace string, wait bool) error { - tenMinuteTimeout, _ := context.WithTimeout(context.Background(), time.Minute*10) + tenMinuteTimeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*10) + defer ctxCancel() if err := client.ClientGo.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{}); err != nil { return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace)) } diff --git a/test/e2e/util/kibishii/kibishii_utils.go b/test/e2e/util/kibishii/kibishii_utils.go index 950599735..2f103508a 100644 --- a/test/e2e/util/kibishii/kibishii_utils.go +++ b/test/e2e/util/kibishii/kibishii_utils.go @@ -55,7 +55,8 @@ var KibishiiPodNameList = []string{"kibishii-deployment-0", "kibishii-deployment func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLocation, kibishiiNamespace string, useVolumeSnapshots, defaultVolumesToFsBackup bool) error { client := *veleroCfg.ClientToInstallVelero - oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60) + oneHourTimeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*60) + defer ctxCancel() veleroCLI := veleroCfg.VeleroCLI providerName := veleroCfg.CloudProvider veleroNamespace := veleroCfg.VeleroNamespace @@ -221,7 +222,8 @@ func generateData(ctx context.Context, namespace string, kibishiiData *KibishiiD timeout := 30 * time.Minute interval := 1 * time.Second err := wait.PollImmediate(interval, timeout, func() (bool, error) { - timeout, _ := context.WithTimeout(context.Background(), time.Minute*20) + timeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*20) + defer ctxCancel() kibishiiGenerateCmd := exec.CommandContext(timeout, "kubectl", "exec", "-n", namespace, "jump-pad", "--", "/usr/local/bin/generate.sh", strconv.Itoa(kibishiiData.Levels), strconv.Itoa(kibishiiData.DirsPerLevel), strconv.Itoa(kibishiiData.FilesPerLevel), strconv.Itoa(kibishiiData.FileLength), @@ -246,7 +248,8 @@ func verifyData(ctx context.Context, namespace string, kibishiiData *KibishiiDat timeout := 10 * time.Minute interval := 5 * time.Second err := wait.PollImmediate(interval, timeout, func() (bool, error) { - timeout, _ := 
context.WithTimeout(context.Background(), time.Minute*20) + timeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*20) + defer ctxCancel() kibishiiVerifyCmd := exec.CommandContext(timeout, "kubectl", "exec", "-n", namespace, "jump-pad", "--", "/usr/local/bin/verify.sh", strconv.Itoa(kibishiiData.Levels), strconv.Itoa(kibishiiData.DirsPerLevel), strconv.Itoa(kibishiiData.FilesPerLevel), strconv.Itoa(kibishiiData.FileLength), diff --git a/test/e2e/util/providers/common.go b/test/e2e/util/providers/common.go index 8c37d79d9..eaeb5c3c1 100644 --- a/test/e2e/util/providers/common.go +++ b/test/e2e/util/providers/common.go @@ -138,7 +138,8 @@ func IsSnapshotExisted(cloudProvider, cloudCredentialsFile, bslBucket, bslConfig } if cloudProvider == "vsphere" { var retSnapshotIDs []string - ctx, _ := context.WithTimeout(context.Background(), time.Minute*2) + ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*2) + defer ctxCancel() retSnapshotIDs, err = velero.GetVsphereSnapshotIDs(ctx, time.Hour, snapshotCheck.NamespaceBackedUp, snapshotCheck.PodName) if err != nil { return errors.Wrapf(err, fmt.Sprintf("Fail to get snapshot CRs of backup%s", backupName)) From d6848ffb16cc55db1db52317ac678aa636e3ba8f Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Sun, 16 Apr 2023 13:59:21 -0400 Subject: [PATCH 05/10] Self contain context in each high level function rather than passing around in structs Signed-off-by: Tiger Kaovilai --- hack/verify-vet.sh | 17 ++++++++ test/e2e/backups/sync_backups.go | 31 ++++++++------- test/e2e/backups/ttl.go | 26 ++++++------- test/e2e/basic/namespace-mapping.go | 15 +++---- test/e2e/basic/nodeport.go | 39 +++++++++++-------- test/e2e/basic/pvc-selected-node-changing.go | 7 ++-- test/e2e/basic/resources-check/namespaces.go | 16 +++++--- .../resources-check/namespaces_annotation.go | 9 +++-- test/e2e/basic/resources-check/rbac.go | 28 +++++++------ test/e2e/basic/storage-class-changing.go | 19 ++++++--- test/e2e/bsl-mgmt/deletion.go | 4 +- test/e2e/migration/migration.go | 4 +- test/e2e/privilegesmgmt/ssr.go | 3 +- test/e2e/pv-backup/pv-backup-filter.go | 29 ++++++++------ test/e2e/resource-filtering/base.go | 13 ++++--- test/e2e/resource-filtering/exclude_label.go | 13 ++++--- .../resource-filtering/exclude_namespaces.go | 11 ++++-- .../resource-filtering/include_namespaces.go | 11 ++++-- test/e2e/resource-filtering/label_selector.go | 9 +++-- .../e2e/resourcepolicies/resource_policies.go | 28 ++++++++----- test/e2e/schedule/ordered_resources.go | 29 ++++++++------ test/e2e/schedule/schedule-backup-creation.go | 5 ++- test/e2e/schedule/schedule.go | 17 +++++--- test/e2e/test/test.go | 20 ++++++---- 24 files changed, 244 insertions(+), 159 deletions(-) create mode 100755 hack/verify-vet.sh diff --git a/hack/verify-vet.sh b/hack/verify-vet.sh new file mode 100755 index 000000000..ba0c242fe --- /dev/null +++ b/hack/verify-vet.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2023 the Velero contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +go vet -mod=mod ./... || (echo "go vet is not happy, fix the errors above" && exit 1) diff --git a/test/e2e/backups/sync_backups.go b/test/e2e/backups/sync_backups.go index d381e38f8..c8b230528 100644 --- a/test/e2e/backups/sync_backups.go +++ b/test/e2e/backups/sync_backups.go @@ -39,8 +39,6 @@ import ( type SyncBackups struct { testNS string backupName string - ctx context.Context - ctxCancel context.CancelFunc } func (b *SyncBackups) Init() { @@ -48,11 +46,12 @@ func (b *SyncBackups) Init() { UUIDgen, _ = uuid.NewRandom() b.testNS = "sync-bsl-test-" + UUIDgen.String() b.backupName = "sync-bsl-test-" + UUIDgen.String() - b.ctx, b.ctxCancel = context.WithTimeout(context.Background(), time.Minute*10) } func BackupsSyncTest() { test := new(SyncBackups) + ctx, ctxCancel := context.WithCancel(context.Background()) + defer ctxCancel() var ( err error ) @@ -80,13 +79,13 @@ func BackupsSyncTest() { It("Backups in object storage should be synced to a new Velero successfully", func() { test.Init() - defer test.ctxCancel() + By(fmt.Sprintf("Prepare workload as target to backup by creating namespace %s namespace", test.testNS)) - Expect(CreateNamespace(test.ctx, *VeleroCfg.ClientToInstallVelero, test.testNS)).To(Succeed(), + Expect(CreateNamespace(ctx, *VeleroCfg.ClientToInstallVelero, test.testNS)).To(Succeed(), fmt.Sprintf("Failed to create %s namespace", test.testNS)) if !VeleroCfg.Debug { defer func() { - Expect(DeleteNamespace(test.ctx, *VeleroCfg.ClientToInstallVelero, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) + Expect(DeleteNamespace(ctx, *VeleroCfg.ClientToInstallVelero, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) }() } @@ -97,7 +96,7 @@ func BackupsSyncTest() { BackupCfg.UseVolumeSnapshots = false BackupCfg.Selector = "" By(fmt.Sprintf("Backup the workload in %s namespace", test.testNS), func() { - Expect(VeleroBackupNamespace(test.ctx, VeleroCfg.VeleroCLI, + Expect(VeleroBackupNamespace(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, test.backupName, "") return "Fail to backup workload" @@ -105,13 +104,13 @@ func BackupsSyncTest() { }) By("Uninstall velero", func() { - Expect(VeleroUninstall(test.ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To(Succeed()) + Expect(VeleroUninstall(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To(Succeed()) }) By("Install velero", func() { veleroCfg := VeleroCfg veleroCfg.UseVolumeSnapshots = false - Expect(VeleroInstall(test.ctx, &VeleroCfg)).To(Succeed()) + Expect(VeleroInstall(ctx, &VeleroCfg)).To(Succeed()) }) By("Check all backups in object storage are synced to Velero", func() { @@ -121,14 +120,14 @@ func BackupsSyncTest() { It("Deleted backups in object storage are synced to be deleted in Velero", func() { test.Init() - defer test.ctxCancel() + defer ctxCancel() By(fmt.Sprintf("Prepare workload as target to backup by creating namespace in %s namespace", test.testNS), func() { - Expect(CreateNamespace(test.ctx, *VeleroCfg.ClientToInstallVelero, test.testNS)).To(Succeed(), + Expect(CreateNamespace(ctx, *VeleroCfg.ClientToInstallVelero, test.testNS)).To(Succeed(), fmt.Sprintf("Failed to create %s namespace", test.testNS)) }) if !VeleroCfg.Debug { defer func() { - Expect(DeleteNamespace(test.ctx, 
*VeleroCfg.ClientToInstallVelero, test.testNS, false)).To(Succeed(), + Expect(DeleteNamespace(ctx, *VeleroCfg.ClientToInstallVelero, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) }() } @@ -139,7 +138,7 @@ func BackupsSyncTest() { BackupCfg.UseVolumeSnapshots = false BackupCfg.Selector = "" By(fmt.Sprintf("Backup the workload in %s namespace", test.testNS), func() { - Expect(VeleroBackupNamespace(test.ctx, VeleroCfg.VeleroCLI, + Expect(VeleroBackupNamespace(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, test.backupName, "") return "Fail to backup workload" @@ -159,11 +158,13 @@ func BackupsSyncTest() { }) By("Check if backups are deleted as a result of sync from BSL", func() { - Expect(WaitBackupDeleted(test.ctx, VeleroCfg.VeleroCLI, test.backupName, time.Minute*10)).To(Succeed(), fmt.Sprintf("Failed to check backup %s deleted", test.backupName)) + Expect(WaitBackupDeleted(ctx, VeleroCfg.VeleroCLI, test.backupName, time.Minute*10)).To(Succeed(), fmt.Sprintf("Failed to check backup %s deleted", test.backupName)) }) }) } func (b *SyncBackups) IsBackupsSynced() error { - return WaitForBackupToBeCreated(b.ctx, VeleroCfg.VeleroCLI, b.backupName, 10*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() + return WaitForBackupToBeCreated(ctx, VeleroCfg.VeleroCLI, b.backupName, 10*time.Minute) } diff --git a/test/e2e/backups/ttl.go b/test/e2e/backups/ttl.go index 322201a33..3dc0ec0e2 100644 --- a/test/e2e/backups/ttl.go +++ b/test/e2e/backups/ttl.go @@ -42,8 +42,6 @@ type TTL struct { testNS string backupName string restoreName string - ctx context.Context - ctxCancel context.CancelFunc ttl time.Duration } @@ -53,12 +51,13 @@ func (b *TTL) Init() { b.testNS = "backup-ttl-test-" + UUIDgen.String() b.backupName = "backup-ttl-test-" + UUIDgen.String() b.restoreName = "restore-ttl-test-" + UUIDgen.String() - b.ctx, b.ctxCancel = context.WithTimeout(context.Background(), 2*time.Hour) b.ttl = 20 * time.Minute } func TTLTest() { + ctx, contextCancel := context.WithCancel(context.Background()) + defer contextCancel() var err error var veleroCfg VeleroConfig useVolumeSnapshots := true @@ -88,20 +87,19 @@ func TTLTest() { if veleroCfg.InstallVelero { Expect(VeleroUninstall(context.Background(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).To(Succeed()) } - Expect(DeleteNamespace(test.ctx, client, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) + Expect(DeleteNamespace(ctx, client, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) } }) It("Backups in object storage should be synced to a new Velero successfully", func() { test.Init() - defer test.ctxCancel() By(fmt.Sprintf("Prepare workload as target to backup by creating namespace %s namespace", test.testNS), func() { - Expect(CreateNamespace(test.ctx, client, test.testNS)).To(Succeed(), + Expect(CreateNamespace(ctx, client, test.testNS)).To(Succeed(), fmt.Sprintf("Failed to create %s namespace", test.testNS)) }) By("Deploy sample workload of Kibishii", func() { - Expect(KibishiiPrepareBeforeBackup(test.ctx, client, veleroCfg.CloudProvider, + Expect(KibishiiPrepareBeforeBackup(ctx, client, veleroCfg.CloudProvider, test.testNS, veleroCfg.RegistryCredentialFile, veleroCfg.Features, veleroCfg.KibishiiDirectory, useVolumeSnapshots, 
DefaultKibishiiData)).To(Succeed()) }) @@ -115,7 +113,7 @@ func TTLTest() { BackupCfg.TTL = test.ttl By(fmt.Sprintf("Backup the workload in %s namespace", test.testNS), func() { - Expect(VeleroBackupNamespace(test.ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { + Expect(VeleroBackupNamespace(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { RunDebug(context.Background(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, test.backupName, "") return "Fail to backup workload" }) @@ -126,7 +124,7 @@ func TTLTest() { if veleroCfg.CloudProvider == "vsphere" { // TODO - remove after upload progress monitoring is implemented By("Waiting for vSphere uploads to complete", func() { - Expect(WaitForVSphereUploadCompletion(test.ctx, time.Hour, + Expect(WaitForVSphereUploadCompletion(ctx, time.Hour, test.testNS, 2)).To(Succeed()) }) } @@ -139,7 +137,7 @@ func TTLTest() { } By(fmt.Sprintf("Simulating a disaster by removing namespace %s\n", BackupCfg.BackupName), func() { - Expect(DeleteNamespace(test.ctx, client, BackupCfg.BackupName, true)).To(Succeed(), + Expect(DeleteNamespace(ctx, client, BackupCfg.BackupName, true)).To(Succeed(), fmt.Sprintf("Failed to delete namespace %s", BackupCfg.BackupName)) }) @@ -149,9 +147,9 @@ func TTLTest() { } By(fmt.Sprintf("Restore %s", test.testNS), func() { - Expect(VeleroRestore(test.ctx, veleroCfg.VeleroCLI, + Expect(VeleroRestore(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, test.restoreName, test.backupName, "")).To(Succeed(), func() string { - RunDebug(test.ctx, veleroCfg.VeleroCLI, + RunDebug(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, "", test.restoreName) return "Fail to restore workload" }) @@ -166,7 +164,7 @@ func TTLTest() { }) By("Check TTL was set correctly", func() { - ttl, err := GetBackupTTL(test.ctx, veleroCfg.VeleroNamespace, test.backupName) + ttl, err := GetBackupTTL(ctx, veleroCfg.VeleroNamespace, test.backupName) Expect(err).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint") t, _ := time.ParseDuration(strings.ReplaceAll(ttl, "'", "")) fmt.Println(t.Round(time.Minute).String()) @@ -178,7 +176,7 @@ func TTLTest() { }) By("Check if backups are deleted by GC", func() { - Expect(WaitBackupDeleted(test.ctx, veleroCfg.VeleroCLI, test.backupName, time.Minute*10)).To(Succeed(), fmt.Sprintf("Backup %s was not deleted by GC", test.backupName)) + Expect(WaitBackupDeleted(ctx, veleroCfg.VeleroCLI, test.backupName, time.Minute*10)).To(Succeed(), fmt.Sprintf("Backup %s was not deleted by GC", test.backupName)) }) By("Backup file from cloud object storage should be deleted", func() { diff --git a/test/e2e/basic/namespace-mapping.go b/test/e2e/basic/namespace-mapping.go index 1f95556d8..0de695305 100644 --- a/test/e2e/basic/namespace-mapping.go +++ b/test/e2e/basic/namespace-mapping.go @@ -82,15 +82,15 @@ func (n *NamespaceMapping) StartRun() error { return nil } func (n *NamespaceMapping) CreateResources() error { - n.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) - + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() for index, ns := range *n.NSIncluded { n.kibishiiData.Levels = len(*n.NSIncluded) + index By(fmt.Sprintf("Creating namespaces ...%s\n", ns), func() { - Expect(CreateNamespace(n.Ctx, n.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns)) + Expect(CreateNamespace(ctx, n.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", 
ns)) }) By("Deploy sample workload of Kibishii", func() { - Expect(KibishiiPrepareBeforeBackup(n.Ctx, n.Client, VeleroCfg.CloudProvider, + Expect(KibishiiPrepareBeforeBackup(ctx, n.Client, VeleroCfg.CloudProvider, ns, VeleroCfg.RegistryCredentialFile, VeleroCfg.Features, VeleroCfg.KibishiiDirectory, false, n.kibishiiData)).To(Succeed()) }) @@ -99,17 +99,18 @@ func (n *NamespaceMapping) CreateResources() error { } func (n *NamespaceMapping) Verify() error { - n.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() for index, ns := range n.MappedNamespaceList { n.kibishiiData.Levels = len(*n.NSIncluded) + index By(fmt.Sprintf("Verify workload %s after restore ", ns), func() { Expect(KibishiiVerifyAfterRestore(n.Client, ns, - n.Ctx, n.kibishiiData)).To(Succeed(), "Fail to verify workload after restore") + ctx, n.kibishiiData)).To(Succeed(), "Fail to verify workload after restore") }) } for _, ns := range *n.NSIncluded { By(fmt.Sprintf("Verify namespace %s for backup is no longer exist after restore with namespace mapping", ns), func() { - Expect(NamespaceShouldNotExist(n.Ctx, n.Client, ns)).To(Succeed()) + Expect(NamespaceShouldNotExist(ctx, n.Client, ns)).To(Succeed()) }) } return nil diff --git a/test/e2e/basic/nodeport.go b/test/e2e/basic/nodeport.go index f9caa9ad9..3faa518bc 100644 --- a/test/e2e/basic/nodeport.go +++ b/test/e2e/basic/nodeport.go @@ -63,35 +63,38 @@ func (n *NodePort) StartRun() error { return nil } func (n *NodePort) CreateResources() error { - n.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() By(fmt.Sprintf("Creating service %s in namespaces %s ......\n", n.serviceName, n.namespace), func() { - Expect(CreateNamespace(n.Ctx, n.Client, n.namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", n.namespace)) - Expect(createServiceWithNodeport(n.Ctx, n.Client, n.namespace, n.serviceName, n.labels, 0)).To(Succeed(), fmt.Sprintf("Failed to create service %s", n.serviceName)) - service, err := GetService(n.Ctx, n.Client, n.namespace, n.serviceName) + Expect(CreateNamespace(ctx, n.Client, n.namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", n.namespace)) + Expect(createServiceWithNodeport(ctx, n.Client, n.namespace, n.serviceName, n.labels, 0)).To(Succeed(), fmt.Sprintf("Failed to create service %s", n.serviceName)) + service, err := GetService(ctx, n.Client, n.namespace, n.serviceName) Expect(err).To(Succeed()) Expect(len(service.Spec.Ports)).To(Equal(1)) n.nodePort = service.Spec.Ports[0].NodePort - _, err = GetAllService(n.Ctx) + _, err = GetAllService(ctx) Expect(err).To(Succeed(), "fail to get service") }) return nil } func (n *NodePort) Destroy() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() By(fmt.Sprintf("Start to destroy namespace %s......", n.NSBaseName), func() { - Expect(CleanupNamespacesWithPoll(n.Ctx, n.Client, NodeportBaseName)).To(Succeed(), + Expect(CleanupNamespacesWithPoll(ctx, n.Client, NodeportBaseName)).To(Succeed(), fmt.Sprintf("Failed to delete namespace %s", n.NSBaseName)) Expect(WaitForServiceDelete(n.Client, n.namespace, n.serviceName, false)).To(Succeed(), "fail to delete service") - _, err := GetAllService(n.Ctx) + _, err := GetAllService(ctx) Expect(err).To(Succeed(), "fail to get service") }) n.namespaceToCollision = 
NodeportBaseName + "tmp" By(fmt.Sprintf("Creating a new service which has the same nodeport as backed up service has in a new namespaces for nodeport collision ...%s\n", n.namespaceToCollision), func() { - Expect(CreateNamespace(n.Ctx, n.Client, n.namespaceToCollision)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", n.namespaceToCollision)) - Expect(createServiceWithNodeport(n.Ctx, n.Client, n.namespaceToCollision, n.serviceName, n.labels, n.nodePort)).To(Succeed(), fmt.Sprintf("Failed to create service %s", n.serviceName)) - _, err := GetAllService(n.Ctx) + Expect(CreateNamespace(ctx, n.Client, n.namespaceToCollision)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", n.namespaceToCollision)) + Expect(createServiceWithNodeport(ctx, n.Client, n.namespaceToCollision, n.serviceName, n.labels, n.nodePort)).To(Succeed(), fmt.Sprintf("Failed to create service %s", n.serviceName)) + _, err := GetAllService(ctx) Expect(err).To(Succeed(), "fail to get service") }) @@ -99,6 +102,8 @@ func (n *NodePort) Destroy() error { } func (n *NodePort) Restore() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() index := 4 restoreName1 := n.RestoreName + "-1" restoreName2 := restoreName1 + "-1" @@ -107,7 +112,7 @@ func (n *NodePort) Restore() error { args = append(args[:index], append([]string{n.RestoreName}, args[index:]...)...) args = append(args, "--preserve-nodeports=true") By(fmt.Sprintf("Start to restore %s with nodeports preservation when port %d is already occupied by other service", n.RestoreName, n.nodePort), func() { - Expect(VeleroRestoreExec(n.Ctx, n.VeleroCfg.VeleroCLI, + Expect(VeleroRestoreExec(ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, n.RestoreName, args, velerov1api.RestorePhasePartiallyFailed)).To( Succeed(), @@ -122,7 +127,7 @@ func (n *NodePort) Restore() error { args = append(args[:index], append([]string{restoreName1}, args[index:]...)...) 
args = append(args, "--preserve-nodeports=false") By(fmt.Sprintf("Start to restore %s without nodeports preservation ......", restoreName1), func() { - Expect(VeleroRestoreExec(n.Ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, + Expect(VeleroRestoreExec(ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, restoreName1, args, velerov1api.RestorePhaseCompleted)).To(Succeed(), func() string { RunDebug(context.Background(), n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, "", restoreName1) return "Fail to restore workload" @@ -130,16 +135,16 @@ func (n *NodePort) Restore() error { }) By(fmt.Sprintf("Delete service %s by deleting namespace %s", n.serviceName, n.namespace), func() { - service, err := GetService(n.Ctx, n.Client, n.namespace, n.serviceName) + service, err := GetService(ctx, n.Client, n.namespace, n.serviceName) Expect(err).To(Succeed()) Expect(len(service.Spec.Ports)).To(Equal(1)) fmt.Println(service.Spec.Ports) - Expect(DeleteNamespace(n.Ctx, n.Client, n.namespace, true)).To(Succeed()) + Expect(DeleteNamespace(ctx, n.Client, n.namespace, true)).To(Succeed()) }) By(fmt.Sprintf("Start to delete service %s in namespace %s ......", n.serviceName, n.namespaceToCollision), func() { Expect(WaitForServiceDelete(n.Client, n.namespaceToCollision, n.serviceName, true)).To(Succeed(), "fail to delete service") - _, err := GetAllService(n.Ctx) + _, err := GetAllService(ctx) Expect(err).To(Succeed(), "fail to get service") }) @@ -147,7 +152,7 @@ func (n *NodePort) Restore() error { args = append(args[:index], append([]string{restoreName2}, args[index:]...)...) args = append(args, "--preserve-nodeports=true") By(fmt.Sprintf("Start to restore %s with nodeports preservation ......", restoreName2), func() { - Expect(VeleroRestoreExec(n.Ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, + Expect(VeleroRestoreExec(ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, restoreName2, args, velerov1api.RestorePhaseCompleted)).To(Succeed(), func() string { RunDebug(context.Background(), n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, "", restoreName2) return "Fail to restore workload" @@ -155,7 +160,7 @@ func (n *NodePort) Restore() error { }) By(fmt.Sprintf("Verify service %s was restore successfully with the origin nodeport.", n.namespace), func() { - service, err := GetService(n.Ctx, n.Client, n.namespace, n.serviceName) + service, err := GetService(ctx, n.Client, n.namespace, n.serviceName) Expect(err).To(Succeed()) Expect(len(service.Spec.Ports)).To(Equal(1)) Expect(service.Spec.Ports[0].NodePort).To(Equal(n.nodePort)) diff --git a/test/e2e/basic/pvc-selected-node-changing.go b/test/e2e/basic/pvc-selected-node-changing.go index 9b31966a3..9e085addf 100644 --- a/test/e2e/basic/pvc-selected-node-changing.go +++ b/test/e2e/basic/pvc-selected-node-changing.go @@ -74,7 +74,6 @@ func (p *PVCSelectedNodeChanging) StartRun() error { return nil } func (p *PVCSelectedNodeChanging) CreateResources() error { - p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) By(fmt.Sprintf("Create namespace %s", p.namespace), func() { Expect(CreateNamespace(context.Background(), p.Client, p.namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", p.namespace)) @@ -125,8 +124,10 @@ func (p *PVCSelectedNodeChanging) Destroy() error { } func (p *PVCSelectedNodeChanging) Restore() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() By(fmt.Sprintf("Start to restore %s .....", p.RestoreName), func() { - 
Expect(VeleroRestoreExec(context.Background(), p.VeleroCfg.VeleroCLI, + Expect(VeleroRestoreExec(ctx, p.VeleroCfg.VeleroCLI, p.VeleroCfg.VeleroNamespace, p.RestoreName, p.RestoreArgs, velerov1api.RestorePhaseCompleted)).To( Succeed(), @@ -135,7 +136,7 @@ func (p *PVCSelectedNodeChanging) Restore() error { p.VeleroCfg.VeleroNamespace, "", p.RestoreName) return "Fail to restore workload" }) - err := WaitForPods(p.Ctx, p.Client, p.mappedNS, []string{p.podName}) + err := WaitForPods(ctx, p.Client, p.mappedNS, []string{p.podName}) Expect(err).To(Succeed()) }) return nil diff --git a/test/e2e/basic/resources-check/namespaces.go b/test/e2e/basic/resources-check/namespaces.go index 87694a9dd..3acf299a1 100644 --- a/test/e2e/basic/resources-check/namespaces.go +++ b/test/e2e/basic/resources-check/namespaces.go @@ -93,14 +93,15 @@ func (m *MultiNSBackup) StartRun() error { } func (m *MultiNSBackup) CreateResources() error { - m.Ctx, _ = context.WithTimeout(context.Background(), m.TimeoutDuration) + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() fmt.Printf("Creating namespaces ...\n") labels := map[string]string{ "ns-test": "true", } for nsNum := 0; nsNum < m.NamespacesTotal; nsNum++ { createNSName := fmt.Sprintf("%s-%00000d", m.NSBaseName, nsNum) - if err := CreateNamespaceWithLabel(m.Ctx, m.Client, createNSName, labels); err != nil { + if err := CreateNamespaceWithLabel(ctx, m.Client, createNSName, labels); err != nil { return errors.Wrapf(err, "Failed to create namespace %s", createNSName) } } @@ -108,10 +109,12 @@ func (m *MultiNSBackup) CreateResources() error { } func (m *MultiNSBackup) Verify() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), m.TimeoutDuration) + defer ctxCancel() // Verify that we got back all of the namespaces we created for nsNum := 0; nsNum < m.NamespacesTotal; nsNum++ { checkNSName := fmt.Sprintf("%s-%00000d", m.NSBaseName, nsNum) - checkNS, err := GetNamespace(m.Ctx, m.Client, checkNSName) + checkNS, err := GetNamespace(ctx, m.Client, checkNSName) if err != nil { return errors.Wrapf(err, "Could not retrieve test namespace %s", checkNSName) } else if checkNS.Name != checkNSName { @@ -122,10 +125,11 @@ func (m *MultiNSBackup) Verify() error { } func (m *MultiNSBackup) Destroy() error { - m.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) - err := CleanupNamespaces(m.Ctx, m.Client, m.NSBaseName) + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() + err := CleanupNamespaces(ctx, m.Client, m.NSBaseName) if err != nil { return errors.Wrap(err, "Could cleanup retrieve namespaces") } - return WaitAllSelectedNSDeleted(m.Ctx, m.Client, "ns-test=true") + return WaitAllSelectedNSDeleted(ctx, m.Client, "ns-test=true") } diff --git a/test/e2e/basic/resources-check/namespaces_annotation.go b/test/e2e/basic/resources-check/namespaces_annotation.go index b21bfb728..3d41e5a59 100644 --- a/test/e2e/basic/resources-check/namespaces_annotation.go +++ b/test/e2e/basic/resources-check/namespaces_annotation.go @@ -68,11 +68,12 @@ func (n *NSAnnotationCase) Init() error { } func (n *NSAnnotationCase) CreateResources() error { - n.Ctx, _ = context.WithTimeout(context.Background(), 10*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() for nsNum := 0; nsNum < n.NamespacesTotal; nsNum++ { createNSName := fmt.Sprintf("%s-%00000d", n.NSBaseName, nsNum) createAnnotationName := 
fmt.Sprintf("annotation-%s-%00000d", n.NSBaseName, nsNum) - if err := CreateNamespaceWithAnnotation(n.Ctx, n.Client, createNSName, map[string]string{"testAnnotation": createAnnotationName}); err != nil { + if err := CreateNamespaceWithAnnotation(ctx, n.Client, createNSName, map[string]string{"testAnnotation": createAnnotationName}); err != nil { return errors.Wrapf(err, "Failed to create namespace %s", createNSName) } } @@ -80,10 +81,12 @@ func (n *NSAnnotationCase) CreateResources() error { } func (n *NSAnnotationCase) Verify() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() for nsNum := 0; nsNum < n.NamespacesTotal; nsNum++ { checkNSName := fmt.Sprintf("%s-%00000d", n.NSBaseName, nsNum) checkAnnoName := fmt.Sprintf("annotation-%s-%00000d", n.NSBaseName, nsNum) - checkNS, err := GetNamespace(n.Ctx, n.Client, checkNSName) + checkNS, err := GetNamespace(ctx, n.Client, checkNSName) if err != nil { return errors.Wrapf(err, "Could not retrieve test namespace %s", checkNSName) diff --git a/test/e2e/basic/resources-check/rbac.go b/test/e2e/basic/resources-check/rbac.go index 1b988bc9e..b4eaf7d37 100644 --- a/test/e2e/basic/resources-check/rbac.go +++ b/test/e2e/basic/resources-check/rbac.go @@ -84,21 +84,22 @@ func (r *RBACCase) Init() error { } func (r *RBACCase) CreateResources() error { - r.Ctx, _ = context.WithTimeout(context.Background(), 10*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() for nsNum := 0; nsNum < r.NamespacesTotal; nsNum++ { createNSName := fmt.Sprintf("%s-%00000d", r.NSBaseName, nsNum) fmt.Printf("Creating namespaces ...%s\n", createNSName) - if err := CreateNamespace(r.Ctx, r.Client, createNSName); err != nil { + if err := CreateNamespace(ctx, r.Client, createNSName); err != nil { return errors.Wrapf(err, "Failed to create namespace %s", createNSName) } serviceAccountName := fmt.Sprintf("service-account-%s-%00000d", r.NSBaseName, nsNum) fmt.Printf("Creating service account ...%s\n", createNSName) - if err := CreateServiceAccount(r.Ctx, r.Client, createNSName, serviceAccountName); err != nil { + if err := CreateServiceAccount(ctx, r.Client, createNSName, serviceAccountName); err != nil { return errors.Wrapf(err, "Failed to create service account %s", serviceAccountName) } clusterRoleName := fmt.Sprintf("clusterrole-%s-%00000d", r.NSBaseName, nsNum) clusterRoleBindingName := fmt.Sprintf("clusterrolebinding-%s-%00000d", r.NSBaseName, nsNum) - if err := CreateRBACWithBindingSA(r.Ctx, r.Client, createNSName, serviceAccountName, clusterRoleName, clusterRoleBindingName); err != nil { + if err := CreateRBACWithBindingSA(ctx, r.Client, createNSName, serviceAccountName, clusterRoleName, clusterRoleBindingName); err != nil { return errors.Wrapf(err, "Failed to create cluster role %s with role binding %s", clusterRoleName, clusterRoleBindingName) } } @@ -106,13 +107,14 @@ func (r *RBACCase) CreateResources() error { } func (r *RBACCase) Verify() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() for nsNum := 0; nsNum < r.NamespacesTotal; nsNum++ { checkNSName := fmt.Sprintf("%s-%00000d", r.NSBaseName, nsNum) checkServiceAccountName := fmt.Sprintf("service-account-%s-%00000d", r.NSBaseName, nsNum) checkClusterRoleName := fmt.Sprintf("clusterrole-%s-%00000d", r.NSBaseName, nsNum) checkClusterRoleBindingName := fmt.Sprintf("clusterrolebinding-%s-%00000d", r.NSBaseName, nsNum) - - checkNS, err := GetNamespace(r.Ctx, 
r.Client, checkNSName) + checkNS, err := GetNamespace(ctx, r.Client, checkNSName) if err != nil { return errors.Wrapf(err, "Could not retrieve test namespace %s", checkNSName) } @@ -121,7 +123,7 @@ func (r *RBACCase) Verify() error { } //getting service account from the restore - checkSA, err := GetServiceAccount(r.Ctx, r.Client, checkNSName, checkServiceAccountName) + checkSA, err := GetServiceAccount(ctx, r.Client, checkNSName, checkServiceAccountName) if err != nil { return errors.Wrapf(err, "Could not retrieve test service account %s", checkSA) @@ -132,7 +134,7 @@ func (r *RBACCase) Verify() error { } //getting cluster role from the restore - checkClusterRole, err := GetClusterRole(r.Ctx, r.Client, checkClusterRoleName) + checkClusterRole, err := GetClusterRole(ctx, r.Client, checkClusterRoleName) if err != nil { return errors.Wrapf(err, "Could not retrieve test cluster role %s", checkClusterRole) @@ -143,7 +145,7 @@ func (r *RBACCase) Verify() error { } //getting cluster role binding from the restore - checkClusterRoleBinding, err := GetClusterRoleBinding(r.Ctx, r.Client, checkClusterRoleBindingName) + checkClusterRoleBinding, err := GetClusterRoleBinding(ctx, r.Client, checkClusterRoleBindingName) if err != nil { return errors.Wrapf(err, "Could not retrieve test cluster role binding %s", checkClusterRoleBinding) @@ -164,19 +166,21 @@ func (r *RBACCase) Verify() error { } func (r *RBACCase) Destroy() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() //cleanup clusterrole - err := CleanupClusterRole(r.Ctx, r.Client, r.NSBaseName) + err := CleanupClusterRole(ctx, r.Client, r.NSBaseName) if err != nil { return errors.Wrap(err, "Could not cleanup clusterroles") } //cleanup cluster rolebinding - err = CleanupClusterRoleBinding(r.Ctx, r.Client, r.NSBaseName) + err = CleanupClusterRoleBinding(ctx, r.Client, r.NSBaseName) if err != nil { return errors.Wrap(err, "Could not cleanup clusterrolebindings") } - err = CleanupNamespacesWithPoll(r.Ctx, r.Client, r.NSBaseName) + err = CleanupNamespacesWithPoll(ctx, r.Client, r.NSBaseName) if err != nil { return errors.Wrap(err, "Could cleanup retrieve namespaces") } diff --git a/test/e2e/basic/storage-class-changing.go b/test/e2e/basic/storage-class-changing.go index 4ae1d71c6..ee931588a 100644 --- a/test/e2e/basic/storage-class-changing.go +++ b/test/e2e/basic/storage-class-changing.go @@ -74,13 +74,14 @@ func (s *StorageClasssChanging) StartRun() error { return nil } func (s *StorageClasssChanging) CreateResources() error { - s.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() By(fmt.Sprintf("Create a storage class %s", s.desStorageClass), func() { Expect(InstallStorageClass(context.Background(), fmt.Sprintf("testdata/storage-class/%s.yaml", s.VeleroCfg.CloudProvider))).To(Succeed()) }) By(fmt.Sprintf("Create namespace %s", s.namespace), func() { - Expect(CreateNamespace(s.Ctx, s.Client, s.namespace)).To(Succeed(), + Expect(CreateNamespace(ctx, s.Client, s.namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", s.namespace)) }) @@ -96,10 +97,12 @@ func (s *StorageClasssChanging) CreateResources() error { } func (s *StorageClasssChanging) Destroy() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() By(fmt.Sprintf("Expect storage class of PV %s to be %s ", s.volume, s.srcStorageClass), func() { pvName, err := 
GetPVByPodName(s.Client, s.namespace, s.volume) Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV name by pod name %s", s.podName)) - pv, err := GetPersistentVolume(s.Ctx, s.Client, s.namespace, pvName) + pv, err := GetPersistentVolume(ctx, s.Client, s.namespace, pvName) Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV by pod name %s", s.podName)) fmt.Println(pv) Expect(pv.Spec.StorageClassName).To(Equal(s.srcStorageClass), @@ -107,15 +110,17 @@ func (s *StorageClasssChanging) Destroy() error { }) By(fmt.Sprintf("Start to destroy namespace %s......", s.NSBaseName), func() { - Expect(CleanupNamespacesWithPoll(s.Ctx, s.Client, s.NSBaseName)).To(Succeed(), + Expect(CleanupNamespacesWithPoll(ctx, s.Client, s.NSBaseName)).To(Succeed(), fmt.Sprintf("Failed to delete namespace %s", s.NSBaseName)) }) return nil } func (s *StorageClasssChanging) Restore() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() By(fmt.Sprintf("Start to restore %s .....", s.RestoreName), func() { - Expect(VeleroRestoreExec(s.Ctx, s.VeleroCfg.VeleroCLI, + Expect(VeleroRestoreExec(ctx, s.VeleroCfg.VeleroCLI, s.VeleroCfg.VeleroNamespace, s.RestoreName, s.RestoreArgs, velerov1api.RestorePhaseCompleted)).To( Succeed(), @@ -128,11 +133,13 @@ func (s *StorageClasssChanging) Restore() error { return nil } func (s *StorageClasssChanging) Verify() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() By(fmt.Sprintf("Expect storage class of PV %s to be %s ", s.volume, s.desStorageClass), func() { time.Sleep(1 * time.Minute) pvName, err := GetPVByPodName(s.Client, s.mappedNS, s.volume) Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV name by pod name %s", s.podName)) - pv, err := GetPersistentVolume(s.Ctx, s.Client, s.mappedNS, pvName) + pv, err := GetPersistentVolume(ctx, s.Client, s.mappedNS, pvName) Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV by pod name %s", s.podName)) fmt.Println(pv) Expect(pv.Spec.StorageClassName).To(Equal(s.desStorageClass), diff --git a/test/e2e/bsl-mgmt/deletion.go b/test/e2e/bsl-mgmt/deletion.go index 5d8619a8c..f68342efb 100644 --- a/test/e2e/bsl-mgmt/deletion.go +++ b/test/e2e/bsl-mgmt/deletion.go @@ -96,6 +96,8 @@ func BslDeletionTest(useVolumeSnapshots bool) { When("kibishii is the sample workload", func() { It("Local backups and restic repos (if Velero was installed with Restic) will be deleted once the corresponding backup storage location is deleted", func() { + oneHourTimeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*60) + defer ctxCancel() if veleroCfg.AdditionalBSLProvider == "" { Skip("no additional BSL provider given, not running multiple BackupStorageLocation with unique credentials tests") } @@ -141,8 +143,6 @@ func BslDeletionTest(useVolumeSnapshots bool) { backupName_1 := "backup1-" + UUIDgen.String() backupName_2 := "backup2-" + UUIDgen.String() - oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60) - backupLocation_1 := "default" backupLocation_2 := additionalBsl podName_1 := "kibishii-deployment-0" diff --git a/test/e2e/migration/migration.go b/test/e2e/migration/migration.go index b1e2dae67..1511cc307 100644 --- a/test/e2e/migration/migration.go +++ b/test/e2e/migration/migration.go @@ -97,13 +97,13 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) }) When("kibishii is the sample workload", func() { It("should be successfully backed up and restored to the default 
BackupStorageLocation", func() { + oneHourTimeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*60) + defer ctxCancel() flag.Parse() UUIDgen, err = uuid.NewRandom() Expect(err).To(Succeed()) supportUploaderType, err := IsSupportUploaderType(veleroCLI2Version.VeleroVersion) Expect(err).To(Succeed()) - oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60) - if veleroCLI2Version.VeleroCLI == "" { //Assume tag of velero server image is identical to velero CLI version //Download velero CLI if it's empty according to velero CLI version diff --git a/test/e2e/privilegesmgmt/ssr.go b/test/e2e/privilegesmgmt/ssr.go index 20b385b6e..704d4d191 100644 --- a/test/e2e/privilegesmgmt/ssr.go +++ b/test/e2e/privilegesmgmt/ssr.go @@ -57,8 +57,9 @@ func SSRTest() { }) It(fmt.Sprintf("Should create an ssr object in the %s namespace and later removed by controller", veleroCfg.VeleroNamespace), func() { + ctx, ctxCancel := context.WithTimeout(context.Background(), time.Duration(time.Minute*10)) + defer ctxCancel() defer DeleteNamespace(context.TODO(), *veleroCfg.ClientToInstallVelero, testNS, false) - ctx, _ := context.WithTimeout(context.Background(), time.Duration(time.Minute*10)) By(fmt.Sprintf("Create %s namespace", testNS)) Expect(CreateNamespace(ctx, *veleroCfg.ClientToInstallVelero, testNS)).To(Succeed(), fmt.Sprintf("Failed to create %s namespace", testNS)) diff --git a/test/e2e/pv-backup/pv-backup-filter.go b/test/e2e/pv-backup/pv-backup-filter.go index 311503612..a144a7a7e 100644 --- a/test/e2e/pv-backup/pv-backup-filter.go +++ b/test/e2e/pv-backup/pv-backup-filter.go @@ -31,7 +31,6 @@ var OptInPVBackupTest func() = TestFunc(&PVBackupFiltering{annotation: OPT_IN_AN var OptOutPVBackupTest func() = TestFunc(&PVBackupFiltering{annotation: OPT_OUT_ANN, id: "opt-out"}) func (p *PVBackupFiltering) Init() error { - p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) p.VeleroCfg = VeleroCfg p.Client = *p.VeleroCfg.ClientToInstallVelero p.VeleroCfg.UseVolumeSnapshots = false @@ -48,7 +47,9 @@ func (p *PVBackupFiltering) Init() error { } func (p *PVBackupFiltering) StartRun() error { - err := InstallStorageClass(p.Ctx, fmt.Sprintf("testdata/storage-class/%s.yaml", VeleroCfg.CloudProvider)) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() + err := InstallStorageClass(ctx, fmt.Sprintf("testdata/storage-class/%s.yaml", VeleroCfg.CloudProvider)) if err != nil { return err } @@ -72,9 +73,11 @@ func (p *PVBackupFiltering) StartRun() error { return nil } func (p *PVBackupFiltering) CreateResources() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() for _, ns := range *p.NSIncluded { By(fmt.Sprintf("Create namespaces %s for workload\n", ns), func() { - Expect(CreateNamespace(p.Ctx, p.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns)) + Expect(CreateNamespace(ctx, p.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns)) }) var pods []string By(fmt.Sprintf("Deploy a few pods with several PVs in namespace %s", ns), func() { @@ -102,7 +105,7 @@ func (p *PVBackupFiltering) CreateResources() error { p.annotation: volumesToAnnotation, } By(fmt.Sprintf("Add annotation to pod %s of namespace %s", pod.Name, ns), func() { - _, err := AddAnnotationToPod(p.Ctx, p.Client, ns, pod.Name, ann) + _, err := AddAnnotationToPod(ctx, p.Client, ns, pod.Name, ann) Expect(err).To(Succeed()) }) }) @@ -113,17 +116,17 @@ func (p 
*PVBackupFiltering) CreateResources() error { By(fmt.Sprintf("Waiting for all pods to start %s\n", p.podsList), func() { for index, ns := range *p.NSIncluded { By(fmt.Sprintf("Waiting for all pods to start %d in namespace %s", index, ns), func() { - WaitForPods(p.Ctx, p.Client, ns, p.podsList[index]) + WaitForPods(ctx, p.Client, ns, p.podsList[index]) }) } }) By(fmt.Sprintf("Populate all pods %s with file %s", p.podsList, FILE_NAME), func() { for index, ns := range *p.NSIncluded { By(fmt.Sprintf("Creating file in all pods to start %d in namespace %s", index, ns), func() { - WaitForPods(p.Ctx, p.Client, ns, p.podsList[index]) + WaitForPods(ctx, p.Client, ns, p.podsList[index]) for i, pod := range p.podsList[index] { for j := range p.volumesList[i] { - Expect(CreateFileToPod(p.Ctx, ns, pod, pod, p.volumesList[i][j], + Expect(CreateFileToPod(ctx, ns, pod, pod, p.volumesList[i][j], FILE_NAME, fileContent(ns, pod, p.volumesList[i][j]))).To(Succeed()) } } @@ -134,10 +137,12 @@ func (p *PVBackupFiltering) CreateResources() error { } func (p *PVBackupFiltering) Verify() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*60) + defer ctxCancel() By(fmt.Sprintf("Waiting for all pods to start %s", p.podsList), func() { for index, ns := range *p.NSIncluded { By(fmt.Sprintf("Waiting for all pods to start %d in namespace %s", index, ns), func() { - WaitForPods(p.Ctx, p.Client, ns, p.podsList[index]) + WaitForPods(ctx, p.Client, ns, p.podsList[index]) }) } }) @@ -150,21 +155,21 @@ func (p *PVBackupFiltering) Verify() error { if j%2 == 0 { if p.annotation == OPT_IN_ANN { By(fmt.Sprintf("File should exists in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() { - Expect(fileExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect") + Expect(fileExist(ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect") }) } else { By(fmt.Sprintf("File should not exist in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() { - Expect(fileNotExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect") + Expect(fileNotExist(ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect") }) } } else { if p.annotation == OPT_OUT_ANN { By(fmt.Sprintf("File should exists in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() { - Expect(fileExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect") + Expect(fileExist(ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect") }) } else { By(fmt.Sprintf("File should not exist in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() { - Expect(fileNotExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect") + Expect(fileNotExist(ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect") }) } } diff --git a/test/e2e/resource-filtering/base.go b/test/e2e/resource-filtering/base.go index 35f5e3f77..1d8ce9026 100644 --- a/test/e2e/resource-filtering/base.go +++ b/test/e2e/resource-filtering/base.go @@ -66,20 +66,21 @@ func (f *FilteringCase) Init() error { } func (f *FilteringCase) CreateResources() error { - f.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + ctx, ctxCancel := 
context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() for nsNum := 0; nsNum < f.NamespacesTotal; nsNum++ { namespace := fmt.Sprintf("%s-%00000d", f.NSBaseName, nsNum) fmt.Printf("Creating resources in namespace ...%s\n", namespace) - if err := CreateNamespace(f.Ctx, f.Client, namespace); err != nil { + if err := CreateNamespace(ctx, f.Client, namespace); err != nil { return errors.Wrapf(err, "Failed to create namespace %s", namespace) } serviceAccountName := "default" // wait until the service account is created before patch the image pull secret - if err := WaitUntilServiceAccountCreated(f.Ctx, f.Client, namespace, serviceAccountName, 10*time.Minute); err != nil { + if err := WaitUntilServiceAccountCreated(ctx, f.Client, namespace, serviceAccountName, 10*time.Minute); err != nil { return errors.Wrapf(err, "failed to wait the service account %q created under the namespace %q", serviceAccountName, namespace) } // add the image pull secret to avoid the image pull limit issue of Docker Hub - if err := PatchServiceAccountWithImagePullSecret(f.Ctx, f.Client, namespace, serviceAccountName, VeleroCfg.RegistryCredentialFile); err != nil { + if err := PatchServiceAccountWithImagePullSecret(ctx, f.Client, namespace, serviceAccountName, VeleroCfg.RegistryCredentialFile); err != nil { return errors.Wrapf(err, "failed to patch the service account %q under the namespace %q", serviceAccountName, namespace) } //Create deployment @@ -120,11 +121,13 @@ func (f *FilteringCase) CreateResources() error { } func (f *FilteringCase) Verify() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() for nsNum := 0; nsNum < f.NamespacesTotal; nsNum++ { namespace := fmt.Sprintf("%s-%00000d", f.NSBaseName, nsNum) fmt.Printf("Checking resources in namespaces ...%s\n", namespace) //Check namespace - checkNS, err := GetNamespace(f.Ctx, f.Client, namespace) + checkNS, err := GetNamespace(ctx, f.Client, namespace) if err != nil { return errors.Wrapf(err, "Could not retrieve test namespace %s", namespace) } diff --git a/test/e2e/resource-filtering/exclude_label.go b/test/e2e/resource-filtering/exclude_label.go index 7644a1c86..b2202cdec 100644 --- a/test/e2e/resource-filtering/exclude_label.go +++ b/test/e2e/resource-filtering/exclude_label.go @@ -76,7 +76,8 @@ func (e *ExcludeFromBackup) Init() error { } func (e *ExcludeFromBackup) CreateResources() error { - e.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() namespace := e.NSBaseName // These 2 labels for resources to be included label1 := map[string]string{ @@ -86,16 +87,16 @@ func (e *ExcludeFromBackup) CreateResources() error { "velero.io/exclude-from-backup": "false", } fmt.Printf("Creating resources in namespace ...%s\n", namespace) - if err := CreateNamespace(e.Ctx, e.Client, namespace); err != nil { + if err := CreateNamespace(ctx, e.Client, namespace); err != nil { return errors.Wrapf(err, "Failed to create namespace %s", namespace) } serviceAccountName := "default" // wait until the service account is created before patch the image pull secret - if err := WaitUntilServiceAccountCreated(e.Ctx, e.Client, namespace, serviceAccountName, 10*time.Minute); err != nil { + if err := WaitUntilServiceAccountCreated(ctx, e.Client, namespace, serviceAccountName, 10*time.Minute); err != nil { return errors.Wrapf(err, "failed to wait the service account %q created under the namespace %q", 
serviceAccountName, namespace) } // add the image pull secret to avoid the image pull limit issue of Docker Hub - if err := PatchServiceAccountWithImagePullSecret(e.Ctx, e.Client, namespace, serviceAccountName, VeleroCfg.RegistryCredentialFile); err != nil { + if err := PatchServiceAccountWithImagePullSecret(ctx, e.Client, namespace, serviceAccountName, VeleroCfg.RegistryCredentialFile); err != nil { return errors.Wrapf(err, "failed to patch the service account %q under the namespace %q", serviceAccountName, namespace) } //Create deployment: to be included @@ -139,10 +140,12 @@ func (e *ExcludeFromBackup) CreateResources() error { } func (e *ExcludeFromBackup) Verify() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() namespace := e.NSBaseName By(fmt.Sprintf("Checking resources in namespaces ...%s\n", namespace), func() { //Check namespace - checkNS, err := GetNamespace(e.Ctx, e.Client, namespace) + checkNS, err := GetNamespace(ctx, e.Client, namespace) Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("Could not retrieve test namespace %s", namespace)) Expect(checkNS.Name == namespace).To(Equal(true), fmt.Sprintf("Retrieved namespace for %s has name %s instead", namespace, checkNS.Name)) diff --git a/test/e2e/resource-filtering/exclude_namespaces.go b/test/e2e/resource-filtering/exclude_namespaces.go index 8d195c247..05485e8e2 100644 --- a/test/e2e/resource-filtering/exclude_namespaces.go +++ b/test/e2e/resource-filtering/exclude_namespaces.go @@ -109,11 +109,12 @@ func (e *ExcludeNamespaces) Init() error { } func (e *ExcludeNamespaces) CreateResources() error { - e.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() for nsNum := 0; nsNum < e.NamespacesTotal; nsNum++ { createNSName := fmt.Sprintf("%s-%00000d", e.NSBaseName, nsNum) fmt.Printf("Creating namespaces ...%s\n", createNSName) - if err := CreateNamespace(e.Ctx, e.Client, createNSName); err != nil { + if err := CreateNamespace(ctx, e.Client, createNSName); err != nil { return errors.Wrapf(err, "Failed to create namespace %s", createNSName) } } @@ -121,10 +122,12 @@ func (e *ExcludeNamespaces) CreateResources() error { } func (e *ExcludeNamespaces) Verify() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() // Verify that we got back all of the namespaces we created for nsNum := 0; nsNum < e.namespacesExcluded; nsNum++ { excludeNSName := fmt.Sprintf("%s-%00000d", e.NSBaseName, nsNum) - _, err := GetNamespace(e.Ctx, e.Client, excludeNSName) + _, err := GetNamespace(ctx, e.Client, excludeNSName) if err == nil { return errors.Wrapf(err, "Resource filtering with exclude namespace but exclude namespace %s exist", excludeNSName) } @@ -136,7 +139,7 @@ func (e *ExcludeNamespaces) Verify() error { for nsNum := e.namespacesExcluded; nsNum < e.NamespacesTotal; nsNum++ { checkNSName := fmt.Sprintf("%s-%00000d", e.NSBaseName, nsNum) - checkNS, err := GetNamespace(e.Ctx, e.Client, checkNSName) + checkNS, err := GetNamespace(ctx, e.Client, checkNSName) if err != nil { return errors.Wrapf(err, "Could not retrieve test namespace %s", checkNSName) } diff --git a/test/e2e/resource-filtering/include_namespaces.go b/test/e2e/resource-filtering/include_namespaces.go index 5783586fa..003c99d99 100644 --- a/test/e2e/resource-filtering/include_namespaces.go +++ b/test/e2e/resource-filtering/include_namespaces.go @@ -105,11 +105,12 @@ 
func (i *IncludeNamespaces) Init() error { } func (i *IncludeNamespaces) CreateResources() error { - i.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() for nsNum := 0; nsNum < i.NamespacesTotal; nsNum++ { createNSName := fmt.Sprintf("%s-%00000d", i.NSBaseName, nsNum) fmt.Printf("Creating namespaces ...%s\n", createNSName) - if err := CreateNamespace(i.Ctx, i.Client, createNSName); err != nil { + if err := CreateNamespace(ctx, i.Client, createNSName); err != nil { return errors.Wrapf(err, "Failed to create namespace %s", createNSName) } } @@ -117,10 +118,12 @@ func (i *IncludeNamespaces) CreateResources() error { } func (i *IncludeNamespaces) Verify() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() // Verify that we got back all of the namespaces we created for nsNum := 0; nsNum < i.namespacesIncluded; nsNum++ { checkNSName := fmt.Sprintf("%s-%00000d", i.NSBaseName, nsNum) - checkNS, err := GetNamespace(i.Ctx, i.Client, checkNSName) + checkNS, err := GetNamespace(ctx, i.Client, checkNSName) if err != nil { return errors.Wrapf(err, "Could not retrieve test namespace %s", checkNSName) } @@ -131,7 +134,7 @@ func (i *IncludeNamespaces) Verify() error { for nsNum := i.namespacesIncluded; nsNum < i.NamespacesTotal; nsNum++ { excludeNSName := fmt.Sprintf("%s-%00000d", i.NSBaseName, nsNum) - _, err := GetNamespace(i.Ctx, i.Client, excludeNSName) + _, err := GetNamespace(ctx, i.Client, excludeNSName) if err == nil { return errors.Wrapf(err, "Resource filtering with include namespace but exclude namespace %s exist", excludeNSName) } diff --git a/test/e2e/resource-filtering/label_selector.go b/test/e2e/resource-filtering/label_selector.go index 2053daed3..90a4bdb8f 100644 --- a/test/e2e/resource-filtering/label_selector.go +++ b/test/e2e/resource-filtering/label_selector.go @@ -75,7 +75,8 @@ func (l *LabelSelector) Init() error { } func (l *LabelSelector) CreateResources() error { - l.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() for nsNum := 0; nsNum < l.NamespacesTotal; nsNum++ { namespace := fmt.Sprintf("%s-%00000d", l.NSBaseName, nsNum) fmt.Printf("Creating resources in namespace ...%s\n", namespace) @@ -85,17 +86,17 @@ func (l *LabelSelector) CreateResources() error { "resourcefiltering": "false", } } - if err := CreateNamespaceWithLabel(l.Ctx, l.Client, namespace, labels); err != nil { + if err := CreateNamespaceWithLabel(ctx, l.Client, namespace, labels); err != nil { return errors.Wrapf(err, "Failed to create namespace %s", namespace) } serviceAccountName := "default" // wait until the service account is created before patch the image pull secret - if err := WaitUntilServiceAccountCreated(l.Ctx, l.Client, namespace, serviceAccountName, 10*time.Minute); err != nil { + if err := WaitUntilServiceAccountCreated(ctx, l.Client, namespace, serviceAccountName, 10*time.Minute); err != nil { return errors.Wrapf(err, "failed to wait the service account %q created under the namespace %q", serviceAccountName, namespace) } // add the image pull secret to avoid the image pull limit issue of Docker Hub - if err := PatchServiceAccountWithImagePullSecret(l.Ctx, l.Client, namespace, serviceAccountName, VeleroCfg.RegistryCredentialFile); err != nil { + if err := PatchServiceAccountWithImagePullSecret(ctx, l.Client, namespace, 
serviceAccountName, VeleroCfg.RegistryCredentialFile); err != nil { return errors.Wrapf(err, "failed to patch the service account %q under the namespace %q", serviceAccountName, namespace) } //Create deployment diff --git a/test/e2e/resourcepolicies/resource_policies.go b/test/e2e/resourcepolicies/resource_policies.go index c5b5dfdfc..3d84de30d 100644 --- a/test/e2e/resourcepolicies/resource_policies.go +++ b/test/e2e/resourcepolicies/resource_policies.go @@ -102,8 +102,8 @@ func (r *ResourcePoliciesCase) Init() error { } func (r *ResourcePoliciesCase) CreateResources() error { - r.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) - + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() By(("Installing storage class..."), func() { Expect(r.installTestStorageClasses(fmt.Sprintf("testdata/storage-class/%s.yaml", VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install storage class") }) @@ -119,7 +119,7 @@ func (r *ResourcePoliciesCase) CreateResources() error { for nsNum := 0; nsNum < r.NamespacesTotal; nsNum++ { namespace := fmt.Sprintf("%s-%00000d", r.NSBaseName, nsNum) By(fmt.Sprintf("Create namespaces %s for workload\n", namespace), func() { - Expect(CreateNamespace(r.Ctx, r.Client, namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", namespace)) + Expect(CreateNamespace(ctx, r.Client, namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", namespace)) }) volName := fmt.Sprintf("vol-%s-%00000d", r.NSBaseName, nsNum) @@ -145,12 +145,14 @@ func (r *ResourcePoliciesCase) CreateResources() error { } func (r *ResourcePoliciesCase) Verify() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() for i, ns := range *r.NSIncluded { By(fmt.Sprintf("Verify pod data in namespace %s", ns), func() { By(fmt.Sprintf("Waiting for deployment %s in namespace %s ready", r.NSBaseName, ns), func() { Expect(WaitForReadyDeployment(r.Client.ClientGo, ns, r.NSBaseName)).To(Succeed(), fmt.Sprintf("Failed to waiting for deployment %s in namespace %s ready", r.NSBaseName, ns)) }) - podList, err := ListPods(r.Ctx, r.Client, ns) + podList, err := ListPods(ctx, r.Client, ns) Expect(err).To(Succeed(), fmt.Sprintf("failed to list pods in namespace: %q with error %v", ns, err)) volName := fmt.Sprintf("vol-%s-%00000d", r.NSBaseName, i) @@ -159,7 +161,7 @@ func (r *ResourcePoliciesCase) Verify() error { if vol.Name != volName { continue } - content, err := ReadFileFromPodVolume(r.Ctx, ns, pod.Name, "container-busybox", vol.Name, FileName) + content, err := ReadFileFromPodVolume(ctx, ns, pod.Name, "container-busybox", vol.Name, FileName) if i%2 == 0 { Expect(err).To(HaveOccurred(), "Expected file not found") // File should not exist } else { @@ -228,7 +230,9 @@ func (r *ResourcePoliciesCase) createDeploymentWithVolume(namespace string, volL } func (r *ResourcePoliciesCase) writeDataIntoPods(namespace, volName string) error { - podList, err := ListPods(r.Ctx, r.Client, namespace) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() + podList, err := ListPods(ctx, r.Client, namespace) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to list pods in namespace: %q with error %v", namespace, err)) } @@ -237,7 +241,7 @@ func (r *ResourcePoliciesCase) writeDataIntoPods(namespace, volName string) erro if vol.Name != volName { continue } - err := CreateFileToPod(r.Ctx, namespace, pod.Name, "container-busybox", vol.Name, FileName, 
fmt.Sprintf("ns-%s pod-%s volume-%s", namespace, pod.Name, vol.Name)) + err := CreateFileToPod(ctx, namespace, pod.Name, "container-busybox", vol.Name, FileName, fmt.Sprintf("ns-%s pod-%s volume-%s", namespace, pod.Name, vol.Name)) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to create file into pod %s in namespace: %q", pod.Name, namespace)) } @@ -247,8 +251,10 @@ func (r *ResourcePoliciesCase) writeDataIntoPods(namespace, volName string) erro } func (r *ResourcePoliciesCase) deleteTestStorageClassList(scList []string) error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() for _, v := range scList { - if err := DeleteStorageClass(r.Ctx, r.Client, v); err != nil { + if err := DeleteStorageClass(ctx, r.Client, v); err != nil { return err } } @@ -256,7 +262,9 @@ func (r *ResourcePoliciesCase) deleteTestStorageClassList(scList []string) error } func (r *ResourcePoliciesCase) installTestStorageClasses(path string) error { - err := InstallStorageClass(r.Ctx, path) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() + err := InstallStorageClass(ctx, path) if err != nil { return err } @@ -277,5 +285,5 @@ func (r *ResourcePoliciesCase) installTestStorageClasses(path string) error { if _, err := tmpFile.WriteString(newContent); err != nil { return errors.Wrapf(err, "failed to write content into temp file %s when install storage class", tmpFile.Name()) } - return InstallStorageClass(r.Ctx, tmpFile.Name()) + return InstallStorageClass(ctx, tmpFile.Name()) } diff --git a/test/e2e/schedule/ordered_resources.go b/test/e2e/schedule/ordered_resources.go index f220d9d44..8be393c17 100644 --- a/test/e2e/schedule/ordered_resources.go +++ b/test/e2e/schedule/ordered_resources.go @@ -64,13 +64,15 @@ func ScheduleOrderedResources() { }) It("Create a schedule to backup resources in a specific order should be successful", func() { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() test := &OrderedResources{} test.VeleroCfg = VeleroCfg err := test.Init() Expect(err).To(Succeed(), err) defer func() { - Expect(DeleteNamespace(test.Ctx, test.Client, test.Namespace, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.Namespace)) - err = VeleroScheduleDelete(test.Ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, test.ScheduleName) + Expect(DeleteNamespace(ctx, test.Client, test.Namespace, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.Namespace)) + err = VeleroScheduleDelete(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, test.ScheduleName) Expect(err).To(Succeed(), fmt.Sprintf("Failed to delete schedule with err %v", err)) err = test.DeleteBackups() Expect(err).To(Succeed(), fmt.Sprintf("Failed to delete backups with err %v", err)) @@ -82,24 +84,24 @@ func ScheduleOrderedResources() { }) By(fmt.Sprintf("Create schedule the workload in %s namespace", test.Namespace), func() { - err = VeleroScheduleCreate(test.Ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, test.ScheduleName, test.ScheduleArgs) + err = VeleroScheduleCreate(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, test.ScheduleName, test.ScheduleArgs) Expect(err).To(Succeed(), fmt.Sprintf("Failed to create schedule %s with err %v", test.ScheduleName, err)) }) By(fmt.Sprintf("Checking resource order in %s schedule cr", test.ScheduleName), func() { - err = CheckScheduleWithResourceOrder(test.Ctx, veleroCfg.VeleroCLI, 
veleroCfg.VeleroNamespace, test.ScheduleName, test.OrderMap) + err = CheckScheduleWithResourceOrder(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, test.ScheduleName, test.OrderMap) Expect(err).To(Succeed(), fmt.Sprintf("Failed to check schedule %s with err %v", test.ScheduleName, err)) }) By("Checking resource order in backup cr", func() { backupList := new(velerov1api.BackupList) err = waitutil.PollImmediate(10*time.Second, time.Minute*5, func() (bool, error) { - if err = test.Client.Kubebuilder.List(test.Ctx, backupList, &kbclient.ListOptions{Namespace: veleroCfg.VeleroNamespace}); err != nil { + if err = test.Client.Kubebuilder.List(ctx, backupList, &kbclient.ListOptions{Namespace: veleroCfg.VeleroNamespace}); err != nil { return false, fmt.Errorf("failed to list backup object in %s namespace with err %v", veleroCfg.VeleroNamespace, err) } for _, backup := range backupList.Items { - if err = CheckBackupWithResourceOrder(test.Ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, backup.Name, test.OrderMap); err == nil { + if err = CheckBackupWithResourceOrder(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, backup.Name, test.OrderMap); err == nil { return true, nil } } @@ -139,21 +141,22 @@ func (o *OrderedResources) Init() error { func (o *OrderedResources) CreateResources() error { veleroCfg := o.VeleroCfg - o.Ctx, _ = context.WithTimeout(context.Background(), 5*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() label := map[string]string{ "orderedresources": "true", } fmt.Printf("Creating resources in %s namespace ...\n", o.Namespace) - if err := CreateNamespace(o.Ctx, o.Client, o.Namespace); err != nil { + if err := CreateNamespace(ctx, o.Client, o.Namespace); err != nil { return errors.Wrapf(err, "failed to create namespace %s", o.Namespace) } serviceAccountName := "default" // wait until the service account is created before patch the image pull secret - if err := WaitUntilServiceAccountCreated(o.Ctx, o.Client, o.Namespace, serviceAccountName, 10*time.Minute); err != nil { + if err := WaitUntilServiceAccountCreated(ctx, o.Client, o.Namespace, serviceAccountName, 10*time.Minute); err != nil { return errors.Wrapf(err, "failed to wait the service account %q created under the namespace %q", serviceAccountName, o.Namespace) } // add the image pull secret to avoid the image pull limit issue of Docker Hub - if err := PatchServiceAccountWithImagePullSecret(o.Ctx, o.Client, o.Namespace, serviceAccountName, veleroCfg.RegistryCredentialFile); err != nil { + if err := PatchServiceAccountWithImagePullSecret(ctx, o.Client, o.Namespace, serviceAccountName, veleroCfg.RegistryCredentialFile); err != nil { return errors.Wrapf(err, "failed to patch the service account %q under the namespace %q", serviceAccountName, o.Namespace) } //Create deployment @@ -194,13 +197,15 @@ func (o *OrderedResources) CreateResources() error { } func (o *OrderedResources) DeleteBackups() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() veleroCfg := o.VeleroCfg backupList := new(velerov1api.BackupList) - if err := o.Client.Kubebuilder.List(o.Ctx, backupList, &kbclient.ListOptions{Namespace: veleroCfg.VeleroNamespace}); err != nil { + if err := o.Client.Kubebuilder.List(ctx, backupList, &kbclient.ListOptions{Namespace: veleroCfg.VeleroNamespace}); err != nil { return fmt.Errorf("failed to list backup object in %s namespace with err %v", veleroCfg.VeleroNamespace, err) } for _, backup := range 
backupList.Items { - if err := VeleroBackupDelete(o.Ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, backup.Name); err != nil { + if err := VeleroBackupDelete(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, backup.Name); err != nil { return err } } diff --git a/test/e2e/schedule/schedule-backup-creation.go b/test/e2e/schedule/schedule-backup-creation.go index 8ab5d8e51..3e26149b7 100644 --- a/test/e2e/schedule/schedule-backup-creation.go +++ b/test/e2e/schedule/schedule-backup-creation.go @@ -69,7 +69,6 @@ func (n *ScheduleBackupCreation) StartRun() error { return nil } func (p *ScheduleBackupCreation) CreateResources() error { - p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) By(fmt.Sprintf("Create namespace %s", p.namespace), func() { Expect(CreateNamespace(context.Background(), p.Client, p.namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", p.namespace)) @@ -85,6 +84,8 @@ func (p *ScheduleBackupCreation) CreateResources() error { } func (n *ScheduleBackupCreation) Backup() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() // Wait until the beginning of the given period to create schedule, it will give us // a predictable period to wait for the first scheduled backup, and verify no immediate // scheduled backup was created between schedule creation and first scheduled backup. @@ -94,7 +95,7 @@ func (n *ScheduleBackupCreation) Backup() error { now := time.Now().Minute() triggerNow := now % n.Period if triggerNow == 0 { - Expect(VeleroScheduleCreate(n.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string { + Expect(VeleroScheduleCreate(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string { RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", "") return "Fail to restore workload" }) diff --git a/test/e2e/schedule/schedule.go b/test/e2e/schedule/schedule.go index de3e06b11..bcd7e1f91 100644 --- a/test/e2e/schedule/schedule.go +++ b/test/e2e/schedule/schedule.go @@ -53,10 +53,11 @@ func (n *ScheduleBackup) StartRun() error { return nil } func (n *ScheduleBackup) CreateResources() error { - n.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + ctx, ctxCancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer ctxCancel() for _, ns := range *n.NSIncluded { By(fmt.Sprintf("Creating namespaces %s ......\n", ns), func() { - Expect(CreateNamespace(n.Ctx, n.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns)) + Expect(CreateNamespace(ctx, n.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns)) }) configmaptName := n.NSBaseName fmt.Printf("Creating configmap %s in namespaces ...%s\n", configmaptName, ns) @@ -69,6 +70,8 @@ func (n *ScheduleBackup) CreateResources() error { } func (n *ScheduleBackup) Backup() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() // Wait until the beginning of the given period to create schedule, it will give us // a predictable period to wait for the first scheduled backup, and verify no immediate // scheduled backup was created between schedule creation and first scheduled backup.
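The same fix repeats across these hunks: a context stored on the test struct, or created with its CancelFunc discarded, is replaced by a locally scoped context that is always cancelled. A minimal sketch of the before/after pattern, with a hypothetical doWork helper standing in for the real test calls:

package main

import (
	"context"
	"fmt"
	"time"
)

// doWork is a stand-in for the real test helpers that take a context.
func doWork(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Before: the CancelFunc returned by WithTimeout is discarded, so the
// timeout's resources are only released when the deadline itself expires.
func leaky() error {
	ctx, _ := context.WithTimeout(context.Background(), 10*time.Minute)
	return doWork(ctx)
}

// After: a locally scoped context whose cancel always runs on return,
// releasing the timer immediately.
func fixed() error {
	ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer ctxCancel()
	return doWork(ctx)
}

func main() {
	fmt.Println(leaky(), fixed())
}

go vet's lostcancel check, part of the govet linter enabled later in this series, reports the first form.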
@@ -78,7 +81,7 @@ func (n *ScheduleBackup) Backup() error { now := time.Now().Minute() triggerNow := now % n.Period if triggerNow == 0 { - Expect(VeleroScheduleCreate(n.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string { + Expect(VeleroScheduleCreate(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string { RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", "") return "Fail to restore workload" }) @@ -89,6 +92,8 @@ func (n *ScheduleBackup) Backup() error { return nil } func (n *ScheduleBackup) Destroy() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() By(fmt.Sprintf("Schedule %s is created without any delay\n", n.ScheduleName), func() { creationTimestamp, err := GetSchedule(context.Background(), VeleroCfg.VeleroNamespace, n.ScheduleName) Expect(err).To(Succeed()) @@ -144,7 +149,7 @@ func (n *ScheduleBackup) Destroy() error { n.BackupName = strings.Replace(n.randBackupName, " ", "", -1) By("Delete all namespaces", func() { - Expect(CleanupNamespacesWithPoll(n.Ctx, n.Client, n.NSBaseName)).To(Succeed(), "Could cleanup retrieve namespaces") + Expect(CleanupNamespacesWithPoll(ctx, n.Client, n.NSBaseName)).To(Succeed(), "Could cleanup retrieve namespaces") }) n.RestoreArgs = []string{ @@ -159,7 +164,7 @@ func (n *ScheduleBackup) Destroy() error { backupCount := len(backupsInfo) By(fmt.Sprintf("Pause schedule %s ......\n", n.ScheduleName), func() { - Expect(VeleroSchedulePause(n.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName)).To(Succeed(), func() string { + Expect(VeleroSchedulePause(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName)).To(Succeed(), func() string { RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", "") return "Fail to restore workload" }) @@ -182,7 +187,7 @@ func (n *ScheduleBackup) Destroy() error { }) By(fmt.Sprintf("Unpause schedule %s ......\n", n.ScheduleName), func() { - Expect(VeleroScheduleUnpause(n.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName)).To(Succeed(), func() string { + Expect(VeleroScheduleUnpause(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName)).To(Succeed(), func() string { RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", "") return "Fail to unpause schedule" }) diff --git a/test/e2e/test/test.go b/test/e2e/test/test.go index b0b4d7ac8..20c90dbbd 100644 --- a/test/e2e/test/test.go +++ b/test/e2e/test/test.go @@ -68,7 +68,6 @@ type TestCase struct { NamespacesTotal int TestMsg *TestMSG Client TestClient - Ctx context.Context NSIncluded *[]string UseVolumeSnapshots bool VeleroCfg VeleroConfig @@ -159,8 +158,10 @@ func (t *TestCase) StartRun() error { } func (t *TestCase) Backup() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() veleroCfg := t.GetTestCase().VeleroCfg - if err := VeleroBackupExec(t.Ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, t.BackupName, t.BackupArgs); err != nil { + if err := VeleroBackupExec(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, t.BackupName, t.BackupArgs); err != nil { RunDebug(context.Background(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, t.BackupName, "") return errors.Wrapf(err, "Failed to backup resources") } @@ -168,13 +169,17 @@ func (t *TestCase) Backup() error { } func (t *TestCase) Destroy() error 
{ + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() By(fmt.Sprintf("Start to destroy namespace %s......", t.NSBaseName), func() { - Expect(CleanupNamespacesWithPoll(t.Ctx, t.Client, t.NSBaseName)).To(Succeed(), "Could cleanup retrieve namespaces") + Expect(CleanupNamespacesWithPoll(ctx, t.Client, t.NSBaseName)).To(Succeed(), "Could cleanup retrieve namespaces") }) return nil } func (t *TestCase) Restore() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() veleroCfg := t.GetTestCase().VeleroCfg // the snapshots of AWS may be still in pending status when do the restore, wait for a while // to avoid this https://github.com/vmware-tanzu/velero/issues/1799 @@ -188,7 +193,7 @@ func (t *TestCase) Restore() error { if t.RestorePhaseExpect == "" { t.RestorePhaseExpect = velerov1api.RestorePhaseCompleted } - Expect(VeleroRestoreExec(t.Ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, t.RestoreName, t.RestoreArgs, t.RestorePhaseExpect)).To(Succeed(), func() string { + Expect(VeleroRestoreExec(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, t.RestoreName, t.RestoreArgs, t.RestorePhaseExpect)).To(Succeed(), func() string { RunDebug(context.Background(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, "", t.RestoreName) return "Fail to restore workload" }) @@ -201,13 +206,15 @@ func (t *TestCase) Verify() error { } func (t *TestCase) Clean() error { + ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer ctxCancel() veleroCfg := t.GetTestCase().VeleroCfg if !veleroCfg.Debug { By(fmt.Sprintf("Clean namespace with prefix %s after test", t.NSBaseName), func() { - CleanupNamespaces(t.Ctx, t.Client, t.NSBaseName) + CleanupNamespaces(ctx, t.Client, t.NSBaseName) }) By("Clean backups after test", func() { - DeleteBackups(t.Ctx, t.Client) + DeleteBackups(ctx, t.Client) }) } return nil @@ -225,7 +232,6 @@ func RunTestCase(test VeleroBackupRestoreTest) error { if test == nil { return errors.New("No case should be tested") } - defer test.Clean() err := test.StartRun() if err != nil { From b7d1c3e679d8e0d625fc843999abc97472612edb Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Mon, 17 Apr 2023 10:22:50 -0400 Subject: [PATCH 06/10] golangci.yaml: comment out check-shadowing, remove vet script Per comment: https://github.com/vmware-tanzu/velero/pull/6154#issuecomment-1510946594 Signed-off-by: Tiger Kaovilai --- golangci.yaml | 5 +++-- hack/verify-vet.sh | 17 ----------------- 2 files changed, 3 insertions(+), 19 deletions(-) delete mode 100755 hack/verify-vet.sh diff --git a/golangci.yaml b/golangci.yaml index 4422a3a3c..28cd9ce7f 100644 --- a/golangci.yaml +++ b/golangci.yaml @@ -181,7 +181,7 @@ linters-settings: # reason: "testing if blocked version constraint works." # Reason why the version constraint exists. (Optional) govet: # report about shadowed variables - check-shadowing: true + # check-shadowing: true # settings per analyzer settings: @@ -296,6 +296,7 @@ linters: - goheader - goimports - gosec + - govet - misspell - typecheck - unparam @@ -340,4 +341,4 @@ severity: rules: - linters: - dupl - severity: info + severity: info \ No newline at end of file diff --git a/hack/verify-vet.sh b/hack/verify-vet.sh deleted file mode 100755 index ba0c242fe..000000000 --- a/hack/verify-vet.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# -# Copyright 2023 the Velero contributors.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -go vet -mod=mod ./... || (echo "go vet is not happy, fix the errors above" && exit 1) From 291149732c20b40a76ff27c8cc3ba44fb3a617ac Mon Sep 17 00:00:00 2001 From: Orlix <7236111+OrlinVasilev@users.noreply.github.com> Date: Mon, 24 Apr 2023 20:18:32 +0200 Subject: [PATCH 07/10] Algolia fix docs search (#6105) * wip:Algolia fix 2 Signed-off-by: OrlinVasilev * wip:Algolia fix 2 Signed-off-by: OrlinVasilev * Algolia fix 2 Signed-off-by: OrlinVasilev * Update site/layouts/partials/head-docs.html Co-authored-by: Tiger Kaovilai Signed-off-by: Orlix <7236111+OrlinVasilev@users.noreply.github.com> --------- Signed-off-by: OrlinVasilev Signed-off-by: Orlix <7236111+OrlinVasilev@users.noreply.github.com> Co-authored-by: Tiger Kaovilai --- site/algolia-crawler.json | 90 ++++++++++++++++++++++++++++ site/layouts/docs/docs.html | 31 +++++----- site/layouts/partials/head-docs.html | 2 + 3 files changed, 109 insertions(+), 14 deletions(-) create mode 100644 site/algolia-crawler.json diff --git a/site/algolia-crawler.json b/site/algolia-crawler.json new file mode 100644 index 000000000..06dc083d7 --- /dev/null +++ b/site/algolia-crawler.json @@ -0,0 +1,90 @@ +new Crawler({ + rateLimit: 8, + maxDepth: 10, + startUrls: ["https://velero.io/docs", "https://velero.io/"], + renderJavaScript: false, + sitemaps: ["https://velero.io/sitemap.xml"], + ignoreCanonicalTo: false, + discoveryPatterns: ["https://velero.io/**"], + schedule: "at 6:39 PM on Friday", + actions: [ + { + indexName: "velero_new", + pathsToMatch: ["https://velero.io/docs**/**"], + recordExtractor: ({ helpers }) => { + return helpers.docsearch({ + recordProps: { + lvl1: ["header h1", "article h1", "main h1", "h1", "head > title"], + content: ["article p, article li", "main p, main li", "p, li"], + lvl0: { + defaultValue: "Documentation", + }, + lvl2: ["article h2", "main h2", "h2"], + lvl3: ["article h3", "main h3", "h3"], + lvl4: ["article h4", "main h4", "h4"], + lvl5: ["article h5", "main h5", "h5"], + lvl6: ["article h6", "main h6", "h6"], + version: "#dropdownMenuButton", + }, + aggregateContent: true, + recordVersion: "v3", + }); + }, + }, + ], + initialIndexSettings: { + velero_new: { + attributesForFaceting: ["type", "lang", "version"], + attributesToRetrieve: [ + "hierarchy", + "content", + "anchor", + "url", + "url_without_anchor", + "type", + "version", + ], + attributesToHighlight: ["hierarchy", "content"], + attributesToSnippet: ["content:10"], + camelCaseAttributes: ["hierarchy", "content"], + searchableAttributes: [ + "unordered(hierarchy.lvl0)", + "unordered(hierarchy.lvl1)", + "unordered(hierarchy.lvl2)", + "unordered(hierarchy.lvl3)", + "unordered(hierarchy.lvl4)", + "unordered(hierarchy.lvl5)", + "unordered(hierarchy.lvl6)", + "content", + ], + distinct: true, + attributeForDistinct: "url", + customRanking: [ + "desc(weight.pageRank)", + "desc(weight.level)", + "asc(weight.position)", + ], + ranking: [ + "words", + "filters", + "typo", + "attribute", + "proximity", + "exact", + 
"custom", + ], + highlightPreTag: '', + highlightPostTag: "", + minWordSizefor1Typo: 3, + minWordSizefor2Typos: 7, + allowTyposOnNumericTokens: false, + minProximity: 1, + ignorePlurals: true, + advancedSyntax: true, + attributeCriteriaComputedByMinProximity: true, + removeWordsIfNoResults: "allOptional", + }, + }, + appId: "9ASKQJ1HR3", + apiKey: "6392a5916af73b73df2406d3aef5ca45", +}); \ No newline at end of file diff --git a/site/layouts/docs/docs.html b/site/layouts/docs/docs.html index 938ce660d..6d2a3f57f 100644 --- a/site/layouts/docs/docs.html +++ b/site/layouts/docs/docs.html @@ -27,14 +27,16 @@
{{ .Render "versions" }}
- [deleted: the old search form markup; HTML tags not preserved in this copy] + [added: the Algolia DocSearch search-box markup; HTML tags not preserved in this copy]
{{ .Render "nav" }}
@@ -56,13 +58,14 @@ {{ .Render "footer" }}
- [deleted line: old docs-search script markup; HTML tags not preserved in this copy] + [added line: Algolia DocSearch script markup; HTML tags not preserved in this copy] diff --git a/site/layouts/partials/head-docs.html b/site/layouts/partials/head-docs.html index c92837b2f..5ebae8c24 100644 --- a/site/layouts/partials/head-docs.html +++ b/site/layouts/partials/head-docs.html @@ -8,4 +8,6 @@ {{ $styles := resources.Get "styles.scss" | toCSS $options | resources.Fingerprint }} {{/* TODO {% seo %}*/}} + [two added lines: Algolia DocSearch references in the document head; HTML tags not preserved in this copy] From b38ee8ad41453248c44ffd971d54f7ba115936e8 Mon Sep 17 00:00:00 2001 From: qiuming Date: Tue, 25 Apr 2023 05:38:30 +0800 Subject: [PATCH 08/10] Optimize APIGroup E2E test case (#6174) Signed-off-by: Ming --- .../api-group/enable_api_group_extentions.go | 157 +++++ .../api-group/enable_api_group_versions.go | 544 ++++++++++++++++ test/e2e/basic/enable_api_group_versions.go | 591 ------------------ test/e2e/e2e_suite_test.go | 1 + test/e2e/util/k8s/crd.go | 85 +++ 5 files changed, 787 insertions(+), 591 deletions(-) create mode 100644 test/e2e/basic/api-group/enable_api_group_extentions.go create mode 100644 test/e2e/basic/api-group/enable_api_group_versions.go delete mode 100644 test/e2e/basic/enable_api_group_versions.go create mode 100644 test/e2e/util/k8s/crd.go diff --git a/test/e2e/basic/api-group/enable_api_group_extentions.go b/test/e2e/basic/api-group/enable_api_group_extentions.go new file mode 100644 index 000000000..a3d6a3b88 --- /dev/null +++ b/test/e2e/basic/api-group/enable_api_group_extentions.go @@ -0,0 +1,157 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package basic + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "github.com/vmware-tanzu/velero/test/e2e" + . "github.com/vmware-tanzu/velero/test/e2e/util/k8s" + .
"github.com/vmware-tanzu/velero/test/e2e/util/velero" +) + +func APIExtensionsVersionsTest() { + var ( + backupName, restoreName string + ) + + resourceName := "apiextensions.k8s.io" + crdName := "rocknrollbands.music.example.io" + label := "for=backup" + srcCrdYaml := "testdata/enable_api_group_versions/case-a-source-v1beta1.yaml" + BeforeEach(func() { + if veleroCfg.DefaultCluster == "" && veleroCfg.StandbyCluster == "" { + Skip("CRD with apiextension versions migration test needs 2 clusters") + } + veleroCfg = VeleroCfg + Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultCluster)).To(Succeed()) + srcVersions, err := GetAPIVersions(veleroCfg.DefaultClient, resourceName) + Expect(err).ShouldNot(HaveOccurred()) + dstVersions, err := GetAPIVersions(veleroCfg.StandbyClient, resourceName) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(srcVersions).Should(ContainElement("v1"), func() string { + Skip("CRD with apiextension versions srcVersions should have v1") + return "" + }) + Expect(srcVersions).Should(ContainElement("v1beta1"), func() string { + Skip("CRD with apiextension versions srcVersions should have v1") + return "" + }) + Expect(dstVersions).Should(ContainElement("v1"), func() string { + Skip("CRD with apiextension versions dstVersions should have v1") + return "" + }) + Expect(len(srcVersions) > 1 && len(dstVersions) == 1).Should(Equal(true), func() string { + Skip("Source cluster should support apiextension v1 and v1beta1, destination cluster should only support apiextension v1") + return "" + }) + }) + AfterEach(func() { + if !veleroCfg.Debug { + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *veleroCfg.DefaultClient) + }) + if veleroCfg.InstallVelero { + By("Uninstall Velero and delete CRD ", func() { + Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultCluster)).To(Succeed()) + Expect(VeleroUninstall(context.Background(), veleroCfg.VeleroCLI, + veleroCfg.VeleroNamespace)).To(Succeed()) + Expect(DeleteCRDByName(context.Background(), crdName)).To(Succeed()) + + Expect(KubectlConfigUseContext(context.Background(), veleroCfg.StandbyCluster)).To(Succeed()) + Expect(VeleroUninstall(context.Background(), veleroCfg.VeleroCLI, + veleroCfg.VeleroNamespace)).To(Succeed()) + Expect(DeleteCRDByName(context.Background(), crdName)).To(Succeed()) + }) + } + By(fmt.Sprintf("Switch to default kubeconfig context %s", veleroCfg.DefaultCluster), func() { + Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultCluster)).To(Succeed()) + veleroCfg.ClientToInstallVelero = veleroCfg.DefaultClient + }) + } + + }) + Context("When EnableAPIGroupVersions flag is set", func() { + It("Enable API Group to B/R CRD APIExtensionsVersions", func() { + backupName = "backup-" + UUIDgen.String() + restoreName = "restore-" + UUIDgen.String() + + By(fmt.Sprintf("Install Velero in cluster-A (%s) to backup workload", veleroCfg.DefaultCluster), func() { + Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultCluster)).To(Succeed()) + veleroCfg.Features = "EnableAPIGroupVersions" + veleroCfg.UseVolumeSnapshots = false + Expect(VeleroInstall(context.Background(), &veleroCfg)).To(Succeed()) + }) + + By(fmt.Sprintf("Install CRD of apiextenstions v1beta1 in cluster-A (%s)", veleroCfg.DefaultCluster), func() { + Expect(InstallCRD(context.Background(), srcCrdYaml)).To(Succeed()) + Expect(CRDShouldExist(context.Background(), crdName)).To(Succeed()) + Expect(WaitForCRDEstablished(crdName)).To(Succeed()) + 
Expect(AddLabelToCRD(context.Background(), crdName, label)).To(Succeed()) + // Velero server refresh api version data by discovery helper every 5 minutes + time.Sleep(6 * time.Minute) + }) + + By("Backup CRD", func() { + var BackupCfg BackupConfig + BackupCfg.BackupName = backupName + BackupCfg.IncludeResources = "crd" + BackupCfg.IncludeClusterResources = true + BackupCfg.Selector = label + Expect(VeleroBackupNamespace(context.Background(), veleroCfg.VeleroCLI, + veleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { + RunDebug(context.Background(), veleroCfg.VeleroCLI, + veleroCfg.VeleroNamespace, backupName, "") + return "Fail to backup workload" + }) + }) + + By(fmt.Sprintf("Install Velero in cluster-B (%s) to restore workload", veleroCfg.StandbyCluster), func() { + Expect(KubectlConfigUseContext(context.Background(), veleroCfg.StandbyCluster)).To(Succeed()) + veleroCfg.ClientToInstallVelero = veleroCfg.StandbyClient + Expect(VeleroInstall(context.Background(), &veleroCfg)).To(Succeed()) + }) + + By(fmt.Sprintf("Waiting for backups sync to Velero in cluster-B (%s)", veleroCfg.StandbyCluster), func() { + Expect(WaitForBackupToBeCreated(context.Background(), veleroCfg.VeleroCLI, backupName, 5*time.Minute)).To(Succeed()) + }) + + By(fmt.Sprintf("CRD %s should not exist in cluster-B (%s)", crdName, veleroCfg.StandbyCluster), func() { + Expect(CRDShouldNotExist(context.Background(), crdName)).To(Succeed(), "Error: CRD already exists in cluster B, clean it and re-run test") + }) + + By("Restore CRD", func() { + Expect(VeleroRestore(context.Background(), veleroCfg.VeleroCLI, + veleroCfg.VeleroNamespace, restoreName, backupName, "")).To(Succeed(), func() string { + RunDebug(context.Background(), veleroCfg.VeleroCLI, + veleroCfg.VeleroNamespace, "", restoreName) + return "Fail to restore workload" + }) + }) + + By("Verify CRD restore ", func() { + Expect(CRDShouldExist(context.Background(), crdName)).To(Succeed()) + }) + }) + }) +} diff --git a/test/e2e/basic/api-group/enable_api_group_versions.go b/test/e2e/basic/api-group/enable_api_group_versions.go new file mode 100644 index 000000000..cc6290546 --- /dev/null +++ b/test/e2e/basic/api-group/enable_api_group_versions.go @@ -0,0 +1,544 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package basic + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + "time" + + "github.com/google/uuid" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/vmware-tanzu/velero/pkg/builder" + veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec" + . "github.com/vmware-tanzu/velero/test/e2e" + . "github.com/vmware-tanzu/velero/test/e2e/util/k8s" + . 
"github.com/vmware-tanzu/velero/test/e2e/util/velero" +) + +var veleroCfg VeleroConfig + +type apiGropuVersionsTest struct { + name string + namespaces []string + srcCrdYaml string + srcCRs map[string]string + tgtCrdYaml string + tgtVer string + cm *corev1api.ConfigMap + gvs map[string][]string + want map[string]map[string]string +} + +func APIGropuVersionsTest() { + var ( + group string + err error + ctx = context.Background() + testCaseNum int + ) + + BeforeEach(func() { + veleroCfg = VeleroCfg + group = "music.example.io" + UUIDgen, err = uuid.NewRandom() + Expect(err).NotTo(HaveOccurred()) + flag.Parse() + // TODO: install Velero once for the test suite once feature flag is + // removed and velero installation becomes the same as other e2e tests. + if veleroCfg.InstallVelero { + veleroCfg.Features = "EnableAPIGroupVersions" + veleroCfg.UseVolumeSnapshots = false + err = VeleroInstall(context.Background(), &veleroCfg) + Expect(err).NotTo(HaveOccurred()) + } + testCaseNum = 4 + }) + + AfterEach(func() { + if !veleroCfg.Debug { + for i := 0; i < testCaseNum; i++ { + curResource := fmt.Sprintf("rockband%ds", i) + curGroup := fmt.Sprintf("%s.%d", group, i) + By(fmt.Sprintf("Clean up resource: kubectl delete crd %s.%s\n", curResource, curGroup)) + cmd := exec.CommandContext(ctx, "kubectl", "delete", "crd", curResource+"."+curGroup) + _, stderr, err := veleroexec.RunCommand(cmd) + if strings.Contains(stderr, "NotFound") { + fmt.Printf("Ignore error: %v\n", stderr) + err = nil + } + Expect(err).NotTo(HaveOccurred()) + } + + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *veleroCfg.ClientToInstallVelero) + }) + if veleroCfg.InstallVelero { + By("Uninstall Velero", func() { + Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).NotTo(HaveOccurred()) + }) + } + } + }) + + Context("When EnableAPIGroupVersions flag is set", func() { + It("Should back up API group version and restore by version priority", func() { + Expect(runEnableAPIGroupVersionsTests( + ctx, + *veleroCfg.ClientToInstallVelero, + group, + )).To(Succeed(), "Failed to successfully backup and restore multiple API Groups") + }) + }) +} + +func runEnableAPIGroupVersionsTests(ctx context.Context, client TestClient, group string) error { + tests := []apiGropuVersionsTest{ + { + name: "Target and source cluster preferred versions match; Preferred version v1 is restored (Priority 1, Case A).", + srcCrdYaml: "testdata/enable_api_group_versions/case-a-source.yaml", + srcCRs: map[string]string{ + "v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml", + "v1alpha1": "testdata/enable_api_group_versions/music_v1alpha1_rockband.yaml", + }, + tgtCrdYaml: "testdata/enable_api_group_versions/case-a-target.yaml", + tgtVer: "v1", + cm: nil, + want: map[string]map[string]string{ + "annotations": { + "rockband0s.music.example.io.0/originalVersion": "v1", + }, + "specs": { + "genre": "60s rock", + }, + }, + }, + { + name: "Latest common non-preferred supported version v2beta2 is restored (Priority 3, Case D).", + srcCrdYaml: "testdata/enable_api_group_versions/case-b-source-manually-added-mutations.yaml", + srcCRs: map[string]string{ + "v2beta2": "testdata/enable_api_group_versions/music_v2beta2_rockband.yaml", + "v2beta1": "testdata/enable_api_group_versions/music_v2beta1_rockband.yaml", + "v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml", + }, + tgtCrdYaml: "testdata/enable_api_group_versions/case-d-target-manually-added-mutations.yaml", + tgtVer: "v2beta2", + cm: nil, + 
want: map[string]map[string]string{ + "annotations": { + "rockband1s.music.example.io.1/originalVersion": "v2beta2", + }, + "specs": { + "genre": "60s rock", + }, + }, + }, + { + name: "No common supported versions means no rockbands custom resource is restored.", + srcCrdYaml: "testdata/enable_api_group_versions/case-a-source.yaml", + srcCRs: map[string]string{ + "v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml", + "v1alpha1": "testdata/enable_api_group_versions/music_v1alpha1_rockband.yaml", + }, + tgtCrdYaml: "testdata/enable_api_group_versions/case-b-target-manually-added-mutations.yaml", + tgtVer: "", + cm: nil, + want: nil, + }, + { + name: "User config map overrides Priority 3, Case D and restores v2beta1", + srcCrdYaml: "testdata/enable_api_group_versions/case-b-source-manually-added-mutations.yaml", + srcCRs: map[string]string{ + "v2beta2": "testdata/enable_api_group_versions/music_v2beta2_rockband.yaml", + "v2beta1": "testdata/enable_api_group_versions/music_v2beta1_rockband.yaml", + "v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml", + }, + tgtCrdYaml: "testdata/enable_api_group_versions/case-d-target-manually-added-mutations.yaml", + tgtVer: "v2beta1", + cm: builder.ForConfigMap(veleroCfg.VeleroNamespace, "enableapigroupversions").Data( + "restoreResourcesVersionPriority", + `rockband3s.music.example.io.3=v2beta1,v2beta2,v2`, + ).Result(), + want: map[string]map[string]string{ + "annotations": { + "rockband3s.music.example.io.3/originalVersion": "v2beta1", + }, + "specs": { + "genre": "60s rock", + }, + }, + }, + { + name: "Restore successful when CRD doesn't (yet) exist in target", + srcCrdYaml: "testdata/enable_api_group_versions/case-a-source.yaml", + srcCRs: map[string]string{ + "v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml", + }, + tgtCrdYaml: "", + tgtVer: "v1", + cm: nil, + want: map[string]map[string]string{ + "annotations": { + "rockband4s.music.example.io.4/originalVersion": "v1", + }, + "specs": { + "genre": "60s rock", + }, + }, + }, + } + + nsListwanted, nsListUnwanted, err := installTestResources(ctx, client, group, tests) + Expect(err).NotTo(HaveOccurred()) + + for i, tc := range tests { + for version := range tc.srcCRs { + ns := fmt.Sprintf("rockband%ds-src-%s-%d", i, version, i) + defer func(namespace string) { + if err = DeleteNamespace(ctx, client, namespace, true); err != nil { + fmt.Println(errors.Wrapf(err, "failed to delete the namespace %q", ns)) + } + }(ns) + } + if tc.cm != nil { + defer func(name string) { + if err = client.ClientGo.CoreV1().ConfigMaps(veleroCfg.VeleroNamespace).Delete(ctx, name, metav1.DeleteOptions{}); err != nil { + fmt.Println(errors.Wrapf(err, "failed to delete configmap %q", name)) + } + }(tc.cm.Name) + } + + defer func(crdName string) { + if err = DeleteCRDByName(ctx, crdName); err != nil { + fmt.Println(errors.Wrapf(err, "failed to delete crd %q", crdName)) + } + }(fmt.Sprintf("rockband%ds.music.example.io.%d", i, i)) + + } + + time.Sleep(6 * time.Minute) + + BackupCfgWanted := BackupConfig{ + BackupName: "backup-rockbands-" + UUIDgen.String() + "-wanted", + Namespace: nsListwanted, + UseVolumeSnapshots: false, + } + + Expect(VeleroBackupNamespace(ctx, veleroCfg.VeleroCLI, + veleroCfg.VeleroNamespace, BackupCfgWanted)).To(Succeed(), func() string { + RunDebug(context.Background(), veleroCfg.VeleroCLI, + veleroCfg.VeleroNamespace, BackupCfgWanted.BackupName, "") + return "Fail to backup workload" + }) + + BackupCfgUnwanted := BackupConfig{ + BackupName: "backup-rockbands-" + 
UUIDgen.String() + "-unwanted",
+		Namespace:          nsListUnwanted,
+		UseVolumeSnapshots: false,
+	}
+
+	Expect(VeleroBackupNamespace(ctx, veleroCfg.VeleroCLI,
+		veleroCfg.VeleroNamespace, BackupCfgUnwanted)).To(Succeed(), func() string {
+		RunDebug(context.Background(), veleroCfg.VeleroCLI,
+			veleroCfg.VeleroNamespace, BackupCfgUnwanted.BackupName, "")
+		return "Fail to backup workload"
+	})
+
+	Expect(reinstallTestResources(ctx, group, client, tests)).NotTo(HaveOccurred())
+
+	time.Sleep(6 * time.Minute)
+
+	restoreName := "restore-rockbands-" + UUIDgen.String() + "-wanted"
+	if err := VeleroRestore(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, restoreName, BackupCfgWanted.BackupName, ""); err != nil {
+		RunDebug(context.Background(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, "", restoreName)
+		return errors.Wrapf(err, "restore %s namespaces on target cluster", nsListwanted)
+	}
+
+	restoreName = "restore-rockbands-" + UUIDgen.String() + "-unwanted"
+	err = VeleroRestore(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, restoreName, BackupCfgUnwanted.BackupName, "")
+	if err == nil || !strings.Contains(err.Error(), "Unexpected restore phase got PartiallyFailed, expecting Completed") {
+		return errors.New("expected a PartiallyFailed restore error but got none")
+	}
+
+	for i, tc := range tests {
+		defer func() {
+			_ = deleteTestCRD(ctx, i, group, tc.srcCrdYaml)
+			if tc.tgtCrdYaml != "" {
+				_ = deleteTestCRD(ctx, i, group, tc.tgtCrdYaml)
+			}
+			if tc.cm != nil {
+				client.ClientGo.CoreV1().ConfigMaps(veleroCfg.VeleroNamespace).Delete(ctx, tc.cm.Name, metav1.DeleteOptions{})
+			}
+		}()
+
+		if tc.want != nil {
+			curResource := fmt.Sprintf("rockband%ds", i)
+			annoSpec, err := resourceInfo(ctx, group, tc.tgtVer, curResource, i)
+			if err != nil {
+				return errors.Wrapf(
+					err,
+					"get annotation and spec from %s.%s/%s object",
+					curResource,
+					group,
+					tc.tgtVer,
+				)
+			}
+
+			// Assertion
+			if !containsAll(annoSpec["annotations"], tc.want["annotations"]) {
+				msg := fmt.Sprintf(
+					"actual annotations: %v, expected annotations: %v",
+					annoSpec["annotations"],
+					tc.want["annotations"],
+				)
+				return errors.New(msg)
+			}
+
+			// Assertion
+			if !containsAll(annoSpec["specs"], tc.want["specs"]) {
+				msg := fmt.Sprintf(
+					"actual specs: %v, expected specs: %v",
+					annoSpec["specs"],
+					tc.want["specs"],
+				)
+				return errors.New(msg)
+			}
+
+		}
+	}
+
+	return nil
+}
+
+func deleteTestCRD(ctx context.Context, index int, group, path string) error {
+	fileName, err := rerenderTestYaml(index, group, path)
+	defer func() {
+		if fileName != "" {
+			os.Remove(fileName)
+		}
+	}()
+	if err != nil {
+		return err
+	}
+	return DeleteCRD(ctx, fileName)
+}
+
+func installTestCR(ctx context.Context, index int, group, path, ns string) error {
+	fileName, err := rerenderTestYaml(index, group, path)
+	defer func() {
+		if fileName != "" {
+			os.Remove(fileName)
+		}
+	}()
+	if err != nil {
+		return err
+	}
+	return InstallCR(ctx, fileName, ns)
+}
+
+func installTestCRD(ctx context.Context, index int, group, path string) error {
+	fileName, err := rerenderTestYaml(index, group, path)
+	defer func() {
+		if fileName != "" {
+			os.Remove(fileName)
+		}
+	}()
+	if err != nil {
+		return err
+	}
+	return InstallCRD(ctx, fileName)
+}
+
+func rerenderTestYaml(index int, group, path string) (string, error) {
+	content, err := ioutil.ReadFile(path)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to read %s when rendering test yaml", path)
+	}
+
+	// replace the resource names with per-test-case indexed values
+	re := regexp.MustCompile(`\b(RockBand|RockBandList|rockband|rockbands)\b`)
+	newContent := 
re.ReplaceAllStringFunc(string(content), func(s string) string {
+		if s == "RockBand" {
+			return fmt.Sprintf("RockBand%d", index)
+		} else if s == "RockBandList" {
+			return fmt.Sprintf("RockBand%dList", index)
+		} else if s == "rockbands" {
+			return fmt.Sprintf("rockband%ds", index)
+		} else {
+			return fmt.Sprintf("rockband%d", index)
+		}
+	})
+
+	// replace the group name with a per-test-case indexed value
+	newContent = strings.ReplaceAll(newContent, group, fmt.Sprintf("%s.%d", group, index))
+
+	By(fmt.Sprintf("\n%s\n", newContent))
+	tmpFile, err := ioutil.TempFile("", "test-yaml")
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to create temp file when rendering test yaml")
+	}
+
+	if _, err := tmpFile.WriteString(newContent); err != nil {
+		return "", errors.Wrapf(err, "failed to write content into temp file %s when rendering test yaml", tmpFile.Name())
+	}
+
+	return tmpFile.Name(), nil
+}
+
+func resourceInfo(ctx context.Context, g, v, r string, index int) (map[string]map[string]string, error) {
+	rvg := fmt.Sprintf("%s.%s.%s.%d", r, v, g, index)
+	ns := fmt.Sprintf("rockband%ds-src-%s-%d", index, v, index)
+	cmd := exec.CommandContext(ctx, "kubectl", "get", rvg, "-n", ns, "-o", "json")
+	stdout, errMsg, err := veleroexec.RunCommand(cmd)
+	if err != nil {
+		return nil, errors.Wrap(err, errMsg)
+	}
+
+	var info map[string]interface{}
+	if err := json.Unmarshal([]byte(stdout), &info); err != nil {
+		return nil, errors.Wrap(err, "unmarshal resource info JSON")
+	}
+	items := info["items"].([]interface{})
+
+	if len(items) < 1 {
+		return nil, errors.New("resource info is empty")
+	}
+
+	item := items[0].(map[string]interface{})
+	metadata := item["metadata"].(map[string]interface{})
+	annotations := metadata["annotations"].(map[string]interface{})
+	specs := item["spec"].(map[string]interface{})
+
+	annoSpec := make(map[string]map[string]string)
+
+	for k, v := range annotations {
+		if annoSpec["annotations"] == nil {
+			annoSpec["annotations"] = map[string]string{
+				k: v.(string),
+			}
+		} else {
+			annoSpec["annotations"][k] = v.(string)
+		}
+	}
+
+	for k, v := range specs {
+		if val, ok := v.(string); ok {
+			if annoSpec["specs"] == nil {
+				annoSpec["specs"] = map[string]string{
+					k: val,
+				}
+			} else {
+				annoSpec["specs"][k] = val
+			}
+		}
+	}
+
+	return annoSpec, nil
+}
+
+// containsAll returns true if all the map values in the needles argument
+// are found in the haystack argument values.
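+// For example, containsAll(map[string]string{"a": "1", "b": "2"}, map[string]string{"a": "1"}) is true,
+// while containsAll(map[string]string{"a": "1"}, map[string]string{"a": "1", "b": "2"}) is false because "b" is absent.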
+func containsAll(haystack, needles map[string]string) bool { + for nkey, nval := range needles { + + hval, ok := haystack[nkey] + if !ok { + return false + } + + if hval != nval { + return false + } + } + return true +} + +func installTestResources(ctx context.Context, client TestClient, group string, tests []apiGropuVersionsTest) (string, string, error) { + var wanted, unwanted []string + for i, tc := range tests { + fmt.Printf("\n====== Test Case %d: %s ======\n", i, tc.name) + + err := installTestCRD(ctx, i, group, tc.srcCrdYaml) + if err != nil { + return "", "", errors.Wrap(err, "install music-system CRD on source cluster") + } + + for version, cr := range tc.srcCRs { + ns := fmt.Sprintf("rockband%ds-src-%s-%d", i, version, i) + if err := CreateNamespace(ctx, client, ns); err != nil { + return "", "", errors.Wrapf(err, "create %s namespace", ns) + } + + if err := installTestCR(ctx, i, group, cr, ns); err != nil { + return "", "", errors.Wrapf(err, "install %s custom resource on source cluster in namespace %s", cr, ns) + } + + if tc.want == nil { + unwanted = append(unwanted, ns) + } else { + wanted = append(wanted, ns) + } + } + } + return strings.Join(wanted, ","), strings.Join(unwanted, ","), nil +} + +func reinstallTestResources(ctx context.Context, group string, client TestClient, tests []apiGropuVersionsTest) error { + for i, tc := range tests { + By(fmt.Sprintf("Deleting CRD %s", tc.srcCrdYaml)) + if err := deleteTestCRD(ctx, i, group, tc.srcCrdYaml); err != nil { + return errors.Wrapf(err, "delete music-system CRD from source cluster") + } + + for version := range tc.srcCRs { + ns := fmt.Sprintf("rockband%ds-src-%s-%d", i, version, i) + By(fmt.Sprintf("Deleting namespace %s", ns)) + if err := DeleteNamespace(ctx, client, ns, true); err != nil { + fmt.Println(errors.Wrapf(err, "failed to delete the namespace %q", ns)) + } + } + // Install music-system CRD for target cluster. + if tc.tgtCrdYaml != "" { + By(fmt.Sprintf("Installing CRD %s", tc.tgtCrdYaml)) + if err := installTestCRD(ctx, i, group, tc.tgtCrdYaml); err != nil { + return errors.Wrapf(err, "install music-system CRD on target cluster") + } + } + + // Apply config map if there is one. + if tc.cm != nil { + By(fmt.Sprintf("Creating configmap %s", tc.cm.Name)) + _, err := client.ClientGo.CoreV1().ConfigMaps(veleroCfg.VeleroNamespace).Create(ctx, tc.cm, metav1.CreateOptions{}) + if err != nil { + return errors.Wrap(err, "create config map with user version priorities") + } + } + } + return nil +} diff --git a/test/e2e/basic/enable_api_group_versions.go b/test/e2e/basic/enable_api_group_versions.go deleted file mode 100644 index 8102765f0..000000000 --- a/test/e2e/basic/enable_api_group_versions.go +++ /dev/null @@ -1,591 +0,0 @@ -/* -Copyright the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package basic - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "os/exec" - "strconv" - "strings" - "time" - - "github.com/google/uuid" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/pkg/errors" - corev1api "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/vmware-tanzu/velero/pkg/builder" - veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec" - . "github.com/vmware-tanzu/velero/test/e2e" - . "github.com/vmware-tanzu/velero/test/e2e/util/k8s" - . "github.com/vmware-tanzu/velero/test/e2e/util/velero" -) - -var veleroCfg VeleroConfig - -func APIExtensionsVersionsTest() { - var ( - backupName, restoreName string - ) - - resourceName := "apiextensions.k8s.io" - crdName := "rocknrollbands.music.example.io" - label := "for=backup" - srcCrdYaml := "testdata/enable_api_group_versions/case-a-source-v1beta1.yaml" - BeforeEach(func() { - if veleroCfg.DefaultCluster == "" && veleroCfg.StandbyCluster == "" { - Skip("CRD with apiextension versions migration test needs 2 clusters") - } - veleroCfg = VeleroCfg - Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultCluster)).To(Succeed()) - srcVersions, err := GetAPIVersions(veleroCfg.DefaultClient, resourceName) - Expect(err).ShouldNot(HaveOccurred()) - dstVersions, err := GetAPIVersions(veleroCfg.StandbyClient, resourceName) - Expect(err).ShouldNot(HaveOccurred()) - - Expect(srcVersions).Should(ContainElement("v1"), func() string { - Skip("CRD with apiextension versions srcVersions should have v1") - return "" - }) - Expect(srcVersions).Should(ContainElement("v1beta1"), func() string { - Skip("CRD with apiextension versions srcVersions should have v1") - return "" - }) - Expect(dstVersions).Should(ContainElement("v1"), func() string { - Skip("CRD with apiextension versions dstVersions should have v1") - return "" - }) - Expect(len(srcVersions) > 1 && len(dstVersions) == 1).Should(Equal(true), func() string { - Skip("Source cluster should support apiextension v1 and v1beta1, destination cluster should only support apiextension v1") - return "" - }) - }) - AfterEach(func() { - if !veleroCfg.Debug { - By("Clean backups after test", func() { - DeleteBackups(context.Background(), *veleroCfg.DefaultClient) - }) - if veleroCfg.InstallVelero { - By("Uninstall Velero and delete CRD ", func() { - Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultCluster)).To(Succeed()) - Expect(VeleroUninstall(context.Background(), veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace)).To(Succeed()) - Expect(deleteCRDByName(context.Background(), crdName)).To(Succeed()) - - Expect(KubectlConfigUseContext(context.Background(), veleroCfg.StandbyCluster)).To(Succeed()) - Expect(VeleroUninstall(context.Background(), veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace)).To(Succeed()) - Expect(deleteCRDByName(context.Background(), crdName)).To(Succeed()) - }) - } - By(fmt.Sprintf("Switch to default kubeconfig context %s", veleroCfg.DefaultCluster), func() { - Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultCluster)).To(Succeed()) - veleroCfg.ClientToInstallVelero = veleroCfg.DefaultClient - }) - } - - }) - Context("When EnableAPIGroupVersions flag is set", func() { - It("Enable API Group to B/R CRD APIExtensionsVersions", func() { - backupName = "backup-" + UUIDgen.String() - restoreName = "restore-" + UUIDgen.String() - - By(fmt.Sprintf("Install Velero in cluster-A (%s) to backup workload", veleroCfg.DefaultCluster), func() { - Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultCluster)).To(Succeed()) - veleroCfg.Features = "EnableAPIGroupVersions" - veleroCfg.UseVolumeSnapshots = false - 
Expect(VeleroInstall(context.Background(), &veleroCfg)).To(Succeed()) - }) - - By(fmt.Sprintf("Install CRD of apiextenstions v1beta1 in cluster-A (%s)", veleroCfg.DefaultCluster), func() { - Expect(installCRD(context.Background(), srcCrdYaml)).To(Succeed()) - Expect(CRDShouldExist(context.Background(), crdName)).To(Succeed()) - Expect(WaitForCRDEstablished(crdName)).To(Succeed()) - Expect(AddLabelToCRD(context.Background(), crdName, label)).To(Succeed()) - // Velero server refresh api version data by discovery helper every 5 minutes - time.Sleep(6 * time.Minute) - }) - - By("Backup CRD", func() { - var BackupCfg BackupConfig - BackupCfg.BackupName = backupName - BackupCfg.IncludeResources = "crd" - BackupCfg.IncludeClusterResources = true - BackupCfg.Selector = label - Expect(VeleroBackupNamespace(context.Background(), veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { - RunDebug(context.Background(), veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace, backupName, "") - return "Fail to backup workload" - }) - }) - - By(fmt.Sprintf("Install Velero in cluster-B (%s) to restore workload", veleroCfg.StandbyCluster), func() { - Expect(KubectlConfigUseContext(context.Background(), veleroCfg.StandbyCluster)).To(Succeed()) - veleroCfg.ClientToInstallVelero = veleroCfg.StandbyClient - Expect(VeleroInstall(context.Background(), &veleroCfg)).To(Succeed()) - }) - - By(fmt.Sprintf("Waiting for backups sync to Velero in cluster-B (%s)", veleroCfg.StandbyCluster), func() { - Expect(WaitForBackupToBeCreated(context.Background(), veleroCfg.VeleroCLI, backupName, 5*time.Minute)).To(Succeed()) - }) - - By(fmt.Sprintf("CRD %s should not exist in cluster-B (%s)", crdName, veleroCfg.StandbyCluster), func() { - Expect(CRDShouldNotExist(context.Background(), crdName)).To(Succeed(), "Error: CRD already exists in cluster B, clean it and re-run test") - }) - - By("Restore CRD", func() { - Expect(VeleroRestore(context.Background(), veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace, restoreName, backupName, "")).To(Succeed(), func() string { - RunDebug(context.Background(), veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace, "", restoreName) - return "Fail to restore workload" - }) - }) - - By("Verify CRD restore ", func() { - Expect(CRDShouldExist(context.Background(), crdName)).To(Succeed()) - }) - }) - }) -} -func APIGropuVersionsTest() { - var ( - resource, group string - err error - ctx = context.Background() - ) - - BeforeEach(func() { - veleroCfg = VeleroCfg - resource = "rockbands" - group = "music.example.io" - UUIDgen, err = uuid.NewRandom() - Expect(err).NotTo(HaveOccurred()) - flag.Parse() - // TODO: install Velero once for the test suite once feature flag is - // removed and velero installation becomes the same as other e2e tests. 
- if veleroCfg.InstallVelero { - veleroCfg.Features = "EnableAPIGroupVersions" - veleroCfg.UseVolumeSnapshots = false - err = VeleroInstall(context.Background(), &veleroCfg) - Expect(err).NotTo(HaveOccurred()) - } - }) - - AfterEach(func() { - if !veleroCfg.Debug { - fmt.Printf("Clean up resource: kubectl delete crd %s.%s\n", resource, group) - cmd := exec.CommandContext(ctx, "kubectl", "delete", "crd", resource+"."+group) - _, stderr, err := veleroexec.RunCommand(cmd) - if strings.Contains(stderr, "NotFound") { - fmt.Printf("Ignore error: %v\n", stderr) - err = nil - } - Expect(err).NotTo(HaveOccurred()) - By("Clean backups after test", func() { - DeleteBackups(context.Background(), *veleroCfg.ClientToInstallVelero) - }) - if veleroCfg.InstallVelero { - - By("Uninstall Velero", func() { - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).NotTo(HaveOccurred()) - }) - } - } - }) - - Context("When EnableAPIGroupVersions flag is set", func() { - It("Should back up API group version and restore by version priority", func() { - Expect(runEnableAPIGroupVersionsTests( - ctx, - *veleroCfg.ClientToInstallVelero, - resource, - group, - )).To(Succeed(), "Failed to successfully backup and restore multiple API Groups") - }) - }) -} - -func runEnableAPIGroupVersionsTests(ctx context.Context, client TestClient, resource, group string) error { - tests := []struct { - name string - namespaces []string - srcCrdYaml string - srcCRs map[string]string - tgtCrdYaml string - tgtVer string - cm *corev1api.ConfigMap - gvs map[string][]string - want map[string]map[string]string - }{ - { - name: "Target and source cluster preferred versions match; Preferred version v1 is restored (Priority 1, Case A).", - srcCrdYaml: "testdata/enable_api_group_versions/case-a-source.yaml", - srcCRs: map[string]string{ - "v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml", - "v1alpha1": "testdata/enable_api_group_versions/music_v1alpha1_rockband.yaml", - }, - tgtCrdYaml: "testdata/enable_api_group_versions/case-a-target.yaml", - tgtVer: "v1", - cm: nil, - want: map[string]map[string]string{ - "annotations": { - "rockbands.music.example.io/originalVersion": "v1", - }, - "specs": { - "genre": "60s rock", - }, - }, - }, - { - name: "Latest common non-preferred supported version v2beta2 is restored (Priority 3, Case D).", - srcCrdYaml: "testdata/enable_api_group_versions/case-b-source-manually-added-mutations.yaml", - srcCRs: map[string]string{ - "v2beta2": "testdata/enable_api_group_versions/music_v2beta2_rockband.yaml", - "v2beta1": "testdata/enable_api_group_versions/music_v2beta1_rockband.yaml", - "v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml", - }, - tgtCrdYaml: "testdata/enable_api_group_versions/case-d-target-manually-added-mutations.yaml", - tgtVer: "v2beta2", - cm: nil, - want: map[string]map[string]string{ - "annotations": { - "rockbands.music.example.io/originalVersion": "v2beta2", - }, - "specs": { - "genre": "60s rock", - }, - }, - }, - { - name: "No common supported versions means no rockbands custom resource is restored.", - srcCrdYaml: "testdata/enable_api_group_versions/case-a-source.yaml", - srcCRs: map[string]string{ - "v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml", - "v1alpha1": "testdata/enable_api_group_versions/music_v1alpha1_rockband.yaml", - }, - tgtCrdYaml: "testdata/enable_api_group_versions/case-b-target-manually-added-mutations.yaml", - tgtVer: "", - cm: nil, - want: nil, - }, - { - name: "User config map overrides Priority 3, Case 
D and restores v2beta1", - srcCrdYaml: "testdata/enable_api_group_versions/case-b-source-manually-added-mutations.yaml", - srcCRs: map[string]string{ - "v2beta2": "testdata/enable_api_group_versions/music_v2beta2_rockband.yaml", - "v2beta1": "testdata/enable_api_group_versions/music_v2beta1_rockband.yaml", - "v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml", - }, - tgtCrdYaml: "testdata/enable_api_group_versions/case-d-target-manually-added-mutations.yaml", - tgtVer: "v2beta1", - cm: builder.ForConfigMap(veleroCfg.VeleroNamespace, "enableapigroupversions").Data( - "restoreResourcesVersionPriority", - `rockbands.music.example.io=v2beta1,v2beta2,v2`, - ).Result(), - want: map[string]map[string]string{ - "annotations": { - "rockbands.music.example.io/originalVersion": "v2beta1", - }, - "specs": { - "genre": "60s rock", - }, - }, - }, - { - name: "Restore successful when CRD doesn't (yet) exist in target", - srcCrdYaml: "testdata/enable_api_group_versions/case-a-source.yaml", - srcCRs: map[string]string{ - "v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml", - }, - tgtCrdYaml: "", - tgtVer: "v1", - cm: nil, - want: map[string]map[string]string{ - "annotations": { - "rockbands.music.example.io/originalVersion": "v1", - }, - "specs": { - "genre": "60s rock", - }, - }, - }, - } - - for i, tc := range tests { - fmt.Printf("\n====== Test Case %d: %s ======\n", i, tc.name) - - err := installCRD(ctx, tc.srcCrdYaml) - if err != nil { - return errors.Wrap(err, "install music-system CRD on source cluster") - } - - for version, cr := range tc.srcCRs { - ns := resource + "-src-" + version - - if err := CreateNamespace(ctx, client, ns); err != nil { - return errors.Wrapf(err, "create %s namespace", ns) - } - defer func(namespace string) { - if err = DeleteNamespace(ctx, client, namespace, true); err != nil { - fmt.Println(errors.Wrapf(err, "failed to delete the namespace %q", ns)) - } - }(ns) - - if err := installCR(ctx, cr, ns); err != nil { - return errors.Wrapf(err, "install %s custom resource on source cluster in namespace %s", cr, ns) - } - - tc.namespaces = append(tc.namespaces, ns) - } - - // Velero server refresh api version data by discovery helper every 5 minutes - time.Sleep(6 * time.Minute) - - backup := "backup-rockbands-" + UUIDgen.String() + "-" + strconv.Itoa(i) - namespacesStr := strings.Join(tc.namespaces, ",") - - var BackupCfg BackupConfig - BackupCfg.BackupName = backup - BackupCfg.Namespace = namespacesStr - BackupCfg.BackupLocation = "" - BackupCfg.UseVolumeSnapshots = false - BackupCfg.Selector = "" - - Expect(VeleroBackupNamespace(ctx, veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { - RunDebug(context.Background(), veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace, backup, "") - return "Fail to backup workload" - }) - - if err := deleteCRD(ctx, tc.srcCrdYaml); err != nil { - return errors.Wrapf(err, "delete music-system CRD from source cluster") - } - - for _, ns := range tc.namespaces { - if err := DeleteNamespace(ctx, client, ns, true); err != nil { - return errors.Wrapf(err, "delete %s namespace from source cluster", ns) - } - } - - // Install music-system CRD for target cluster. - if tc.tgtCrdYaml != "" { - if err := installCRD(ctx, tc.tgtCrdYaml); err != nil { - return errors.Wrapf(err, "install music-system CRD on target cluster") - } - } - - // Apply config map if there is one. 
- if tc.cm != nil { - _, err := client.ClientGo.CoreV1().ConfigMaps(veleroCfg.VeleroNamespace).Create(ctx, tc.cm, metav1.CreateOptions{}) - if err != nil { - return errors.Wrap(err, "create config map with user version priorities") - } - } - - // Velero server refresh api version data by discovery helper every 5 minutes - time.Sleep(6 * time.Minute) - - // Restore rockbands namespaces. - restore := "restore-rockbands-" + UUIDgen.String() + "-" + strconv.Itoa(i) - - if tc.want != nil { - if err := VeleroRestore(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, restore, backup, ""); err != nil { - RunDebug(context.Background(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, "", restore) - return errors.Wrapf(err, "restore %s namespaces on target cluster", namespacesStr) - } - - annoSpec, err := resourceInfo(ctx, group, tc.tgtVer, resource) - if err != nil { - return errors.Wrapf( - err, - "get annotation and spec from %s.%s/%s object", - resource, - group, - tc.tgtVer, - ) - } - - // Assertion - if !containsAll(annoSpec["annotations"], tc.want["annotations"]) { - msg := fmt.Sprintf( - "actual annotations: %v, expected annotations: %v", - annoSpec["annotations"], - tc.want["annotations"], - ) - return errors.New(msg) - } - - // Assertion - if !containsAll(annoSpec["specs"], tc.want["specs"]) { - msg := fmt.Sprintf( - "actual specs: %v, expected specs: %v", - annoSpec["specs"], - tc.want["specs"], - ) - return errors.New(msg) - } - - } else { - // No custom resource should have been restored. Expect "no resource found" - // error during restore. - err := VeleroRestore(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, restore, backup, "") - if !strings.Contains(err.Error(), "Unexpected restore phase got PartiallyFailed, expecting Completed") { - return errors.New("expected error but not none") - } - } - - _ = deleteCRD(ctx, tc.srcCrdYaml) - if tc.tgtCrdYaml != "" { - _ = deleteCRD(ctx, tc.tgtCrdYaml) - } - } - - return nil -} - -func installCRD(ctx context.Context, yaml string) error { - fmt.Printf("Install CRD with %s.\n", yaml) - err := KubectlApplyByFile(ctx, yaml) - return err -} - -func deleteCRD(ctx context.Context, yaml string) error { - fmt.Println("Delete CRD", yaml) - cmd := exec.CommandContext(ctx, "kubectl", "delete", "-f", yaml, "--wait") - - _, stderr, err := veleroexec.RunCommand(cmd) - if strings.Contains(stderr, "not found") { - return nil - } - if err != nil { - return errors.Wrap(err, stderr) - } - - return nil -} - -func deleteCRDByName(ctx context.Context, name string) error { - fmt.Println("Delete CRD", name) - cmd := exec.CommandContext(ctx, "kubectl", "delete", "crd", name, "--wait") - - _, stderr, err := veleroexec.RunCommand(cmd) - if strings.Contains(stderr, "not found") { - return nil - } - if err != nil { - return errors.Wrap(err, stderr) - } - - return nil -} - -func installCR(ctx context.Context, crFile, ns string) error { - retries := 5 - var stderr string - var err error - - for i := 0; i < retries; i++ { - fmt.Printf("Attempt %d: Install custom resource %s\n", i+1, crFile) - cmd := exec.CommandContext(ctx, "kubectl", "apply", "-n", ns, "-f", crFile) - _, stderr, err = veleroexec.RunCommand(cmd) - if err == nil { - fmt.Printf("Successfully installed CR on %s.\n", ns) - return nil - } - - fmt.Printf("Sleep for %ds before next attempt.\n", 20*i) - time.Sleep(time.Second * time.Duration(i) * 20) - } - return errors.Wrap(err, stderr) -} - -func resourceInfo(ctx context.Context, g, v, r string) (map[string]map[string]string, error) { - rvg := r + "." + v + "." 
+ g - ns := r + "-src-" + v - cmd := exec.CommandContext(ctx, "kubectl", "get", rvg, "-n", ns, "-o", "json") - - stdout, errMsg, err := veleroexec.RunCommand(cmd) - if err != nil { - return nil, errors.Wrap(err, errMsg) - } - - var info map[string]interface{} - if err := json.Unmarshal([]byte(stdout), &info); err != nil { - return nil, errors.Wrap(err, "unmarshal resource info JSON") - } - items := info["items"].([]interface{}) - - if len(items) < 1 { - return nil, errors.New("resource info is empty") - } - - item := items[0].(map[string]interface{}) - metadata := item["metadata"].(map[string]interface{}) - annotations := metadata["annotations"].(map[string]interface{}) - specs := item["spec"].(map[string]interface{}) - - annoSpec := make(map[string]map[string]string) - - for k, v := range annotations { - if annoSpec["annotations"] == nil { - annoSpec["annotations"] = map[string]string{ - k: v.(string), - } - } else { - annoSpec["annotations"][k] = v.(string) - } - } - - for k, v := range specs { - if val, ok := v.(string); ok { - if annoSpec["specs"] == nil { - annoSpec["specs"] = map[string]string{ - k: val, - } - } else { - annoSpec["specs"][k] = val - } - } - } - - return annoSpec, nil -} - -// containsAll returns true if all the map values in the needles argument -// are found in the haystack argument values. -func containsAll(haystack, needles map[string]string) bool { - for nkey, nval := range needles { - - hval, ok := haystack[nkey] - if !ok { - return false - } - - if hval != nval { - return false - } - } - return true -} diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 26c61d821..2dfd3820b 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -31,6 +31,7 @@ import ( . "github.com/vmware-tanzu/velero/test/e2e/backup" . "github.com/vmware-tanzu/velero/test/e2e/backups" . "github.com/vmware-tanzu/velero/test/e2e/basic" + . "github.com/vmware-tanzu/velero/test/e2e/basic/api-group" . "github.com/vmware-tanzu/velero/test/e2e/basic/resources-check" . "github.com/vmware-tanzu/velero/test/e2e/bsl-mgmt" . "github.com/vmware-tanzu/velero/test/e2e/migration" diff --git a/test/e2e/util/k8s/crd.go b/test/e2e/util/k8s/crd.go new file mode 100644 index 000000000..3b46ddbb0 --- /dev/null +++ b/test/e2e/util/k8s/crd.go @@ -0,0 +1,85 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package k8s + +import ( + "fmt" + "os/exec" + "strings" + "time" + + "github.com/pkg/errors" + "golang.org/x/net/context" + + veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec" +) + +func InstallCRD(ctx context.Context, yaml string) error { + fmt.Printf("Install CRD with %s.\n", yaml) + err := KubectlApplyByFile(ctx, yaml) + return err +} + +func DeleteCRD(ctx context.Context, yaml string) error { + fmt.Println("Delete CRD", yaml) + cmd := exec.CommandContext(ctx, "kubectl", "delete", "-f", yaml, "--wait") + + _, stderr, err := veleroexec.RunCommand(cmd) + if strings.Contains(stderr, "not found") { + return nil + } + if err != nil { + return errors.Wrap(err, stderr) + } + + return nil +} + +func DeleteCRDByName(ctx context.Context, name string) error { + fmt.Println("Delete CRD", name) + cmd := exec.CommandContext(ctx, "kubectl", "delete", "crd", name, "--wait") + + _, stderr, err := veleroexec.RunCommand(cmd) + if strings.Contains(stderr, "not found") { + return nil + } + if err != nil { + return errors.Wrap(err, stderr) + } + + return nil +} + +func InstallCR(ctx context.Context, crFile, ns string) error { + retries := 5 + var stderr string + var err error + + for i := 0; i < retries; i++ { + fmt.Printf("Attempt %d: Install custom resource %s\n", i+1, crFile) + cmd := exec.CommandContext(ctx, "kubectl", "apply", "-n", ns, "-f", crFile) + _, stderr, err = veleroexec.RunCommand(cmd) + if err == nil { + fmt.Printf("Successfully installed CR on %s.\n", ns) + return nil + } + + fmt.Printf("Sleep for %ds before next attempt.\n", 20*i) + time.Sleep(time.Second * time.Duration(i) * 20) + } + return errors.Wrap(err, stderr) +} From bbc1e2e1515b11955b2abb8f4e969def97f82578 Mon Sep 17 00:00:00 2001 From: Xun Jiang Date: Tue, 25 Apr 2023 13:50:52 +0800 Subject: [PATCH 09/10] Enable stylecheck linter and resolve found issues. 
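The bulk of the changes fall into four stylecheck classes:
- ST1003: consistent initialism casing and no underscores in identifiers
  (release_regex -> releaseRegex, downloadUrlTimeout -> downloadURLTimeout,
  JsonEncode -> JSONEncode, backupLocationInApiServer -> backupLocationInAPIServer).
- ST1005: error strings start lowercase and carry no trailing punctuation.
- ST1016: all methods on a type use the same receiver name.
- ST1019: duplicate imports of one package are collapsed into a single alias
  (kuberrs -> apierrors, client -> kbclient, v1 -> corev1api).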
Signed-off-by: Xun Jiang --- changelogs/unreleased/6185-blackpiglet | 1 + golangci.yaml | 1 + hack/release-tools/chk_version.go | 8 +-- hack/release-tools/chk_version_test.go | 2 +- .../resourcepolicies/resource_policies.go | 18 +++--- pkg/backup/backup_test.go | 4 +- ...ustomresourcedefinition_v1beta1_builder.go | 10 ++-- .../v1_customresourcedefinition_builder.go | 30 +++++----- pkg/cmd/cli/backup/describe.go | 2 +- pkg/cmd/cli/delete_options.go | 2 +- pkg/cmd/cli/nodeagent/server_test.go | 3 +- pkg/cmd/cli/serverstatus/server_status.go | 3 +- .../util/downloadrequest/downloadrequest.go | 6 +- pkg/cmd/util/output/describe.go | 6 +- pkg/controller/backup_controller.go | 5 +- pkg/controller/backup_controller_test.go | 28 +++++----- pkg/controller/backup_sync_controller.go | 17 +++--- .../pod_volume_backup_controller.go | 2 +- .../pod_volume_restore_controller.go | 18 +++--- pkg/controller/schedule_controller.go | 7 +-- pkg/itemoperationmap/backup_operation_map.go | 28 +++++----- pkg/itemoperationmap/restore_operation_map.go | 28 +++++----- .../framework/backup_item_action_server.go | 11 ++-- .../framework/backup_item_action_test.go | 3 +- .../v2/backup_item_action_server.go | 2 +- pkg/plugin/framework/common/plugin_kinds.go | 2 +- .../v2/restore_item_action_server.go | 2 +- pkg/podexec/pod_command_executor_test.go | 4 +- pkg/repository/provider/unified_repo.go | 28 +++++----- pkg/repository/udmrepo/kopialib/backend/s3.go | 6 +- .../udmrepo/kopialib/backend/s3_test.go | 4 +- pkg/repository/udmrepo/repo_options.go | 6 +- pkg/restic/exec_commands.go | 6 +- pkg/restore/change_image_name_action.go | 10 ++-- pkg/restore/change_image_name_action_test.go | 16 +++--- pkg/restore/change_pvc_node_selector_test.go | 4 +- pkg/restore/restore.go | 56 +++++++++---------- pkg/restore/restore_test.go | 31 +++++----- pkg/test/fake_discovery_helper.go | 10 ++-- pkg/uploader/provider/restic.go | 12 ++-- 40 files changed, 215 insertions(+), 227 deletions(-) create mode 100644 changelogs/unreleased/6185-blackpiglet diff --git a/changelogs/unreleased/6185-blackpiglet b/changelogs/unreleased/6185-blackpiglet new file mode 100644 index 000000000..7eb6096d7 --- /dev/null +++ b/changelogs/unreleased/6185-blackpiglet @@ -0,0 +1 @@ +Enable stylecheck linter and resolve found issues. \ No newline at end of file diff --git a/golangci.yaml b/golangci.yaml index 48eb43726..29e07ebfb 100644 --- a/golangci.yaml +++ b/golangci.yaml @@ -298,6 +298,7 @@ linters: - gosec - govet - misspell + - stylecheck - typecheck - unparam - unused diff --git a/hack/release-tools/chk_version.go b/hack/release-tools/chk_version.go index 9f87bff25..b0064eb5f 100644 --- a/hack/release-tools/chk_version.go +++ b/hack/release-tools/chk_version.go @@ -29,19 +29,19 @@ import ( // minor // patch // prerelease (this will be alpha/beta/rc followed by a ".", followed by 1 or more digits (alpha.5) -var release_regex *regexp.Regexp = regexp.MustCompile(`^v(?P[[:digit:]]+)\.(?P[[:digit:]]+)\.(?P[[:digit:]]+)(-{1}(?P(alpha|beta|rc)\.[[:digit:]]+))*`) +var releaseRegex = regexp.MustCompile(`^v(?P[[:digit:]]+)\.(?P[[:digit:]]+)\.(?P[[:digit:]]+)(-{1}(?P(alpha|beta|rc)\.[[:digit:]]+))*`) // This small program exists because checking the VELERO_VERSION rules in bash is difficult, and difficult to test for correctness. // Calling it with --verify will verify whether or not the VELERO_VERSION environment variable is a valid version string, without parsing for its components. // Calling it without --verify will try to parse the version into its component pieces. 
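// For example, VELERO_VERSION=v1.11.0-rc.1 parses into major "1", minor "11", patch "0" and prerelease "rc.1".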
func main() { - velero_version := os.Getenv("VELERO_VERSION") + veleroVersion := os.Getenv("VELERO_VERSION") - submatches := reSubMatchMap(release_regex, velero_version) + submatches := reSubMatchMap(releaseRegex, veleroVersion) // Didn't match the regex, exit. if len(submatches) == 0 { - fmt.Printf("VELERO_VERSION of %s was not valid. Please correct the value and retry.", velero_version) + fmt.Printf("VELERO_VERSION of %s was not valid. Please correct the value and retry.", veleroVersion) os.Exit(1) } diff --git a/hack/release-tools/chk_version_test.go b/hack/release-tools/chk_version_test.go index 772e9beaa..2402e7672 100644 --- a/hack/release-tools/chk_version_test.go +++ b/hack/release-tools/chk_version_test.go @@ -56,7 +56,7 @@ func TestRegexMatching(t *testing.T) { for _, test := range tests { name := fmt.Sprintf("Testing version string %s", test.version) t.Run(name, func(t *testing.T) { - results := reSubMatchMap(release_regex, test.version) + results := reSubMatchMap(releaseRegex, test.version) if len(results) == 0 && test.expectMatch { t.Fail() diff --git a/internal/resourcepolicies/resource_policies.go b/internal/resourcepolicies/resource_policies.go index 35e8b9f3e..ee2820b2a 100644 --- a/internal/resourcepolicies/resource_policies.go +++ b/internal/resourcepolicies/resource_policies.go @@ -54,7 +54,7 @@ func unmarshalResourcePolicies(yamlData *string) (*resourcePolicies, error) { } } -func (policies *Policies) buildPolicy(resPolicies *resourcePolicies) error { +func (p *Policies) buildPolicy(resPolicies *resourcePolicies) error { for _, vp := range resPolicies.VolumePolicies { con, err := unmarshalVolConditions(vp.Conditions) if err != nil { @@ -64,18 +64,18 @@ func (policies *Policies) buildPolicy(resPolicies *resourcePolicies) error { if err != nil { return errors.WithStack(err) } - var p volPolicy - p.action = vp.Action - p.conditions = append(p.conditions, &capacityCondition{capacity: *volCap}) - p.conditions = append(p.conditions, &storageClassCondition{storageClass: con.StorageClass}) - p.conditions = append(p.conditions, &nfsCondition{nfs: con.NFS}) - p.conditions = append(p.conditions, &csiCondition{csi: con.CSI}) - policies.volumePolicies = append(policies.volumePolicies, p) + var volP volPolicy + volP.action = vp.Action + volP.conditions = append(volP.conditions, &capacityCondition{capacity: *volCap}) + volP.conditions = append(volP.conditions, &storageClassCondition{storageClass: con.StorageClass}) + volP.conditions = append(volP.conditions, &nfsCondition{nfs: con.NFS}) + volP.conditions = append(volP.conditions, &csiCondition{csi: con.CSI}) + p.volumePolicies = append(p.volumePolicies, volP) } // Other resource policies - policies.version = resPolicies.Version + p.version = resPolicies.Version return nil } diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index f14375549..a90b3e0b1 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -52,7 +52,6 @@ import ( vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/test" - testutil "github.com/vmware-tanzu/velero/pkg/test" kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" "github.com/vmware-tanzu/velero/pkg/volume" ) @@ -107,7 +106,6 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) { if item.namespace != "" { fileWithVersion = fileWithVersion + "/v1-preferredversion/" + "namespaces/" + item.namespace } else { - file = file + "/cluster" fileWithVersion = 
fileWithVersion + "/v1-preferredversion" + "/cluster" } fileWithVersion = fileWithVersion + "/" + item.name + ".json" @@ -2845,7 +2843,7 @@ func TestBackupWithHooks(t *testing.T) { h = newHarness(t) req = &Request{Backup: tc.backup} backupFile = bytes.NewBuffer([]byte{}) - podCommandExecutor = new(testutil.MockPodCommandExecutor) + podCommandExecutor = new(test.MockPodCommandExecutor) ) h.backupper.podCommandExecutor = podCommandExecutor diff --git a/pkg/builder/customresourcedefinition_v1beta1_builder.go b/pkg/builder/customresourcedefinition_v1beta1_builder.go index d5ab9b248..1d6eea197 100644 --- a/pkg/builder/customresourcedefinition_v1beta1_builder.go +++ b/pkg/builder/customresourcedefinition_v1beta1_builder.go @@ -48,17 +48,17 @@ func (c *CustomResourceDefinitionV1Beta1Builder) Condition(cond apiextv1beta1.Cu } // Result returns the built CustomResourceDefinition. -func (b *CustomResourceDefinitionV1Beta1Builder) Result() *apiextv1beta1.CustomResourceDefinition { - return b.object +func (c *CustomResourceDefinitionV1Beta1Builder) Result() *apiextv1beta1.CustomResourceDefinition { + return c.object } // ObjectMeta applies functional options to the CustomResourceDefinition's ObjectMeta. -func (b *CustomResourceDefinitionV1Beta1Builder) ObjectMeta(opts ...ObjectMetaOpt) *CustomResourceDefinitionV1Beta1Builder { +func (c *CustomResourceDefinitionV1Beta1Builder) ObjectMeta(opts ...ObjectMetaOpt) *CustomResourceDefinitionV1Beta1Builder { for _, opt := range opts { - opt(b.object) + opt(c.object) } - return b + return c } // CustomResourceDefinitionV1Beta1ConditionBuilder builds CustomResourceDefinitionV1Beta1Condition objects. diff --git a/pkg/builder/v1_customresourcedefinition_builder.go b/pkg/builder/v1_customresourcedefinition_builder.go index 55969a0d3..4bb20590a 100644 --- a/pkg/builder/v1_customresourcedefinition_builder.go +++ b/pkg/builder/v1_customresourcedefinition_builder.go @@ -98,8 +98,8 @@ func (c *V1CustomResourceDefinitionConditionBuilder) Status(cs apiextv1.Conditio } // Result returns the built CustomResourceDefinitionCondition. -func (b *V1CustomResourceDefinitionConditionBuilder) Result() apiextv1.CustomResourceDefinitionCondition { - return b.object +func (c *V1CustomResourceDefinitionConditionBuilder) Result() apiextv1.CustomResourceDefinitionCondition { + return c.object } // V1CustomResourceDefinitionVersionBuilder builds CustomResourceDefinitionVersion objects. @@ -115,26 +115,26 @@ func ForV1CustomResourceDefinitionVersion(name string) *V1CustomResourceDefiniti } // Served sets the Served field on a CustomResourceDefinitionVersion. -func (b *V1CustomResourceDefinitionVersionBuilder) Served(s bool) *V1CustomResourceDefinitionVersionBuilder { - b.object.Served = s - return b +func (c *V1CustomResourceDefinitionVersionBuilder) Served(s bool) *V1CustomResourceDefinitionVersionBuilder { + c.object.Served = s + return c } // Storage sets the Storage field on a CustomResourceDefinitionVersion. 
-func (b *V1CustomResourceDefinitionVersionBuilder) Storage(s bool) *V1CustomResourceDefinitionVersionBuilder { - b.object.Storage = s - return b +func (c *V1CustomResourceDefinitionVersionBuilder) Storage(s bool) *V1CustomResourceDefinitionVersionBuilder { + c.object.Storage = s + return c } -func (b *V1CustomResourceDefinitionVersionBuilder) Schema(s *apiextv1.JSONSchemaProps) *V1CustomResourceDefinitionVersionBuilder { - if b.object.Schema == nil { - b.object.Schema = new(apiextv1.CustomResourceValidation) +func (c *V1CustomResourceDefinitionVersionBuilder) Schema(s *apiextv1.JSONSchemaProps) *V1CustomResourceDefinitionVersionBuilder { + if c.object.Schema == nil { + c.object.Schema = new(apiextv1.CustomResourceValidation) } - b.object.Schema.OpenAPIV3Schema = s - return b + c.object.Schema.OpenAPIV3Schema = s + return c } // Result returns the built CustomResourceDefinitionVersion. -func (b *V1CustomResourceDefinitionVersionBuilder) Result() apiextv1.CustomResourceDefinitionVersion { - return b.object +func (c *V1CustomResourceDefinitionVersionBuilder) Result() apiextv1.CustomResourceDefinitionVersion { + return c.object } diff --git a/pkg/cmd/cli/backup/describe.go b/pkg/cmd/cli/backup/describe.go index e47ddab11..e6af647df 100644 --- a/pkg/cmd/cli/backup/describe.go +++ b/pkg/cmd/cli/backup/describe.go @@ -61,7 +61,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { cmd.CheckError(err) if outputFormat != "plaintext" && outputFormat != "json" { - cmd.CheckError(fmt.Errorf("Invalid output format '%s'. Valid value are 'plaintext, json'", outputFormat)) + cmd.CheckError(fmt.Errorf("invalid output format '%s'. valid value are 'plaintext, json'", outputFormat)) } var backups *velerov1api.BackupList diff --git a/pkg/cmd/cli/delete_options.go b/pkg/cmd/cli/delete_options.go index 80fd29222..c6197a428 100644 --- a/pkg/cmd/cli/delete_options.go +++ b/pkg/cmd/cli/delete_options.go @@ -58,7 +58,7 @@ func (o *DeleteOptions) Complete(f client.Factory, args []string) error { // Validate validates the fields of the DeleteOptions struct. 
func (o *DeleteOptions) Validate(c *cobra.Command, f client.Factory, args []string) error { if o.Client == nil { - return errors.New("Velero client is not set; unable to proceed") + return errors.New("velero client is not set; unable to proceed") } return o.SelectOptions.Validate() diff --git a/pkg/cmd/cli/nodeagent/server_test.go b/pkg/cmd/cli/nodeagent/server_test.go index 5dab32b6b..d66fc08eb 100644 --- a/pkg/cmd/cli/nodeagent/server_test.go +++ b/pkg/cmd/cli/nodeagent/server_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" @@ -51,7 +50,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) { name: "no error when pod volumes are present and there are mirror pods", pods: []*corev1.Pod{ builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(), - builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(v1.MirrorPodAnnotationKey, "baz")).Result(), + builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(corev1.MirrorPodAnnotationKey, "baz")).Result(), }, dirs: []string{"foo", "baz"}, wantErr: false, diff --git a/pkg/cmd/cli/serverstatus/server_status.go b/pkg/cmd/cli/serverstatus/server_status.go index 534fa4f59..f64458874 100644 --- a/pkg/cmd/cli/serverstatus/server_status.go +++ b/pkg/cmd/cli/serverstatus/server_status.go @@ -22,7 +22,6 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" kbclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -48,7 +47,7 @@ func (g *DefaultServerStatusGetter) GetServerStatus(kbClient kbclient.Client) (* ctx, cancel := context.WithCancel(g.Context) defer cancel() - key := client.ObjectKey{Name: created.Name, Namespace: g.Namespace} + key := kbclient.ObjectKey{Name: created.Name, Namespace: g.Namespace} checkFunc := func() { updated := &velerov1api.ServerStatusRequest{} if err := kbClient.Get(ctx, key, updated); err != nil { diff --git a/pkg/cmd/util/downloadrequest/downloadrequest.go b/pkg/cmd/util/downloadrequest/downloadrequest.go index 716815c9d..76b15a019 100644 --- a/pkg/cmd/util/downloadrequest/downloadrequest.go +++ b/pkg/cmd/util/downloadrequest/downloadrequest.go @@ -60,11 +60,11 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin key := kbclient.ObjectKey{Name: created.Name, Namespace: namespace} timeStreamFirstCheck := time.Now() - downloadUrlTimeout := false + downloadURLTimeout := false checkFunc := func() { // if timeout has been reached, cancel request if time.Now().After(timeStreamFirstCheck.Add(timeout)) { - downloadUrlTimeout = true + downloadURLTimeout = true cancel() } updated := &velerov1api.DownloadRequest{} @@ -85,7 +85,7 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin } wait.Until(checkFunc, 25*time.Millisecond, ctx.Done()) - if downloadUrlTimeout { + if downloadURLTimeout { return ErrDownloadRequestDownloadURLTimeout } diff --git a/pkg/cmd/util/output/describe.go b/pkg/cmd/util/output/describe.go index 61aac2fce..b6a44b676 100644 --- a/pkg/cmd/util/output/describe.go +++ b/pkg/cmd/util/output/describe.go @@ -149,7 +149,7 @@ func NewStructuredDescriber(format string) *StructuredDescriber { func DescribeInSF(fn func(d *StructuredDescriber), format string) string { d := NewStructuredDescriber(format) fn(d) 
- return d.JsonEncode() + return d.JSONEncode() } // Describe adds all types of argument to d.output. @@ -167,8 +167,8 @@ func (d *StructuredDescriber) DescribeMetadata(metadata metav1.ObjectMeta) { d.Describe("metadata", metadataInfo) } -// JsonEncode encodes d.output to json -func (d *StructuredDescriber) JsonEncode() string { +// JSONEncode encodes d.output to json +func (d *StructuredDescriber) JSONEncode() string { byteBuffer := &bytes.Buffer{} encoder := json.NewEncoder(byteBuffer) encoder.SetEscapeHTML(false) diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index cd628aefc..d29062826 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -31,7 +31,6 @@ import ( "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" corev1api "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -468,7 +467,7 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg } if request.Spec.ResourcePolicy != nil && request.Spec.ResourcePolicy.Kind == resourcepolicies.ConfigmapRefType { - policiesConfigmap := &v1.ConfigMap{} + policiesConfigmap := &corev1api.ConfigMap{} err := b.kbClient.Get(context.Background(), kbclient.ObjectKey{Namespace: request.Namespace, Name: request.Spec.ResourcePolicy.Name}, policiesConfigmap) if err != nil { request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("failed to get resource policies %s/%s configmap with err %v", request.Namespace, request.Spec.ResourcePolicy.Name, err)) @@ -1125,7 +1124,7 @@ func (b *backupReconciler) recreateVolumeSnapshotContent(vsc snapshotv1api.Volum // validation webhook will check whether name and namespace are nil. // external-snapshotter needs Source pointing to snapshot and VolumeSnapshot // reference's UID to nil to determine the VolumeSnapshotContent is deletable. 
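	// Deriving the placeholder namespace from the VSC's own UID keeps the rewritten reference from matching any real VolumeSnapshot.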
- vsc.Spec.VolumeSnapshotRef = v1.ObjectReference{ + vsc.Spec.VolumeSnapshotRef = corev1api.ObjectReference{ APIVersion: snapshotv1api.SchemeGroupVersion.String(), Kind: "VolumeSnapshot", Namespace: "ns-" + string(vsc.UID), diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index ce8251e20..f57b13058 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -298,8 +298,8 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) { name string backup *velerov1api.Backup backupLocationNameInBackup string - backupLocationInApiServer *velerov1api.BackupStorageLocation - defaultBackupLocationInApiServer *velerov1api.BackupStorageLocation + backupLocationInAPIServer *velerov1api.BackupStorageLocation + defaultBackupLocationInAPIServer *velerov1api.BackupStorageLocation expectedBackupLocation string expectedSuccess bool expectedValidationError string @@ -308,8 +308,8 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) { name: "BackupLocation is specified in backup CR'spec and it can be found in ApiServer", backup: builder.ForBackup("velero", "backup-1").Result(), backupLocationNameInBackup: "test-backup-location", - backupLocationInApiServer: builder.ForBackupStorageLocation("velero", "test-backup-location").Result(), - defaultBackupLocationInApiServer: builder.ForBackupStorageLocation("velero", "default-location").Result(), + backupLocationInAPIServer: builder.ForBackupStorageLocation("velero", "test-backup-location").Result(), + defaultBackupLocationInAPIServer: builder.ForBackupStorageLocation("velero", "default-location").Result(), expectedBackupLocation: "test-backup-location", expectedSuccess: true, }, @@ -317,8 +317,8 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) { name: "BackupLocation is specified in backup CR'spec and it can't be found in ApiServer", backup: builder.ForBackup("velero", "backup-1").Result(), backupLocationNameInBackup: "test-backup-location", - backupLocationInApiServer: nil, - defaultBackupLocationInApiServer: nil, + backupLocationInAPIServer: nil, + defaultBackupLocationInAPIServer: nil, expectedSuccess: false, expectedValidationError: "an existing backup storage location wasn't specified at backup creation time and the default 'test-backup-location' wasn't found. Please address this issue (see `velero backup-location -h` for options) and create a new backup. 
Error: backupstoragelocations.velero.io \"test-backup-location\" not found", }, @@ -326,8 +326,8 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) { name: "Using default BackupLocation and it can be found in ApiServer", backup: builder.ForBackup("velero", "backup-1").Result(), backupLocationNameInBackup: "", - backupLocationInApiServer: builder.ForBackupStorageLocation("velero", "test-backup-location").Result(), - defaultBackupLocationInApiServer: builder.ForBackupStorageLocation("velero", "default-location").Result(), + backupLocationInAPIServer: builder.ForBackupStorageLocation("velero", "test-backup-location").Result(), + defaultBackupLocationInAPIServer: builder.ForBackupStorageLocation("velero", "default-location").Result(), expectedBackupLocation: defaultBackupLocation, expectedSuccess: true, }, @@ -335,8 +335,8 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) { name: "Using default BackupLocation and it can't be found in ApiServer", backup: builder.ForBackup("velero", "backup-1").Result(), backupLocationNameInBackup: "", - backupLocationInApiServer: nil, - defaultBackupLocationInApiServer: nil, + backupLocationInAPIServer: nil, + defaultBackupLocationInAPIServer: nil, expectedSuccess: false, expectedValidationError: fmt.Sprintf("an existing backup storage location wasn't specified at backup creation time and the server default '%s' doesn't exist. Please address this issue (see `velero backup-location -h` for options) and create a new backup. Error: backupstoragelocations.velero.io \"%s\" not found", defaultBackupLocation, defaultBackupLocation), }, @@ -353,11 +353,11 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) { // objects that should init with client objects := make([]runtime.Object, 0) - if test.backupLocationInApiServer != nil { - objects = append(objects, test.backupLocationInApiServer) + if test.backupLocationInAPIServer != nil { + objects = append(objects, test.backupLocationInAPIServer) } - if test.defaultBackupLocationInApiServer != nil { - objects = append(objects, test.defaultBackupLocationInApiServer) + if test.defaultBackupLocationInAPIServer != nil { + objects = append(objects, test.defaultBackupLocationInAPIServer) } fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objects...) 
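The test-field renames above (backupLocationInApiServer -> backupLocationInAPIServer) follow the same convention as the rest of this series: Go initialisms such as API, JSON, ID, TLS, and URL keep a uniform case inside identifiers, which is the rule that linters like golint/revive enforce. A minimal sketch of the convention, using made-up fixture names that are not part of the patch:

package main

import "fmt"

// backupLocationFixture is illustrative only; the field names mirror the
// before/after shape of the renames in this series.
type backupLocationFixture struct {
	backupLocationInAPIServer string // was backupLocationInApiServer
	serverURL                 string // was serverUrl
	accessKeyID               string // was accessKeyId
}

func main() {
	f := backupLocationFixture{
		backupLocationInAPIServer: "test-backup-location",
		serverURL:                 "https://some.server",
		accessKeyID:               "fake-key-id",
	}
	fmt.Println(f.backupLocationInAPIServer, f.serverURL, f.accessKeyID)
}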
diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index 7e8b8a8e9..606077fa1 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -24,7 +24,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" apierrors "k8s.io/apimachinery/pkg/api/errors" - kuberrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -174,10 +173,10 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) // attempt to create backup custom resource via API err = b.client.Create(ctx, backup, &client.CreateOptions{}) switch { - case err != nil && kuberrs.IsAlreadyExists(err): + case err != nil && apierrors.IsAlreadyExists(err): log.Debug("Backup already exists in cluster") continue - case err != nil && !kuberrs.IsAlreadyExists(err): + case err != nil && !apierrors.IsAlreadyExists(err): log.WithError(errors.WithStack(err)).Error("Error syncing backup into cluster") continue default: @@ -211,10 +210,10 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) err = b.client.Create(ctx, podVolumeBackup, &client.CreateOptions{}) switch { - case err != nil && kuberrs.IsAlreadyExists(err): + case err != nil && apierrors.IsAlreadyExists(err): log.Debug("Pod volume backup already exists in cluster") continue - case err != nil && !kuberrs.IsAlreadyExists(err): + case err != nil && !apierrors.IsAlreadyExists(err): log.WithError(errors.WithStack(err)).Error("Error syncing pod volume backup into cluster") continue default: @@ -235,10 +234,10 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) vsClass.ResourceVersion = "" err := b.client.Create(ctx, vsClass, &client.CreateOptions{}) switch { - case err != nil && kuberrs.IsAlreadyExists(err): + case err != nil && apierrors.IsAlreadyExists(err): log.Debugf("VolumeSnapshotClass %s already exists in cluster", vsClass.Name) continue - case err != nil && !kuberrs.IsAlreadyExists(err): + case err != nil && !apierrors.IsAlreadyExists(err): log.WithError(errors.WithStack(err)).Errorf("Error syncing VolumeSnapshotClass %s into cluster", vsClass.Name) continue default: @@ -259,10 +258,10 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) snapCont.ResourceVersion = "" err := b.client.Create(ctx, snapCont, &client.CreateOptions{}) switch { - case err != nil && kuberrs.IsAlreadyExists(err): + case err != nil && apierrors.IsAlreadyExists(err): log.Debugf("volumesnapshotcontent %s already exists in cluster", snapCont.Name) continue - case err != nil && !kuberrs.IsAlreadyExists(err): + case err != nil && !apierrors.IsAlreadyExists(err): log.WithError(errors.WithStack(err)).Errorf("Error syncing volumesnapshotcontent %s into cluster", snapCont.Name) continue default: diff --git a/pkg/controller/pod_volume_backup_controller.go b/pkg/controller/pod_volume_backup_controller.go index f0c480716..4a86679c3 100644 --- a/pkg/controller/pod_volume_backup_controller.go +++ b/pkg/controller/pod_volume_backup_controller.go @@ -43,7 +43,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/kube" ) -// For unit test to mock function +// NewUploaderProviderFunc is used by unit tests to mock provider.NewUploaderProvider var NewUploaderProviderFunc = provider.NewUploaderProvider // PodVolumeBackupReconciler reconciles a PodVolumeBackup object diff --git a/pkg/controller/pod_volume_restore_controller.go
b/pkg/controller/pod_volume_restore_controller.go index 2fad4b84e..a4f4cb36c 100644 --- a/pkg/controller/pod_volume_restore_controller.go +++ b/pkg/controller/pod_volume_restore_controller.go @@ -314,19 +314,19 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve return nil } -func (r *PodVolumeRestoreReconciler) NewRestoreProgressUpdater(pvr *velerov1api.PodVolumeRestore, log logrus.FieldLogger, ctx context.Context) *RestoreProgressUpdater { - return &RestoreProgressUpdater{pvr, log, ctx, r.Client} +func (c *PodVolumeRestoreReconciler) NewRestoreProgressUpdater(pvr *velerov1api.PodVolumeRestore, log logrus.FieldLogger, ctx context.Context) *RestoreProgressUpdater { + return &RestoreProgressUpdater{pvr, log, ctx, c.Client} } // UpdateProgress which implement ProgressUpdater interface to update pvr progress status -func (r *RestoreProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) { - original := r.PodVolumeRestore.DeepCopy() - r.PodVolumeRestore.Status.Progress = velerov1api.PodVolumeOperationProgress{TotalBytes: p.TotalBytes, BytesDone: p.BytesDone} - if r.Cli == nil { - r.Log.Errorf("failed to update restore pod %s volume %s progress with uninitailize client", r.PodVolumeRestore.Spec.Pod.Name, r.PodVolumeRestore.Spec.Volume) +func (c *RestoreProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) { + original := c.PodVolumeRestore.DeepCopy() + c.PodVolumeRestore.Status.Progress = velerov1api.PodVolumeOperationProgress{TotalBytes: p.TotalBytes, BytesDone: p.BytesDone} + if c.Cli == nil { + c.Log.Errorf("failed to update restore pod %s volume %s progress with uninitialized client", c.PodVolumeRestore.Spec.Pod.Name, c.PodVolumeRestore.Spec.Volume) return } - if err := r.Cli.Patch(r.Ctx, r.PodVolumeRestore, client.MergeFrom(original)); err != nil { - r.Log.Errorf("update restore pod %s volume %s progress with %v", r.PodVolumeRestore.Spec.Pod.Name, r.PodVolumeRestore.Spec.Volume, err) + if err := c.Cli.Patch(c.Ctx, c.PodVolumeRestore, client.MergeFrom(original)); err != nil { + c.Log.Errorf("update restore pod %s volume %s progress with %v", c.PodVolumeRestore.Spec.Pod.Name, c.PodVolumeRestore.Spec.Volume, err) } } diff --git a/pkg/controller/schedule_controller.go b/pkg/controller/schedule_controller.go index 577d942e1..d3268399c 100644 --- a/pkg/controller/schedule_controller.go +++ b/pkg/controller/schedule_controller.go @@ -36,7 +36,6 @@ import ( "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/metrics" "github.com/vmware-tanzu/velero/pkg/util/kube" - kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" ) const ( @@ -151,7 +150,7 @@ func parseCronSchedule(itm *velerov1.Schedule, logger logrus.FieldLogger) (cron. return nil, validationErrors } - log := logger.WithField("schedule", kubeutil.NamespaceAndName(itm)) + log := logger.WithField("schedule", kube.NamespaceAndName(itm)) // adding a recover() around cron.Parse because it panics on empty string and is possible // that it panics under other scenarios as well. @@ -183,7 +182,7 @@ func parseCronSchedule(itm *velerov1.Schedule, logger logrus.FieldLogger) (cron.
// checkIfBackupInNewOrProgress check whether there are backups created by this schedule still in New or InProgress state func (c *scheduleReconciler) checkIfBackupInNewOrProgress(schedule *velerov1.Schedule) bool { - log := c.logger.WithField("schedule", kubeutil.NamespaceAndName(schedule)) + log := c.logger.WithField("schedule", kube.NamespaceAndName(schedule)) backupList := &velerov1.BackupList{} options := &client.ListOptions{ Namespace: schedule.Namespace, @@ -211,7 +210,7 @@ func (c *scheduleReconciler) checkIfBackupInNewOrProgress(schedule *velerov1.Sch // ifDue check whether schedule is due to create a new backup. func (c *scheduleReconciler) ifDue(schedule *velerov1.Schedule, cronSchedule cron.Schedule) bool { isDue, nextRunTime := getNextRunTime(schedule, cronSchedule, c.clock.Now()) - log := c.logger.WithField("schedule", kubeutil.NamespaceAndName(schedule)) + log := c.logger.WithField("schedule", kube.NamespaceAndName(schedule)) if !isDue { log.WithField("nextRunTime", nextRunTime).Debug("Schedule is not due, skipping") diff --git a/pkg/itemoperationmap/backup_operation_map.go b/pkg/itemoperationmap/backup_operation_map.go index f818733e9..b014d7ebd 100644 --- a/pkg/itemoperationmap/backup_operation_map.go +++ b/pkg/itemoperationmap/backup_operation_map.go @@ -122,19 +122,19 @@ type OperationsForBackup struct { ErrsSinceUpdate []string } -func (in *OperationsForBackup) DeepCopy() *OperationsForBackup { - if in == nil { +func (m *OperationsForBackup) DeepCopy() *OperationsForBackup { + if m == nil { return nil } out := new(OperationsForBackup) - in.DeepCopyInto(out) + m.DeepCopyInto(out) return out } -func (in *OperationsForBackup) DeepCopyInto(out *OperationsForBackup) { - *out = *in - if in.Operations != nil { - in, out := &in.Operations, &out.Operations +func (m *OperationsForBackup) DeepCopyInto(out *OperationsForBackup) { + *out = *m + if m.Operations != nil { + in, out := &m.Operations, &out.Operations *out = make([]*itemoperation.BackupOperation, len(*in)) for i := range *in { if (*in)[i] != nil { @@ -144,17 +144,17 @@ func (in *OperationsForBackup) DeepCopyInto(out *OperationsForBackup) { } } } - if in.ErrsSinceUpdate != nil { - in, out := &in.ErrsSinceUpdate, &out.ErrsSinceUpdate + if m.ErrsSinceUpdate != nil { + in, out := &m.ErrsSinceUpdate, &out.ErrsSinceUpdate *out = make([]string, len(*in)) copy(*out, *in) } } -func (o *OperationsForBackup) uploadProgress(backupStore persistence.BackupStore, backupName string) error { - if len(o.Operations) > 0 { +func (m *OperationsForBackup) uploadProgress(backupStore persistence.BackupStore, backupName string) error { + if len(m.Operations) > 0 { var backupItemOperations *bytes.Buffer - backupItemOperations, errs := encode.EncodeToJSONGzip(o.Operations, "backup item operations list") + backupItemOperations, errs := encode.EncodeToJSONGzip(m.Operations, "backup item operations list") if errs != nil { return errors.Wrap(errs[0], "error encoding item operations json") } @@ -163,7 +163,7 @@ func (o *OperationsForBackup) uploadProgress(backupStore persistence.BackupStore return errors.Wrap(err, "error uploading item operations json") } } - o.ChangesSinceUpdate = false - o.ErrsSinceUpdate = nil + m.ChangesSinceUpdate = false + m.ErrsSinceUpdate = nil return nil } diff --git a/pkg/itemoperationmap/restore_operation_map.go b/pkg/itemoperationmap/restore_operation_map.go index 743560749..a50e00b01 100644 --- a/pkg/itemoperationmap/restore_operation_map.go +++ b/pkg/itemoperationmap/restore_operation_map.go @@ -122,19 +122,19 @@ type 
OperationsForRestore struct { ErrsSinceUpdate []string } -func (in *OperationsForRestore) DeepCopy() *OperationsForRestore { - if in == nil { +func (m *OperationsForRestore) DeepCopy() *OperationsForRestore { + if m == nil { return nil } out := new(OperationsForRestore) - in.DeepCopyInto(out) + m.DeepCopyInto(out) return out } -func (in *OperationsForRestore) DeepCopyInto(out *OperationsForRestore) { - *out = *in - if in.Operations != nil { - in, out := &in.Operations, &out.Operations +func (m *OperationsForRestore) DeepCopyInto(out *OperationsForRestore) { + *out = *m + if m.Operations != nil { + in, out := &m.Operations, &out.Operations *out = make([]*itemoperation.RestoreOperation, len(*in)) for i := range *in { if (*in)[i] != nil { @@ -144,17 +144,17 @@ func (in *OperationsForRestore) DeepCopyInto(out *OperationsForRestore) { } } } - if in.ErrsSinceUpdate != nil { - in, out := &in.ErrsSinceUpdate, &out.ErrsSinceUpdate + if m.ErrsSinceUpdate != nil { + in, out := &m.ErrsSinceUpdate, &out.ErrsSinceUpdate *out = make([]string, len(*in)) copy(*out, *in) } } -func (o *OperationsForRestore) uploadProgress(backupStore persistence.BackupStore, restoreName string) error { - if len(o.Operations) > 0 { +func (m *OperationsForRestore) uploadProgress(backupStore persistence.BackupStore, restoreName string) error { + if len(m.Operations) > 0 { var restoreItemOperations *bytes.Buffer - restoreItemOperations, errs := encode.EncodeToJSONGzip(o.Operations, "restore item operations list") + restoreItemOperations, errs := encode.EncodeToJSONGzip(m.Operations, "restore item operations list") if errs != nil { return errors.Wrap(errs[0], "error encoding item operations json") } @@ -163,7 +163,7 @@ func (o *OperationsForRestore) uploadProgress(backupStore persistence.BackupStor return errors.Wrap(err, "error uploading item operations json") } } - o.ChangesSinceUpdate = false - o.ErrsSinceUpdate = nil + m.ChangesSinceUpdate = false + m.ErrsSinceUpdate = nil return nil } diff --git a/pkg/plugin/framework/backup_item_action_server.go b/pkg/plugin/framework/backup_item_action_server.go index bde6c2148..6511591a9 100644 --- a/pkg/plugin/framework/backup_item_action_server.go +++ b/pkg/plugin/framework/backup_item_action_server.go @@ -26,7 +26,6 @@ import ( api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" - protobiav1 "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" biav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v1" ) @@ -52,8 +51,8 @@ func (s *BackupItemActionGRPCServer) getImpl(name string) (biav1.BackupItemActio } func (s *BackupItemActionGRPCServer) AppliesTo( - ctx context.Context, req *protobiav1.BackupItemActionAppliesToRequest) ( - response *protobiav1.BackupItemActionAppliesToResponse, err error) { + ctx context.Context, req *proto.BackupItemActionAppliesToRequest) ( + response *proto.BackupItemActionAppliesToResponse, err error) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr @@ -70,7 +69,7 @@ func (s *BackupItemActionGRPCServer) AppliesTo( return nil, common.NewGRPCError(err) } - return &protobiav1.BackupItemActionAppliesToResponse{ + return &proto.BackupItemActionAppliesToResponse{ ResourceSelector: &proto.ResourceSelector{ IncludedNamespaces: resourceSelector.IncludedNamespaces, ExcludedNamespaces: resourceSelector.ExcludedNamespaces, @@ 
-82,7 +81,7 @@ func (s *BackupItemActionGRPCServer) AppliesTo( } func (s *BackupItemActionGRPCServer) Execute( - ctx context.Context, req *protobiav1.ExecuteRequest) (response *protobiav1.ExecuteResponse, err error) { + ctx context.Context, req *proto.ExecuteRequest) (response *proto.ExecuteResponse, err error) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr @@ -121,7 +120,7 @@ func (s *BackupItemActionGRPCServer) Execute( } } - res := &protobiav1.ExecuteResponse{ + res := &proto.ExecuteResponse{ Item: updatedItemJSON, } diff --git a/pkg/plugin/framework/backup_item_action_test.go b/pkg/plugin/framework/backup_item_action_test.go index a88aaf853..fb304072e 100644 --- a/pkg/plugin/framework/backup_item_action_test.go +++ b/pkg/plugin/framework/backup_item_action_test.go @@ -31,7 +31,6 @@ import ( v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" - protobiav1 "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" mocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks/backupitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -162,7 +161,7 @@ func TestBackupItemActionGRPCServerExecute(t *testing.T) { }, }} - req := &protobiav1.ExecuteRequest{ + req := &proto.ExecuteRequest{ Plugin: "xyz", Item: test.item, Backup: test.backup, diff --git a/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go b/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go index 6bb046cf5..c622490e7 100644 --- a/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go +++ b/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go @@ -216,6 +216,6 @@ func backupResourceIdentifierToProto(id velero.ResourceIdentifier) *proto.Resour // This shouldn't be called on the GRPC server since the server won't ever receive this request, as // the RestartableBackupItemAction in Velero won't delegate this to the server -func (c *BackupItemActionGRPCServer) Name() string { +func (s *BackupItemActionGRPCServer) Name() string { return "" } diff --git a/pkg/plugin/framework/common/plugin_kinds.go b/pkg/plugin/framework/common/plugin_kinds.go index bba4697e0..5cb06b930 100644 --- a/pkg/plugin/framework/common/plugin_kinds.go +++ b/pkg/plugin/framework/common/plugin_kinds.go @@ -51,7 +51,7 @@ const ( PluginKindPluginLister PluginKind = "PluginLister" ) -// If there are plugin kinds that are adaptable to newer API versions, list them here. +// PluginKindsAdaptableTo lists the plugin kinds that are adaptable to newer API versions. // The older (adaptable) version is the key, and the value is the full list of newer // plugin kinds that are capable of adapting it.
var PluginKindsAdaptableTo = map[PluginKind][]PluginKind{ diff --git a/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go b/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go index c13ab3cff..2795d787d 100644 --- a/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go +++ b/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go @@ -266,6 +266,6 @@ func restoreResourceIdentifierToProto(id velero.ResourceIdentifier) *proto.Resou // This shouldn't be called on the GRPC server since the server won't ever receive this request, as // the RestartableRestoreItemAction in Velero won't delegate this to the server -func (c *RestoreItemActionGRPCServer) Name() string { +func (s *RestoreItemActionGRPCServer) Name() string { return "" } diff --git a/pkg/podexec/pod_command_executor_test.go b/pkg/podexec/pod_command_executor_test.go index addd62724..3f6937192 100644 --- a/pkg/podexec/pod_command_executor_test.go +++ b/pkg/podexec/pod_command_executor_test.go @@ -211,11 +211,11 @@ func TestExecutePodCommand(t *testing.T) { defer streamExecutorFactory.AssertExpectations(t) podCommandExecutor.streamExecutorFactory = streamExecutorFactory - baseUrl, _ := url.Parse("https://some.server") + baseURL, _ := url.Parse("https://some.server") contentConfig := rest.ClientContentConfig{ GroupVersion: schema.GroupVersion{Group: "", Version: "v1"}, } - poster.On("Post").Return(rest.NewRequestWithClient(baseUrl, "/api/v1", contentConfig, nil)) + poster.On("Post").Return(rest.NewRequestWithClient(baseURL, "/api/v1", contentConfig, nil)) streamExecutor := &mockStreamExecutor{} defer streamExecutor.AssertExpectations(t) diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go index 9161e0621..65571044f 100644 --- a/pkg/repository/provider/unified_repo.go +++ b/pkg/repository/provider/unified_repo.go @@ -422,7 +422,7 @@ func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, cr if err != nil { return map[string]string{}, errors.Wrap(err, "error get s3 credentials") } - result[udmrepo.StoreOptionS3KeyId] = credValue.AccessKeyID + result[udmrepo.StoreOptionS3KeyID] = credValue.AccessKeyID result[udmrepo.StoreOptionS3Provider] = credValue.ProviderName result[udmrepo.StoreOptionS3SecretKey] = credValue.SecretAccessKey result[udmrepo.StoreOptionS3Token] = credValue.SessionToken @@ -467,35 +467,35 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo region := config["region"] if backendType == repoconfig.AWSBackend { - s3Url := config["s3Url"] - disableTls := false + s3URL := config["s3Url"] + disableTLS := false var err error - if s3Url == "" { + if s3URL == "" { region, err = getS3BucketRegion(bucket) if err != nil { return map[string]string{}, errors.Wrap(err, "error get s3 bucket region") } - s3Url = fmt.Sprintf("s3-%s.amazonaws.com", region) - disableTls = false + s3URL = fmt.Sprintf("s3-%s.amazonaws.com", region) + disableTLS = false } else { - url, err := url.Parse(s3Url) + url, err := url.Parse(s3URL) if err != nil { - return map[string]string{}, errors.Wrapf(err, "error to parse s3Url %s", s3Url) + return map[string]string{}, errors.Wrapf(err, "error to parse s3Url %s", s3URL) } if url.Path != "" && url.Path != "/" { - return map[string]string{}, errors.Errorf("path is not expected in s3Url %s", s3Url) + return map[string]string{}, errors.Errorf("path is not expected in s3Url %s", s3URL) } - s3Url = url.Host - disableTls = (url.Scheme == "http") + s3URL = 
url.Host + disableTLS = (url.Scheme == "http") } - result[udmrepo.StoreOptionS3Endpoint] = strings.Trim(s3Url, "/") - result[udmrepo.StoreOptionS3DisableTlsVerify] = config["insecureSkipTLSVerify"] - result[udmrepo.StoreOptionS3DisableTls] = strconv.FormatBool(disableTls) + result[udmrepo.StoreOptionS3Endpoint] = strings.Trim(s3URL, "/") + result[udmrepo.StoreOptionS3DisableTLSVerify] = config["insecureSkipTLSVerify"] + result[udmrepo.StoreOptionS3DisableTLS] = strconv.FormatBool(disableTLS) } else if backendType == repoconfig.AzureBackend { domain, err := getAzureStorageDomain(config) if err != nil { diff --git a/pkg/repository/udmrepo/kopialib/backend/s3.go b/pkg/repository/udmrepo/kopialib/backend/s3.go index 38eeab106..90097c2ea 100644 --- a/pkg/repository/udmrepo/kopialib/backend/s3.go +++ b/pkg/repository/udmrepo/kopialib/backend/s3.go @@ -36,7 +36,7 @@ func (c *S3Backend) Setup(ctx context.Context, flags map[string]string) error { return err } - c.options.AccessKeyID, err = mustHaveString(udmrepo.StoreOptionS3KeyId, flags) + c.options.AccessKeyID, err = mustHaveString(udmrepo.StoreOptionS3KeyID, flags) if err != nil { return err } @@ -49,8 +49,8 @@ func (c *S3Backend) Setup(ctx context.Context, flags map[string]string) error { c.options.Endpoint = optionalHaveString(udmrepo.StoreOptionS3Endpoint, flags) c.options.Region = optionalHaveString(udmrepo.StoreOptionOssRegion, flags) c.options.Prefix = optionalHaveString(udmrepo.StoreOptionPrefix, flags) - c.options.DoNotUseTLS = optionalHaveBool(ctx, udmrepo.StoreOptionS3DisableTls, flags) - c.options.DoNotVerifyTLS = optionalHaveBool(ctx, udmrepo.StoreOptionS3DisableTlsVerify, flags) + c.options.DoNotUseTLS = optionalHaveBool(ctx, udmrepo.StoreOptionS3DisableTLS, flags) + c.options.DoNotVerifyTLS = optionalHaveBool(ctx, udmrepo.StoreOptionS3DisableTLSVerify, flags) c.options.SessionToken = optionalHaveString(udmrepo.StoreOptionS3Token, flags) c.options.Limits = setupLimits(ctx, flags) diff --git a/pkg/repository/udmrepo/kopialib/backend/s3_test.go b/pkg/repository/udmrepo/kopialib/backend/s3_test.go index 493c1e904..3aa793a71 100644 --- a/pkg/repository/udmrepo/kopialib/backend/s3_test.go +++ b/pkg/repository/udmrepo/kopialib/backend/s3_test.go @@ -41,13 +41,13 @@ func TestS3Setup(t *testing.T) { flags: map[string]string{ udmrepo.StoreOptionOssBucket: "fake-bucket", }, - expectedErr: "key " + udmrepo.StoreOptionS3KeyId + " not found", + expectedErr: "key " + udmrepo.StoreOptionS3KeyID + " not found", }, { name: "must have access key", flags: map[string]string{ udmrepo.StoreOptionOssBucket: "fake-bucket", - udmrepo.StoreOptionS3KeyId: "fake-key-id", + udmrepo.StoreOptionS3KeyID: "fake-key-id", }, expectedErr: "key " + udmrepo.StoreOptionS3SecretKey + " not found", }, diff --git a/pkg/repository/udmrepo/repo_options.go b/pkg/repository/udmrepo/repo_options.go index f4a043ff2..4f1276514 100644 --- a/pkg/repository/udmrepo/repo_options.go +++ b/pkg/repository/udmrepo/repo_options.go @@ -35,13 +35,13 @@ const ( GenOptionOwnerName = "username" GenOptionOwnerDomain = "domainname" - StoreOptionS3KeyId = "accessKeyID" + StoreOptionS3KeyID = "accessKeyID" StoreOptionS3Provider = "providerName" StoreOptionS3SecretKey = "secretAccessKey" StoreOptionS3Token = "sessionToken" StoreOptionS3Endpoint = "endpoint" - StoreOptionS3DisableTls = "doNotUseTLS" - StoreOptionS3DisableTlsVerify = "skipTLSVerify" + StoreOptionS3DisableTLS = "doNotUseTLS" + StoreOptionS3DisableTLSVerify = "skipTLSVerify" StoreOptionAzureKey = "storageKey" StoreOptionAzureDomain = 
"storageDomain" diff --git a/pkg/restic/exec_commands.go b/pkg/restic/exec_commands.go index 6d9e29d46..b1382710e 100644 --- a/pkg/restic/exec_commands.go +++ b/pkg/restic/exec_commands.go @@ -47,8 +47,8 @@ type backupStatusLine struct { // GetSnapshotID runs provided 'restic snapshots' command to get the ID of a snapshot // and an error if a unique snapshot cannot be identified. -func GetSnapshotID(snapshotIdCmd *Command) (string, error) { - stdout, stderr, err := exec.RunCommand(snapshotIdCmd.Cmd()) +func GetSnapshotID(snapshotIDCmd *Command) (string, error) { + stdout, stderr, err := exec.RunCommand(snapshotIDCmd.Cmd()) if err != nil { return "", errors.Wrapf(err, "error running command, stderr=%s", stderr) } @@ -63,7 +63,7 @@ func GetSnapshotID(snapshotIdCmd *Command) (string, error) { } if len(snapshots) != 1 { - return "", errors.Errorf("expected one matching snapshot by command: %s, got %d", snapshotIdCmd.String(), len(snapshots)) + return "", errors.Errorf("expected one matching snapshot by command: %s, got %d", snapshotIDCmd.String(), len(snapshots)) } return snapshots[0].ShortID, nil diff --git a/pkg/restore/change_image_name_action.go b/pkg/restore/change_image_name_action.go index f6f1834f3..c9eb29e17 100644 --- a/pkg/restore/change_image_name_action.go +++ b/pkg/restore/change_image_name_action.go @@ -33,7 +33,7 @@ import ( ) const ( - DELIMITER_VALUE = "," + delimiterValue = "," ) // ChangeImageNameAction updates a deployment or Pod's image name @@ -194,13 +194,13 @@ func (a *ChangeImageNameAction) isImageReplaceRuleExist(log *logrus.Entry, oldIm //"case3":"abc:test,edf:test" //"case4":"1.1.1.1:5000/abc:test,2.2.2.2:3000/edf:test" for _, row := range cm.Data { - if !strings.Contains(row, DELIMITER_VALUE) { + if !strings.Contains(row, delimiterValue) { continue } - if strings.Contains(oldImageName, strings.TrimSpace(row[0:strings.Index(row, DELIMITER_VALUE)])) && len(row[strings.Index(row, DELIMITER_VALUE):]) > len(DELIMITER_VALUE) { + if strings.Contains(oldImageName, strings.TrimSpace(row[0:strings.Index(row, delimiterValue)])) && len(row[strings.Index(row, delimiterValue):]) > len(delimiterValue) { log.Infoln("match specific case:", row) - oldImagePart := strings.TrimSpace(row[0:strings.Index(row, DELIMITER_VALUE)]) - newImagePart := strings.TrimSpace(row[strings.Index(row, DELIMITER_VALUE)+len(DELIMITER_VALUE):]) + oldImagePart := strings.TrimSpace(row[0:strings.Index(row, delimiterValue)]) + newImagePart := strings.TrimSpace(row[strings.Index(row, delimiterValue)+len(delimiterValue):]) newImageName = strings.Replace(oldImageName, oldImagePart, newImagePart, -1) return true, newImageName, nil } diff --git a/pkg/restore/change_image_name_action_test.go b/pkg/restore/change_image_name_action_test.go index da9ae3ce5..cc2c8ff66 100644 --- a/pkg/restore/change_image_name_action_test.go +++ b/pkg/restore/change_image_name_action_test.go @@ -24,8 +24,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" - corev1api "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -43,7 +41,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) { tests := []struct { name string podOrObj interface{} - configMap *corev1api.ConfigMap + configMap *corev1.ConfigMap freshedImageName string imageNameSlice []string want interface{} @@ -52,7 +50,7 @@ func TestChangeImageRepositoryActionExecute(t 
*testing.T) { { name: "a valid mapping with spaces for a new image repository is applied correctly", podOrObj: builder.ForPod("default", "pod1").ObjectMeta(). - Containers(&v1.Container{ + Containers(&corev1.Container{ Name: "container1", Image: "1.1.1.1:5000/abc:test", }).Result(), @@ -67,7 +65,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) { { name: "a valid mapping for a new image repository is applied correctly", podOrObj: builder.ForPod("default", "pod1").ObjectMeta(). - Containers(&v1.Container{ + Containers(&corev1.Container{ Name: "container2", Image: "1.1.1.1:5000/abc:test", }).Result(), @@ -82,7 +80,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) { { name: "a valid mapping for a new image name is applied correctly", podOrObj: builder.ForPod("default", "pod1").ObjectMeta(). - Containers(&v1.Container{ + Containers(&corev1.Container{ Name: "container3", Image: "1.1.1.1:5000/abc:test", }).Result(), @@ -97,7 +95,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) { { name: "a valid mapping for a new image repository port is applied correctly", podOrObj: builder.ForPod("default", "pod1").ObjectMeta(). - Containers(&v1.Container{ + Containers(&corev1.Container{ Name: "container4", Image: "1.1.1.1:5000/abc:test", }).Result(), @@ -112,7 +110,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) { { name: "a valid mapping for a new image tag is applied correctly", podOrObj: builder.ForPod("default", "pod1").ObjectMeta(). - Containers(&v1.Container{ + Containers(&corev1.Container{ Name: "container5", Image: "1.1.1.1:5000/abc:test", }).Result(), @@ -127,7 +125,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) { { name: "image name contains more than one part that matching the replacing words.", podOrObj: builder.ForPod("default", "pod1").ObjectMeta(). - Containers(&v1.Container{ + Containers(&corev1.Container{ Name: "container6", Image: "dev/image1:dev", }).Result(), diff --git a/pkg/restore/change_pvc_node_selector_test.go b/pkg/restore/change_pvc_node_selector_test.go index a39a9b15e..faca154ae 100644 --- a/pkg/restore/change_pvc_node_selector_test.go +++ b/pkg/restore/change_pvc_node_selector_test.go @@ -167,8 +167,8 @@ func TestChangePVCNodeSelectorActionExecute(t *testing.T) { res, err := a.Execute(input) // Make sure mapped selected-node exists. - log_output := buf.String() - assert.Equal(t, strings.Contains(log_output, "Selected-node's mapped node doesn't exist"), false) + logOutput := buf.String() + assert.Equal(t, strings.Contains(logOutput, "Selected-node's mapped node doesn't exist"), false) // validate for both error and non-error cases switch { diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index afeb26cbb..59e73ee5d 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -66,7 +66,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/collections" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/kube" - . 
"github.com/vmware-tanzu/velero/pkg/util/results" + "github.com/vmware-tanzu/velero/pkg/util/results" "github.com/vmware-tanzu/velero/pkg/volume" ) @@ -80,12 +80,12 @@ type Restorer interface { Restore(req *Request, actions []riav2.RestoreItemAction, volumeSnapshotterGetter VolumeSnapshotterGetter, - ) (Result, Result) + ) (results.Result, results.Result) RestoreWithResolvers( req *Request, restoreItemActionResolver framework.RestoreItemActionResolverV2, volumeSnapshotterGetter VolumeSnapshotterGetter, - ) (Result, Result) + ) (results.Result, results.Result) } // kubernetesRestorer implements Restorer for restoring into a Kubernetes cluster. @@ -134,11 +134,11 @@ func NewKubernetesRestorer( resourcePriorities: resourcePriorities, logger: logger, pvRenamer: func(string) (string, error) { - veleroCloneUuid, err := uuid.NewRandom() + veleroCloneUUID, err := uuid.NewRandom() if err != nil { return "", errors.WithStack(err) } - veleroCloneName := "velero-clone-" + veleroCloneUuid.String() + veleroCloneName := "velero-clone-" + veleroCloneUUID.String() return veleroCloneName, nil }, fileSystem: filesystem.NewFileSystem(), @@ -156,7 +156,7 @@ func (kr *kubernetesRestorer) Restore( req *Request, actions []riav2.RestoreItemAction, volumeSnapshotterGetter VolumeSnapshotterGetter, -) (Result, Result) { +) (results.Result, results.Result) { resolver := framework.NewRestoreItemActionResolverV2(actions) return kr.RestoreWithResolvers(req, resolver, volumeSnapshotterGetter) } @@ -165,7 +165,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( req *Request, restoreItemActionResolver framework.RestoreItemActionResolverV2, volumeSnapshotterGetter VolumeSnapshotterGetter, -) (Result, Result) { +) (results.Result, results.Result) { // metav1.LabelSelectorAsSelector converts a nil LabelSelector to a // Nothing Selector, i.e. a selector that matches nothing. We want // a selector that matches everything. This can be accomplished by @@ -180,7 +180,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( for _, s := range req.Restore.Spec.OrLabelSelectors { labelAsSelector, err := metav1.LabelSelectorAsSelector(s) if err != nil { - return Result{}, Result{Velero: []string{err.Error()}} + return results.Result{}, results.Result{Velero: []string{err.Error()}} } OrSelectors = append(OrSelectors, labelAsSelector) } @@ -188,7 +188,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( selector, err := metav1.LabelSelectorAsSelector(ls) if err != nil { - return Result{}, Result{Velero: []string{err.Error()}} + return results.Result{}, results.Result{Velero: []string{err.Error()}} } // Get resource includes-excludes. 
@@ -216,7 +216,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( resolvedActions, err := restoreItemActionResolver.ResolveActions(kr.discoveryHelper, kr.logger) if err != nil { - return Result{}, Result{Velero: []string{err.Error()}} + return results.Result{}, results.Result{Velero: []string{err.Error()}} } podVolumeTimeout := kr.podVolumeTimeout @@ -239,13 +239,13 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( if kr.podVolumeRestorerFactory != nil { podVolumeRestorer, err = kr.podVolumeRestorerFactory.NewRestorer(ctx, req.Restore) if err != nil { - return Result{}, Result{Velero: []string{err.Error()}} + return results.Result{}, results.Result{Velero: []string{err.Error()}} } } resourceRestoreHooks, err := hook.GetRestoreHooksFromSpec(&req.Restore.Spec.Hooks) if err != nil { - return Result{}, Result{Velero: []string{err.Error()}} + return results.Result{}, results.Result{Velero: []string{err.Error()}} } hooksCtx, hooksCancelFunc := go_context.WithCancel(go_context.Background()) waitExecHookHandler := &hook.DefaultWaitExecHookHandler{ @@ -390,8 +390,8 @@ type progressUpdate struct { totalItems, itemsRestored int } -func (ctx *restoreContext) execute() (Result, Result) { - warnings, errs := Result{}, Result{} +func (ctx *restoreContext) execute() (results.Result, results.Result) { + warnings, errs := results.Result{}, results.Result{} ctx.log.Infof("Starting restore of backup %s", kube.NamespaceAndName(ctx.backup)) @@ -482,7 +482,7 @@ func (ctx *restoreContext) execute() (Result, Result) { } for _, selectedResource := range crdResourceCollection { - var w, e Result + var w, e results.Result // Restore this resource processedItems, w, e = ctx.processSelectedResource( selectedResource, @@ -514,7 +514,7 @@ func (ctx *restoreContext) execute() (Result, Result) { } for _, selectedResource := range selectedResourceCollection { - var w, e Result + var w, e results.Result // Restore this resource processedItems, w, e = ctx.processSelectedResource( selectedResource, @@ -590,8 +590,8 @@ func (ctx *restoreContext) processSelectedResource( processedItems int, existingNamespaces sets.String, update chan progressUpdate, -) (int, Result, Result) { - warnings, errs := Result{}, Result{} +) (int, results.Result, results.Result) { + warnings, errs := results.Result{}, results.Result{} groupResource := schema.ParseGroupResource(selectedResource.resource) for namespace, selectedItems := range selectedResource.selectedItemsByNamespace { @@ -939,8 +939,8 @@ func getResourceID(groupResource schema.GroupResource, namespace, name string) s return fmt.Sprintf("%s/%s/%s", groupResource.String(), namespace, name) } -func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (Result, Result, bool) { - warnings, errs := Result{}, Result{} +func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (results.Result, results.Result, bool) { + warnings, errs := results.Result{}, results.Result{} // itemExists bool is used to determine whether to include this item in the "wait for additional items" list itemExists := false resourceID := getResourceID(groupResource, namespace, obj.GetName()) @@ -1281,7 +1281,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err != nil { errs.Add(namespace, errors.Wrapf(err, "error verifying additional items are ready to use")) } else if !available { - errs.Add(namespace, fmt.Errorf("Additional items for %s 
are not ready to use.", resourceID)) + errs.Add(namespace, fmt.Errorf("additional items for %s are not ready to use", resourceID)) } } @@ -1548,7 +1548,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err != nil { errs.Add(namespace, errors.Wrapf(err, "error verifying custom resource definition is ready to use")) } else if !available { - errs.Add(namespace, fmt.Errorf("CRD %s is not available to use for custom resources.", name)) + errs.Add(namespace, fmt.Errorf("the CRD %s is not available to use for custom resources", name)) } } @@ -1911,8 +1911,8 @@ func (ctx *restoreContext) getOrderedResourceCollection( processedResources sets.String, resourcePriorities Priorities, includeAllResources bool, -) ([]restoreableResource, sets.String, Result, Result) { - var warnings, errs Result +) ([]restoreableResource, sets.String, results.Result, results.Result) { + var warnings, errs results.Result // Iterate through an ordered list of resources to restore, checking each // one to see if it should be restored. Note that resources *may* be in this // list twice, i.e. once due to being a prioritized resource, and once due @@ -2012,8 +2012,8 @@ func (ctx *restoreContext) getOrderedResourceCollection( // getSelectedRestoreableItems applies Kubernetes selectors on individual items // of each resource type to create a list of items which will be actually // restored. -func (ctx *restoreContext) getSelectedRestoreableItems(resource, targetNamespace, originalNamespace string, items []string) (restoreableResource, Result, Result) { - warnings, errs := Result{}, Result{} +func (ctx *restoreContext) getSelectedRestoreableItems(resource, targetNamespace, originalNamespace string, items []string) (restoreableResource, results.Result, results.Result) { + warnings, errs := results.Result{}, results.Result{} restorable := restoreableResource{ resource: resource, @@ -2112,7 +2112,7 @@ func removeRestoreLabels(obj metav1.Object) { } // updates the backup/restore labels -func (ctx *restoreContext) updateBackupRestoreLabels(fromCluster, fromClusterWithLabels *unstructured.Unstructured, namespace string, resourceClient client.Dynamic) (warnings, errs Result) { +func (ctx *restoreContext) updateBackupRestoreLabels(fromCluster, fromClusterWithLabels *unstructured.Unstructured, namespace string, resourceClient client.Dynamic) (warnings, errs results.Result) { patchBytes, err := generatePatch(fromCluster, fromClusterWithLabels) if err != nil { ctx.log.Errorf("error generating patch for %s %s: %v", fromCluster.GroupVersionKind().Kind, kube.NamespaceAndName(fromCluster), err) @@ -2140,7 +2140,7 @@ func (ctx *restoreContext) updateBackupRestoreLabels(fromCluster, fromClusterWit // function to process existingResourcePolicy as update, tries to patch the diff between in-cluster and restore obj first // if the patch fails then tries to update the backup/restore labels for the in-cluster version -func (ctx *restoreContext) processUpdateResourcePolicy(fromCluster, fromClusterWithLabels, obj *unstructured.Unstructured, namespace string, resourceClient client.Dynamic) (warnings, errs Result) { +func (ctx *restoreContext) processUpdateResourcePolicy(fromCluster, fromClusterWithLabels, obj *unstructured.Unstructured, namespace string, resourceClient client.Dynamic) (warnings, errs results.Result) { ctx.log.Infof("restore API has existingResourcePolicy defined as update , executing restore workflow accordingly for changed resource %s %s ", obj.GroupVersionKind().Kind, 
kube.NamespaceAndName(fromCluster)) ctx.log.Infof("attempting patch on %s %q", fromCluster.GetKind(), fromCluster.GetName()) // remove restore labels so that we apply the latest backup/restore names on the object via patch diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index 2b8957c3f..f41ce759f 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -53,9 +53,6 @@ import ( "github.com/vmware-tanzu/velero/pkg/podvolume" uploadermocks "github.com/vmware-tanzu/velero/pkg/podvolume/mocks" "github.com/vmware-tanzu/velero/pkg/test" - testutil "github.com/vmware-tanzu/velero/pkg/test" - velerotest "github.com/vmware-tanzu/velero/pkg/test" - "github.com/vmware-tanzu/velero/pkg/util/kube" kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" . "github.com/vmware-tanzu/velero/pkg/util/results" "github.com/vmware-tanzu/velero/pkg/volume" @@ -3203,13 +3200,13 @@ func TestIsCompleted(t *testing.T) { groupResource: schema.GroupResource{Group: "", Resource: "namespaces"}, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - u := testutil.UnstructuredOrDie(test.content) - backup, err := isCompleted(u, test.groupResource) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + u := test.UnstructuredOrDie(tt.content) + backup, err := isCompleted(u, tt.groupResource) - if assert.Equal(t, test.expectedErr, err != nil) { - assert.Equal(t, test.expected, backup) + if assert.Equal(t, tt.expectedErr, err != nil) { + assert.Equal(t, tt.expected, backup) } }) } @@ -3392,7 +3389,7 @@ func newHarness(t *testing.T) *harness { apiServer := test.NewAPIServer(t) log := logrus.StandardLogger() - kbClient := velerotest.NewFakeControllerRuntimeClient(t) + kbClient := test.NewFakeControllerRuntimeClient(t) discoveryHelper, err := discovery.NewHelper(apiServer.DiscoveryClient, log) require.NoError(t, err) @@ -3405,7 +3402,7 @@ func newHarness(t *testing.T) *harness { namespaceClient: apiServer.KubeClient.CoreV1().Namespaces(), resourceTerminatingTimeout: time.Minute, logger: log, - fileSystem: testutil.NewFakeFileSystem(), + fileSystem: test.NewFakeFileSystem(), // unsupported podVolumeRestorerFactory: nil, @@ -3452,9 +3449,9 @@ func Test_resetVolumeBindingInfo(t *testing.T) { name: "PVs that are bound have their binding and dynamic provisioning annotations removed", obj: NewTestUnstructured().WithMetadataField("kind", "persistentVolume"). WithName("pv-1").WithAnnotations( - kube.KubeAnnBindCompleted, - kube.KubeAnnBoundByController, - kube.KubeAnnDynamicallyProvisioned, + kubeutil.KubeAnnBindCompleted, + kubeutil.KubeAnnBoundByController, + kubeutil.KubeAnnDynamicallyProvisioned, ).WithSpecField("claimRef", map[string]interface{}{ "namespace": "ns-1", "name": "pvc-1", @@ -3462,7 +3459,7 @@ func Test_resetVolumeBindingInfo(t *testing.T) { "resourceVersion": "1"}).Unstructured, expected: NewTestUnstructured().WithMetadataField("kind", "persistentVolume"). WithName("pv-1"). - WithAnnotations(kube.KubeAnnDynamicallyProvisioned). + WithAnnotations(kubeutil.KubeAnnDynamicallyProvisioned). WithSpecField("claimRef", map[string]interface{}{ "namespace": "ns-1", "name": "pvc-1"}).Unstructured, }, @@ -3470,8 +3467,8 @@ func Test_resetVolumeBindingInfo(t *testing.T) { name: "PVCs that are bound have their binding annotations removed, but the volume name stays", obj: NewTestUnstructured().WithMetadataField("kind", "persistentVolumeClaim"). 
WithName("pvc-1").WithAnnotations( - kube.KubeAnnBindCompleted, - kube.KubeAnnBoundByController, + kubeutil.KubeAnnBindCompleted, + kubeutil.KubeAnnBoundByController, ).WithSpecField("volumeName", "pv-1").Unstructured, expected: NewTestUnstructured().WithMetadataField("kind", "persistentVolumeClaim"). WithName("pvc-1").WithAnnotations(). diff --git a/pkg/test/fake_discovery_helper.go b/pkg/test/fake_discovery_helper.go index 7be1ef3dc..79b3de7f2 100644 --- a/pkg/test/fake_discovery_helper.go +++ b/pkg/test/fake_discovery_helper.go @@ -149,7 +149,7 @@ func (dh *FakeDiscoveryHelper) APIGroups() []metav1.APIGroup { type FakeServerResourcesInterface struct { ResourceList []*metav1.APIResourceList - ApiGroup []*metav1.APIGroup + APIGroup []*metav1.APIGroup FailedGroups map[schema.GroupVersion]error ReturnError error } @@ -166,18 +166,18 @@ func (di *FakeServerResourcesInterface) ServerPreferredResources() ([]*metav1.AP func (di *FakeServerResourcesInterface) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { if di.ReturnError != nil { - return di.ApiGroup, di.ResourceList, di.ReturnError + return di.APIGroup, di.ResourceList, di.ReturnError } if di.FailedGroups == nil || len(di.FailedGroups) == 0 { - return di.ApiGroup, di.ResourceList, nil + return di.APIGroup, di.ResourceList, nil } - return di.ApiGroup, di.ResourceList, &discovery.ErrGroupDiscoveryFailed{Groups: di.FailedGroups} + return di.APIGroup, di.ResourceList, &discovery.ErrGroupDiscoveryFailed{Groups: di.FailedGroups} } func NewFakeServerResourcesInterface(resourceList []*metav1.APIResourceList, apiGroup []*metav1.APIGroup, failedGroups map[schema.GroupVersion]error, returnError error) *FakeServerResourcesInterface { helper := &FakeServerResourcesInterface{ ResourceList: resourceList, - ApiGroup: apiGroup, + APIGroup: apiGroup, FailedGroups: failedGroups, ReturnError: returnError, } diff --git a/pkg/uploader/provider/restic.go b/pkg/uploader/provider/restic.go index 70ae4823c..ce8ec4914 100644 --- a/pkg/uploader/provider/restic.go +++ b/pkg/uploader/provider/restic.go @@ -33,7 +33,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) -// mainly used to make testing more convenient +// ResticBackupCMDFunc and ResticRestoreCMDFunc are mainly used to make testing more convenient var ResticBackupCMDFunc = restic.BackupCommand var ResticRestoreCMDFunc = restic.RestoreCommand @@ -144,13 +144,13 @@ func (rp *resticProvider) RunBackup( return "", false, errors.WithStack(fmt.Errorf("error running restic backup command %s with error: %v stderr: %v", backupCmd.String(), err, stderrBuf)) } // GetSnapshotID - snapshotIdCmd := restic.GetSnapshotCommand(rp.repoIdentifier, rp.credentialsFile, tags) - snapshotIdCmd.Env = rp.cmdEnv - snapshotIdCmd.CACertFile = rp.caCertFile + snapshotIDCmd := restic.GetSnapshotCommand(rp.repoIdentifier, rp.credentialsFile, tags) + snapshotIDCmd.Env = rp.cmdEnv + snapshotIDCmd.CACertFile = rp.caCertFile if len(rp.extraFlags) != 0 { - snapshotIdCmd.ExtraFlags = append(snapshotIdCmd.ExtraFlags, rp.extraFlags...) + snapshotIDCmd.ExtraFlags = append(snapshotIDCmd.ExtraFlags, rp.extraFlags...) 
} - snapshotID, err := restic.GetSnapshotID(snapshotIdCmd) + snapshotID, err := restic.GetSnapshotID(snapshotIDCmd) if err != nil { return "", false, errors.WithStack(fmt.Errorf("error getting snapshot id with error: %v", err)) } From d928124b0176205ab2b66b691a4935c99593ab12 Mon Sep 17 00:00:00 2001 From: Lyndon-Li Date: Tue, 25 Apr 2023 12:20:09 +0800 Subject: [PATCH 10/10] fix issue 6182 Signed-off-by: Lyndon-Li --- changelogs/unreleased/6184-Lyndon-Li | 1 + pkg/podvolume/backupper.go | 25 +++++++++++++++---------- 2 files changed, 16 insertions(+), 10 deletions(-) create mode 100644 changelogs/unreleased/6184-Lyndon-Li diff --git a/changelogs/unreleased/6184-Lyndon-Li b/changelogs/unreleased/6184-Lyndon-Li new file mode 100644 index 000000000..fb3999f08 --- /dev/null +++ b/changelogs/unreleased/6184-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #6182. If the pod is not running, don't treat it as an error; skip its volumes and leave a warning. \ No newline at end of file diff --git a/pkg/podvolume/backupper.go b/pkg/podvolume/backupper.go index cc62aaaca..641c41f28 100644 --- a/pkg/podvolume/backupper.go +++ b/pkg/podvolume/backupper.go @@ -132,6 +132,21 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. return nil, nil } + err := kube.IsPodRunning(pod) + if err != nil { + for _, volumeName := range volumesToBackup { + volErr := errors.Wrapf(err, "backup for volume %s is skipped", volumeName) + log.WithError(volErr).Warn("Skip pod volume") + } + + return nil, nil + } + + err = nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.podClient) + if err != nil { + return nil, []error{err} + } + repositoryType := getRepositoryType(b.uploaderType) if repositoryType == "" { err := errors.Errorf("empty repository type, uploader %s", b.uploaderType) @@ -143,16 +158,6 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. return nil, []error{err} } - err = kube.IsPodRunning(pod) - if err != nil { - return nil, []error{err} - } - - err = nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.podClient) - if err != nil { - return nil, []error{err} - } - // get a single non-exclusive lock since we'll wait for all individual // backups to be complete before releasing it. b.repoLocker.Lock(repo.Name)
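The backupper.go hunk above moves the pod status check ahead of all repository work and downgrades a not-running pod from a hard error to one warning per requested volume. A self-contained sketch of that skip-with-warning flow, assuming a stand-in Pod type and plain fmt logging in place of Velero's kube.IsPodRunning and logrus (all names here are illustrative):

package main

import "fmt"

type Pod struct {
	Name  string
	Phase string
}

// isPodRunning mirrors the shape of the helper used above: nil when the
// pod is running, a descriptive error otherwise.
func isPodRunning(pod *Pod) error {
	if pod.Phase != "Running" {
		return fmt.Errorf("pod %s is not running, phase=%s", pod.Name, pod.Phase)
	}
	return nil
}

// backupPodVolumes returns no error for a not-running pod; it only logs
// one warning per volume, matching the behavior change in this patch.
func backupPodVolumes(pod *Pod, volumesToBackup []string) ([]string, []error) {
	if err := isPodRunning(pod); err != nil {
		for _, volumeName := range volumesToBackup {
			fmt.Printf("warning: backup for volume %s is skipped: %v\n", volumeName, err)
		}
		return nil, nil
	}
	// ...repository setup and the actual per-volume backups would go here...
	return volumesToBackup, nil
}

func main() {
	pending := &Pod{Name: "app-0", Phase: "Pending"}
	backupPodVolumes(pending, []string{"data", "logs"})
}

Returning nil for both results means the caller records only warnings instead of failing the backup, which is the intent stated in the changelog entry.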