diff --git a/Dockerfile b/Dockerfile
index 46238a9cf..94d7ceb9c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.23-bookworm AS velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
 
 ARG GOPROXY
 ARG BIN
@@ -49,7 +49,7 @@ RUN mkdir -p /output/usr/bin && \
     go clean -modcache -cache
 
 # Restic binary build section
-FROM --platform=$BUILDPLATFORM golang:1.23-bookworm AS restic-builder
+FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS restic-builder
 
 ARG GOPROXY
 ARG BIN
diff --git a/Dockerfile-Windows b/Dockerfile-Windows
index 34ccc3b62..d20ab7748 100644
--- a/Dockerfile-Windows
+++ b/Dockerfile-Windows
@@ -15,7 +15,7 @@
 ARG OS_VERSION=1809
 
 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.23-bookworm AS velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
 
 ARG GOPROXY
 ARG BIN
diff --git a/Tiltfile b/Tiltfile
index 39de3b43e..fd0879cd4 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -52,7 +52,7 @@
 git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip()
 
 tilt_helper_dockerfile_header = """
 # Tilt image
-FROM golang:1.23 as tilt-helper
+FROM golang:1.24 as tilt-helper
 # Support live reloading with Tilt
 RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
diff --git a/go.mod b/go.mod
index 60913d171..733ebbbed 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,6 @@
 module github.com/vmware-tanzu/velero
 
-go 1.24
-
-toolchain go1.24.5
+go 1.24.0
 
 require (
 	cloud.google.com/go/storage v1.55.0
diff --git a/hack/build-image/Dockerfile b/hack/build-image/Dockerfile
index 904b6dabe..9e8153e73 100644
--- a/hack/build-image/Dockerfile
+++ b/hack/build-image/Dockerfile
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM --platform=$TARGETPLATFORM golang:1.23-bookworm
+FROM --platform=$TARGETPLATFORM golang:1.24-bookworm
 
 ARG GOPROXY
diff --git a/internal/credentials/file_store_test.go b/internal/credentials/file_store_test.go
index 374879c0d..d748ca29e 100644
--- a/internal/credentials/file_store_test.go
+++ b/internal/credentials/file_store_test.go
@@ -17,7 +17,6 @@ limitations under the License.
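The hunks from here onward are the test-suite half of the change and are largely mechanical: calls to context.Background() and context.TODO() in tests are replaced with t.Context(), the per-test context added to the testing package in Go 1.24 (it is canceled just before functions registered with t.Cleanup run), which is why the go.mod and Dockerfile bumps above move to Go 1.24. A minimal, self-contained sketch of the pattern — the test name and timings below are illustrative and not taken from the Velero code — looks like this:

// Illustrative only — not part of this patch. Shows the Go 1.24 pattern the
// hunks below apply: t.Context() replaces context.Background()/context.TODO()
// in tests, and derived contexts (timeouts, cancellation) still layer on top.
package example_test

import (
	"context"
	"testing"
	"time"
)

func TestUsesPerTestContext(t *testing.T) {
	// t.Context() is scoped to this test and is canceled just before
	// functions registered with t.Cleanup run.
	ctx := t.Context()

	// Deriving a deadline from the test context works as before.
	ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()

	select {
	case <-ctx.Done():
		t.Fatal("test context canceled before the work finished")
	case <-time.After(10 * time.Millisecond):
		// simulated work completed in time
	}
}

The same substitution is applied to longer-lived helpers below (for example the StartItemBlockWorkerPool calls), so anything watching that context can be wound down when the test finishes instead of outliving it on context.Background().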
package credentials import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -68,7 +67,7 @@ func TestNamespacedFileStore(t *testing.T) { client := velerotest.NewFakeControllerRuntimeClient(t) for _, secret := range tc.secrets { - require.NoError(t, client.Create(context.Background(), secret)) + require.NoError(t, client.Create(t.Context(), secret)) } fs := velerotest.NewFakeFileSystem() diff --git a/internal/delete/actions/csi/volumesnapshotcontent_action_test.go b/internal/delete/actions/csi/volumesnapshotcontent_action_test.go index 30ab85828..1c84cf7bf 100644 --- a/internal/delete/actions/csi/volumesnapshotcontent_action_test.go +++ b/internal/delete/actions/csi/volumesnapshotcontent_action_test.go @@ -194,13 +194,12 @@ func TestCheckVSCReadiness(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() crClient := velerotest.NewFakeControllerRuntimeClient(t) if test.createVSC { - require.NoError(t, crClient.Create(ctx, test.vsc)) + require.NoError(t, crClient.Create(t.Context(), test.vsc)) } - ready, err := checkVSCReadiness(ctx, test.vsc, crClient) + ready, err := checkVSCReadiness(t.Context(), test.vsc, crClient) require.Equal(t, test.ready, ready) if test.expectErr { require.Error(t, err) diff --git a/internal/delete/delete_item_action_handler_test.go b/internal/delete/delete_item_action_handler_test.go index a71de864d..6743cd1f9 100644 --- a/internal/delete/delete_item_action_handler_test.go +++ b/internal/delete/delete_item_action_handler_test.go @@ -17,7 +17,6 @@ limitations under the License. package delete import ( - "context" "io" "sort" "testing" @@ -221,9 +220,9 @@ func (h *harness) addResource(t *testing.T, resource *test.APIResource) { unstructuredObj := &unstructured.Unstructured{Object: obj} if resource.Namespaced { - _, err = h.DynamicClient.Resource(resource.GVR()).Namespace(item.GetNamespace()).Create(context.TODO(), unstructuredObj, metav1.CreateOptions{}) + _, err = h.DynamicClient.Resource(resource.GVR()).Namespace(item.GetNamespace()).Create(t.Context(), unstructuredObj, metav1.CreateOptions{}) } else { - _, err = h.DynamicClient.Resource(resource.GVR()).Create(context.TODO(), unstructuredObj, metav1.CreateOptions{}) + _, err = h.DynamicClient.Resource(resource.GVR()).Create(t.Context(), unstructuredObj, metav1.CreateOptions{}) } require.NoError(t, err) } diff --git a/internal/hook/wait_exec_hook_handler_test.go b/internal/hook/wait_exec_hook_handler_test.go index 8fccca5db..7f549ba32 100644 --- a/internal/hook/wait_exec_hook_handler_test.go +++ b/internal/hook/wait_exec_hook_handler_test.go @@ -736,7 +736,7 @@ func TestWaitExecHandleHooks(t *testing.T) { podCommandExecutor.On("ExecutePodCommand", mock.Anything, obj, e.pod.Namespace, e.pod.Name, e.name, e.hook).Return(e.error) } - ctx := context.Background() + ctx := t.Context() if test.sharedHooksContextTimeout > 0 { var ctxCancel context.CancelFunc ctx, ctxCancel = context.WithTimeout(ctx, test.sharedHooksContextTimeout) @@ -1268,7 +1268,7 @@ func TestRestoreHookTrackerUpdate(t *testing.T) { podCommandExecutor.On("ExecutePodCommand", mock.Anything, obj, e.pod.Namespace, e.pod.Name, e.name, e.hook).Return(e.error) } - ctx := context.Background() + ctx := t.Context() _ = h.HandleHooks(ctx, velerotest.NewLogger(), test.initialPod, test.byContainer, test.hookTracker, "restore1") _, actualFailed := test.hookTracker.Stat("restore1") assert.Equal(t, test.expectedFailed, actualFailed) diff --git a/internal/storage/storagelocation_test.go 
b/internal/storage/storagelocation_test.go index 285c5db60..44eabff48 100644 --- a/internal/storage/storagelocation_test.go +++ b/internal/storage/storagelocation_test.go @@ -16,7 +16,6 @@ limitations under the License. package storage import ( - "context" "testing" "time" @@ -165,10 +164,10 @@ func TestListBackupStorageLocations(t *testing.T) { client := fake.NewClientBuilder().WithScheme(util.VeleroScheme).WithRuntimeObjects(tt.backupLocations).Build() if tt.expectError { - _, err := ListBackupStorageLocations(context.Background(), client, "ns-1") + _, err := ListBackupStorageLocations(t.Context(), client, "ns-1") g.Expect(err).To(HaveOccurred()) } else { - _, err := ListBackupStorageLocations(context.Background(), client, "ns-1") + _, err := ListBackupStorageLocations(t.Context(), client, "ns-1") g.Expect(err).ToNot(HaveOccurred()) } }) diff --git a/internal/volume/volumes_information_test.go b/internal/volume/volumes_information_test.go index d8e7a82be..b39ba82f8 100644 --- a/internal/volume/volumes_information_test.go +++ b/internal/volume/volumes_information_test.go @@ -17,7 +17,6 @@ limitations under the License. package volume import ( - "context" "sync" "testing" @@ -738,7 +737,7 @@ func TestGenerateVolumeInfoFromPVB(t *testing.T) { } } if tc.pod != nil { - require.NoError(t, volumesInfo.crClient.Create(context.TODO(), tc.pod)) + require.NoError(t, volumesInfo.crClient.Create(t.Context(), tc.pod)) } volumesInfo.logger = logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) diff --git a/internal/volumehelper/volume_policy_helper_test.go b/internal/volumehelper/volume_policy_helper_test.go index e6837f3f6..7dd8ae361 100644 --- a/internal/volumehelper/volume_policy_helper_test.go +++ b/internal/volumehelper/volume_policy_helper_test.go @@ -17,7 +17,6 @@ limitations under the License. package volumehelper import ( - "context" "testing" "github.com/sirupsen/logrus" @@ -311,7 +310,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) { t.Run(tc.name, func(t *testing.T) { fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...) if tc.pod != nil { - fakeClient.Create(context.Background(), tc.pod) + fakeClient.Create(t.Context(), tc.pod) } var p *resourcepolicies.Policies @@ -676,7 +675,7 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) { t.Run(tc.name, func(t *testing.T) { fakeClient := velerotest.NewFakeControllerRuntimeClient(t, tc.resources...) 
if tc.pod != nil { - fakeClient.Create(context.Background(), tc.pod) + fakeClient.Create(t.Context(), tc.pod) } var p *resourcepolicies.Policies diff --git a/pkg/backup/actions/csi/pvc_action_test.go b/pkg/backup/actions/csi/pvc_action_test.go index e07d724ac..051c0174c 100644 --- a/pkg/backup/actions/csi/pvc_action_test.go +++ b/pkg/backup/actions/csi/pvc_action_test.go @@ -186,7 +186,7 @@ func TestExecute(t *testing.T) { if boolptr.IsSetToTrue(tc.backup.Spec.SnapshotMoveData) == true { go func() { var vsList snapshotv1api.VolumeSnapshotList - err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 10*time.Second, true, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(t.Context(), 1*time.Second, 10*time.Second, true, func(ctx context.Context) (bool, error) { err = pvcBIA.crClient.List(ctx, &vsList, &crclient.ListOptions{Namespace: tc.pvc.Namespace}) require.NoError(t, err) @@ -204,12 +204,12 @@ func TestExecute(t *testing.T) { BoundVolumeSnapshotContentName: &vscName, ReadyToUse: &readyToUse, } - err = pvcBIA.crClient.Update(context.Background(), &vsList.Items[0]) + err = pvcBIA.crClient.Update(t.Context(), &vsList.Items[0]) require.NoError(t, err) handleName := "testHandle" vsc := builder.ForVolumeSnapshotContent("testVSC").Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &handleName}).Result() - err = pvcBIA.crClient.Create(context.Background(), vsc) + err = pvcBIA.crClient.Create(t.Context(), vsc) require.NoError(t, err) }() } @@ -223,7 +223,7 @@ func TestExecute(t *testing.T) { if tc.expectedDataUpload != nil { dataUploadList := new(velerov2alpha1.DataUploadList) - err := crClient.List(context.Background(), dataUploadList, &crclient.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{velerov1api.BackupNameLabel: tc.backup.Name})}) + err := crClient.List(t.Context(), dataUploadList, &crclient.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{velerov1api.BackupNameLabel: tc.backup.Name})}) require.NoError(t, err) require.Len(t, dataUploadList.Items, 1) require.True(t, cmp.Equal(tc.expectedDataUpload, &dataUploadList.Items[0], cmpopts.IgnoreFields(velerov2alpha1.DataUpload{}, "ResourceVersion", "Name", "Spec.CSISnapshot.VolumeSnapshot"))) @@ -306,7 +306,7 @@ func TestProgress(t *testing.T) { } if tc.dataUpload != nil { - err := crClient.Create(context.Background(), tc.dataUpload) + err := crClient.Create(t.Context(), tc.dataUpload) require.NoError(t, err) } @@ -375,7 +375,7 @@ func TestCancel(t *testing.T) { crClient: crClient, } - err := crClient.Create(context.Background(), &tc.dataUpload) + err := crClient.Create(t.Context(), &tc.dataUpload) require.NoError(t, err) err = pvcBIA.Cancel(tc.operationID, tc.backup) @@ -384,7 +384,7 @@ func TestCancel(t *testing.T) { } du := new(velerov2alpha1.DataUpload) - err = crClient.Get(context.Background(), crclient.ObjectKey{Namespace: tc.dataUpload.Namespace, Name: tc.dataUpload.Name}, du) + err = crClient.Get(t.Context(), crclient.ObjectKey{Namespace: tc.dataUpload.Namespace, Name: tc.dataUpload.Name}, du) require.NoError(t, err) require.True(t, cmp.Equal(tc.expectedDataUpload, *du, cmpopts.IgnoreFields(velerov2alpha1.DataUpload{}, "ResourceVersion"))) @@ -526,7 +526,7 @@ func TestListGroupedPVCs(t *testing.T) { crClient: client, } - result, err := action.listGroupedPVCs(context.TODO(), tt.namespace, tt.labelKey, tt.groupValue) + result, err := action.listGroupedPVCs(t.Context(), tt.namespace, tt.labelKey, tt.groupValue) if tt.expectError { 
require.Error(t, err) } else { @@ -774,7 +774,7 @@ func TestDetermineVGSClass(t *testing.T) { action := &pvcBackupItemAction{crClient: client, log: logger} - result, err := action.determineVGSClass(context.TODO(), testDriver, tt.backup, tt.pvc) + result, err := action.determineVGSClass(t.Context(), testDriver, tt.backup, tt.pvc) if tt.expectError { require.Error(t, err) @@ -814,7 +814,7 @@ func TestCreateVolumeGroupSnapshot(t *testing.T) { crClient: crClient, } - vgs, err := action.createVolumeGroupSnapshot(context.TODO(), testBackup, testPVC, testLabelKey, testLabelValue, testVGSClass) + vgs, err := action.createVolumeGroupSnapshot(t.Context(), testBackup, testPVC, testLabelKey, testLabelValue, testVGSClass) require.NoError(t, err) require.NotNil(t, vgs) @@ -830,7 +830,7 @@ func TestCreateVolumeGroupSnapshot(t *testing.T) { // Check that it exists in fake client retrieved := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{} - err = crClient.Get(context.TODO(), crclient.ObjectKey{Name: vgs.Name, Namespace: vgs.Namespace}, retrieved) + err = crClient.Get(t.Context(), crclient.ObjectKey{Name: vgs.Name, Namespace: vgs.Namespace}, retrieved) require.NoError(t, err) } @@ -969,7 +969,7 @@ func TestWaitForVGSAssociatedVS(t *testing.T) { crClient: client, } - vsMap, err := action.waitForVGSAssociatedVS(context.TODO(), tt.groupedPVCs, vgs, 2*time.Second) + vsMap, err := action.waitForVGSAssociatedVS(t.Context(), tt.groupedPVCs, vgs, 2*time.Second) if tt.expectErr { if err == nil { @@ -1073,12 +1073,12 @@ func TestUpdateVGSCreatedVS(t *testing.T) { *tt.vs.Spec.Source.PersistentVolumeClaimName: tt.vs, } - err := action.updateVGSCreatedVS(context.TODO(), vsMap, vgs, backup) + err := action.updateVGSCreatedVS(t.Context(), vsMap, vgs, backup) require.NoError(t, err) // Fetch updated VS updated := &snapshotv1api.VolumeSnapshot{} - err = client.Get(context.TODO(), crclient.ObjectKey{Name: tt.vs.Name, Namespace: tt.vs.Namespace}, updated) + err = client.Get(t.Context(), crclient.ObjectKey{Name: tt.vs.Name, Namespace: tt.vs.Namespace}, updated) require.NoError(t, err) if tt.expectOwnerCleared { @@ -1149,7 +1149,7 @@ func TestPatchVGSCDeletionPolicy(t *testing.T) { crClient: client, } - err := action.patchVGSCDeletionPolicy(context.TODO(), vgs) + err := action.patchVGSCDeletionPolicy(t.Context(), vgs) if tt.expectErr { require.Error(t, err) return @@ -1157,7 +1157,7 @@ func TestPatchVGSCDeletionPolicy(t *testing.T) { require.NoError(t, err) updated := &volumegroupsnapshotv1beta1.VolumeGroupSnapshotContent{} - err = client.Get(context.TODO(), crclient.ObjectKey{Name: "test-vgsc"}, updated) + err = client.Get(t.Context(), crclient.ObjectKey{Name: "test-vgsc"}, updated) require.NoError(t, err) require.Equal(t, tt.expectedPolicy, updated.Spec.DeletionPolicy) }) @@ -1229,19 +1229,19 @@ func TestDeleteVGSAndVGSC(t *testing.T) { crClient: client, } - err := action.deleteVGSAndVGSC(context.TODO(), tt.vgs) + err := action.deleteVGSAndVGSC(t.Context(), tt.vgs) require.NoError(t, err) // Check VGSC is deleted if tt.expectVGSCDelete { got := &volumegroupsnapshotv1beta1.VolumeGroupSnapshotContent{} - err = client.Get(context.TODO(), crclient.ObjectKey{Name: "test-vgsc"}, got) + err = client.Get(t.Context(), crclient.ObjectKey{Name: "test-vgsc"}, got) assert.True(t, apierrors.IsNotFound(err), "expected VGSC to be deleted") } // Check VGS is deleted gotVGS := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{} - err = client.Get(context.TODO(), crclient.ObjectKey{Name: "test-vgs", Namespace: "ns"}, gotVGS) + err = 
client.Get(t.Context(), crclient.ObjectKey{Name: "test-vgs", Namespace: "ns"}, gotVGS) assert.True(t, apierrors.IsNotFound(err), "expected VGS to be deleted") }) } @@ -1321,7 +1321,7 @@ func TestFindExistingVSForBackup(t *testing.T) { crClient: client, } - vs, err := action.findExistingVSForBackup(context.TODO(), backupUID, backupName, pvcName, namespace) + vs, err := action.findExistingVSForBackup(t.Context(), backupUID, backupName, pvcName, namespace) require.NoError(t, err) if tt.expectNil { @@ -1377,7 +1377,7 @@ func TestWaitForVGSCBinding(t *testing.T) { crClient: client, } - err := action.waitForVGSCBinding(context.TODO(), tt.vgs, 1*time.Second) + err := action.waitForVGSCBinding(t.Context(), tt.vgs, 1*time.Second) if tt.expectErr { require.Error(t, err) @@ -1454,7 +1454,7 @@ func TestGetVGSByLabels(t *testing.T) { crClient: client, } - vgs, err := action.getVGSByLabels(context.TODO(), "test-ns", testLabels) + vgs, err := action.getVGSByLabels(t.Context(), "test-ns", testLabels) if tt.expectError != "" { if err == nil || !strings.Contains(err.Error(), tt.expectError) { diff --git a/pkg/backup/actions/csi/volumesnapshot_action_test.go b/pkg/backup/actions/csi/volumesnapshot_action_test.go index 793466b99..381daa2ee 100644 --- a/pkg/backup/actions/csi/volumesnapshot_action_test.go +++ b/pkg/backup/actions/csi/volumesnapshot_action_test.go @@ -17,7 +17,6 @@ limitations under the License. package csi import ( - "context" "fmt" "testing" @@ -132,7 +131,7 @@ func TestVSExecute(t *testing.T) { require.NoError(t, err) if tc.vsc != nil { - require.NoError(t, vsBIA.crClient.Create(context.TODO(), tc.vsc)) + require.NoError(t, vsBIA.crClient.Create(t.Context(), tc.vsc)) } _, additionalItems, _, itemToUpdate, err := vsBIA.Execute(&unstructured.UnstructuredList{Object: item}, tc.backup) @@ -262,12 +261,12 @@ func TestVSProgress(t *testing.T) { } if tc.vs != nil { - err := crClient.Create(context.Background(), tc.vs) + err := crClient.Create(t.Context(), tc.vs) require.NoError(t, err) } if tc.vsc != nil { - require.NoError(t, crClient.Create(context.TODO(), tc.vsc)) + require.NoError(t, crClient.Create(t.Context(), tc.vsc)) } progress, err := vsBIA.Progress(tc.operationID, tc.backup) diff --git a/pkg/backup/actions/remap_crd_version_action_test.go b/pkg/backup/actions/remap_crd_version_action_test.go index cd7296be1..8f21fb151 100644 --- a/pkg/backup/actions/remap_crd_version_action_test.go +++ b/pkg/backup/actions/remap_crd_version_action_test.go @@ -17,7 +17,6 @@ limitations under the License. package actions import ( - "context" "encoding/json" "fmt" "os" @@ -48,7 +47,7 @@ func TestRemapCRDVersionAction(t *testing.T) { // keep the same one for all 3 tests, since there's little value in recreating it b := builder.ForCustomResourceDefinitionV1Beta1("test.velero.io") c := b.Result() - _, err := betaClient.Create(context.TODO(), c, metav1.CreateOptions{}) + _, err := betaClient.Create(t.Context(), c, metav1.CreateOptions{}) require.NoError(t, err) a := NewRemapCRDVersionAction(velerotest.NewLogger(), betaClient, fakeDiscoveryHelper()) @@ -189,7 +188,7 @@ func TestRemapCRDVersionActionData(t *testing.T) { err = json.Unmarshal(f, &crd) require.NoError(t, err) - _, err = betaClient.Create(context.TODO(), &crd, metav1.CreateOptions{}) + _, err = betaClient.Create(t.Context(), &crd, metav1.CreateOptions{}) require.NoError(t, err) // Run method under test @@ -213,7 +212,7 @@ func TestRemapCRDVersionActionData(t *testing.T) { } // Clean up the item created in the test. 
- betaClient.Delete(context.TODO(), crd.Name, metav1.DeleteOptions{}) + betaClient.Delete(t.Context(), crd.Name, metav1.DeleteOptions{}) }) } } diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index 9cd05980c..1cf80fe0f 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -871,7 +871,7 @@ func TestBackupOldResourceFiltering(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -1052,7 +1052,7 @@ func TestCRDInclusion(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -1151,7 +1151,7 @@ func TestBackupResourceCohabitation(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -1250,7 +1250,7 @@ func TestBackupResourceOrdering(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -1361,7 +1361,7 @@ func (a *recordResourcesAction) WithSkippedCSISnapshotFlag(flag bool) *recordRes // TestBackupItemActionsForSkippedPV runs backups with backup item actions, and // verifies that the data in SkippedPVTracker is updated as expected. 
func TestBackupItemActionsForSkippedPV(t *testing.T) { - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() tests := []struct { @@ -1669,7 +1669,7 @@ func TestBackupActionsRunForCorrectItems(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -1754,7 +1754,7 @@ func TestBackupWithInvalidActions(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -1908,7 +1908,7 @@ func TestBackupActionModifications(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -2168,7 +2168,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -2429,7 +2429,7 @@ func TestItemBlockActionsRunForCorrectItems(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -2514,7 +2514,7 @@ func TestBackupWithInvalidItemBlockActions(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -2770,7 +2770,7 @@ func TestItemBlockActionRelatedItems(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -2929,7 +2929,7 @@ func (*fakeVolumeSnapshotter) DeleteSnapshot(snapshotID string) error { // struct in place of real volume snapshotters. 
func TestBackupWithSnapshots(t *testing.T) { // TODO: add more verification for skippedPVTracker - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() tests := []struct { name string @@ -3329,7 +3329,7 @@ func TestBackupWithAsyncOperations(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() tests := []struct { name string @@ -3484,7 +3484,7 @@ func TestBackupWithInvalidHooks(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -3958,7 +3958,7 @@ func TestBackupWithHooks(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -4182,7 +4182,7 @@ func TestBackupWithPodVolume(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -4199,7 +4199,7 @@ func TestBackupWithPodVolume(t *testing.T) { ) if tc.pod != nil { - require.NoError(t, h.backupper.kbClient.Create(context.Background(), tc.pod)) + require.NoError(t, h.backupper.kbClient.Create(t.Context(), tc.pod)) } h.backupper.podVolumeBackupperFactory = new(fakePodVolumeBackupperFactory) @@ -4296,9 +4296,9 @@ func (h *harness) addItems(t *testing.T, resource *test.APIResource) { unstructuredObj := &unstructured.Unstructured{Object: obj} if resource.Namespaced { - _, err = h.DynamicClient.Resource(resource.GVR()).Namespace(item.GetNamespace()).Create(context.TODO(), unstructuredObj, metav1.CreateOptions{}) + _, err = h.DynamicClient.Resource(resource.GVR()).Namespace(item.GetNamespace()).Create(t.Context(), unstructuredObj, metav1.CreateOptions{}) } else { - _, err = h.DynamicClient.Resource(resource.GVR()).Create(context.TODO(), unstructuredObj, metav1.CreateOptions{}) + _, err = h.DynamicClient.Resource(resource.GVR()).Create(t.Context(), unstructuredObj, metav1.CreateOptions{}) } require.NoError(t, err) } @@ -4314,7 +4314,7 @@ func newHarness(t *testing.T, itemBlockPool *ItemBlockWorkerPool) *harness { require.NoError(t, err) if itemBlockPool == nil { - itemBlockPool = StartItemBlockWorkerPool(context.Background(), 1, log) + itemBlockPool = StartItemBlockWorkerPool(t.Context(), 1, log) } return &harness{ APIServer: apiServer, @@ -5302,7 +5302,7 @@ func TestBackupNewResourceFiltering(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -5467,7 +5467,7 @@ func TestBackupNamespaces(t *testing.T) { }, } - itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, 
logrus.StandardLogger()) + itemBlockPool := StartItemBlockWorkerPool(t.Context(), 1, logrus.StandardLogger()) defer itemBlockPool.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/client/factory_test.go b/pkg/client/factory_test.go index 3c75e7e0a..5b9db37f1 100644 --- a/pkg/client/factory_test.go +++ b/pkg/client/factory_test.go @@ -16,7 +16,6 @@ limitations under the License. package client import ( - "context" "fmt" "os" "strings" @@ -125,7 +124,7 @@ func TestFactory(t *testing.T) { Version: "verion_test", } list, e := dynamicClient.Resource(*resource).Namespace(namespace).List( - context.Background(), + t.Context(), metav1.ListOptions{ LabelSelector: "none", }, diff --git a/pkg/cmd/cli/backup/create_test.go b/pkg/cmd/cli/backup/create_test.go index d3c86fd26..c8fd15baa 100644 --- a/pkg/cmd/cli/backup/create_test.go +++ b/pkg/cmd/cli/backup/create_test.go @@ -17,7 +17,6 @@ limitations under the License. package backup import ( - "context" "fmt" "strconv" "strings" @@ -139,7 +138,7 @@ func TestCreateOptions_BuildBackupFromSchedule(t *testing.T) { expectedBackupSpec := builder.ForBackup("test", cmdtest.VeleroNameSpace).IncludedNamespaces("test").Result().Spec schedule := builder.ForSchedule(cmdtest.VeleroNameSpace, "test").Template(expectedBackupSpec).ObjectMeta(builder.WithLabels("velero.io/test", "true"), builder.WithAnnotations("velero.io/test", "true")).Result() - o.client.Create(context.TODO(), schedule, &kbclient.CreateOptions{}) + o.client.Create(t.Context(), schedule, &kbclient.CreateOptions{}) t.Run("existing schedule", func(t *testing.T) { backup, err := o.BuildBackup(cmdtest.VeleroNameSpace) @@ -391,7 +390,7 @@ func TestCreateCommand(t *testing.T) { kbclient := velerotest.NewFakeControllerRuntimeClient(t).(kbclient.WithWatch) schedule := builder.ForSchedule(cmdtest.VeleroNameSpace, fromSchedule).Result() - kbclient.Create(context.Background(), schedule, &controllerclient.CreateOptions{}) + kbclient.Create(t.Context(), schedule, &controllerclient.CreateOptions{}) f.On("Namespace").Return(cmdtest.VeleroNameSpace) f.On("KubebuilderWatchClient").Return(kbclient, nil) diff --git a/pkg/cmd/cli/backup/delete_test.go b/pkg/cmd/cli/backup/delete_test.go index 420523dd3..85718541c 100644 --- a/pkg/cmd/cli/backup/delete_test.go +++ b/pkg/cmd/cli/backup/delete_test.go @@ -17,7 +17,6 @@ limitations under the License. package backup import ( - "context" "fmt" "os" "os/exec" @@ -43,8 +42,8 @@ func TestDeleteCommand(t *testing.T) { f := &factorymocks.Factory{} client := velerotest.NewFakeControllerRuntimeClient(t) - client.Create(context.Background(), builder.ForBackup(cmdtest.VeleroNameSpace, backup1).Result(), &controllerclient.CreateOptions{}) - client.Create(context.Background(), builder.ForBackup("default", backup2).Result(), &controllerclient.CreateOptions{}) + client.Create(t.Context(), builder.ForBackup(cmdtest.VeleroNameSpace, backup1).Result(), &controllerclient.CreateOptions{}) + client.Create(t.Context(), builder.ForBackup("default", backup2).Result(), &controllerclient.CreateOptions{}) f.On("KubebuilderClient").Return(client, nil) f.On("Namespace").Return(cmdtest.VeleroNameSpace) diff --git a/pkg/cmd/cli/backup/describe_test.go b/pkg/cmd/cli/backup/describe_test.go index 8db741fdb..7ca12ae3e 100644 --- a/pkg/cmd/cli/backup/describe_test.go +++ b/pkg/cmd/cli/backup/describe_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package backup import ( - "context" "fmt" "os" "os/exec" @@ -44,7 +43,7 @@ func TestNewDescribeCommand(t *testing.T) { clientConfig := rest.Config{} kbClient := test.NewFakeControllerRuntimeClient(t) - kbClient.Create(context.Background(), testBackup, &controllerclient.CreateOptions{}) + kbClient.Create(t.Context(), testBackup, &controllerclient.CreateOptions{}) f.On("ClientConfig").Return(&clientConfig, nil) f.On("Namespace").Return(cmdtest.VeleroNameSpace) diff --git a/pkg/cmd/cli/backup/download_test.go b/pkg/cmd/cli/backup/download_test.go index 058537e3b..aaada0db3 100644 --- a/pkg/cmd/cli/backup/download_test.go +++ b/pkg/cmd/cli/backup/download_test.go @@ -17,7 +17,6 @@ limitations under the License. package backup import ( - "context" "fmt" "os" "os/exec" @@ -41,9 +40,9 @@ func TestNewDownloadCommand(t *testing.T) { backupName := "backup-1" kbclient := velerotest.NewFakeControllerRuntimeClient(t) - err := kbclient.Create(context.Background(), builder.ForBackup(cmdtest.VeleroNameSpace, backupName).Result()) + err := kbclient.Create(t.Context(), builder.ForBackup(cmdtest.VeleroNameSpace, backupName).Result()) require.NoError(t, err) - err = kbclient.Create(context.Background(), builder.ForBackup(cmdtest.VeleroNameSpace, "bk-to-be-download").Result()) + err = kbclient.Create(t.Context(), builder.ForBackup(cmdtest.VeleroNameSpace, "bk-to-be-download").Result()) require.NoError(t, err) f.On("Namespace").Return(cmdtest.VeleroNameSpace) diff --git a/pkg/cmd/cli/backup/get_test.go b/pkg/cmd/cli/backup/get_test.go index 126f9d1d0..511c33f51 100644 --- a/pkg/cmd/cli/backup/get_test.go +++ b/pkg/cmd/cli/backup/get_test.go @@ -17,7 +17,6 @@ limitations under the License. package backup import ( - "context" "fmt" "os" "os/exec" @@ -45,7 +44,7 @@ func TestNewGetCommand(t *testing.T) { for _, backupName := range args { backup := builder.ForBackup(cmdtest.VeleroNameSpace, backupName).ObjectMeta(builder.WithLabels("abc", "abc")).Result() - err := client.Create(context.Background(), backup, &kbclient.CreateOptions{}) + err := client.Create(t.Context(), backup, &kbclient.CreateOptions{}) require.NoError(t, err) } diff --git a/pkg/cmd/cli/backup/logs_test.go b/pkg/cmd/cli/backup/logs_test.go index 2d6ee828e..264068bc9 100644 --- a/pkg/cmd/cli/backup/logs_test.go +++ b/pkg/cmd/cli/backup/logs_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package backup import ( - "context" "fmt" "strconv" "testing" @@ -62,7 +61,7 @@ func TestNewLogsCommand(t *testing.T) { kbClient := velerotest.NewFakeControllerRuntimeClient(t) backup := builder.ForBackup(cmdtest.VeleroNameSpace, backupName).Result() - err := kbClient.Create(context.Background(), backup, &kbclient.CreateOptions{}) + err := kbClient.Create(t.Context(), backup, &kbclient.CreateOptions{}) require.NoError(t, err) f.On("Namespace").Return(cmdtest.VeleroNameSpace) @@ -117,7 +116,7 @@ func TestNewLogsCommand(t *testing.T) { kbClient := velerotest.NewFakeControllerRuntimeClient(t) backup := builder.ForBackup(cmdtest.VeleroNameSpace, backupName).Phase(velerov1api.BackupPhaseCompleted).Result() - err := kbClient.Create(context.Background(), backup, &kbclient.CreateOptions{}) + err := kbClient.Create(t.Context(), backup, &kbclient.CreateOptions{}) require.NoError(t, err) f.On("Namespace").Return(cmdtest.VeleroNameSpace) diff --git a/pkg/cmd/cli/nodeagent/server_test.go b/pkg/cmd/cli/nodeagent/server_test.go index 96187636f..1ecd3ea0d 100644 --- a/pkg/cmd/cli/nodeagent/server_test.go +++ b/pkg/cmd/cli/nodeagent/server_test.go @@ -108,7 +108,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) { kubeClient := fake.NewSimpleClientset() for _, pod := range tt.pods { - _, err := kubeClient.CoreV1().Pods(pod.GetNamespace()).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := kubeClient.CoreV1().Pods(pod.GetNamespace()).Create(t.Context(), pod, metav1.CreateOptions{}) if err != nil { t.Error(err) } diff --git a/pkg/cmd/cli/restore/create_test.go b/pkg/cmd/cli/restore/create_test.go index 877de29d0..8cc369dea 100644 --- a/pkg/cmd/cli/restore/create_test.go +++ b/pkg/cmd/cli/restore/create_test.go @@ -17,7 +17,6 @@ limitations under the License. package restore import ( - "context" "testing" "time" @@ -157,9 +156,9 @@ func TestCreateCommand(t *testing.T) { kbclient := velerotest.NewFakeControllerRuntimeClient(t).(kbclient.WithWatch) schedule := builder.ForSchedule(cmdtest.VeleroNameSpace, fromSchedule).Result() - require.NoError(t, kbclient.Create(context.Background(), schedule, &controllerclient.CreateOptions{})) + require.NoError(t, kbclient.Create(t.Context(), schedule, &controllerclient.CreateOptions{})) backup := builder.ForBackup(cmdtest.VeleroNameSpace, "test-backup").FromSchedule(schedule).Phase(velerov1api.BackupPhaseCompleted).Result() - require.NoError(t, kbclient.Create(context.Background(), backup, &controllerclient.CreateOptions{})) + require.NoError(t, kbclient.Create(t.Context(), backup, &controllerclient.CreateOptions{})) f.On("Namespace").Return(cmdtest.VeleroNameSpace) f.On("KubebuilderWatchClient").Return(kbclient, nil) diff --git a/pkg/cmd/cli/restore/delete_test.go b/pkg/cmd/cli/restore/delete_test.go index 187aad39d..9085e4cd9 100644 --- a/pkg/cmd/cli/restore/delete_test.go +++ b/pkg/cmd/cli/restore/delete_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package restore import ( - "context" "fmt" "os" "os/exec" @@ -43,8 +42,8 @@ func TestDeleteCommand(t *testing.T) { f := &factorymocks.Factory{} client := velerotest.NewFakeControllerRuntimeClient(t) - client.Create(context.Background(), builder.ForRestore(cmdtest.VeleroNameSpace, restore1).Result(), &controllerclient.CreateOptions{}) - client.Create(context.Background(), builder.ForRestore("default", restore2).Result(), &controllerclient.CreateOptions{}) + client.Create(t.Context(), builder.ForRestore(cmdtest.VeleroNameSpace, restore1).Result(), &controllerclient.CreateOptions{}) + client.Create(t.Context(), builder.ForRestore("default", restore2).Result(), &controllerclient.CreateOptions{}) f.On("KubebuilderClient").Return(client, nil) f.On("Namespace").Return(cmdtest.VeleroNameSpace) diff --git a/pkg/cmd/cli/restore/describe_test.go b/pkg/cmd/cli/restore/describe_test.go index 9023dd44f..717fe2b7c 100644 --- a/pkg/cmd/cli/restore/describe_test.go +++ b/pkg/cmd/cli/restore/describe_test.go @@ -17,7 +17,6 @@ limitations under the License. package restore import ( - "context" "fmt" "os" "os/exec" @@ -44,7 +43,7 @@ func TestNewDescribeCommand(t *testing.T) { clientConfig := rest.Config{} kbClient := test.NewFakeControllerRuntimeClient(t) - kbClient.Create(context.Background(), testRestore, &controllerclient.CreateOptions{}) + kbClient.Create(t.Context(), testRestore, &controllerclient.CreateOptions{}) f.On("ClientConfig").Return(&clientConfig, nil) f.On("Namespace").Return(cmdtest.VeleroNameSpace) diff --git a/pkg/cmd/cli/restore/get_test.go b/pkg/cmd/cli/restore/get_test.go index fcb5a04c4..df2c25488 100644 --- a/pkg/cmd/cli/restore/get_test.go +++ b/pkg/cmd/cli/restore/get_test.go @@ -17,7 +17,6 @@ limitations under the License. package restore import ( - "context" "fmt" "os" "os/exec" @@ -44,7 +43,7 @@ func TestNewGetCommand(t *testing.T) { for _, restoreName := range args { restore := builder.ForRestore(cmdtest.VeleroNameSpace, restoreName).ObjectMeta(builder.WithLabels("abc", "abc")).Result() - err := client.Create(context.Background(), restore, &kbclient.CreateOptions{}) + err := client.Create(t.Context(), restore, &kbclient.CreateOptions{}) require.NoError(t, err) } diff --git a/pkg/cmd/server/server_test.go b/pkg/cmd/server/server_test.go index 528e08c48..ebf0b478b 100644 --- a/pkg/cmd/server/server_test.go +++ b/pkg/cmd/server/server_test.go @@ -17,7 +17,6 @@ limitations under the License. package server import ( - "context" "errors" "testing" @@ -334,14 +333,14 @@ func Test_markInProgressBackupsFailed(t *testing.T) { }, }). Build() - markInProgressBackupsFailed(context.Background(), c, "velero", logrus.New()) + markInProgressBackupsFailed(t.Context(), c, "velero", logrus.New()) backup01 := &velerov1api.Backup{} - require.NoError(t, c.Get(context.Background(), client.ObjectKey{Namespace: "velero", Name: "backup01"}, backup01)) + require.NoError(t, c.Get(t.Context(), client.ObjectKey{Namespace: "velero", Name: "backup01"}, backup01)) assert.Equal(t, velerov1api.BackupPhaseFailed, backup01.Status.Phase) backup02 := &velerov1api.Backup{} - require.NoError(t, c.Get(context.Background(), client.ObjectKey{Namespace: "velero", Name: "backup02"}, backup02)) + require.NoError(t, c.Get(t.Context(), client.ObjectKey{Namespace: "velero", Name: "backup02"}, backup02)) assert.Equal(t, velerov1api.BackupPhaseCompleted, backup02.Status.Phase) } @@ -374,14 +373,14 @@ func Test_markInProgressRestoresFailed(t *testing.T) { }, }). 
Build() - markInProgressRestoresFailed(context.Background(), c, "velero", logrus.New()) + markInProgressRestoresFailed(t.Context(), c, "velero", logrus.New()) restore01 := &velerov1api.Restore{} - require.NoError(t, c.Get(context.Background(), client.ObjectKey{Namespace: "velero", Name: "restore01"}, restore01)) + require.NoError(t, c.Get(t.Context(), client.ObjectKey{Namespace: "velero", Name: "restore01"}, restore01)) assert.Equal(t, velerov1api.RestorePhaseFailed, restore01.Status.Phase) restore02 := &velerov1api.Restore{} - require.NoError(t, c.Get(context.Background(), client.ObjectKey{Namespace: "velero", Name: "restore02"}, restore02)) + require.NoError(t, c.Get(t.Context(), client.ObjectKey{Namespace: "velero", Name: "restore02"}, restore02)) assert.Equal(t, velerov1api.RestorePhaseCompleted, restore02.Status.Phase) } @@ -408,22 +407,22 @@ func Test_setDefaultBackupLocation(t *testing.T) { }, }). Build() - setDefaultBackupLocation(context.Background(), c, "velero", "default", logrus.New()) + setDefaultBackupLocation(t.Context(), c, "velero", "default", logrus.New()) defaultLocation := &velerov1api.BackupStorageLocation{} - require.NoError(t, c.Get(context.Background(), client.ObjectKey{Namespace: "velero", Name: "default"}, defaultLocation)) + require.NoError(t, c.Get(t.Context(), client.ObjectKey{Namespace: "velero", Name: "default"}, defaultLocation)) assert.True(t, defaultLocation.Spec.Default) nonDefaultLocation := &velerov1api.BackupStorageLocation{} - require.NoError(t, c.Get(context.Background(), client.ObjectKey{Namespace: "velero", Name: "non-default"}, nonDefaultLocation)) + require.NoError(t, c.Get(t.Context(), client.ObjectKey{Namespace: "velero", Name: "non-default"}, nonDefaultLocation)) assert.False(t, nonDefaultLocation.Spec.Default) // no default location specified c = fake.NewClientBuilder().WithScheme(scheme).Build() - err := setDefaultBackupLocation(context.Background(), c, "velero", "", logrus.New()) + err := setDefaultBackupLocation(t.Context(), c, "velero", "", logrus.New()) require.NoError(t, err) // no default location created - err = setDefaultBackupLocation(context.Background(), c, "velero", "default", logrus.New()) + err = setDefaultBackupLocation(t.Context(), c, "velero", "default", logrus.New()) assert.NoError(t, err) } diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index 2a806243d..eb20af907 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -18,7 +18,6 @@ package controller import ( "bytes" - "context" "fmt" "io" "reflect" @@ -136,11 +135,11 @@ func TestProcessBackupNonProcessedItems(t *testing.T) { kbClient: velerotest.NewFakeControllerRuntimeClient(t), formatFlag: formatFlag, logger: logger, - workerPool: pkgbackup.StartItemBlockWorkerPool(context.Background(), 1, logger), + workerPool: pkgbackup.StartItemBlockWorkerPool(t.Context(), 1, logger), } defer c.workerPool.Stop() if test.backup != nil { - require.NoError(t, c.kbClient.Create(context.Background(), test.backup)) + require.NoError(t, c.kbClient.Create(t.Context(), test.backup)) } actualResult, err := c.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}}) assert.Equal(t, ctrl.Result{}, actualResult) @@ -235,18 +234,18 @@ func TestProcessBackupValidationFailures(t *testing.T) { clock: &clock.RealClock{}, formatFlag: formatFlag, metrics: metrics.NewServerMetrics(), - workerPool: 
pkgbackup.StartItemBlockWorkerPool(context.Background(), 1, logger), + workerPool: pkgbackup.StartItemBlockWorkerPool(t.Context(), 1, logger), } defer c.workerPool.Stop() require.NotNil(t, test.backup) - require.NoError(t, c.kbClient.Create(context.Background(), test.backup)) + require.NoError(t, c.kbClient.Create(t.Context(), test.backup)) actualResult, err := c.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}}) assert.Equal(t, ctrl.Result{}, actualResult) require.NoError(t, err) res := &velerov1api.Backup{} - err = c.kbClient.Get(context.Background(), kbclient.ObjectKey{Namespace: test.backup.Namespace, Name: test.backup.Name}, res) + err = c.kbClient.Get(t.Context(), kbclient.ObjectKey{Namespace: test.backup.Namespace, Name: test.backup.Name}, res) require.NoError(t, err) assert.Equal(t, velerov1api.BackupPhaseFailedValidation, res.Status.Phase) @@ -300,7 +299,7 @@ func TestBackupLocationLabel(t *testing.T) { defaultBackupLocation: test.backupLocation.Name, clock: &clock.RealClock{}, formatFlag: formatFlag, - workerPool: pkgbackup.StartItemBlockWorkerPool(context.Background(), 1, logger), + workerPool: pkgbackup.StartItemBlockWorkerPool(t.Context(), 1, logger), } defer c.workerPool.Stop() @@ -397,7 +396,7 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) { defaultBackupTTL: defaultBackupTTL.Duration, clock: testclocks.NewFakeClock(now), formatFlag: formatFlag, - workerPool: pkgbackup.StartItemBlockWorkerPool(context.Background(), 1, logger), + workerPool: pkgbackup.StartItemBlockWorkerPool(t.Context(), 1, logger), } defer c.workerPool.Stop() @@ -473,7 +472,7 @@ func TestDefaultBackupTTL(t *testing.T) { defaultBackupTTL: defaultBackupTTL.Duration, clock: testclocks.NewFakeClock(now), formatFlag: formatFlag, - workerPool: pkgbackup.StartItemBlockWorkerPool(context.Background(), 1, logger), + workerPool: pkgbackup.StartItemBlockWorkerPool(t.Context(), 1, logger), } defer c.workerPool.Stop() @@ -534,7 +533,7 @@ func TestPrepareBackupRequest_SetsVGSLabelKey(t *testing.T) { defaultVGSLabelKey: test.serverFlagKey, discoveryHelper: discoveryHelper, clock: testclocks.NewFakeClock(now), - workerPool: pkgbackup.StartItemBlockWorkerPool(context.Background(), 1, logger), + workerPool: pkgbackup.StartItemBlockWorkerPool(t.Context(), 1, logger), } defer c.workerPool.Stop() @@ -636,7 +635,7 @@ func TestDefaultVolumesToResticDeprecation(t *testing.T) { clock: &clock.RealClock{}, formatFlag: formatFlag, defaultVolumesToFsBackup: test.globalVal, - workerPool: pkgbackup.StartItemBlockWorkerPool(context.Background(), 1, logger), + workerPool: pkgbackup.StartItemBlockWorkerPool(t.Context(), 1, logger), } defer c.workerPool.Stop() @@ -1504,7 +1503,7 @@ func TestProcessBackupCompletions(t *testing.T) { } if test.volumeSnapshot != nil { - require.NoError(t, fakeGlobalClient.Create(context.TODO(), test.volumeSnapshot)) + require.NoError(t, fakeGlobalClient.Create(t.Context(), test.volumeSnapshot)) } apiServer := velerotest.NewAPIServer(t) @@ -1539,7 +1538,7 @@ func TestProcessBackupCompletions(t *testing.T) { backupper: backupper, formatFlag: formatFlag, globalCRClient: fakeGlobalClient, - workerPool: pkgbackup.StartItemBlockWorkerPool(context.Background(), 1, logger), + workerPool: pkgbackup.StartItemBlockWorkerPool(t.Context(), 1, logger), } defer c.workerPool.Stop() @@ -1566,10 +1565,10 @@ func TestProcessBackupCompletions(t *testing.T) { // add the test's backup to the informer/lister store require.NotNil(t, 
test.backup) - require.NoError(t, c.kbClient.Create(context.Background(), test.backup)) + require.NoError(t, c.kbClient.Create(t.Context(), test.backup)) // add the default backup storage location to the clientset and the informer/lister store - require.NoError(t, fakeClient.Create(context.Background(), defaultBackupLocation)) + require.NoError(t, fakeClient.Create(t.Context(), defaultBackupLocation)) // Enable CSI feature flag for SnapshotDataMovement test. if test.enableCSI { @@ -1586,7 +1585,7 @@ func TestProcessBackupCompletions(t *testing.T) { } res := &velerov1api.Backup{} - err = c.kbClient.Get(context.Background(), kbclient.ObjectKey{Namespace: test.backup.Namespace, Name: test.backup.Name}, res) + err = c.kbClient.Get(t.Context(), kbclient.ObjectKey{Namespace: test.backup.Namespace, Name: test.backup.Name}, res) require.NoError(t, err) res.ResourceVersion = "" assert.Equal(t, test.expectedResult, res) @@ -1747,7 +1746,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) { logger: logger, defaultSnapshotLocations: test.defaultLocations, kbClient: velerotest.NewFakeControllerRuntimeClient(t), - workerPool: pkgbackup.StartItemBlockWorkerPool(context.Background(), 1, logger), + workerPool: pkgbackup.StartItemBlockWorkerPool(t.Context(), 1, logger), } defer c.workerPool.Stop() @@ -1755,7 +1754,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) { backup := test.backup.DeepCopy() backup.Spec.VolumeSnapshotLocations = test.backup.Spec.VolumeSnapshotLocations for _, location := range test.locations { - require.NoError(t, c.kbClient.Create(context.Background(), location)) + require.NoError(t, c.kbClient.Create(t.Context(), location)) } providerLocations, errs := c.validateAndGetSnapshotLocations(backup) @@ -1933,7 +1932,7 @@ func TestPatchResourceWorksWithStatus(t *testing.T) { }, } // check original exists - if err := fakeClient.Get(context.Background(), kbclient.ObjectKeyFromObject(tt.args.updated), fromCluster); err != nil { + if err := fakeClient.Get(t.Context(), kbclient.ObjectKeyFromObject(tt.args.updated), fromCluster); err != nil { t.Errorf("PatchResource() error = %v", err) } // ignore resourceVersion @@ -1943,7 +1942,7 @@ func TestPatchResourceWorksWithStatus(t *testing.T) { t.Errorf("PatchResource() error = %v, wantErr %v", err, tt.wantErr) } // check updated exists - if err := fakeClient.Get(context.Background(), kbclient.ObjectKeyFromObject(tt.args.updated), fromCluster); err != nil { + if err := fakeClient.Get(t.Context(), kbclient.ObjectKeyFromObject(tt.args.updated), fromCluster); err != nil { t.Errorf("PatchResource() error = %v", err) } diff --git a/pkg/controller/backup_deletion_controller_test.go b/pkg/controller/backup_deletion_controller_test.go index a9b2c2035..ab3687438 100644 --- a/pkg/controller/backup_deletion_controller_test.go +++ b/pkg/controller/backup_deletion_controller_test.go @@ -177,7 +177,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { BackupName: input.Spec.BackupName, }, } - err := td.fakeClient.Create(context.TODO(), existing) + err := td.fakeClient.Create(t.Context(), existing) require.NoError(t, err) existing2 := &velerov1api.DeleteBackupRequest{ @@ -192,12 +192,12 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { BackupName: "some-other-backup", }, } - err = td.fakeClient.Create(context.TODO(), existing2) + err = td.fakeClient.Create(t.Context(), existing2) require.NoError(t, err) - _, err = td.controller.Reconcile(context.TODO(), td.req) + _, err = td.controller.Reconcile(t.Context(), td.req) 
require.NoError(t, err) // verify "existing" is deleted - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: existing.Namespace, Name: existing.Name, }, &velerov1api.DeleteBackupRequest{}) @@ -205,7 +205,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err) // verify "existing2" remains - assert.NoError(t, td.fakeClient.Get(context.TODO(), types.NamespacedName{ + assert.NoError(t, td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: existing2.Namespace, Name: existing2.Name, }, &velerov1api.DeleteBackupRequest{})) @@ -215,7 +215,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { td := setupBackupDeletionControllerTest(t, dbr) td.controller.backupTracker.Add(td.req.Namespace, dbr.Spec.BackupName) - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) require.NoError(t, err) res := &velerov1api.DeleteBackupRequest{} @@ -229,7 +229,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { t.Run("unable to find backup", func(t *testing.T) { td := setupBackupDeletionControllerTest(t, defaultTestDbr()) - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) require.NoError(t, err) res := &velerov1api.DeleteBackupRequest{} @@ -244,7 +244,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { td := setupBackupDeletionControllerTest(t, defaultTestDbr(), backup) - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) require.NoError(t, err) res := &velerov1api.DeleteBackupRequest{} @@ -261,7 +261,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { td := setupBackupDeletionControllerTest(t, defaultTestDbr(), location, backup) - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) require.NoError(t, err) res := &velerov1api.DeleteBackupRequest{} @@ -278,7 +278,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { td := setupBackupDeletionControllerTest(t, defaultTestDbr(), location, backup) - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) require.NoError(t, err) res := &velerov1api.DeleteBackupRequest{} @@ -357,7 +357,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { td.backupStore.On("GetBackupContents", input.Spec.BackupName).Return(io.NopCloser(bytes.NewReader([]byte("hello world"))), nil) td.backupStore.On("DeleteBackup", input.Spec.BackupName).Return(nil) - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) require.NoError(t, err) // the dbr should be deleted @@ -369,26 +369,26 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { } // backup CR, restore CR restore-1 and restore-2 should be deleted - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: backup.Name, }, &velerov1api.Backup{}) assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err) - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: 
velerov1api.DefaultNamespace, Name: "restore-1", }, &velerov1api.Restore{}) assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err) - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: "restore-2", }, &velerov1api.Restore{}) assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err) // restore-3 should remain - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: "restore-3", }, &velerov1api.Restore{}) @@ -481,7 +481,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { td.volumeSnapshotter.SnapshotsTaken.Insert("snap-1") - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) require.NoError(t, err) // the dbr should be deleted @@ -493,26 +493,26 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { } // backup CR, restore CR restore-1 and restore-2 should be deleted - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: backup.Name, }, &velerov1api.Backup{}) assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err) - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: "restore-1", }, &velerov1api.Restore{}) assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err) - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: "restore-2", }, &velerov1api.Restore{}) assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err) // restore-3 should remain - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: "restore-3", }, &velerov1api.Restore{}) @@ -582,7 +582,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { td.backupStore.On("GetBackupVolumeSnapshots", input.Spec.BackupName).Return(snapshots, nil) td.backupStore.On("DeleteBackup", input.Spec.BackupName).Return(nil) - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) require.NoError(t, err) td.backupStore.AssertNotCalled(t, "GetBackupContents", mock.Anything) @@ -597,7 +597,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { } // backup CR should be deleted - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: backup.Name, }, &velerov1api.Backup{}) @@ -674,7 +674,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { td.backupStore.On("GetBackupContents", input.Spec.BackupName).Return(nil, fmt.Errorf("error downloading tarball")) td.backupStore.On("DeleteBackup", input.Spec.BackupName).Return(nil) - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) 
require.NoError(t, err) td.backupStore.AssertCalled(t, "GetBackupContents", input.Spec.BackupName) @@ -689,14 +689,14 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { } // backup CR should be deleted - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: backup.Name, }, &velerov1api.Backup{}) assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err) // leaked CSI snapshot should be deleted - err = td.fakeClient.Get(context.TODO(), types.NamespacedName{ + err = td.fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: "user-ns", Name: "vs-1", }, &snapshotv1api.VolumeSnapshot{}) @@ -714,7 +714,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { input.Status.Phase = velerov1api.DeleteBackupRequestPhaseProcessed td := setupBackupDeletionControllerTest(t, input) td.backupStore.On("DeleteBackup", mock.Anything).Return(nil) - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) require.NoError(t, err) res := &velerov1api.DeleteBackupRequest{} @@ -733,7 +733,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { td := setupBackupDeletionControllerTest(t, input) td.backupStore.On("DeleteBackup", mock.Anything).Return(nil) - _, err := td.controller.Reconcile(context.TODO(), td.req) + _, err := td.controller.Reconcile(t.Context(), td.req) require.NoError(t, err) res := &velerov1api.DeleteBackupRequest{} @@ -899,7 +899,7 @@ func TestGetSnapshotsInBackup(t *testing.T) { Items: test.podVolumeBackups, }) - res, err := getSnapshotsInBackup(context.TODO(), veleroBackup, clientBuilder.Build()) + res, err := getSnapshotsInBackup(t.Context(), veleroBackup, clientBuilder.Build()) require.NoError(t, err) assert.True(t, reflect.DeepEqual(res, test.expected)) @@ -1066,7 +1066,7 @@ func TestDeleteMovedSnapshots(t *testing.T) { batchDeleteSnapshotFunc = batchDeleteFail } - errs := controller.deleteMovedSnapshots(context.Background(), veleroBackup) + errs := controller.deleteMovedSnapshots(t.Context(), veleroBackup) if test.expected == nil { assert.Nil(t, errs) } else { diff --git a/pkg/controller/backup_finalizer_controller_test.go b/pkg/controller/backup_finalizer_controller_test.go index 57ef5d50f..ade3ce2cd 100644 --- a/pkg/controller/backup_finalizer_controller_test.go +++ b/pkg/controller/backup_finalizer_controller_test.go @@ -18,7 +18,6 @@ package controller import ( "bytes" - "context" "io" "testing" "time" @@ -227,12 +226,12 @@ func TestBackupFinalizerReconcile(t *testing.T) { backupStore.On("PutBackupVolumeInfos", mock.Anything, mock.Anything).Return(nil) pluginManager.On("GetBackupItemActionsV2").Return(nil, nil) backupper.On("FinalizeBackup", mock.Anything, mock.Anything, mock.Anything, mock.Anything, framework.BackupItemActionResolverV2{}, mock.Anything, mock.Anything).Return(nil) - _, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}}) + _, err := reconciler.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}}) gotErr := err != nil assert.Equal(t, test.expectError, gotErr) backupAfter := velerov1api.Backup{} - err = fakeClient.Get(context.TODO(), types.NamespacedName{ + err = fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: test.backup.Namespace, 
Name: test.backup.Name, }, &backupAfter) diff --git a/pkg/controller/backup_operations_controller_test.go b/pkg/controller/backup_operations_controller_test.go index 00ffd7f7a..0a25676d3 100644 --- a/pkg/controller/backup_operations_controller_test.go +++ b/pkg/controller/backup_operations_controller_test.go @@ -17,7 +17,6 @@ limitations under the License. package controller import ( - "context" "testing" "time" @@ -296,12 +295,12 @@ func TestBackupOperationsReconcile(t *testing.T) { }, nil) pluginManager.On("GetBackupItemActionV2", operation.Spec.BackupItemAction).Return(bia, nil) } - _, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}}) + _, err := reconciler.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}}) gotErr := err != nil assert.Equal(t, test.expectError, gotErr) backupAfter := velerov1api.Backup{} - err = fakeClient.Get(context.TODO(), types.NamespacedName{ + err = fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: test.backup.Namespace, Name: test.backup.Name, }, &backupAfter) diff --git a/pkg/controller/backup_repository_controller_test.go b/pkg/controller/backup_repository_controller_test.go index 6f1226f1d..601552faa 100644 --- a/pkg/controller/backup_repository_controller_test.go +++ b/pkg/controller/backup_repository_controller_test.go @@ -86,12 +86,12 @@ func mockBackupRepositoryCR() *velerov1api.BackupRepository { func TestPatchBackupRepository(t *testing.T) { rr := mockBackupRepositoryCR() reconciler := mockBackupRepoReconciler(t, "", nil, nil) - err := reconciler.Client.Create(context.TODO(), rr) + err := reconciler.Client.Create(t.Context(), rr) require.NoError(t, err) - err = reconciler.patchBackupRepository(context.Background(), rr, repoReady()) + err = reconciler.patchBackupRepository(t.Context(), rr, repoReady()) require.NoError(t, err) assert.Equal(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase) - err = reconciler.patchBackupRepository(context.Background(), rr, repoNotReady("not ready")) + err = reconciler.patchBackupRepository(t.Context(), rr, repoNotReady("not ready")) require.NoError(t, err) assert.NotEqual(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase) } @@ -102,7 +102,7 @@ func TestCheckNotReadyRepo(t *testing.T) { rr.Spec.ResticIdentifier = "fake-identifier" rr.Spec.VolumeNamespace = "volume-ns-1" reconciler := mockBackupRepoReconciler(t, "PrepareRepo", rr, nil) - err := reconciler.Client.Create(context.TODO(), rr) + err := reconciler.Client.Create(t.Context(), rr) require.NoError(t, err) location := velerov1api.BackupStorageLocation{ Spec: velerov1api.BackupStorageLocationSpec{ @@ -114,7 +114,7 @@ func TestCheckNotReadyRepo(t *testing.T) { }, } - _, err = reconciler.checkNotReadyRepo(context.TODO(), rr, &location, reconciler.logger) + _, err = reconciler.checkNotReadyRepo(t.Context(), rr, &location, reconciler.logger) require.NoError(t, err) assert.Equal(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase) assert.Equal(t, "s3:test.amazonaws.com/bucket/restic/volume-ns-1", rr.Spec.ResticIdentifier) @@ -371,13 +371,13 @@ func TestRunMaintenanceIfDue(t *testing.T) { t.Run(test.name, func(t *testing.T) { reconciler := mockBackupRepoReconciler(t, "", test.repo, nil) reconciler.clock = &fakeClock{now} - err := reconciler.Client.Create(context.TODO(), test.repo) + err := reconciler.Client.Create(t.Context(), test.repo) require.NoError(t, 
err) funcStartMaintenanceJob = test.startJobFunc funcWaitMaintenanceJobComplete = test.waitJobFunc - err = reconciler.runMaintenanceIfDue(context.TODO(), test.repo, velerotest.NewLogger()) + err = reconciler.runMaintenanceIfDue(t.Context(), test.repo, velerotest.NewLogger()) if test.expectedErr == "" { require.NoError(t, err) } @@ -404,7 +404,7 @@ func TestInitializeRepo(t *testing.T) { rr := mockBackupRepositoryCR() rr.Spec.BackupStorageLocation = "default" reconciler := mockBackupRepoReconciler(t, "PrepareRepo", rr, nil) - err := reconciler.Client.Create(context.TODO(), rr) + err := reconciler.Client.Create(t.Context(), rr) require.NoError(t, err) location := velerov1api.BackupStorageLocation{ Spec: velerov1api.BackupStorageLocationSpec{ @@ -416,7 +416,7 @@ func TestInitializeRepo(t *testing.T) { }, } - err = reconciler.initializeRepo(context.TODO(), rr, &location, reconciler.logger) + err = reconciler.initializeRepo(t.Context(), rr, &location, reconciler.logger) require.NoError(t, err) assert.Equal(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase) } @@ -473,9 +473,9 @@ func TestBackupRepoReconcile(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { reconciler := mockBackupRepoReconciler(t, "", test.repo, nil) - err := reconciler.Client.Create(context.TODO(), test.repo) + err := reconciler.Client.Create(t.Context(), test.repo) require.NoError(t, err) - _, err = reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.repo.Namespace, Name: "repo"}}) + _, err = reconciler.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.repo.Namespace, Name: "repo"}}) if test.expectNil { assert.NoError(t, err) } else { @@ -765,7 +765,7 @@ func TestGetBackupRepositoryConfig(t *testing.T) { fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build() - result, err := getBackupRepositoryConfig(context.Background(), fakeClient, test.congiName, velerov1api.DefaultNamespace, test.repoName, test.repoType, velerotest.NewLogger()) + result, err := getBackupRepositoryConfig(t.Context(), fakeClient, test.congiName, velerov1api.DefaultNamespace, test.repoName, test.repoType, velerotest.NewLogger()) if test.expectedErr != "" { assert.EqualError(t, err, test.expectedErr) @@ -1053,7 +1053,7 @@ func TestRecallMaintenance(t *testing.T) { lastTm := backupRepo.Status.LastMaintenanceTime - err := r.recallMaintenance(context.TODO(), backupRepo, velerotest.NewLogger()) + err := r.recallMaintenance(t.Context(), backupRepo, velerotest.NewLogger()) if test.expectedErr != "" { assert.EqualError(t, err, test.expectedErr) } else { @@ -1467,7 +1467,7 @@ func TestDeleteOldMaintenanceJob(t *testing.T) { t.Run(test.name, func(t *testing.T) { crClient := velerotest.NewFakeControllerRuntimeClient(t, test.repo, test.bsl) for _, job := range test.maintenanceJobs { - require.NoError(t, crClient.Create(context.TODO(), &job)) + require.NoError(t, crClient.Create(t.Context(), &job)) } repoLocker := repository.NewRepoLocker() @@ -1487,7 +1487,7 @@ func TestDeleteOldMaintenanceJob(t *testing.T) { nil, ) - _, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.repo.Namespace, Name: "repo"}}) + _, err := reconciler.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.repo.Namespace, Name: "repo"}}) if test.expectNil { require.NoError(t, err) } else { @@ -1496,7 +1496,7 @@ func TestDeleteOldMaintenanceJob(t 
*testing.T) { if len(test.maintenanceJobs) > 0 { jobList := new(batchv1api.JobList) - require.NoError(t, reconciler.Client.List(context.TODO(), jobList, &client.ListOptions{Namespace: "velero"})) + require.NoError(t, reconciler.Client.List(t.Context(), jobList, &client.ListOptions{Namespace: "velero"})) assert.Len(t, jobList.Items, 1) } }) diff --git a/pkg/controller/backup_storage_location_controller_test.go b/pkg/controller/backup_storage_location_controller_test.go index faeec5477..b07c4a1fb 100644 --- a/pkg/controller/backup_storage_location_controller_test.go +++ b/pkg/controller/backup_storage_location_controller_test.go @@ -17,7 +17,6 @@ limitations under the License. package controller import ( - "context" "testing" "time" @@ -246,7 +245,7 @@ func TestEnsureSingleDefaultBSL(t *testing.T) { require.NoError(t, velerov1api.AddToScheme(scheme.Scheme)) t.Run(test.name, func(t *testing.T) { r := &backupStorageLocationReconciler{ - ctx: context.Background(), + ctx: t.Context(), client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(&test.locations).Build(), defaultBackupLocationInfo: test.defaultBackupInfo, metrics: metrics.NewServerMetrics(), @@ -291,14 +290,14 @@ func TestBSLReconcile(t *testing.T) { require.NoError(t, velerov1api.AddToScheme(scheme.Scheme)) t.Run(test.name, func(t *testing.T) { r := &backupStorageLocationReconciler{ - ctx: context.Background(), + ctx: t.Context(), client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(&test.locationList).Build(), newPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, metrics: metrics.NewServerMetrics(), log: velerotest.NewLogger(), } - result, err := r.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: velerov1api.DefaultNamespace, Name: "location-1"}}) + result, err := r.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: velerov1api.DefaultNamespace, Name: "location-1"}}) assert.Equal(t, test.expectedError, err) assert.Equal(t, ctrl.Result{}, result) }) diff --git a/pkg/controller/data_download_controller_test.go b/pkg/controller/data_download_controller_test.go index 756fd03a8..fd24162b2 100644 --- a/pkg/controller/data_download_controller_test.go +++ b/pkg/controller/data_download_controller_test.go @@ -479,12 +479,12 @@ func TestDataDownloadReconcile(t *testing.T) { require.NoError(t, err) if !test.notCreateDD { - err = r.client.Create(context.Background(), test.dd) + err = r.client.Create(t.Context(), test.dd) require.NoError(t, err) } if test.needDelete { - err = r.client.Delete(context.Background(), test.dd) + err = r.client.Delete(t.Context(), test.dd) require.NoError(t, err) } @@ -627,7 +627,7 @@ func TestDataDownloadReconcile(t *testing.T) { func TestOnDataDownloadFailed(t *testing.T) { for _, getErr := range []bool{true, false} { - ctx := context.TODO() + ctx := t.Context() needErrs := []bool{getErr, false, false, false} r, err := initDataDownloadReconciler(t, nil, needErrs...) require.NoError(t, err) @@ -653,7 +653,7 @@ func TestOnDataDownloadFailed(t *testing.T) { func TestOnDataDownloadCancelled(t *testing.T) { for _, getErr := range []bool{true, false} { - ctx := context.TODO() + ctx := t.Context() needErrs := []bool{getErr, false, false, false} r, err := initDataDownloadReconciler(t, nil, needErrs...) 
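Where the context is stored rather than passed per call (the reconciler literals above now set ctx: t.Context()), the stored context shares the test's lifetime instead of being a never-canceled Background context. A stdlib-only sketch of that lifetime, with a hypothetical fakeReconciler standing in for the real controllers:

package example

import (
	"context"
	"testing"
)

// fakeReconciler is a hypothetical stand-in for a controller that stores a
// long-lived context, as the reconcilers patched above do.
type fakeReconciler struct {
	ctx context.Context
}

func (r *fakeReconciler) stopped() bool {
	select {
	case <-r.ctx.Done():
		return true
	default:
		return false
	}
}

func TestStoredContextLifetime(t *testing.T) {
	// The stored context is scoped to the test: live while the test runs,
	// canceled automatically once it finishes.
	r := &fakeReconciler{ctx: t.Context()}

	if r.stopped() {
		t.Fatal("test-scoped context should not be canceled mid-test")
	}
}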
require.NoError(t, err) @@ -695,7 +695,7 @@ func TestOnDataDownloadCompleted(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() needErrs := []bool{test.isGetErr, false, false, false} r, err := initDataDownloadReconciler(t, nil, needErrs...) r.restoreExposer = func() exposer.GenericRestoreExposer { @@ -760,7 +760,7 @@ func TestOnDataDownloadProgress(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initDataDownloadReconciler(t, nil, test.needErrs...) require.NoError(t, err) @@ -772,7 +772,7 @@ func TestOnDataDownloadProgress(t *testing.T) { namespace := dd.Namespace duName := dd.Name // Add the DataDownload object to the fake client - require.NoError(t, r.client.Create(context.Background(), dd)) + require.NoError(t, r.client.Create(t.Context(), dd)) // Create a Progress object progress := &uploader.Progress{ @@ -841,11 +841,11 @@ func TestFindDataDownloadForPod(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() assert.NoError(t, r.client.Create(ctx, test.pod)) assert.NoError(t, r.client.Create(ctx, test.du)) // Call the findSnapshotRestoreForPod function - requests := r.findSnapshotRestoreForPod(context.Background(), test.pod) + requests := r.findSnapshotRestoreForPod(t.Context(), test.pod) test.checkFunc(test.du, requests) r.client.Delete(ctx, test.du, &kbclient.DeleteOptions{}) if test.pod != nil { @@ -881,7 +881,7 @@ func TestAcceptDataDownload(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initDataDownloadReconcilerWithError(t, nil, test.needErrs...) require.NoError(t, err) @@ -925,7 +925,7 @@ func TestOnDdPrepareTimeout(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initDataDownloadReconcilerWithError(t, nil, test.needErrs...) require.NoError(t, err) @@ -970,7 +970,7 @@ func TestTryCancelDataDownload(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initDataDownloadReconcilerWithError(t, nil, test.needErrs...) require.NoError(t, err) @@ -1027,7 +1027,7 @@ func TestUpdateDataDownloadWithRetry(t *testing.T) { for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - ctx, cancelFunc := context.WithTimeout(context.TODO(), time.Second*5) + ctx, cancelFunc := context.WithTimeout(t.Context(), time.Second*5) defer cancelFunc() r, err := initDataDownloadReconciler(t, nil, tc.needErrs...) require.NoError(t, err) @@ -1145,7 +1145,7 @@ func TestAttemptDataDownloadResume(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initDataDownloadReconciler(t, nil, test.needErrs...) 
r.nodeName = "node-1" require.NoError(t, err) @@ -1172,21 +1172,21 @@ func TestAttemptDataDownloadResume(t *testing.T) { // Verify DataDownload marked as Canceled for _, duName := range test.cancelledDataDownloads { dataDownload := &velerov2alpha1api.DataDownload{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataDownload) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: duName}, dataDownload) require.NoError(t, err) assert.True(t, dataDownload.Spec.Cancel) } // Verify DataDownload marked as Accepted for _, duName := range test.acceptedDataDownloads { dataUpload := &velerov2alpha1api.DataDownload{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) require.NoError(t, err) assert.Equal(t, velerov2alpha1api.DataDownloadPhaseAccepted, dataUpload.Status.Phase) } // Verify DataDownload marked as Prepared for _, duName := range test.prepareddDataDownloads { dataUpload := &velerov2alpha1api.DataDownload{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) require.NoError(t, err) assert.Equal(t, velerov2alpha1api.DataDownloadPhasePrepared, dataUpload.Status.Phase) } @@ -1263,7 +1263,7 @@ func TestResumeCancellableRestore(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initDataDownloadReconciler(t, nil, false) r.nodeName = "node-1" require.NoError(t, err) diff --git a/pkg/controller/data_upload_controller_test.go b/pkg/controller/data_upload_controller_test.go index cc2284f43..db4c29c1e 100644 --- a/pkg/controller/data_upload_controller_test.go +++ b/pkg/controller/data_upload_controller_test.go @@ -616,12 +616,12 @@ func TestReconcile(t *testing.T) { require.NoError(t, err) if !test.notCreateDU { - err = r.client.Create(context.Background(), test.du) + err = r.client.Create(t.Context(), test.du) require.NoError(t, err) } if test.needDelete { - err = r.client.Delete(context.Background(), test.du) + err = r.client.Delete(t.Context(), test.du) require.NoError(t, err) } @@ -737,7 +737,7 @@ func TestReconcile(t *testing.T) { } func TestOnDataUploadCancelled(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initDataUploaderReconciler() require.NoError(t, err) // Create a DataUpload object @@ -785,7 +785,7 @@ func TestOnDataUploadProgress(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initDataUploaderReconciler(test.needErrs...) 
require.NoError(t, err) @@ -797,7 +797,7 @@ func TestOnDataUploadProgress(t *testing.T) { namespace := du.Namespace duName := du.Name // Add the DataUpload object to the fake client - require.NoError(t, r.client.Create(context.Background(), du)) + require.NoError(t, r.client.Create(t.Context(), du)) // Create a Progress object progress := &uploader.Progress{ @@ -820,7 +820,7 @@ func TestOnDataUploadProgress(t *testing.T) { } func TestOnDataUploadFailed(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initDataUploaderReconciler() require.NoError(t, err) @@ -840,7 +840,7 @@ func TestOnDataUploadFailed(t *testing.T) { } func TestOnDataUploadCompleted(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initDataUploaderReconciler() require.NoError(t, err) // Create a DataUpload object @@ -903,11 +903,11 @@ func TestFindDataUploadForPod(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() assert.NoError(t, r.client.Create(ctx, test.pod)) assert.NoError(t, r.client.Create(ctx, test.du)) // Call the findDataUploadForPod function - requests := r.findDataUploadForPod(context.Background(), test.pod) + requests := r.findDataUploadForPod(t.Context(), test.pod) test.checkFunc(test.du, requests) r.client.Delete(ctx, test.du, &kbclient.DeleteOptions{}) if test.pod != nil { @@ -957,7 +957,7 @@ func TestAcceptDataUpload(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initDataUploaderReconcilerWithError(test.needErrs...) require.NoError(t, err) @@ -1001,7 +1001,7 @@ func TestOnDuPrepareTimeout(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initDataUploaderReconcilerWithError(test.needErrs...) require.NoError(t, err) @@ -1046,7 +1046,7 @@ func TestTryCancelDataUpload(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initDataUploaderReconcilerWithError(test.needErrs...) require.NoError(t, err) @@ -1103,7 +1103,7 @@ func TestUpdateDataUploadWithRetry(t *testing.T) { for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - ctx, cancelFunc := context.WithTimeout(context.TODO(), time.Second*5) + ctx, cancelFunc := context.WithTimeout(t.Context(), time.Second*5) defer cancelFunc() r, err := initDataUploaderReconciler(tc.needErrs...) require.NoError(t, err) @@ -1212,7 +1212,7 @@ func TestAttemptDataUploadResume(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initDataUploaderReconciler(test.needErrs...) 
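Tests that need a bounded wait, such as TestUpdateDataUploadWithRetry above, keep context.WithTimeout but derive it from t.Context(), so the derived context is canceled at its own deadline or at test completion, whichever comes first; that is also why these files retain their "context" import while files that only used context.TODO()/Background() drop it. A stdlib-only sketch of the derived deadline:

package example

import (
	"context"
	"testing"
	"time"
)

func TestDerivedTimeout(t *testing.T) {
	// The derived context inherits cancellation from t.Context(): it ends
	// at its own deadline or when the test completes, whichever is first.
	ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
	defer cancel()

	deadline, ok := ctx.Deadline()
	if !ok || time.Until(deadline) > 5*time.Second {
		t.Fatalf("expected a deadline within 5s, got %v (ok=%v)", deadline, ok)
	}
}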
r.nodeName = "node-1" require.NoError(t, err) @@ -1236,28 +1236,28 @@ func TestAttemptDataUploadResume(t *testing.T) { // Verify DataUploads marked as Canceled for _, duName := range test.cancelledDataUploads { dataUpload := &velerov2alpha1api.DataUpload{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) require.NoError(t, err) assert.True(t, dataUpload.Spec.Cancel) } // Verify DataUploads marked as Accepted for _, duName := range test.acceptedDataUploads { dataUpload := &velerov2alpha1api.DataUpload{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) require.NoError(t, err) assert.Equal(t, velerov2alpha1api.DataUploadPhaseAccepted, dataUpload.Status.Phase) } // Verify DataUploads marked as Prepared for _, duName := range test.prepareddDataUploads { dataUpload := &velerov2alpha1api.DataUpload{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) require.NoError(t, err) assert.Equal(t, velerov2alpha1api.DataUploadPhasePrepared, dataUpload.Status.Phase) } // Verify DataUploads marked as InProgress for _, duName := range test.inProgressDataUploads { dataUpload := &velerov2alpha1api.DataUpload{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) require.NoError(t, err) assert.Equal(t, velerov2alpha1api.DataUploadPhaseInProgress, dataUpload.Status.Phase) } @@ -1339,7 +1339,7 @@ func TestResumeCancellableBackup(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initDataUploaderReconciler() r.nodeName = "node-1" require.NoError(t, err) diff --git a/pkg/controller/gc_controller_test.go b/pkg/controller/gc_controller_test.go index 197deb050..754b46e0a 100644 --- a/pkg/controller/gc_controller_test.go +++ b/pkg/controller/gc_controller_test.go @@ -17,7 +17,6 @@ limitations under the License. package controller import ( - "context" "testing" "time" @@ -145,7 +144,7 @@ func TestGCReconcile(t *testing.T) { fakeClient := velerotest.NewFakeControllerRuntimeClient(t, initObjs...) 
reconciler := mockGCReconciler(fakeClient, fakeClock, defaultGCFrequency) - _, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}}) + _, err := reconciler.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}}) gotErr := err != nil assert.Equal(t, test.expectError, gotErr) }) diff --git a/pkg/controller/pod_volume_backup_controller_test.go b/pkg/controller/pod_volume_backup_controller_test.go index f27a5a77f..f58a7d1a8 100644 --- a/pkg/controller/pod_volume_backup_controller_test.go +++ b/pkg/controller/pod_volume_backup_controller_test.go @@ -465,12 +465,12 @@ func TestPVBReconcile(t *testing.T) { require.NoError(t, err) if !test.notCreatePvb { - err = r.client.Create(context.Background(), test.pvb) + err = r.client.Create(t.Context(), test.pvb) require.NoError(t, err) } if test.needDelete { - err = r.client.Delete(context.Background(), test.pvb) + err = r.client.Delete(t.Context(), test.pvb) require.NoError(t, err) } @@ -579,7 +579,7 @@ func TestPVBReconcile(t *testing.T) { } func TestOnPVBCancelled(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initPVBReconciler() require.NoError(t, err) pvb := pvbBuilder().Result() @@ -626,7 +626,7 @@ func TestOnPVBProgress(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initPVBReconciler(test.needErrs...) require.NoError(t, err) @@ -638,7 +638,7 @@ func TestOnPVBProgress(t *testing.T) { namespace := pvb.Namespace pvbName := pvb.Name - require.NoError(t, r.client.Create(context.Background(), pvb)) + require.NoError(t, r.client.Create(t.Context(), pvb)) // Create a Progress object progress := &uploader.Progress{ @@ -658,7 +658,7 @@ func TestOnPVBProgress(t *testing.T) { } func TestOnPvbFailed(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initPVBReconciler() require.NoError(t, err) @@ -677,7 +677,7 @@ func TestOnPvbFailed(t *testing.T) { } func TestOnPvbCompleted(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initPVBReconciler() require.NoError(t, err) @@ -741,11 +741,11 @@ func TestFindPvbForPod(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() assert.NoError(t, r.client.Create(ctx, test.pod)) assert.NoError(t, r.client.Create(ctx, test.pvb)) - requests := r.findPVBForPod(context.Background(), test.pod) + requests := r.findPVBForPod(t.Context(), test.pod) test.checkFunc(test.pvb, requests) r.client.Delete(ctx, test.pvb, &client.DeleteOptions{}) if test.pod != nil { @@ -774,7 +774,7 @@ func TestAcceptPvb(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initPVBReconcilerWithError(test.needErrs...) require.NoError(t, err) @@ -817,7 +817,7 @@ func TestOnPvbPrepareTimeout(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initPVBReconcilerWithError(test.needErrs...) require.NoError(t, err) @@ -862,7 +862,7 @@ func TestTryCancelPvb(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initPVBReconcilerWithError(test.needErrs...) 
require.NoError(t, err) @@ -919,7 +919,7 @@ func TestUpdatePvbWithRetry(t *testing.T) { for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - ctx, cancelFunc := context.WithTimeout(context.TODO(), time.Second*5) + ctx, cancelFunc := context.WithTimeout(t.Context(), time.Second*5) defer cancelFunc() r, err := initPVBReconciler(tc.needErrs...) require.NoError(t, err) @@ -1028,7 +1028,7 @@ func TestAttemptPVBResume(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initPVBReconciler(test.needErrs...) r.nodeName = "node-1" require.NoError(t, err) @@ -1051,28 +1051,28 @@ func TestAttemptPVBResume(t *testing.T) { for _, pvbName := range test.cancelledPvbs { pvb := &velerov1api.PodVolumeBackup{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: pvbName}, pvb) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: pvbName}, pvb) require.NoError(t, err) assert.True(t, pvb.Spec.Cancel) } for _, pvbName := range test.acceptedPvbs { pvb := &velerov1api.PodVolumeBackup{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: pvbName}, pvb) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: pvbName}, pvb) require.NoError(t, err) assert.Equal(t, velerov1api.PodVolumeBackupPhaseAccepted, pvb.Status.Phase) } for _, pvbName := range test.preparedPvbs { pvb := &velerov1api.PodVolumeBackup{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: pvbName}, pvb) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: pvbName}, pvb) require.NoError(t, err) assert.Equal(t, velerov1api.PodVolumeBackupPhasePrepared, pvb.Status.Phase) } for _, pvbName := range test.inProgressPvbs { pvb := &velerov1api.PodVolumeBackup{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: pvbName}, pvb) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: pvbName}, pvb) require.NoError(t, err) assert.Equal(t, velerov1api.PodVolumeBackupPhaseInProgress, pvb.Status.Phase) } @@ -1149,7 +1149,7 @@ func TestResumeCancellablePodVolumeBackup(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initPVBReconciler() r.nodeName = "node-1" require.NoError(t, err) diff --git a/pkg/controller/pod_volume_restore_controller_legacy_test.go b/pkg/controller/pod_volume_restore_controller_legacy_test.go index 87bb00a13..a107603e0 100644 --- a/pkg/controller/pod_volume_restore_controller_legacy_test.go +++ b/pkg/controller/pod_volume_restore_controller_legacy_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package controller import ( - "context" "testing" "github.com/sirupsen/logrus" @@ -43,7 +42,7 @@ func TestFindVolumeRestoresForPodLegacy(t *testing.T) { Client: clientBuilder.Build(), logger: logrus.New(), } - requests := reconciler.findVolumeRestoresForPod(context.Background(), pod) + requests := reconciler.findVolumeRestoresForPod(t.Context(), pod) assert.Empty(t, requests) // contain one matching PVR @@ -89,6 +88,6 @@ func TestFindVolumeRestoresForPodLegacy(t *testing.T) { }, }, }).Build() - requests = reconciler.findVolumeRestoresForPod(context.Background(), pod) + requests = reconciler.findVolumeRestoresForPod(t.Context(), pod) assert.Len(t, requests, 1) } diff --git a/pkg/controller/pod_volume_restore_controller_test.go b/pkg/controller/pod_volume_restore_controller_test.go index af6637e6d..f748ba507 100644 --- a/pkg/controller/pod_volume_restore_controller_test.go +++ b/pkg/controller/pod_volume_restore_controller_test.go @@ -204,7 +204,7 @@ func TestShouldProcess(t *testing.T) { for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var objs []runtime.Object if ts.obj != nil { @@ -513,7 +513,7 @@ func TestFindPVRForTargetPod(t *testing.T) { client: clientBuilder.Build(), logger: logrus.New(), } - requests := reconciler.findPVRForTargetPod(context.Background(), pod) + requests := reconciler.findPVRForTargetPod(t.Context(), pod) assert.Empty(t, requests) // contain one matching PVR @@ -537,7 +537,7 @@ func TestFindPVRForTargetPod(t *testing.T) { }, }, }).Build() - requests = reconciler.findPVRForTargetPod(context.Background(), pod) + requests = reconciler.findPVRForTargetPod(t.Context(), pod) assert.Len(t, requests, 1) } @@ -932,12 +932,12 @@ func TestPodVolumeRestoreReconcile(t *testing.T) { require.NoError(t, err) if !test.notCreatePVR { - err = r.client.Create(context.Background(), test.pvr) + err = r.client.Create(t.Context(), test.pvr) require.NoError(t, err) } if test.needDelete { - err = r.client.Delete(context.Background(), test.pvr) + err = r.client.Delete(t.Context(), test.pvr) require.NoError(t, err) } @@ -1084,7 +1084,7 @@ func TestPodVolumeRestoreReconcile(t *testing.T) { func TestOnPodVolumeRestoreFailed(t *testing.T) { for _, getErr := range []bool{true, false} { - ctx := context.TODO() + ctx := t.Context() needErrs := []bool{getErr, false, false, false} r, err := initPodVolumeRestoreReconciler(nil, []client.Object{}, needErrs...) require.NoError(t, err) @@ -1110,7 +1110,7 @@ func TestOnPodVolumeRestoreFailed(t *testing.T) { func TestOnPodVolumeRestoreCancelled(t *testing.T) { for _, getErr := range []bool{true, false} { - ctx := context.TODO() + ctx := t.Context() needErrs := []bool{getErr, false, false, false} r, err := initPodVolumeRestoreReconciler(nil, nil, needErrs...) require.NoError(t, err) @@ -1152,7 +1152,7 @@ func TestOnPodVolumeRestoreCompleted(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() needErrs := []bool{test.isGetErr, false, false, false} r, err := initPodVolumeRestoreReconciler(nil, []client.Object{}, needErrs...) r.exposer = func() exposer.PodVolumeExposer { @@ -1212,7 +1212,7 @@ func TestOnPodVolumeRestoreProgress(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initPodVolumeRestoreReconciler(nil, []client.Object{}, test.needErrs...) 
require.NoError(t, err) @@ -1224,7 +1224,7 @@ func TestOnPodVolumeRestoreProgress(t *testing.T) { namespace := pvr.Namespace pvrName := pvr.Name - require.NoError(t, r.client.Create(context.Background(), pvr)) + require.NoError(t, r.client.Create(t.Context(), pvr)) // Create a Progress object progress := &uploader.Progress{ @@ -1290,11 +1290,11 @@ func TestFindPVBForRestorePod(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() assert.NoError(t, r.client.Create(ctx, test.pod)) assert.NoError(t, r.client.Create(ctx, test.pvr)) // Call the findSnapshotRestoreForPod function - requests := r.findPVRForRestorePod(context.Background(), test.pod) + requests := r.findPVRForRestorePod(t.Context(), test.pod) test.checkFunc(test.pvr, requests) r.client.Delete(ctx, test.pvr, &kbclient.DeleteOptions{}) if test.pod != nil { @@ -1330,7 +1330,7 @@ func TestOnPVRPrepareTimeout(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initPodVolumeRestoreReconcilerWithError(nil, []client.Object{}, test.needErrs...) require.NoError(t, err) @@ -1375,7 +1375,7 @@ func TestTryCancelPVR(t *testing.T) { }, } for _, test := range tests { - ctx := context.Background() + ctx := t.Context() r, err := initPodVolumeRestoreReconcilerWithError(nil, []client.Object{}, test.needErrs...) require.NoError(t, err) @@ -1432,7 +1432,7 @@ func TestUpdatePVRWithRetry(t *testing.T) { for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - ctx, cancelFunc := context.WithTimeout(context.TODO(), time.Second*5) + ctx, cancelFunc := context.WithTimeout(t.Context(), time.Second*5) defer cancelFunc() r, err := initPodVolumeRestoreReconciler(nil, []client.Object{}, tc.needErrs...) require.NoError(t, err) @@ -1512,7 +1512,7 @@ func TestAttemptPVRResume(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initPodVolumeRestoreReconciler(nil, []client.Object{}, test.needErrs...) 
r.nodeName = "node-1" require.NoError(t, err) @@ -1538,21 +1538,21 @@ func TestAttemptPVRResume(t *testing.T) { for _, pvrName := range test.cancelledPvrs { pvr := &velerov1api.PodVolumeRestore{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: pvrName}, pvr) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: pvrName}, pvr) require.NoError(t, err) assert.True(t, pvr.Spec.Cancel) } for _, pvrName := range test.acceptedPvrs { pvr := &velerov1api.PodVolumeRestore{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: pvrName}, pvr) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: pvrName}, pvr) require.NoError(t, err) assert.Equal(t, velerov1api.PodVolumeRestorePhaseAccepted, pvr.Status.Phase) } for _, pvrName := range test.preparedPvrs { pvr := &velerov1api.PodVolumeRestore{} - err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: pvrName}, pvr) + err := r.client.Get(t.Context(), types.NamespacedName{Namespace: "velero", Name: pvrName}, pvr) require.NoError(t, err) assert.Equal(t, velerov1api.PodVolumeRestorePhasePrepared, pvr.Status.Phase) } @@ -1629,7 +1629,7 @@ func TestResumeCancellablePodVolumeRestore(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() r, err := initPodVolumeRestoreReconciler(nil, []client.Object{}) r.nodeName = "node-1" require.NoError(t, err) diff --git a/pkg/controller/restore_controller_test.go b/pkg/controller/restore_controller_test.go index 7ef36aae2..3acc03d2a 100644 --- a/pkg/controller/restore_controller_test.go +++ b/pkg/controller/restore_controller_test.go @@ -18,7 +18,6 @@ package controller import ( "bytes" - "context" "io" "testing" "time" @@ -103,7 +102,7 @@ func TestFetchBackupInfo(t *testing.T) { defer backupStore.AssertExpectations(t) r := NewRestoreReconciler( - context.Background(), + t.Context(), velerov1api.DefaultNamespace, restorer, fakeClient, @@ -121,11 +120,11 @@ func TestFetchBackupInfo(t *testing.T) { if test.backupStoreError == nil { for _, itm := range test.informerLocations { - require.NoError(t, r.kbClient.Create(context.Background(), itm)) + require.NoError(t, r.kbClient.Create(t.Context(), itm)) } for _, itm := range test.informerBackups { - require.NoError(t, r.kbClient.Create(context.Background(), itm)) + require.NoError(t, r.kbClient.Create(t.Context(), itm)) } } @@ -180,11 +179,11 @@ func TestProcessQueueItemSkips(t *testing.T) { ) if test.restore != nil { - require.NoError(t, fakeClient.Create(context.Background(), test.restore)) + require.NoError(t, fakeClient.Create(t.Context(), test.restore)) } r := NewRestoreReconciler( - context.Background(), + t.Context(), velerov1api.DefaultNamespace, restorer, fakeClient, @@ -200,7 +199,7 @@ func TestProcessQueueItemSkips(t *testing.T) { 10*time.Minute, ) - _, err := r.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{ + _, err := r.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{ Namespace: test.namespace, Name: test.restoreName, }}) @@ -503,7 +502,7 @@ func TestRestoreReconcile(t *testing.T) { }() r := NewRestoreReconciler( - context.Background(), + t.Context(), velerov1api.DefaultNamespace, restorer, fakeClient, @@ -521,15 +520,15 @@ func TestRestoreReconcile(t *testing.T) { r.clock = clocktesting.NewFakeClock(now) if test.location != nil { - require.NoError(t, 
r.kbClient.Create(context.Background(), test.location)) + require.NoError(t, r.kbClient.Create(t.Context(), test.location)) } if test.backup != nil { - require.NoError(t, r.kbClient.Create(context.Background(), test.backup)) + require.NoError(t, r.kbClient.Create(t.Context(), test.backup)) } if test.restore != nil { isDeletionTimestampSet := test.restore.DeletionTimestamp != nil - require.NoError(t, r.kbClient.Create(context.Background(), test.restore)) + require.NoError(t, r.kbClient.Create(t.Context(), test.restore)) // because of the changes introduced by https://github.com/kubernetes-sigs/controller-runtime/commit/7a66d580c0c53504f5b509b45e9300cc18a1cc30 // the fake client ignores the DeletionTimestamp when calling the Create(), // so call Delete() here @@ -596,7 +595,7 @@ func TestRestoreReconcile(t *testing.T) { } //err = r.processQueueItem(key) - _, err = r.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{ + _, err = r.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{ Namespace: test.restore.Namespace, Name: test.restore.Name, }}) @@ -687,7 +686,7 @@ func TestValidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { ) r := NewRestoreReconciler( - context.Background(), + t.Context(), velerov1api.DefaultNamespace, nil, fakeClient, @@ -714,7 +713,7 @@ func TestValidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { } // no backups created from the schedule: fail validation - require.NoError(t, r.kbClient.Create(context.Background(), defaultBackup(). + require.NoError(t, r.kbClient.Create(t.Context(), defaultBackup(). ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "non-matching-schedule")). Phase(velerov1api.BackupPhaseCompleted). Result())) @@ -725,7 +724,7 @@ func TestValidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { // no completed backups created from the schedule: fail validation require.NoError(t, r.kbClient.Create( - context.Background(), + t.Context(), defaultBackup(). ObjectMeta( builder.WithName("backup-2"), @@ -742,7 +741,7 @@ func TestValidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { // multiple completed backups created from the schedule: use most recent now := time.Now() - require.NoError(t, r.kbClient.Create(context.Background(), + require.NoError(t, r.kbClient.Create(t.Context(), defaultBackup(). ObjectMeta( builder.WithName("foo"), @@ -755,7 +754,7 @@ func TestValidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { )) location := builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result() - require.NoError(t, r.kbClient.Create(context.Background(), location)) + require.NoError(t, r.kbClient.Create(t.Context(), location)) restore = &velerov1api.Restore{ ObjectMeta: metav1.ObjectMeta{ @@ -783,7 +782,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) { ) r := NewRestoreReconciler( - context.Background(), + t.Context(), velerov1api.DefaultNamespace, nil, fakeClient, @@ -814,10 +813,10 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) { } location := builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result() - require.NoError(t, r.kbClient.Create(context.Background(), location)) + require.NoError(t, r.kbClient.Create(t.Context(), location)) require.NoError(t, r.kbClient.Create( - context.Background(), + t.Context(), defaultBackup(). 
ObjectMeta( builder.WithName("backup-1"), @@ -852,7 +851,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) { "sub.yml": "version: v1\nresourceModifierRules:\n- conditions:\n groupResource: persistentvolumeclaims\n resourceNameRegex: \".*\"\n namespaces:\n - bar\n - foo\n patches:\n - operation: replace\n path: \"/spec/storageClassName\"\n value: \"premium\"\n - operation: remove\n path: \"/metadata/labels/test\"\n\n\n", }, } - require.NoError(t, r.kbClient.Create(context.Background(), cm1)) + require.NoError(t, r.kbClient.Create(t.Context(), cm1)) r.validateAndComplete(restore1) assert.Nil(t, restore1.Status.ValidationErrors) @@ -881,7 +880,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) { "sub.yml": "version1: v1\nresourceModifierRules:\n- conditions:\n groupResource: persistentvolumeclaims\n resourceNameRegex: \".*\"\n namespaces:\n - bar\n - foo\n patches:\n - operation: replace\n path: \"/spec/storageClassName\"\n value: \"premium\"\n - operation: remove\n path: \"/metadata/labels/test\"\n\n\n", }, } - require.NoError(t, r.kbClient.Create(context.Background(), invalidVersionCm)) + require.NoError(t, r.kbClient.Create(t.Context(), invalidVersionCm)) r.validateAndComplete(restore2) assert.Contains(t, restore2.Status.ValidationErrors[0], "Error in parsing resource modifiers provided in configmap") @@ -909,7 +908,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) { "sub.yml": "version: v1\nresourceModifierRules:\n- conditions:\n groupResource: persistentvolumeclaims\n resourceNameRegex: \".*\"\n namespaces:\n - bar\n - foo\n patches:\n - operation: invalid\n path: \"/spec/storageClassName\"\n value: \"premium\"\n - operation: remove\n path: \"/metadata/labels/test\"\n\n\n", }, } - require.NoError(t, r.kbClient.Create(context.Background(), invalidOperatorCm)) + require.NoError(t, r.kbClient.Create(t.Context(), invalidOperatorCm)) r.validateAndComplete(restore3) assert.Contains(t, restore3.Status.ValidationErrors[0], "Validation error in resource modifiers provided in configmap") diff --git a/pkg/controller/restore_finalizer_controller_test.go b/pkg/controller/restore_finalizer_controller_test.go index 18f2b3c48..56366a360 100644 --- a/pkg/controller/restore_finalizer_controller_test.go +++ b/pkg/controller/restore_finalizer_controller_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package controller import ( - "context" "fmt" "syscall" "testing" @@ -146,21 +145,21 @@ func TestRestoreFinalizerReconcile(t *testing.T) { r.clock = testclocks.NewFakeClock(now) if test.restore != nil && test.restore.Namespace == velerov1api.DefaultNamespace { - require.NoError(t, r.Client.Create(context.Background(), test.restore)) + require.NoError(t, r.Client.Create(t.Context(), test.restore)) backupStore.On("GetRestoredResourceList", test.restore.Name).Return(map[string][]string{}, nil) backupStore.On("GetRestoreItemOperations", test.restore.Name).Return([]*itemoperation.RestoreOperation{}, nil) } if test.backup != nil { - require.NoError(t, r.Client.Create(context.Background(), test.backup)) + require.NoError(t, r.Client.Create(t.Context(), test.backup)) backupStore.On("GetBackupVolumeInfos", test.backup.Name).Return(nil, nil) pluginManager.On("GetRestoreItemActionsV2").Return(nil, nil) pluginManager.On("CleanupClients") } if test.location != nil { - require.NoError(t, r.Client.Create(context.Background(), test.location)) + require.NoError(t, r.Client.Create(t.Context(), test.location)) } - _, err = r.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{ + _, err = r.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{ Namespace: test.restore.Namespace, Name: test.restore.Name, }}) @@ -172,7 +171,7 @@ func TestRestoreFinalizerReconcile(t *testing.T) { if test.statusCompare { restoreAfter := velerov1api.Restore{} - err = fakeClient.Get(context.TODO(), types.NamespacedName{ + err = fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: test.restore.Namespace, Name: test.restore.Name, }, &restoreAfter) @@ -443,10 +442,10 @@ func TestPatchDynamicPVWithVolumeInfo(t *testing.T) { } for _, pv := range tc.restoredPV { - require.NoError(t, ctx.crClient.Create(context.Background(), pv)) + require.NoError(t, ctx.crClient.Create(t.Context(), pv)) } for _, pvc := range tc.restoredPVC { - require.NoError(t, ctx.crClient.Create(context.Background(), pvc)) + require.NoError(t, ctx.crClient.Create(t.Context(), pvc)) } errs := ctx.patchDynamicPVWithVolumeInfo() @@ -456,7 +455,7 @@ func TestPatchDynamicPVWithVolumeInfo(t *testing.T) { for pvName, expectedPVInfo := range tc.expectedPatch { pv := &corev1api.PersistentVolume{} - err := ctx.crClient.Get(context.Background(), crclient.ObjectKey{Name: pvName}, pv) + err := ctx.crClient.Get(t.Context(), crclient.ObjectKey{Name: pvName}, pv) require.NoError(t, err) assert.Equal(t, expectedPVInfo.ReclaimPolicy, string(pv.Spec.PersistentVolumeReclaimPolicy)) @@ -541,7 +540,7 @@ func TestWaitRestoreExecHook(t *testing.T) { restore: tc.restore, multiHookTracker: tc.hookTracker, } - require.NoError(t, ctx.crClient.Create(context.Background(), tc.restore)) + require.NoError(t, ctx.crClient.Create(t.Context(), tc.restore)) if tc.waitSec > 0 { go func() { @@ -554,7 +553,7 @@ func TestWaitRestoreExecHook(t *testing.T) { assert.Len(t, errs.Namespaces, tc.expectedHookErrs) updated := &velerov1api.Restore{} - err := ctx.crClient.Get(context.Background(), crclient.ObjectKey{Namespace: velerov1api.DefaultNamespace, Name: tc.restore.Name}, updated) + err := ctx.crClient.Get(t.Context(), crclient.ObjectKey{Namespace: velerov1api.DefaultNamespace, Name: tc.restore.Name}, updated) require.NoError(t, err) assert.Equal(t, tc.expectedHooksAttempted, updated.Status.HookStatus.HooksAttempted) assert.Equal(t, tc.expectedHooksFailed, updated.Status.HookStatus.HooksFailed) diff --git 
a/pkg/controller/restore_operations_controller_test.go b/pkg/controller/restore_operations_controller_test.go index b4b70f03b..a1ec0f792 100644 --- a/pkg/controller/restore_operations_controller_test.go +++ b/pkg/controller/restore_operations_controller_test.go @@ -17,7 +17,6 @@ limitations under the License. package controller import ( - "context" "testing" "time" @@ -303,12 +302,12 @@ func TestRestoreOperationsReconcile(t *testing.T) { restorePluginManager.On("GetRestoreItemActionV2", operation.Spec.RestoreItemAction).Return(ria, nil) } - _, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.restore.Namespace, Name: test.restore.Name}}) + _, err := reconciler.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.restore.Namespace, Name: test.restore.Name}}) gotErr := err != nil assert.Equal(t, test.expectError, gotErr) restoreAfter := velerov1api.Restore{} - err = fakeClient.Get(context.TODO(), types.NamespacedName{ + err = fakeClient.Get(t.Context(), types.NamespacedName{ Namespace: test.restore.Namespace, Name: test.restore.Name, }, &restoreAfter) diff --git a/pkg/datamover/backup_micro_service_test.go b/pkg/datamover/backup_micro_service_test.go index 5936ba465..c5e0e273a 100644 --- a/pkg/datamover/backup_micro_service_test.go +++ b/pkg/datamover/backup_micro_service_test.go @@ -112,7 +112,7 @@ func TestOnDataUploadFailed(t *testing.T) { expectedEventReason := datapath.EventReasonFailed expectedEventMsg := "Data path for data upload fake-data-upload failed, error fake-error" - go bs.OnDataUploadFailed(context.TODO(), velerov1api.DefaultNamespace, dataUploadName, errors.New("fake-error")) + go bs.OnDataUploadFailed(t.Context(), velerov1api.DefaultNamespace, dataUploadName, errors.New("fake-error")) result := <-bs.resultSignal require.EqualError(t, result.err, expectedErr) @@ -136,7 +136,7 @@ func TestOnDataUploadCancelled(t *testing.T) { expectedEventReason := datapath.EventReasonCancelled expectedEventMsg := "Data path for data upload fake-data-upload canceled" - go bs.OnDataUploadCancelled(context.TODO(), velerov1api.DefaultNamespace, dataUploadName) + go bs.OnDataUploadCancelled(t.Context(), velerov1api.DefaultNamespace, dataUploadName) result := <-bs.resultSignal require.EqualError(t, result.err, expectedErr) @@ -184,7 +184,7 @@ func TestOnDataUploadCompleted(t *testing.T) { funcMarshal = bt.Marshal - go bs.OnDataUploadCompleted(context.TODO(), velerov1api.DefaultNamespace, dataUploadName, datapath.Result{}) + go bs.OnDataUploadCompleted(t.Context(), velerov1api.DefaultNamespace, dataUploadName, datapath.Result{}) result := <-bs.resultSignal if test.marshalErr != nil { @@ -237,7 +237,7 @@ func TestOnDataUploadProgress(t *testing.T) { funcMarshal = bt.Marshal - bs.OnDataUploadProgress(context.TODO(), velerov1api.DefaultNamespace, dataUploadName, &uploader.Progress{}) + bs.OnDataUploadProgress(t.Context(), velerov1api.DefaultNamespace, dataUploadName, &uploader.Progress{}) if test.marshalErr != nil { assert.False(t, bt.withEvent) @@ -295,7 +295,7 @@ func TestRunCancelableDataPath(t *testing.T) { dataUploadName := "fake-data-upload" du := builder.ForDataUpload(velerov1api.DefaultNamespace, dataUploadName).Phase(velerov2alpha1api.DataUploadPhaseNew).Result() duInProgress := builder.ForDataUpload(velerov1api.DefaultNamespace, dataUploadName).Phase(velerov2alpha1api.DataUploadPhaseInProgress).Result() - ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second) + ctxTimeout, 
cancel := context.WithTimeout(t.Context(), time.Second) tests := []struct { name string @@ -322,21 +322,21 @@ func TestRunCancelableDataPath(t *testing.T) { }, { name: "create data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{duInProgress}, dataPathMgr: datapath.NewManager(0), expectedErr: "error to create data path: Concurrent number exceeds", }, { name: "init data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{duInProgress}, initErr: errors.New("fake-init-error"), expectedErr: "error to initialize data path: fake-init-error", }, { name: "start data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{duInProgress}, startErr: errors.New("fake-start-error"), expectedErr: "error starting data path backup: fake-start-error", @@ -351,7 +351,7 @@ func TestRunCancelableDataPath(t *testing.T) { }, { name: "data path returns error", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{duInProgress}, dataPathStarted: true, result: &dataPathResult{ @@ -362,7 +362,7 @@ func TestRunCancelableDataPath(t *testing.T) { }, { name: "succeed", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{duInProgress}, dataPathStarted: true, result: &dataPathResult{ @@ -387,7 +387,7 @@ func TestRunCancelableDataPath(t *testing.T) { bs := &BackupMicroService{ namespace: velerov1api.DefaultNamespace, dataUploadName: dataUploadName, - ctx: context.Background(), + ctx: t.Context(), client: fakeClient, dataPathMgr: datapath.NewManager(1), eventRecorder: bt, diff --git a/pkg/datamover/restore_micro_service_test.go b/pkg/datamover/restore_micro_service_test.go index 44410b042..f315ef639 100644 --- a/pkg/datamover/restore_micro_service_test.go +++ b/pkg/datamover/restore_micro_service_test.go @@ -58,7 +58,7 @@ func TestOnDataDownloadFailed(t *testing.T) { expectedEventReason := datapath.EventReasonFailed expectedEventMsg := "Data path for data download fake-data-download failed, error fake-error" - go bs.OnDataDownloadFailed(context.TODO(), velerov1api.DefaultNamespace, dataDownloadName, errors.New("fake-error")) + go bs.OnDataDownloadFailed(t.Context(), velerov1api.DefaultNamespace, dataDownloadName, errors.New("fake-error")) result := <-bs.resultSignal require.EqualError(t, result.err, expectedErr) @@ -82,7 +82,7 @@ func TestOnDataDownloadCancelled(t *testing.T) { expectedEventReason := datapath.EventReasonCancelled expectedEventMsg := "Data path for data download fake-data-download canceled" - go bs.OnDataDownloadCancelled(context.TODO(), velerov1api.DefaultNamespace, dataDownloadName) + go bs.OnDataDownloadCancelled(t.Context(), velerov1api.DefaultNamespace, dataDownloadName) result := <-bs.resultSignal require.EqualError(t, result.err, expectedErr) @@ -130,7 +130,7 @@ func TestOnDataDownloadCompleted(t *testing.T) { funcMarshal = bt.Marshal - go bs.OnDataDownloadCompleted(context.TODO(), velerov1api.DefaultNamespace, dataDownloadName, datapath.Result{}) + go bs.OnDataDownloadCompleted(t.Context(), velerov1api.DefaultNamespace, dataDownloadName, datapath.Result{}) result := <-bs.resultSignal if test.marshalErr != nil { @@ -181,7 +181,7 @@ func TestOnDataDownloadProgress(t *testing.T) { funcMarshal = bt.Marshal - bs.OnDataDownloadProgress(context.TODO(), velerov1api.DefaultNamespace, dataDownloadName, &uploader.Progress{}) + bs.OnDataDownloadProgress(t.Context(), velerov1api.DefaultNamespace, dataDownloadName, &uploader.Progress{}) 
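The datamover tests also hand the test context to goroutines (go bs.OnDataUploadFailed(t.Context(), ...) and the OnDataDownload* calls above). A stdlib-only sketch, with a hypothetical report callback, of how a goroutine given the test-scoped context can stop once the test ends rather than leak:

package example

import (
	"context"
	"testing"
)

// report is a hypothetical callback standing in for the datamover
// OnDataUpload*/OnDataDownload* handlers invoked above.
func report(ctx context.Context, results chan<- string) {
	select {
	case results <- "done":
	case <-ctx.Done():
		// The test has already finished; stop instead of blocking forever.
	}
}

func TestCallbackGoroutine(t *testing.T) {
	results := make(chan string, 1)

	// A goroutine handed t.Context() sees cancellation once the test ends,
	// so background work cannot outlive the test.
	go report(t.Context(), results)

	if got := <-results; got != "done" {
		t.Fatalf("unexpected result %q", got)
	}
}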
if test.marshalErr != nil { assert.False(t, bt.withEvent) @@ -239,7 +239,7 @@ func TestRunCancelableRestore(t *testing.T) { dataDownloadName := "fake-data-download" dd := builder.ForDataDownload(velerov1api.DefaultNamespace, dataDownloadName).Phase(velerov2alpha1api.DataDownloadPhaseNew).Result() ddInProgress := builder.ForDataDownload(velerov1api.DefaultNamespace, dataDownloadName).Phase(velerov2alpha1api.DataDownloadPhaseInProgress).Result() - ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second) + ctxTimeout, cancel := context.WithTimeout(t.Context(), time.Second) tests := []struct { name string @@ -266,21 +266,21 @@ func TestRunCancelableRestore(t *testing.T) { }, { name: "create data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{ddInProgress}, dataPathMgr: datapath.NewManager(0), expectedErr: "error to create data path: Concurrent number exceeds", }, { name: "init data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{ddInProgress}, initErr: errors.New("fake-init-error"), expectedErr: "error to initialize data path: fake-init-error", }, { name: "start data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{ddInProgress}, startErr: errors.New("fake-start-error"), expectedErr: "error starting data path restore: fake-start-error", @@ -295,7 +295,7 @@ func TestRunCancelableRestore(t *testing.T) { }, { name: "data path returns error", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{ddInProgress}, dataPathStarted: true, result: &dataPathResult{ @@ -306,7 +306,7 @@ func TestRunCancelableRestore(t *testing.T) { }, { name: "succeed", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{ddInProgress}, dataPathStarted: true, result: &dataPathResult{ @@ -331,7 +331,7 @@ func TestRunCancelableRestore(t *testing.T) { rs := &RestoreMicroService{ namespace: velerov1api.DefaultNamespace, dataDownloadName: dataDownloadName, - ctx: context.Background(), + ctx: t.Context(), client: fakeClient, dataPathMgr: datapath.NewManager(1), eventRecorder: bt, diff --git a/pkg/datapath/manager_test.go b/pkg/datapath/manager_test.go index 57b5220d1..c91517bac 100644 --- a/pkg/datapath/manager_test.go +++ b/pkg/datapath/manager_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package datapath import ( - "context" "testing" "github.com/stretchr/testify/assert" @@ -27,13 +26,13 @@ import ( func TestCreateFileSystemBR(t *testing.T) { m := NewManager(2) - async_job_1, err := m.CreateFileSystemBR("job-1", "test", context.TODO(), nil, "velero", Callbacks{}, nil) + async_job_1, err := m.CreateFileSystemBR("job-1", "test", t.Context(), nil, "velero", Callbacks{}, nil) require.NoError(t, err) - _, err = m.CreateFileSystemBR("job-2", "test", context.TODO(), nil, "velero", Callbacks{}, nil) + _, err = m.CreateFileSystemBR("job-2", "test", t.Context(), nil, "velero", Callbacks{}, nil) require.NoError(t, err) - _, err = m.CreateFileSystemBR("job-3", "test", context.TODO(), nil, "velero", Callbacks{}, nil) + _, err = m.CreateFileSystemBR("job-3", "test", t.Context(), nil, "velero", Callbacks{}, nil) assert.Equal(t, ConcurrentLimitExceed, err) ret := m.GetAsyncBR("job-0") @@ -55,16 +54,16 @@ func TestCreateFileSystemBR(t *testing.T) { func TestCreateMicroServiceBRWatcher(t *testing.T) { m := NewManager(2) - async_job_1, err := m.CreateMicroServiceBRWatcher(context.TODO(), nil, nil, nil, "test", "job-1", "velero", "pod-1", "container", "du-1", Callbacks{}, false, nil) + async_job_1, err := m.CreateMicroServiceBRWatcher(t.Context(), nil, nil, nil, "test", "job-1", "velero", "pod-1", "container", "du-1", Callbacks{}, false, nil) require.NoError(t, err) - _, err = m.CreateMicroServiceBRWatcher(context.TODO(), nil, nil, nil, "test", "job-2", "velero", "pod-2", "container", "du-2", Callbacks{}, false, nil) + _, err = m.CreateMicroServiceBRWatcher(t.Context(), nil, nil, nil, "test", "job-2", "velero", "pod-2", "container", "du-2", Callbacks{}, false, nil) require.NoError(t, err) - _, err = m.CreateMicroServiceBRWatcher(context.TODO(), nil, nil, nil, "test", "job-3", "velero", "pod-3", "container", "du-3", Callbacks{}, false, nil) + _, err = m.CreateMicroServiceBRWatcher(t.Context(), nil, nil, nil, "test", "job-3", "velero", "pod-3", "container", "du-3", Callbacks{}, false, nil) assert.Equal(t, ConcurrentLimitExceed, err) - async_job_4, err := m.CreateMicroServiceBRWatcher(context.TODO(), nil, nil, nil, "test", "job-4", "velero", "pod-4", "container", "du-4", Callbacks{}, true, nil) + async_job_4, err := m.CreateMicroServiceBRWatcher(t.Context(), nil, nil, nil, "test", "job-4", "velero", "pod-4", "container", "du-4", Callbacks{}, true, nil) require.NoError(t, err) ret := m.GetAsyncBR("job-0") diff --git a/pkg/datapath/micro_service_watcher_test.go b/pkg/datapath/micro_service_watcher_test.go index c4991deb0..6724c290c 100644 --- a/pkg/datapath/micro_service_watcher_test.go +++ b/pkg/datapath/micro_service_watcher_test.go @@ -101,7 +101,7 @@ func TestReEnsureThisPod(t *testing.T) { log: velerotest.NewLogger(), } - err := ms.reEnsureThisPod(context.Background()) + err := ms.reEnsureThisPod(t.Context()) if test.expectErr != "" { assert.EqualError(t, err, test.expectErr) } else { @@ -374,7 +374,7 @@ func TestStartWatch(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) eventWaitTimeout = time.Second * 5 sw := startWatchFake{ @@ -615,7 +615,7 @@ func TestRedirectDataMoverLogs(t *testing.T) { fakeKubeClient := kubeclientfake.NewSimpleClientset() - err = redirectDataMoverLogs(context.Background(), fakeKubeClient, "", test.thisPod, "", test.logger) + err = redirectDataMoverLogs(t.Context(), fakeKubeClient, "", test.thisPod, "", test.logger) if 
test.expectErr != "" { assert.EqualError(t, err, test.expectErr) } else { diff --git a/pkg/discovery/helper_test.go b/pkg/discovery/helper_test.go index a42b3c886..d70c10438 100644 --- a/pkg/discovery/helper_test.go +++ b/pkg/discovery/helper_test.go @@ -83,14 +83,14 @@ func TestSortResources(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - t.Logf("before") + t.Log("before") for _, r := range test.resources { - t.Logf(r.GroupVersion) + t.Log(r.GroupVersion) } sortResources(test.resources) t.Logf("after") for _, r := range test.resources { - t.Logf(r.GroupVersion) + t.Log(r.GroupVersion) } assert.Equal(t, test.expected, test.resources) }) diff --git a/pkg/exposer/csi_snapshot_test.go b/pkg/exposer/csi_snapshot_test.go index f0a989c69..223a85773 100644 --- a/pkg/exposer/csi_snapshot_test.go +++ b/pkg/exposer/csi_snapshot_test.go @@ -17,7 +17,6 @@ limitations under the License. package exposer import ( - "context" "fmt" "testing" "time" @@ -630,20 +629,20 @@ func TestExpose(t *testing.T) { } } - err := exposer.Expose(context.Background(), ownerObject, &test.exposeParam) + err := exposer.Expose(t.Context(), ownerObject, &test.exposeParam) if err == nil { require.NoError(t, err) - backupPod, err := exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{}) + backupPod, err := exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{}) require.NoError(t, err) - backupPVC, err := exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{}) + backupPVC, err := exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{}) require.NoError(t, err) - expectedVS, err := exposer.csiSnapshotClient.VolumeSnapshots(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{}) + expectedVS, err := exposer.csiSnapshotClient.VolumeSnapshots(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{}) require.NoError(t, err) - expectedVSC, err := exposer.csiSnapshotClient.VolumeSnapshotContents().Get(context.Background(), ownerObject.Name, metav1.GetOptions{}) + expectedVSC, err := exposer.csiSnapshotClient.VolumeSnapshotContents().Get(t.Context(), ownerObject.Name, metav1.GetOptions{}) require.NoError(t, err) assert.Equal(t, expectedVS.Annotations, vsObject.Annotations) @@ -844,7 +843,7 @@ func TestGetExpose(t *testing.T) { test.exposeWaitParam.NodeClient = fakeClient - result, err := exposer.GetExposed(context.Background(), ownerObject, test.Timeout, &test.exposeWaitParam) + result, err := exposer.GetExposed(t.Context(), ownerObject, test.Timeout, &test.exposeWaitParam) if test.err == "" { require.NoError(t, err) @@ -942,7 +941,7 @@ func TestPeekExpose(t *testing.T) { } } - err := exposer.PeekExposed(context.Background(), ownerObject) + err := exposer.PeekExposed(t.Context(), ownerObject) if test.err == "" { assert.NoError(t, err) } else { @@ -1087,7 +1086,7 @@ func Test_csiSnapshotExposer_createBackupPVC(t *testing.T) { APIVersion: tt.ownerBackup.APIVersion, } } - got, err := e.createBackupPVC(context.Background(), ownerObject, tt.backupVS, tt.storageClass, tt.accessMode, tt.resource, tt.readOnly) + got, err := e.createBackupPVC(t.Context(), ownerObject, tt.backupVS, tt.storageClass, tt.accessMode, tt.resource, tt.readOnly) if !tt.wantErr(t, err, 
fmt.Sprintf("createBackupPVC(%v, %v, %v, %v, %v, %v)", ownerObject, tt.backupVS, tt.storageClass, tt.accessMode, tt.resource, tt.readOnly)) { return } @@ -1481,7 +1480,7 @@ end diagnose CSI exposer`, } } - diag := e.DiagnoseExpose(context.Background(), ownerObject) + diag := e.DiagnoseExpose(t.Context(), ownerObject) assert.Equal(t, tt.expected, diag) }) } diff --git a/pkg/exposer/generic_restore_test.go b/pkg/exposer/generic_restore_test.go index 45fe862b0..657d75347 100644 --- a/pkg/exposer/generic_restore_test.go +++ b/pkg/exposer/generic_restore_test.go @@ -17,7 +17,6 @@ limitations under the License. package exposer import ( - "context" "testing" "time" @@ -195,7 +194,7 @@ func TestRestoreExpose(t *testing.T) { } err := exposer.Expose( - context.Background(), + t.Context(), ownerObject, GenericRestoreExposeParam{ TargetPVCName: test.targetPVCName, @@ -440,7 +439,7 @@ func TestRebindVolume(t *testing.T) { hookCount = 0 - err := exposer.RebindVolume(context.Background(), ownerObject, test.targetPVCName, test.targetNamespace, time.Millisecond) + err := exposer.RebindVolume(t.Context(), ownerObject, test.targetPVCName, test.targetNamespace, time.Millisecond) assert.EqualError(t, err, test.err) }) } @@ -523,7 +522,7 @@ func TestRestorePeekExpose(t *testing.T) { } } - err := exposer.PeekExposed(context.Background(), ownerObject) + err := exposer.PeekExposed(t.Context(), ownerObject) if test.err == "" { assert.NoError(t, err) } else { @@ -782,7 +781,7 @@ end diagnose restore exposer`, } } - diag := e.DiagnoseExpose(context.Background(), ownerObject) + diag := e.DiagnoseExpose(t.Context(), ownerObject) assert.Equal(t, test.expected, diag) }) } @@ -901,7 +900,7 @@ func TestCreateRestorePod(t *testing.T) { } pod, err := exposer.createRestorePod( - context.Background(), + t.Context(), corev1api.ObjectReference{ Namespace: velerov1.DefaultNamespace, Name: "data-download", diff --git a/pkg/exposer/host_path_test.go b/pkg/exposer/host_path_test.go index 115b7425a..e751afe0d 100644 --- a/pkg/exposer/host_path_test.go +++ b/pkg/exposer/host_path_test.go @@ -99,7 +99,7 @@ func TestGetPodVolumeHostPath(t *testing.T) { singlePathMatch = test.pathMatchFunc } - _, err := GetPodVolumeHostPath(context.Background(), test.pod, test.pvc, nil, nil, velerotest.NewLogger()) + _, err := GetPodVolumeHostPath(t.Context(), test.pod, test.pvc, nil, nil, velerotest.NewLogger()) if test.err != "" || err != nil { assert.EqualError(t, err, test.err) } @@ -150,7 +150,7 @@ func TestExtractPodVolumeHostPath(t *testing.T) { getHostPodPath = test.getHostPodPathFunc } - path, err := ExtractPodVolumeHostPath(context.Background(), test.path, nil, "", test.osType) + path, err := ExtractPodVolumeHostPath(t.Context(), test.path, nil, "", test.osType) if test.expectedErr != "" { assert.EqualError(t, err, test.expectedErr) diff --git a/pkg/exposer/image_test.go b/pkg/exposer/image_test.go index 84732b031..e793f3c8d 100644 --- a/pkg/exposer/image_test.go +++ b/pkg/exposer/image_test.go @@ -17,7 +17,6 @@ limitations under the License. package exposer import ( - "context" "reflect" "testing" @@ -325,7 +324,7 @@ func TestGetInheritedPodInfo(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) 
- info, err := getInheritedPodInfo(context.Background(), fakeKubeClient, test.namespace, kube.NodeOSLinux) + info, err := getInheritedPodInfo(t.Context(), fakeKubeClient, test.namespace, kube.NodeOSLinux) if test.expectErr == "" { require.NoError(t, err) diff --git a/pkg/exposer/pod_volume_test.go b/pkg/exposer/pod_volume_test.go index 74b5720ce..f36fda4f4 100644 --- a/pkg/exposer/pod_volume_test.go +++ b/pkg/exposer/pod_volume_test.go @@ -220,11 +220,11 @@ func TestPodVolumeExpose(t *testing.T) { extractPodVolumeHostPath = test.funcExtractPodVolumeHostPath } - err := exposer.Expose(context.Background(), ownerObject, test.exposeParam) + err := exposer.Expose(t.Context(), ownerObject, test.exposeParam) if err == nil { require.NoError(t, err) - _, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{}) + _, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{}) assert.NoError(t, err) } else { assert.EqualError(t, err, test.err) @@ -318,7 +318,7 @@ func TestGetPodVolumeExpose(t *testing.T) { } } - result, err := exposer.GetExposed(context.Background(), ownerObject, fakeClient, test.nodeName, test.Timeout) + result, err := exposer.GetExposed(t.Context(), ownerObject, fakeClient, test.nodeName, test.Timeout) if test.err == "" { require.NoError(t, err) @@ -416,7 +416,7 @@ func TestPodVolumePeekExpose(t *testing.T) { } } - err := exposer.PeekExposed(context.Background(), ownerObject) + err := exposer.PeekExposed(t.Context(), ownerObject) if test.err == "" { assert.NoError(t, err) } else { @@ -585,7 +585,7 @@ end diagnose pod volume exposer`, } } - diag := e.DiagnoseExpose(context.Background(), ownerObject) + diag := e.DiagnoseExpose(t.Context(), ownerObject) assert.Equal(t, tt.expected, diag) }) } diff --git a/pkg/exposer/vgdp_counter_test.go b/pkg/exposer/vgdp_counter_test.go index 68a57537b..725b5f8d6 100644 --- a/pkg/exposer/vgdp_counter_test.go +++ b/pkg/exposer/vgdp_counter_test.go @@ -1,7 +1,6 @@ package exposer import ( - "context" "testing" "github.com/stretchr/testify/assert" @@ -151,7 +150,7 @@ func TestIsConstrained(t *testing.T) { test.counter.client = fake.NewClientBuilder().WithScheme(scheme).WithObjects(test.kubeClientObj...).Build() - result := test.counter.IsConstrained(context.TODO(), velerotest.NewLogger()) + result := test.counter.IsConstrained(t.Context(), velerotest.NewLogger()) assert.Equal(t, test.expected, result) diff --git a/pkg/itemblock/actions/pvc_action_test.go b/pkg/itemblock/actions/pvc_action_test.go index fcd54b022..8b3cf58d5 100644 --- a/pkg/itemblock/actions/pvc_action_test.go +++ b/pkg/itemblock/actions/pvc_action_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package actions import ( - "context" "fmt" "testing" @@ -162,15 +161,15 @@ func TestBackupPVAction(t *testing.T) { a := i.(*PVCAction) if tc.pvc != nil { - require.NoError(t, crClient.Create(context.Background(), tc.pvc)) + require.NoError(t, crClient.Create(t.Context(), tc.pvc)) } for _, pod := range tc.pods { - require.NoError(t, crClient.Create(context.Background(), pod)) + require.NoError(t, crClient.Create(t.Context(), pod)) } if tc.name == "Test with PVC grouping via VGS label" { groupedPVC := builder.ForPersistentVolumeClaim("velero", "groupedPVC").ObjectMeta(builder.WithLabels("velero.io/group", "db")).VolumeName("groupedPV").Phase(corev1api.ClaimBound).Result() - require.NoError(t, crClient.Create(context.Background(), groupedPVC)) + require.NoError(t, crClient.Create(t.Context(), groupedPVC)) backup.Spec.VolumeGroupSnapshotLabelKey = "velero.io/group" } @@ -252,7 +251,7 @@ func Test_getGroupedPVCs(t *testing.T) { t.Run(tc.name, func(t *testing.T) { crClient := velerotest.NewFakeControllerRuntimeClient(t) for _, pvc := range tc.existingPVCs { - require.NoError(t, crClient.Create(context.Background(), pvc)) + require.NoError(t, crClient.Create(t.Context(), pvc)) } logger := logrus.New() @@ -263,7 +262,7 @@ func Test_getGroupedPVCs(t *testing.T) { backup := builder.ForBackup("ns", "bkp").VolumeGroupSnapshotLabelKey(tc.labelKey).Result() - related, err := a.getGroupedPVCs(context.Background(), tc.targetPVC, backup) + related, err := a.getGroupedPVCs(t.Context(), tc.targetPVC, backup) if tc.expectError { require.Error(t, err) } else { diff --git a/pkg/nodeagent/node_agent_test.go b/pkg/nodeagent/node_agent_test.go index 4790b94c4..292c14a61 100644 --- a/pkg/nodeagent/node_agent_test.go +++ b/pkg/nodeagent/node_agent_test.go @@ -17,7 +17,6 @@ limitations under the License. package nodeagent import ( - "context" "testing" "github.com/pkg/errors" @@ -95,7 +94,7 @@ func TestIsRunning(t *testing.T) { fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) } - err := isRunning(context.TODO(), fakeKubeClient, test.namespace, daemonSet) + err := isRunning(t.Context(), fakeKubeClient, test.namespace, daemonSet) if test.expectErr == "" { assert.NoError(t, err) } else { @@ -175,7 +174,7 @@ func TestIsRunningInNode(t *testing.T) { fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build() - err := IsRunningInNode(context.TODO(), "", test.nodeName, fakeClient) + err := IsRunningInNode(t.Context(), "", test.nodeName, fakeClient) if test.expectErr == "" { assert.NoError(t, err) } else { @@ -231,7 +230,7 @@ func TestGetPodSpec(t *testing.T) { t.Run(test.name, func(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) 
- spec, err := GetPodSpec(context.TODO(), fakeKubeClient, test.namespace, kube.NodeOSLinux) + spec, err := GetPodSpec(t.Context(), fakeKubeClient, test.namespace, kube.NodeOSLinux) if test.expectErr == "" { require.NoError(t, err) assert.Equal(t, *spec, test.expectSpec) @@ -316,7 +315,7 @@ func TestGetConfigs(t *testing.T) { fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) } - result, err := GetConfigs(context.TODO(), test.namespace, fakeKubeClient, "node-agent-config") + result, err := GetConfigs(t.Context(), test.namespace, fakeKubeClient, "node-agent-config") if test.expectErr == "" { require.NoError(t, err) @@ -452,7 +451,7 @@ func TestGetLabelValue(t *testing.T) { t.Run(test.name, func(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) - value, err := GetLabelValue(context.TODO(), fakeKubeClient, test.namespace, "fake-label", kube.NodeOSLinux) + value, err := GetLabelValue(t.Context(), fakeKubeClient, test.namespace, "fake-label", kube.NodeOSLinux) if test.expectErr == "" { require.NoError(t, err) assert.Equal(t, test.expectedValue, value) @@ -581,7 +580,7 @@ func TestGetAnnotationValue(t *testing.T) { t.Run(test.name, func(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) - value, err := GetAnnotationValue(context.TODO(), fakeKubeClient, test.namespace, "fake-annotation", kube.NodeOSLinux) + value, err := GetAnnotationValue(t.Context(), fakeKubeClient, test.namespace, "fake-annotation", kube.NodeOSLinux) if test.expectErr == "" { require.NoError(t, err) assert.Equal(t, test.expectedValue, value) @@ -691,7 +690,7 @@ func TestGetToleration(t *testing.T) { t.Run(test.name, func(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) - value, err := GetToleration(context.TODO(), fakeKubeClient, test.namespace, "fake-toleration", kube.NodeOSLinux) + value, err := GetToleration(t.Context(), fakeKubeClient, test.namespace, "fake-toleration", kube.NodeOSLinux) if test.expectErr == "" { require.NoError(t, err) assert.Equal(t, test.expectedValue, *value) @@ -851,7 +850,7 @@ func TestGetHostPodPath(t *testing.T) { t.Run(test.name, func(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) 
- path, err := GetHostPodPath(context.TODO(), fakeKubeClient, test.namespace, test.osType) + path, err := GetHostPodPath(t.Context(), fakeKubeClient, test.namespace, test.osType) if test.expectErr == "" { require.NoError(t, err) diff --git a/pkg/plugin/framework/backup_item_action_test.go b/pkg/plugin/framework/backup_item_action_test.go index 59a1bbba8..1472eb115 100644 --- a/pkg/plugin/framework/backup_item_action_test.go +++ b/pkg/plugin/framework/backup_item_action_test.go @@ -23,7 +23,6 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/net/context" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -167,7 +166,7 @@ func TestBackupItemActionGRPCServerExecute(t *testing.T) { Backup: test.backup, } - resp, err := s.Execute(context.Background(), req) + resp, err := s.Execute(t.Context(), req) // Verify error assert.Equal(t, test.expectError, err != nil) diff --git a/pkg/plugin/framework/backupitemaction/v2/backup_item_action_test.go b/pkg/plugin/framework/backupitemaction/v2/backup_item_action_test.go index 47f69dea9..502c37502 100644 --- a/pkg/plugin/framework/backupitemaction/v2/backup_item_action_test.go +++ b/pkg/plugin/framework/backupitemaction/v2/backup_item_action_test.go @@ -23,7 +23,6 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/net/context" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -170,7 +169,7 @@ func TestBackupItemActionGRPCServerExecute(t *testing.T) { Backup: test.backup, } - resp, err := s.Execute(context.Background(), req) + resp, err := s.Execute(t.Context(), req) // Verify error assert.Equal(t, test.expectError, err != nil) diff --git a/pkg/plugin/framework/itemblockaction/v1/item_block_action_test.go b/pkg/plugin/framework/itemblockaction/v1/item_block_action_test.go index 4397821cb..6e2a0e4d5 100644 --- a/pkg/plugin/framework/itemblockaction/v1/item_block_action_test.go +++ b/pkg/plugin/framework/itemblockaction/v1/item_block_action_test.go @@ -23,7 +23,6 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/net/context" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -143,7 +142,7 @@ func TestItemBlockActionGRPCServerGetRelatedItems(t *testing.T) { Backup: test.backup, } - resp, err := s.GetRelatedItems(context.Background(), req) + resp, err := s.GetRelatedItems(t.Context(), req) // Verify error assert.Equal(t, test.expectError, err != nil) diff --git a/pkg/podvolume/backup_micro_service_test.go b/pkg/podvolume/backup_micro_service_test.go index eab56f58f..ed6785b57 100644 --- a/pkg/podvolume/backup_micro_service_test.go +++ b/pkg/podvolume/backup_micro_service_test.go @@ -111,7 +111,7 @@ func TestOnDataPathFailed(t *testing.T) { expectedEventReason := datapath.EventReasonFailed expectedEventMsg := "Data path for PVB fake-pvb failed, error fake-error" - go bs.OnDataPathFailed(context.TODO(), velerov1api.DefaultNamespace, pvbName, errors.New("fake-error")) + go bs.OnDataPathFailed(t.Context(), velerov1api.DefaultNamespace, pvbName, errors.New("fake-error")) result := <-bs.resultSignal require.EqualError(t, result.err, expectedErr) @@ -135,7 +135,7 @@ func TestOnDataPathCancelled(t *testing.T) { expectedEventReason := 
datapath.EventReasonCancelled expectedEventMsg := "Data path for PVB fake-pvb canceled" - go bs.OnDataPathCancelled(context.TODO(), velerov1api.DefaultNamespace, pvbName) + go bs.OnDataPathCancelled(t.Context(), velerov1api.DefaultNamespace, pvbName) result := <-bs.resultSignal require.EqualError(t, result.err, expectedErr) @@ -183,7 +183,7 @@ func TestOnDataPathCompleted(t *testing.T) { funcMarshal = bt.Marshal - go bs.OnDataPathCompleted(context.TODO(), velerov1api.DefaultNamespace, pvbName, datapath.Result{}) + go bs.OnDataPathCompleted(t.Context(), velerov1api.DefaultNamespace, pvbName, datapath.Result{}) result := <-bs.resultSignal if test.marshalErr != nil { @@ -236,7 +236,7 @@ func TestOnDataPathProgress(t *testing.T) { funcMarshal = bt.Marshal - bs.OnDataPathProgress(context.TODO(), velerov1api.DefaultNamespace, pvbName, &uploader.Progress{}) + bs.OnDataPathProgress(t.Context(), velerov1api.DefaultNamespace, pvbName, &uploader.Progress{}) if test.marshalErr != nil { assert.False(t, bt.withEvent) @@ -294,7 +294,7 @@ func TestRunCancelableDataPath(t *testing.T) { pvbName := "fake-pvb" pvb := builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, pvbName).Phase(velerov1api.PodVolumeBackupPhaseNew).Result() pvbInProgress := builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, pvbName).Phase(velerov1api.PodVolumeBackupPhaseInProgress).Result() - ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second) + ctxTimeout, cancel := context.WithTimeout(t.Context(), time.Second) tests := []struct { name string @@ -321,21 +321,21 @@ func TestRunCancelableDataPath(t *testing.T) { }, { name: "create data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{pvbInProgress}, dataPathMgr: datapath.NewManager(0), expectedErr: "error to create data path: Concurrent number exceeds", }, { name: "init data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{pvbInProgress}, initErr: errors.New("fake-init-error"), expectedErr: "error to initialize data path: fake-init-error", }, { name: "start data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{pvbInProgress}, startErr: errors.New("fake-start-error"), expectedErr: "error starting data path backup: fake-start-error", @@ -350,7 +350,7 @@ func TestRunCancelableDataPath(t *testing.T) { }, { name: "data path returns error", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{pvbInProgress}, dataPathStarted: true, result: &dataPathResult{ @@ -361,7 +361,7 @@ func TestRunCancelableDataPath(t *testing.T) { }, { name: "succeed", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{pvbInProgress}, dataPathStarted: true, result: &dataPathResult{ @@ -386,7 +386,7 @@ func TestRunCancelableDataPath(t *testing.T) { bs := &BackupMicroService{ namespace: velerov1api.DefaultNamespace, pvbName: pvbName, - ctx: context.Background(), + ctx: t.Context(), client: fakeClient, dataPathMgr: datapath.NewManager(1), eventRecorder: bt, diff --git a/pkg/podvolume/backupper_test.go b/pkg/podvolume/backupper_test.go index 38b415a01..6359df696 100644 --- a/pkg/podvolume/backupper_test.go +++ b/pkg/podvolume/backupper_test.go @@ -190,7 +190,7 @@ func Test_backupper_BackupPodVolumes_log_test(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { b := &backupper{ - ctx: context.Background(), + ctx: t.Context(), } logOutput := bytes.Buffer{} var log = logrus.New() 
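The table-driven tests above keep their original shape under the migration: the timeout case is derived with `context.WithTimeout(t.Context(), time.Second)` and the other cases store `t.Context()` directly in the case struct. Both are taken before `t.Run`, so they refer to the parent test's context, which stays live until the parent's sequential subtests have finished. A small sketch of that shape, with made-up case names:

```go
package example

import (
	"context"
	"testing"
	"time"
)

func TestTableDrivenContexts(t *testing.T) {
	// Expired context for the "cancelled" case; still rooted in t.Context(),
	// so it is also cleaned up when the test ends.
	ctxTimeout, cancel := context.WithTimeout(t.Context(), time.Millisecond)
	defer cancel()
	<-ctxTimeout.Done()

	cases := []struct {
		name string
		ctx  context.Context
	}{
		{name: "already cancelled", ctx: ctxTimeout},
		{name: "live context", ctx: t.Context()}, // parent test's context
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			gotCancelled := tc.ctx.Err() != nil
			wantCancelled := tc.name == "already cancelled"
			if gotCancelled != wantCancelled {
				t.Fatalf("case %q: cancelled=%v, want %v", tc.name, gotCancelled, wantCancelled)
			}
		})
	}
}
```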
@@ -546,7 +546,7 @@ func TestBackupPodVolumes(t *testing.T) { // TODO add more verification around PVCBackupSummary returned by "BackupPodVolumes" for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() fakeClientBuilder := ctrlfake.NewClientBuilder() if test.runtimeScheme != nil { @@ -705,7 +705,7 @@ func (l *logHook) Fire(entry *logrus.Entry) error { } func TestWaitAllPodVolumesProcessed(t *testing.T) { - timeoutCtx, cancelFunc := context.WithCancel(context.Background()) + timeoutCtx, cancelFunc := context.WithCancel(t.Context()) cancelFunc() log := logrus.New() pvb := builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb"). @@ -730,7 +730,7 @@ func TestWaitAllPodVolumesProcessed(t *testing.T) { }, { name: "failed pvbs", - ctx: context.Background(), + ctx: t.Context(), pvb: pvb, statusToBeUpdated: &velerov1api.PodVolumeBackupStatus{ Phase: velerov1api.PodVolumeBackupPhaseFailed, @@ -741,7 +741,7 @@ func TestWaitAllPodVolumesProcessed(t *testing.T) { }, { name: "completed pvbs", - ctx: context.Background(), + ctx: t.Context(), pvb: pvb, statusToBeUpdated: &velerov1api.PodVolumeBackupStatus{ Phase: velerov1api.PodVolumeBackupPhaseCompleted, @@ -768,7 +768,7 @@ func TestWaitAllPodVolumesProcessed(t *testing.T) { informer := cache.NewSharedIndexInformer(&lw, &velerov1api.PodVolumeBackup{}, 0, cache.Indexers{}) - ctx := context.Background() + ctx := t.Context() go informer.Run(ctx.Done()) require.True(t, cache.WaitForCacheSync(ctx.Done(), informer.HasSynced)) @@ -784,11 +784,11 @@ func TestWaitAllPodVolumesProcessed(t *testing.T) { if c.statusToBeUpdated != nil { pvb := &velerov1api.PodVolumeBackup{} - err := client.Get(context.Background(), ctrlclient.ObjectKey{Namespace: c.pvb.Namespace, Name: c.pvb.Name}, pvb) + err := client.Get(t.Context(), ctrlclient.ObjectKey{Namespace: c.pvb.Namespace, Name: c.pvb.Name}, pvb) require.NoError(t, err) pvb.Status = *c.statusToBeUpdated - err = client.Update(context.Background(), pvb) + err = client.Update(t.Context(), pvb) require.NoError(t, err) } diff --git a/pkg/podvolume/restore_micro_service.go b/pkg/podvolume/restore_micro_service.go index 01f33a68a..9bfc0a287 100644 --- a/pkg/podvolume/restore_micro_service.go +++ b/pkg/podvolume/restore_micro_service.go @@ -247,7 +247,7 @@ func (r *RestoreMicroService) recordPvrFailed(msg string, err error) { evtMsg := fmt.Sprintf("%s, error %v", msg, err) r.eventRecorder.Event(r.pvr, false, datapath.EventReasonFailed, evtMsg) r.resultSignal <- dataPathResult{ - err: errors.Wrapf(err, msg), + err: errors.Wrap(err, msg), } } diff --git a/pkg/podvolume/restore_micro_service_test.go b/pkg/podvolume/restore_micro_service_test.go index 5055c08b6..07e2056b0 100644 --- a/pkg/podvolume/restore_micro_service_test.go +++ b/pkg/podvolume/restore_micro_service_test.go @@ -119,7 +119,7 @@ func TestOnPvrFailed(t *testing.T) { expectedEventReason := datapath.EventReasonFailed expectedEventMsg := "Data path for PVR fake-pvr failed, error fake-error" - go rs.OnPvrFailed(context.TODO(), velerov1api.DefaultNamespace, pvrName, errors.New("fake-error")) + go rs.OnPvrFailed(t.Context(), velerov1api.DefaultNamespace, pvrName, errors.New("fake-error")) result := <-rs.resultSignal require.EqualError(t, result.err, expectedErr) @@ -143,7 +143,7 @@ func TestPvrCancelled(t *testing.T) { expectedEventReason := datapath.EventReasonCancelled expectedEventMsg := "Data path for PVR fake-pvr canceled" - go rs.OnPvrCancelled(context.TODO(), velerov1api.DefaultNamespace, pvrName) 
+ go rs.OnPvrCancelled(t.Context(), velerov1api.DefaultNamespace, pvrName) result := <-rs.resultSignal require.EqualError(t, result.err, expectedErr) @@ -205,7 +205,7 @@ func TestOnPvrCompleted(t *testing.T) { funcMarshal = rt.Marshal funcWriteCompletionMark = rt.WriteCompletionMark - go rs.OnPvrCompleted(context.TODO(), velerov1api.DefaultNamespace, pvrName, datapath.Result{}) + go rs.OnPvrCompleted(t.Context(), velerov1api.DefaultNamespace, pvrName, datapath.Result{}) result := <-rs.resultSignal if test.marshalErr != nil { @@ -262,7 +262,7 @@ func TestOnPvrProgress(t *testing.T) { funcMarshal = rt.Marshal - rs.OnPvrProgress(context.TODO(), velerov1api.DefaultNamespace, pvrName, &uploader.Progress{}) + rs.OnPvrProgress(t.Context(), velerov1api.DefaultNamespace, pvrName, &uploader.Progress{}) if test.marshalErr != nil { assert.False(t, rt.withEvent) @@ -320,7 +320,7 @@ func TestRunCancelableDataPathRestore(t *testing.T) { pvrName := "fake-pvr" pvr := builder.ForPodVolumeRestore(velerov1api.DefaultNamespace, pvrName).Phase(velerov1api.PodVolumeRestorePhaseNew).Result() pvrInProgress := builder.ForPodVolumeRestore(velerov1api.DefaultNamespace, pvrName).Phase(velerov1api.PodVolumeRestorePhaseInProgress).Result() - ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second) + ctxTimeout, cancel := context.WithTimeout(t.Context(), time.Second) tests := []struct { name string @@ -347,21 +347,21 @@ func TestRunCancelableDataPathRestore(t *testing.T) { }, { name: "create data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{pvrInProgress}, dataPathMgr: datapath.NewManager(0), expectedErr: "error to create data path: Concurrent number exceeds", }, { name: "init data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{pvrInProgress}, initErr: errors.New("fake-init-error"), expectedErr: "error to initialize data path: fake-init-error", }, { name: "start data path fail", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{pvrInProgress}, startErr: errors.New("fake-start-error"), expectedErr: "error starting data path restore: fake-start-error", @@ -376,7 +376,7 @@ func TestRunCancelableDataPathRestore(t *testing.T) { }, { name: "data path returns error", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{pvrInProgress}, dataPathStarted: true, result: &dataPathResult{ @@ -387,7 +387,7 @@ func TestRunCancelableDataPathRestore(t *testing.T) { }, { name: "succeed", - ctx: context.Background(), + ctx: t.Context(), kubeClientObj: []runtime.Object{pvrInProgress}, dataPathStarted: true, result: &dataPathResult{ @@ -412,7 +412,7 @@ func TestRunCancelableDataPathRestore(t *testing.T) { rs := &RestoreMicroService{ namespace: velerov1api.DefaultNamespace, pvrName: pvrName, - ctx: context.Background(), + ctx: t.Context(), client: fakeClient, dataPathMgr: datapath.NewManager(1), eventRecorder: rt, diff --git a/pkg/podvolume/restorer_test.go b/pkg/podvolume/restorer_test.go index 914a6e9be..a9d4ddf84 100644 --- a/pkg/podvolume/restorer_test.go +++ b/pkg/podvolume/restorer_test.go @@ -164,7 +164,7 @@ func TestRestorePodVolumes(t *testing.T) { velerov1api.AddToScheme(scheme) corev1api.AddToScheme(scheme) - ctxWithCancel, cancel := context.WithCancel(context.Background()) + ctxWithCancel, cancel := context.WithCancel(t.Context()) defer cancel() failedPVR := createPVRObj(true, 1) @@ -364,7 +364,7 @@ func TestRestorePodVolumes(t *testing.T) { for _, test := range 
tests { t.Run(test.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if test.ctx != nil { ctx = test.ctx } diff --git a/pkg/repository/backup_repo_op_test.go b/pkg/repository/backup_repo_op_test.go index a317e22c2..a106a6602 100644 --- a/pkg/repository/backup_repo_op_test.go +++ b/pkg/repository/backup_repo_op_test.go @@ -17,7 +17,6 @@ limitations under the License. package repository import ( - "context" "fmt" "github.com/stretchr/testify/assert" @@ -150,7 +149,7 @@ func TestGetBackupRepository(t *testing.T) { }) fakeClient := clientBuilder.Build() - backupRepo, err := GetBackupRepository(context.Background(), fakeClient, velerov1api.DefaultNamespace, tc.backupRepositoryKey, tc.ensureReady) + backupRepo, err := GetBackupRepository(t.Context(), fakeClient, velerov1api.DefaultNamespace, tc.backupRepositoryKey, tc.ensureReady) if backupRepo != nil && tc.expected != nil { backupRepo.ResourceVersion = tc.expected.ResourceVersion diff --git a/pkg/repository/ensurer_test.go b/pkg/repository/ensurer_test.go index 382033f06..4003aa369 100644 --- a/pkg/repository/ensurer_test.go +++ b/pkg/repository/ensurer_test.go @@ -17,7 +17,6 @@ limitations under the License. package repository import ( - "context" "testing" "time" @@ -127,7 +126,7 @@ func TestEnsureRepo(t *testing.T) { ensurer := NewEnsurer(fakeClient, velerotest.NewLogger(), time.Millisecond) - repo, err := ensurer.EnsureRepo(context.Background(), velerov1.DefaultNamespace, test.namespace, test.bsl, test.repositoryType) + repo, err := ensurer.EnsureRepo(t.Context(), velerov1.DefaultNamespace, test.namespace, test.bsl, test.repositoryType) if err != nil { require.EqualError(t, err, test.err) } else { @@ -247,7 +246,7 @@ func TestCreateBackupRepositoryAndWait(t *testing.T) { ensurer := NewEnsurer(fakeClient, velerotest.NewLogger(), time.Millisecond) - repo, err := ensurer.createBackupRepositoryAndWait(context.Background(), velerov1.DefaultNamespace, BackupRepositoryKey{ + repo, err := ensurer.createBackupRepositoryAndWait(t.Context(), velerov1.DefaultNamespace, BackupRepositoryKey{ VolumeNamespace: test.namespace, BackupLocation: test.bsl, RepositoryType: test.repositoryType, diff --git a/pkg/repository/maintenance/maintenance_test.go b/pkg/repository/maintenance/maintenance_test.go index c0e0a8851..33e4afd31 100644 --- a/pkg/repository/maintenance/maintenance_test.go +++ b/pkg/repository/maintenance/maintenance_test.go @@ -122,7 +122,7 @@ func TestDeleteOldJobs(t *testing.T) { // Get the remaining jobs jobList := &batchv1api.JobList{} - err = cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo})) + err = cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo})) require.NoError(t, err) // We expect the number of jobs to be equal to 'keep' @@ -246,13 +246,13 @@ func TestWaitForJobComplete(t *testing.T) { original := job.DeepCopy() job.Status.Succeeded = 1 - err := fakeClient.Status().Patch(context.Background(), job, client.MergeFrom(original)) + err := fakeClient.Status().Patch(t.Context(), job, client.MergeFrom(original)) require.NoError(t, err) }() } // Call the function - _, err := waitForJobComplete(context.Background(), fakeClient, job.Namespace, job.Name, logger) + _, err := waitForJobComplete(t.Context(), fakeClient, job.Namespace, job.Name, logger) // Check if the error matches the expectation if tc.expectError { @@ -387,7 +387,7 @@ func TestGetResultFromJob(t *testing.T) { } func TestGetJobConfig(t *testing.T) { - ctx := 
context.Background() + ctx := t.Context() logger := logrus.New() veleroNamespace := "velero" repoMaintenanceJobConfig := "repo-maintenance-job-config" @@ -562,7 +562,7 @@ func TestGetJobConfig(t *testing.T) { } func TestWaitAllJobsComplete(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + ctx, cancel := context.WithTimeout(t.Context(), time.Second*2) veleroNamespace := "velero" repo := &velerov1api.BackupRepository{ @@ -719,7 +719,7 @@ func TestWaitAllJobsComplete(t *testing.T) { }, { name: "get result error on succeeded job", - ctx: context.TODO(), + ctx: t.Context(), runtimeScheme: scheme, kubeClientObj: []runtime.Object{ jobSucceeded1, @@ -734,7 +734,7 @@ func TestWaitAllJobsComplete(t *testing.T) { }, { name: "get result error on failed job", - ctx: context.TODO(), + ctx: t.Context(), runtimeScheme: scheme, kubeClientObj: []runtime.Object{ jobFailed1, @@ -749,7 +749,7 @@ func TestWaitAllJobsComplete(t *testing.T) { }, { name: "less than limit", - ctx: context.TODO(), + ctx: t.Context(), runtimeScheme: scheme, kubeClientObj: []runtime.Object{ jobFailed1, @@ -772,7 +772,7 @@ func TestWaitAllJobsComplete(t *testing.T) { }, { name: "equal to limit", - ctx: context.TODO(), + ctx: t.Context(), runtimeScheme: scheme, kubeClientObj: []runtime.Object{ jobSucceeded2, @@ -802,7 +802,7 @@ func TestWaitAllJobsComplete(t *testing.T) { }, { name: "more than limit", - ctx: context.TODO(), + ctx: t.Context(), runtimeScheme: scheme, kubeClientObj: []runtime.Object{ jobSucceeded3, @@ -1076,7 +1076,7 @@ func TestBuildJob(t *testing.T) { cli := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objs...).Build() // Call the function to test - job, err := buildJob(cli, context.TODO(), param.BackupRepo, param.BackupLocation.Name, tc.m, *tc.m.PodResources, tc.logLevel, tc.logFormat) + job, err := buildJob(cli, t.Context(), param.BackupRepo, param.BackupLocation.Name, tc.m, *tc.m.PodResources, tc.logLevel, tc.logFormat) // Check the error if tc.expectedError { diff --git a/pkg/repository/provider/unified_repo_test.go b/pkg/repository/provider/unified_repo_test.go index 5f3bdbe4b..9cd49742c 100644 --- a/pkg/repository/provider/unified_repo_test.go +++ b/pkg/repository/provider/unified_repo_test.go @@ -775,7 +775,7 @@ func TestPrepareRepo(t *testing.T) { bsl.Spec.AccessMode = velerov1api.BackupStorageLocationAccessModeReadWrite } - err := urp.PrepareRepo(context.Background(), RepoParam{ + err := urp.PrepareRepo(t.Context(), RepoParam{ BackupLocation: &bsl, BackupRepo: &velerov1api.BackupRepository{}, }) @@ -924,7 +924,7 @@ func TestForget(t *testing.T) { backupRepo.On("Close", mock.Anything).Return(nil) } - err := urp.Forget(context.Background(), "", RepoParam{ + err := urp.Forget(t.Context(), "", RepoParam{ BackupLocation: &velerov1api.BackupStorageLocation{}, BackupRepo: &velerov1api.BackupRepository{}, }) @@ -1075,7 +1075,7 @@ func TestBatchForget(t *testing.T) { backupRepo.On("Close", mock.Anything).Return(nil) } - errs := urp.BatchForget(context.Background(), tc.snapshots, RepoParam{ + errs := urp.BatchForget(t.Context(), tc.snapshots, RepoParam{ BackupLocation: &velerov1api.BackupStorageLocation{}, BackupRepo: &velerov1api.BackupRepository{}, }) @@ -1186,7 +1186,7 @@ func TestInitRepo(t *testing.T) { bsl.Spec.AccessMode = velerov1api.BackupStorageLocationAccessModeReadWrite } - err := urp.InitRepo(context.Background(), RepoParam{ + err := urp.InitRepo(t.Context(), RepoParam{ BackupLocation: &bsl, BackupRepo: &velerov1api.BackupRepository{}, }) @@ -1274,7 
+1274,7 @@ func TestConnectToRepo(t *testing.T) { tc.repoService.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(tc.retFuncInit) } - err := urp.ConnectToRepo(context.Background(), RepoParam{ + err := urp.ConnectToRepo(t.Context(), RepoParam{ BackupLocation: &velerov1api.BackupStorageLocation{}, BackupRepo: &velerov1api.BackupRepository{}, }) @@ -1418,7 +1418,7 @@ func TestBoostRepoConnect(t *testing.T) { backupRepo.On("Close", mock.Anything).Return(nil) } - err := urp.BoostRepoConnect(context.Background(), RepoParam{ + err := urp.BoostRepoConnect(t.Context(), RepoParam{ BackupLocation: &velerov1api.BackupStorageLocation{}, BackupRepo: &velerov1api.BackupRepository{}, }) @@ -1506,7 +1506,7 @@ func TestPruneRepo(t *testing.T) { tc.repoService.On("Maintain", mock.Anything, mock.Anything).Return(tc.retFuncMaintain) } - err := urp.PruneRepo(context.Background(), RepoParam{ + err := urp.PruneRepo(t.Context(), RepoParam{ BackupLocation: &velerov1api.BackupStorageLocation{}, BackupRepo: &velerov1api.BackupRepository{}, }) diff --git a/pkg/repository/udmrepo/kopialib/backend/azure_test.go b/pkg/repository/udmrepo/kopialib/backend/azure_test.go index 910351bf5..b4504986e 100644 --- a/pkg/repository/udmrepo/kopialib/backend/azure_test.go +++ b/pkg/repository/udmrepo/kopialib/backend/azure_test.go @@ -17,7 +17,6 @@ limitations under the License. package backend import ( - "context" "testing" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -43,7 +42,7 @@ func TestAzureSetup(t *testing.T) { UploadBytesPerSecond: 200, } - err := backend.Setup(context.Background(), flags, logger) + err := backend.Setup(t.Context(), flags, logger) require.NoError(t, err) assert.Equal(t, flags, backend.option.Config) assert.Equal(t, limits, backend.option.Limits) diff --git a/pkg/repository/udmrepo/kopialib/backend/common_test.go b/pkg/repository/udmrepo/kopialib/backend/common_test.go index c5c070716..1083bca9a 100644 --- a/pkg/repository/udmrepo/kopialib/backend/common_test.go +++ b/pkg/repository/udmrepo/kopialib/backend/common_test.go @@ -17,7 +17,6 @@ limitations under the License. package backend import ( - "context" "testing" "time" @@ -103,7 +102,7 @@ func TestSetupNewRepositoryOptions(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - ret := SetupNewRepositoryOptions(context.Background(), tc.flags) + ret := SetupNewRepositoryOptions(t.Context(), tc.flags) assert.Equal(t, tc.expected, ret) }) } @@ -193,7 +192,7 @@ func TestSetupConnectOptions(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - ret := SetupConnectOptions(context.Background(), tc.repoOptions) + ret := SetupConnectOptions(t.Context(), tc.repoOptions) assert.Equal(t, tc.expected, ret) }) } diff --git a/pkg/repository/udmrepo/kopialib/backend/file_system_test.go b/pkg/repository/udmrepo/kopialib/backend/file_system_test.go index 145f54b3f..33e608efd 100644 --- a/pkg/repository/udmrepo/kopialib/backend/file_system_test.go +++ b/pkg/repository/udmrepo/kopialib/backend/file_system_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package backend import ( - "context" "testing" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -71,7 +70,7 @@ func TestFSSetup(t *testing.T) { t.Run(tc.name, func(t *testing.T) { fsFlags := FsBackend{} - err := fsFlags.Setup(context.Background(), tc.flags, logger) + err := fsFlags.Setup(t.Context(), tc.flags, logger) if tc.expectedErr == "" { require.NoError(t, err) diff --git a/pkg/repository/udmrepo/kopialib/backend/gcs_test.go b/pkg/repository/udmrepo/kopialib/backend/gcs_test.go index 4a211542c..3398c35c2 100644 --- a/pkg/repository/udmrepo/kopialib/backend/gcs_test.go +++ b/pkg/repository/udmrepo/kopialib/backend/gcs_test.go @@ -17,7 +17,6 @@ limitations under the License. package backend import ( - "context" "testing" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -93,7 +92,7 @@ func TestGcsSetup(t *testing.T) { t.Run(tc.name, func(t *testing.T) { gcsFlags := GCSBackend{} - err := gcsFlags.Setup(context.Background(), tc.flags, logger) + err := gcsFlags.Setup(t.Context(), tc.flags, logger) if tc.expectedErr == "" { require.NoError(t, err) diff --git a/pkg/repository/udmrepo/kopialib/backend/s3_test.go b/pkg/repository/udmrepo/kopialib/backend/s3_test.go index 7b11a56ba..3e1b045c4 100644 --- a/pkg/repository/udmrepo/kopialib/backend/s3_test.go +++ b/pkg/repository/udmrepo/kopialib/backend/s3_test.go @@ -17,7 +17,6 @@ limitations under the License. package backend import ( - "context" "testing" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -122,7 +121,7 @@ func TestS3Setup(t *testing.T) { t.Run(tc.name, func(t *testing.T) { s3Flags := S3Backend{} - err := s3Flags.Setup(context.Background(), tc.flags, logger) + err := s3Flags.Setup(t.Context(), tc.flags, logger) if tc.expectedErr == "" { assert.NoError(t, err) diff --git a/pkg/repository/udmrepo/kopialib/backend/utils_test.go b/pkg/repository/udmrepo/kopialib/backend/utils_test.go index 6f9049f41..24923e447 100644 --- a/pkg/repository/udmrepo/kopialib/backend/utils_test.go +++ b/pkg/repository/udmrepo/kopialib/backend/utils_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package backend import ( - "context" "testing" "github.com/kopia/kopia/repo/logging" @@ -79,7 +78,7 @@ func TestOptionalHaveBool(t *testing.T) { tc.logger.On("Check", mock.Anything, mock.Anything).Run(tc.retFuncCheck).Return(&zapcore.CheckedEntry{}) } - ctx := logging.WithLogger(context.Background(), func(module string) logging.Logger { + ctx := logging.WithLogger(t.Context(), func(module string) logging.Logger { return zap.New(tc.logger).Sugar() }) @@ -144,7 +143,7 @@ func TestOptionalHaveIntWithDefault(t *testing.T) { tc.logger.On("Check", mock.Anything, mock.Anything).Run(tc.retFuncCheck).Return(&zapcore.CheckedEntry{}) } - ctx := logging.WithLogger(context.Background(), func(module string) logging.Logger { + ctx := logging.WithLogger(t.Context(), func(module string) logging.Logger { return zap.New(tc.logger).Sugar() }) diff --git a/pkg/repository/udmrepo/kopialib/lib_repo_test.go b/pkg/repository/udmrepo/kopialib/lib_repo_test.go index 431855856..ba7aaa93c 100644 --- a/pkg/repository/udmrepo/kopialib/lib_repo_test.go +++ b/pkg/repository/udmrepo/kopialib/lib_repo_test.go @@ -132,7 +132,7 @@ func TestOpen(t *testing.T) { tc.returnRepo.On("Close", mock.Anything).Return(nil) } - repo, err := service.Open(context.Background(), tc.repoOptions) + repo, err := service.Open(t.Context(), tc.repoOptions) if repo != nil { require.Equal(t, tc.expected.description, repo.(*kopiaRepository).description) @@ -226,7 +226,7 @@ func TestMaintain(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { logger := velerotest.NewLogger() - ctx := context.Background() + ctx := t.Context() service := kopiaRepoService{ logger: logger, @@ -392,7 +392,7 @@ func TestWriteInitParameters(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { logger := velerotest.NewLogger() - ctx := context.Background() + ctx := t.Context() if tc.repoOpen != nil { kopiaRepoOpen = tc.repoOpen @@ -414,7 +414,7 @@ func TestWriteInitParameters(t *testing.T) { tc.returnRepoWriter.On("ReplaceManifests", mock.Anything, mock.Anything, mock.Anything).Return(manifest.ID(""), tc.replaceManifestError) } if tc.expectedReplaceManifestsParams != nil { - tc.returnRepoWriter.On("ReplaceManifests", mock.AnythingOfType("context.backgroundCtx"), mock.AnythingOfType("map[string]string"), mock.AnythingOfType("*maintenance.Params")).Return(manifest.ID(""), nil) + tc.returnRepoWriter.On("ReplaceManifests", mock.Anything, mock.AnythingOfType("map[string]string"), mock.AnythingOfType("*maintenance.Params")).Return(manifest.ID(""), nil) tc.returnRepoWriter.On("Flush", mock.Anything).Return(nil) } } @@ -521,7 +521,7 @@ func TestOpenObject(t *testing.T) { kr.rawRepo = tc.rawRepo } - _, err := kr.OpenObject(context.Background(), udmrepo.ID(tc.objectID)) + _, err := kr.OpenObject(t.Context(), udmrepo.ID(tc.objectID)) if tc.expectedErr == "" { assert.NoError(t, err) @@ -563,7 +563,7 @@ func TestGetManifest(t *testing.T) { kr.rawRepo = tc.rawRepo } - err := kr.GetManifest(context.Background(), udmrepo.ID(""), &udmrepo.RepoManifest{}) + err := kr.GetManifest(t.Context(), udmrepo.ID(""), &udmrepo.RepoManifest{}) if tc.expectedErr == "" { assert.NoError(t, err) @@ -602,7 +602,7 @@ func TestFindManifests(t *testing.T) { kr.rawRepo = tc.rawRepo } - _, err := kr.FindManifests(context.Background(), udmrepo.ManifestFilter{}) + _, err := kr.FindManifests(t.Context(), udmrepo.ManifestFilter{}) if tc.expectedErr == "" { assert.NoError(t, err) @@ -661,7 +661,7 @@ func TestClose(t *testing.T) { kr.rawWriter = tc.rawWriter } - 
err := kr.Close(context.Background()) + err := kr.Close(t.Context()) if tc.expectedErr == "" { assert.NoError(t, err) @@ -700,7 +700,7 @@ func TestPutManifest(t *testing.T) { kr.rawWriter = tc.rawWriter } - _, err := kr.PutManifest(context.Background(), udmrepo.RepoManifest{ + _, err := kr.PutManifest(t.Context(), udmrepo.RepoManifest{ Metadata: &udmrepo.ManifestEntryMetadata{}, }) @@ -741,7 +741,7 @@ func TestDeleteManifest(t *testing.T) { kr.rawWriter = tc.rawWriter } - err := kr.DeleteManifest(context.Background(), udmrepo.ID("")) + err := kr.DeleteManifest(t.Context(), udmrepo.ID("")) if tc.expectedErr == "" { assert.NoError(t, err) @@ -780,7 +780,7 @@ func TestFlush(t *testing.T) { kr.rawWriter = tc.rawWriter } - err := kr.Flush(context.Background()) + err := kr.Flush(t.Context()) if tc.expectedErr == "" { assert.NoError(t, err) @@ -852,7 +852,7 @@ func TestConcatenateObjects(t *testing.T) { kr.rawWriter = tc.rawWriter } - _, err := kr.ConcatenateObjects(context.Background(), tc.objectIDs) + _, err := kr.ConcatenateObjects(t.Context(), tc.objectIDs) if tc.expectedErr == "" { assert.NoError(t, err) @@ -895,7 +895,7 @@ func TestNewObjectWriter(t *testing.T) { kr.rawWriter = tc.rawWriter } - ret := kr.NewObjectWriter(context.Background(), udmrepo.ObjectWriteOptions{}) + ret := kr.NewObjectWriter(t.Context(), udmrepo.ObjectWriteOptions{}) assert.Equal(t, tc.expectedRet, ret) }) diff --git a/pkg/repository/udmrepo/kopialib/repo_init_test.go b/pkg/repository/udmrepo/kopialib/repo_init_test.go index a21546607..481ca4a98 100644 --- a/pkg/repository/udmrepo/kopialib/repo_init_test.go +++ b/pkg/repository/udmrepo/kopialib/repo_init_test.go @@ -17,7 +17,6 @@ limitations under the License. package kopialib import ( - "context" "testing" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -144,7 +143,7 @@ func TestCreateBackupRepo(t *testing.T) { tc.returnStore.On("GetBlob", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.getBlobErr) } - err := CreateBackupRepo(context.Background(), tc.repoOptions, logger) + err := CreateBackupRepo(t.Context(), tc.repoOptions, logger) if tc.expectedErr == "" { assert.NoError(t, err) @@ -229,7 +228,7 @@ func TestConnectBackupRepo(t *testing.T) { tc.returnStore.On("GetBlob", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.getBlobErr) } - err := ConnectBackupRepo(context.Background(), tc.repoOptions, logger) + err := ConnectBackupRepo(t.Context(), tc.repoOptions, logger) if tc.expectedErr == "" { assert.NoError(t, err) diff --git a/pkg/restore/actions/change_image_name_action_test.go b/pkg/restore/actions/change_image_name_action_test.go index 888d0fef3..530c8453d 100644 --- a/pkg/restore/actions/change_image_name_action_test.go +++ b/pkg/restore/actions/change_image_name_action_test.go @@ -17,7 +17,6 @@ limitations under the License. 
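One hunk in `pkg/repository/udmrepo/kopialib/lib_repo_test.go` above does more than swap the context constructor: the `ReplaceManifests` expectation drops `mock.AnythingOfType("context.backgroundCtx")` in favor of `mock.Anything`. Testify's type matcher compares the concrete type name, and `t.Context()` returns a cancellable context rather than the background type, so the old matcher would no longer match. Matching loosely, or by behavior with `mock.MatchedBy`, keeps the expectation stable. A small sketch with an invented interface and mock (not part of the patch):

```go
package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
)

// fakeWriter is an invented mock used only to illustrate the matcher change.
type fakeWriter struct {
	mock.Mock
}

func (w *fakeWriter) ReplaceManifests(ctx context.Context, labels map[string]string) error {
	args := w.Called(ctx, labels)
	return args.Error(0)
}

func TestContextMatcher(t *testing.T) {
	w := new(fakeWriter)

	// Matching by concrete type name ("context.backgroundCtx") breaks once the
	// caller switches to t.Context(); match loosely or by behavior instead.
	w.On("ReplaceManifests",
		mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }),
		mock.AnythingOfType("map[string]string"),
	).Return(nil)

	if err := w.ReplaceManifests(t.Context(), map[string]string{"k": "v"}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	w.AssertExpectations(t)
}
```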
package actions import ( - "context" "testing" "github.com/sirupsen/logrus" @@ -148,7 +147,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) { // set up test data if tc.configMap != nil { - _, err := clientset.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(context.TODO(), tc.configMap, metav1.CreateOptions{}) + _, err := clientset.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(t.Context(), tc.configMap, metav1.CreateOptions{}) require.NoError(t, err) } diff --git a/pkg/restore/actions/change_storageclass_action_test.go b/pkg/restore/actions/change_storageclass_action_test.go index 2ce3ccc51..13bbcdcc4 100644 --- a/pkg/restore/actions/change_storageclass_action_test.go +++ b/pkg/restore/actions/change_storageclass_action_test.go @@ -17,7 +17,6 @@ limitations under the License. package actions import ( - "context" "testing" "github.com/pkg/errors" @@ -226,18 +225,18 @@ func TestChangeStorageClassActionExecute(t *testing.T) { // set up test data if tc.configMap != nil { - _, err := clientset.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(context.TODO(), tc.configMap, metav1.CreateOptions{}) + _, err := clientset.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(t.Context(), tc.configMap, metav1.CreateOptions{}) require.NoError(t, err) } if tc.storageClass != nil { - _, err := clientset.StorageV1().StorageClasses().Create(context.TODO(), tc.storageClass, metav1.CreateOptions{}) + _, err := clientset.StorageV1().StorageClasses().Create(t.Context(), tc.storageClass, metav1.CreateOptions{}) require.NoError(t, err) } if tc.storageClassSlice != nil { for _, storageClass := range tc.storageClassSlice { - _, err := clientset.StorageV1().StorageClasses().Create(context.TODO(), storageClass, metav1.CreateOptions{}) + _, err := clientset.StorageV1().StorageClasses().Create(t.Context(), storageClass, metav1.CreateOptions{}) require.NoError(t, err) } } diff --git a/pkg/restore/actions/csi/pvc_action_test.go b/pkg/restore/actions/csi/pvc_action_test.go index 901f4c18c..ad9271562 100644 --- a/pkg/restore/actions/csi/pvc_action_test.go +++ b/pkg/restore/actions/csi/pvc_action_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package csi import ( - "context" "fmt" "testing" "time" @@ -348,7 +347,7 @@ func TestProgress(t *testing.T) { crClient: velerotest.NewFakeControllerRuntimeClient(t), } if tc.dataDownload != nil { - err := pvcRIA.crClient.Create(context.Background(), tc.dataDownload) + err := pvcRIA.crClient.Create(t.Context(), tc.dataDownload) require.NoError(t, err) } @@ -440,7 +439,7 @@ func TestCancel(t *testing.T) { crClient: velerotest.NewFakeControllerRuntimeClient(t), } if tc.dataDownload != nil { - err := pvcRIA.crClient.Create(context.Background(), tc.dataDownload) + err := pvcRIA.crClient.Create(t.Context(), tc.dataDownload) require.NoError(t, err) } @@ -452,7 +451,7 @@ func TestCancel(t *testing.T) { require.NoError(t, err) resultDataDownload := new(velerov2alpha1.DataDownload) - err = pvcRIA.crClient.Get(context.Background(), crclient.ObjectKey{Namespace: tc.dataDownload.Namespace, Name: tc.dataDownload.Name}, resultDataDownload) + err = pvcRIA.crClient.Get(t.Context(), crclient.ObjectKey{Namespace: tc.dataDownload.Namespace, Name: tc.dataDownload.Name}, resultDataDownload) require.NoError(t, err) require.True(t, cmp.Equal(tc.expectedDataDownload, *resultDataDownload, cmpopts.IgnoreFields(velerov2alpha1.DataDownload{}, "ResourceVersion", "Name"))) @@ -622,7 +621,7 @@ func TestExecute(t *testing.T) { } if tc.expectedDataDownload != nil { dataDownloadList := new(velerov2alpha1.DataDownloadList) - err := pvcRIA.crClient.List(context.Background(), dataDownloadList, &crclient.ListOptions{ + err := pvcRIA.crClient.List(t.Context(), dataDownloadList, &crclient.ListOptions{ LabelSelector: labels.SelectorFromSet(tc.expectedDataDownload.Labels), }) require.NoError(t, err) diff --git a/pkg/restore/actions/csi/volumesnapshot_action_test.go b/pkg/restore/actions/csi/volumesnapshot_action_test.go index 27001858a..bec4f9582 100644 --- a/pkg/restore/actions/csi/volumesnapshot_action_test.go +++ b/pkg/restore/actions/csi/volumesnapshot_action_test.go @@ -17,7 +17,6 @@ limitations under the License. package csi import ( - "context" "fmt" "testing" @@ -155,7 +154,7 @@ func TestVSExecute(t *testing.T) { if newNS, ok := test.restore.Spec.NamespaceMapping[test.vs.Namespace]; ok { test.vs.SetNamespace(newNS) } - require.NoError(t, p.crClient.Create(context.TODO(), test.vs)) + require.NoError(t, p.crClient.Create(t.Context(), test.vs)) } } diff --git a/pkg/restore/actions/csi/volumesnapshotcontent_action_test.go b/pkg/restore/actions/csi/volumesnapshotcontent_action_test.go index 9ab2aa988..85fd1a092 100644 --- a/pkg/restore/actions/csi/volumesnapshotcontent_action_test.go +++ b/pkg/restore/actions/csi/volumesnapshotcontent_action_test.go @@ -17,7 +17,6 @@ limitations under the License. package csi import ( - "context" "fmt" "testing" @@ -121,7 +120,7 @@ func TestVSCExecute(t *testing.T) { test.item = &unstructured.Unstructured{Object: vsMap} if test.createVSC { - require.NoError(t, action.client.Create(context.TODO(), test.vsc)) + require.NoError(t, action.client.Create(t.Context(), test.vsc)) } } diff --git a/pkg/restore/actions/dataupload_retrieve_action_test.go b/pkg/restore/actions/dataupload_retrieve_action_test.go index b110a4ad4..ef5e6dad6 100644 --- a/pkg/restore/actions/dataupload_retrieve_action_test.go +++ b/pkg/restore/actions/dataupload_retrieve_action_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package actions import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -111,7 +110,7 @@ func TestDataUploadRetrieveActionExectue(t *testing.T) { if tc.expectedDataUploadResult != nil { var cmList corev1api.ConfigMapList - err := fakeClient.List(context.Background(), &cmList, &client.ListOptions{ + err := fakeClient.List(t.Context(), &cmList, &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{ velerov1.RestoreUIDLabel: "testingUID", velerov1.PVCNamespaceNameLabel: label.GetValidName(tc.dataUpload.Spec.SourceNamespace + "." + tc.dataUpload.Spec.SourcePVC), diff --git a/pkg/restore/actions/pod_volume_restore_action_test.go b/pkg/restore/actions/pod_volume_restore_action_test.go index 9a4b3e028..43df2e35a 100644 --- a/pkg/restore/actions/pod_volume_restore_action_test.go +++ b/pkg/restore/actions/pod_volume_restore_action_test.go @@ -33,8 +33,6 @@ import ( "k8s.io/client-go/kubernetes/fake" crfake "sigs.k8s.io/controller-runtime/pkg/client/fake" - "context" - velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/buildinfo" @@ -690,7 +688,7 @@ func TestPodVolumeRestoreActionExecuteWithFileSystemShouldAddWaitInitContainer(t // Create the PodVolumeBackups in the fake client for _, pvb := range tc.podVolumeBackups { - require.NoError(t, crClient.Create(context.Background(), pvb)) + require.NoError(t, crClient.Create(t.Context(), pvb)) } // Create a fake clientset diff --git a/pkg/restore/actions/pvc_action_test.go b/pkg/restore/actions/pvc_action_test.go index 429196575..f00ec9264 100644 --- a/pkg/restore/actions/pvc_action_test.go +++ b/pkg/restore/actions/pvc_action_test.go @@ -18,7 +18,6 @@ package actions import ( "bytes" - "context" "fmt" "testing" @@ -130,16 +129,16 @@ func TestPVCActionExecute(t *testing.T) { // set up test data if tc.configMap != nil { - _, err := clientset.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(context.TODO(), tc.configMap, metav1.CreateOptions{}) + _, err := clientset.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(t.Context(), tc.configMap, metav1.CreateOptions{}) require.NoError(t, err) } if tc.node != nil { - _, err := clientset.CoreV1().Nodes().Create(context.TODO(), tc.node, metav1.CreateOptions{}) + _, err := clientset.CoreV1().Nodes().Create(t.Context(), tc.node, metav1.CreateOptions{}) require.NoError(t, err) } if tc.newNode != nil { - _, err := clientset.CoreV1().Nodes().Create(context.TODO(), tc.newNode, metav1.CreateOptions{}) + _, err := clientset.CoreV1().Nodes().Create(t.Context(), tc.newNode, metav1.CreateOptions{}) require.NoError(t, err) } unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc) diff --git a/pkg/restore/pv_restorer_test.go b/pkg/restore/pv_restorer_test.go index 8dae50fd1..09c6dd0ad 100644 --- a/pkg/restore/pv_restorer_test.go +++ b/pkg/restore/pv_restorer_test.go @@ -17,7 +17,6 @@ limitations under the License. 
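The change_image_repository, change_storageclass, and pvc_action tests above seed a typed client-go fake clientset rather than the controller-runtime client; the t.Context() substitution is the same. A brief hypothetical sketch (the ConfigMap name and data are made up for illustration):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestSeedConfigMap(t *testing.T) {
	clientset := fake.NewSimpleClientset()

	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "change-storage-class-config", Namespace: "velero"},
		Data:       map[string]string{"old-sc": "new-sc"},
	}

	// Seed the fake API with test data, tied to the test's lifetime.
	_, err := clientset.CoreV1().ConfigMaps(cm.Namespace).Create(t.Context(), cm, metav1.CreateOptions{})
	require.NoError(t, err)

	got, err := clientset.CoreV1().ConfigMaps("velero").Get(t.Context(), cm.Name, metav1.GetOptions{})
	require.NoError(t, err)
	require.Equal(t, "new-sc", got.Data["old-sc"])
}
```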
package restore import ( - "context" "testing" "github.com/sirupsen/logrus" @@ -128,7 +127,7 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { } for _, loc := range tc.locations { - require.NoError(t, r.kbclient.Create(context.TODO(), loc)) + require.NoError(t, r.kbclient.Create(t.Context(), loc)) } res, err := r.executePVAction(tc.obj) @@ -192,7 +191,7 @@ func TestExecutePVAction_SnapshotRestores(t *testing.T) { ) for _, loc := range tc.locations { - require.NoError(t, fakeClient.Create(context.Background(), loc)) + require.NoError(t, fakeClient.Create(t.Context(), loc)) } r := &pvRestorer{ diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 93e45f216..c8a9ab40a 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -1659,7 +1659,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if shouldRestoreStatus && statusFieldErr != nil { err := fmt.Errorf("could not get status to be restored %s: %v", kube.NamespaceAndName(obj), statusFieldErr) - restoreLogger.Errorf(err.Error()) + restoreLogger.Error(err.Error()) errs.Add(namespace, err) return warnings, errs, itemExists } diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index 360fb8409..54523731f 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -2308,7 +2308,7 @@ func TestShouldRestore(t *testing.T) { } for _, ns := range tc.namespaces { - _, err := ctx.namespaceClient.Create(context.TODO(), ns, metav1.CreateOptions{}) + _, err := ctx.namespaceClient.Create(t.Context(), ns, metav1.CreateOptions{}) require.NoError(t, err) } @@ -2345,7 +2345,7 @@ func assertRestoredItems(t *testing.T, h *harness, want []*test.APIResource) { client = resourceClient } - res, err := client.Get(context.TODO(), item.GetName(), metav1.GetOptions{}) + res, err := client.Get(t.Context(), item.GetName(), metav1.GetOptions{}) if !assert.NoError(t, err) { //nolint:testifylint // require is inappropriate continue } @@ -3206,11 +3206,11 @@ func TestRestorePersistentVolumes(t *testing.T) { // set up the VolumeSnapshotLocation client and add test data to it for _, vsl := range tc.volumeSnapshotLocations { - require.NoError(t, h.restorer.kbClient.Create(context.Background(), vsl)) + require.NoError(t, h.restorer.kbClient.Create(t.Context(), vsl)) } if tc.dataUploadResult != nil { - require.NoError(t, h.restorer.kbClient.Create(context.TODO(), tc.dataUploadResult)) + require.NoError(t, h.restorer.kbClient.Create(t.Context(), tc.dataUploadResult)) } for _, r := range tc.apiResources { @@ -3652,7 +3652,7 @@ func assertAPIContents(t *testing.T, h *harness, want map[*test.APIResource][]st t.Helper() for r, want := range want { - res, err := h.DynamicClient.Resource(r.GVR()).List(context.TODO(), metav1.ListOptions{}) + res, err := h.DynamicClient.Resource(r.GVR()).List(t.Context(), metav1.ListOptions{}) require.NoError(t, err) if err != nil { continue @@ -3743,9 +3743,9 @@ func (h *harness) AddItems(t *testing.T, resource *test.APIResource) { unstructured.RemoveNestedField(unstructuredObj.Object, "status") if resource.Namespaced { - _, err = h.DynamicClient.Resource(resource.GVR()).Namespace(item.GetNamespace()).Create(context.TODO(), unstructuredObj, metav1.CreateOptions{}) + _, err = h.DynamicClient.Resource(resource.GVR()).Namespace(item.GetNamespace()).Create(t.Context(), unstructuredObj, metav1.CreateOptions{}) } else { - _, err = h.DynamicClient.Resource(resource.GVR()).Create(context.TODO(), unstructuredObj, metav1.CreateOptions{}) + _, err = 
h.DynamicClient.Resource(resource.GVR()).Create(t.Context(), unstructuredObj, metav1.CreateOptions{}) } require.NoError(t, err) } @@ -4134,7 +4134,7 @@ func TestHasSnapshotDataUpload(t *testing.T) { } if tc.duResult != nil { - require.NoError(t, ctx.kbClient.Create(context.TODO(), tc.duResult)) + require.NoError(t, ctx.kbClient.Create(t.Context(), tc.duResult)) } t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/uploader/kopia/shim_test.go b/pkg/uploader/kopia/shim_test.go index 535ab4cfb..69d1605a6 100644 --- a/pkg/uploader/kopia/shim_test.go +++ b/pkg/uploader/kopia/shim_test.go @@ -35,7 +35,7 @@ import ( ) func TestShimRepo(t *testing.T) { - ctx := context.Background() + ctx := t.Context() backupRepo := &mocks.BackupRepo{} backupRepo.On("Time").Return(time.Time{}) shim := NewShimRepo(backupRepo) @@ -107,7 +107,7 @@ func TestOpenObject(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() reader, err := NewShimRepo(tc.backupRepo).OpenObject(ctx, object.ID{}) if tc.isOpenObjectError { require.ErrorContains(t, err, "failed to open object") @@ -149,7 +149,7 @@ func TestFindManifests(t *testing.T) { } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() _, err := NewShimRepo(tc.backupRepo).FindManifests(ctx, map[string]string{}) if tc.isGetManifestError { require.ErrorContains(t, err, "failed") @@ -274,7 +274,7 @@ func TestReplaceManifests(t *testing.T) { } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() id, err := NewShimRepo(tc.backupRepo).ReplaceManifests(ctx, map[string]string{}, nil) if tc.expectedError != "" { @@ -337,7 +337,7 @@ func TestConcatenateObjects(t *testing.T) { } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() _, err := NewShimRepo(tc.backupRepo).ConcatenateObjects(ctx, tc.objectIDs, repo.ConcatenateOptions{}) if tc.expectedError != "" { diff --git a/pkg/uploader/kopia/snapshot_test.go b/pkg/uploader/kopia/snapshot_test.go index 959196004..2423ba590 100644 --- a/pkg/uploader/kopia/snapshot_test.go +++ b/pkg/uploader/kopia/snapshot_test.go @@ -80,7 +80,7 @@ func MockFuncs(s *snapshotMockes, args []mockArgs) { } func TestSnapshotSource(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() sourceInfo := snapshot.SourceInfo{ UserName: "testUserName", Host: "testHost", @@ -557,7 +557,7 @@ func TestFindPreviousSnapshotManifest(t *testing.T) { t.Run(tc.name, func(t *testing.T) { var repo repo.Repository listSnapshotsFunc = tc.listSnapshotsFunc - snapshots, err := findPreviousSnapshotManifest(context.Background(), repo, sourceInfo, snapshotTags, &noLaterThan, logrus.New()) + snapshots, err := findPreviousSnapshotManifest(t.Context(), repo, sourceInfo, snapshotTags, &noLaterThan, logrus.New()) // Check if the returned error matches the expected error if tc.expectedError != nil { @@ -648,9 +648,9 @@ func TestBackup(t *testing.T) { var snapshotInfo *uploader.SnapshotInfo var err error if tc.isEmptyUploader { - snapshotInfo, isSnapshotEmpty, err = Backup(context.Background(), nil, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, map[string]string{}, tc.tags, &logrus.Logger{}) + snapshotInfo, isSnapshotEmpty, err = Backup(t.Context(), nil, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, map[string]string{}, tc.tags, &logrus.Logger{}) } else { - 
snapshotInfo, isSnapshotEmpty, err = Backup(context.Background(), s.uploderMock, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, map[string]string{}, tc.tags, &logrus.Logger{}) + snapshotInfo, isSnapshotEmpty, err = Backup(t.Context(), s.uploderMock, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, map[string]string{}, tc.tags, &logrus.Logger{}) } // Check if the returned error matches the expected error if tc.expectedError != nil { @@ -789,7 +789,7 @@ func TestRestore(t *testing.T) { repoWriterMock.On("OpenObject", mock.Anything, mock.Anything).Return(em, nil) progress := new(Progress) - bytesRestored, fileCount, err := Restore(context.Background(), repoWriterMock, progress, tc.snapshotID, tc.dest, tc.volMode, map[string]string{}, logrus.New(), nil) + bytesRestored, fileCount, err := Restore(t.Context(), repoWriterMock, progress, tc.snapshotID, tc.dest, tc.volMode, map[string]string{}, logrus.New(), nil) // Check if the returned error matches the expected error if tc.expectedError != nil { diff --git a/pkg/uploader/provider/kopia_test.go b/pkg/uploader/provider/kopia_test.go index b677b98c0..97f9eb98a 100644 --- a/pkg/uploader/provider/kopia_test.go +++ b/pkg/uploader/provider/kopia_test.go @@ -69,7 +69,7 @@ func TestRunBackup(t *testing.T) { var kp kopiaProvider kp.log = logrus.New() kp.bkRepo = mockBRepo - updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} + updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: kp.log, Ctx: t.Context(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} testCases := []struct { name string @@ -106,7 +106,7 @@ func TestRunBackup(t *testing.T) { tc.volMode = uploader.PersistentVolumeFilesystem } BackupFunc = tc.hookBackupFunc - _, _, _, err := kp.RunBackup(context.Background(), "var", "", nil, false, "", tc.volMode, map[string]string{}, &updater) + _, _, _, err := kp.RunBackup(t.Context(), "var", "", nil, false, "", tc.volMode, map[string]string{}, &updater) if tc.notError { assert.NoError(t, err) } else { @@ -119,7 +119,7 @@ func TestRunBackup(t *testing.T) { func TestRunRestore(t *testing.T) { var kp kopiaProvider kp.log = logrus.New() - updater := FakeRestoreProgressUpdater{PodVolumeRestore: &velerov1api.PodVolumeRestore{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} + updater := FakeRestoreProgressUpdater{PodVolumeRestore: &velerov1api.PodVolumeRestore{}, Log: kp.log, Ctx: t.Context(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} testCases := []struct { name string @@ -134,13 +134,6 @@ func TestRunRestore(t *testing.T) { }, notError: true, }, - { - name: "failed to restore", - hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { - return 0, 0, errors.New("failed to restore") - }, - notError: false, - }, { name: "normal block mode restore", hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { @@ 
-149,6 +142,13 @@ func TestRunRestore(t *testing.T) { volMode: uploader.PersistentVolumeBlock, notError: true, }, + { + name: "failed to restore", + hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { + return 0, 0, errors.New("failed to restore") + }, + notError: false, + }, } for _, tc := range testCases { @@ -157,7 +157,7 @@ func TestRunRestore(t *testing.T) { tc.volMode = uploader.PersistentVolumeFilesystem } RestoreFunc = tc.hookRestoreFunc - _, err := kp.RunRestore(context.Background(), "", "/var", tc.volMode, map[string]string{}, &updater) + _, err := kp.RunRestore(t.Context(), "", "/var", tc.volMode, map[string]string{}, &updater) if tc.notError { assert.NoError(t, err) } else { @@ -209,7 +209,7 @@ func TestCheckContext(t *testing.T) { }() } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() go func() { @@ -311,7 +311,7 @@ func (m *MockRepoSvc) Open(ctx context.Context, opts udmrepo.RepoOptions) (udmre func TestNewKopiaUploaderProvider(t *testing.T) { requestorType := "testRequestor" - ctx := context.Background() + ctx := t.Context() backupRepo := repository.NewBackupRepository(velerov1api.DefaultNamespace, repository.BackupRepositoryKey{VolumeNamespace: "fake-volume-ns-02", BackupLocation: "fake-bsl-02", RepositoryType: "fake-repository-type-02"}) mockLog := logrus.New() @@ -332,7 +332,7 @@ func TestNewKopiaUploaderProvider(t *testing.T) { mockBackupRepoService: func() udmrepo.BackupRepoService { backupRepoService := &udmrepomocks.BackupRepoService{} var backupRepo udmrepo.BackupRepo - backupRepoService.On("Open", context.Background(), mock.Anything).Return(backupRepo, nil) + backupRepoService.On("Open", t.Context(), mock.Anything).Return(backupRepo, nil) return backupRepoService }(), expectedError: "", @@ -347,7 +347,7 @@ func TestNewKopiaUploaderProvider(t *testing.T) { mockBackupRepoService: func() udmrepo.BackupRepoService { backupRepoService := &udmrepomocks.BackupRepoService{} var backupRepo udmrepo.BackupRepo - backupRepoService.On("Open", context.Background(), mock.Anything).Return(backupRepo, nil) + backupRepoService.On("Open", t.Context(), mock.Anything).Return(backupRepo, nil) return backupRepoService }(), expectedError: "error to get repo options", @@ -362,7 +362,7 @@ func TestNewKopiaUploaderProvider(t *testing.T) { mockBackupRepoService: func() udmrepo.BackupRepoService { backupRepoService := &udmrepomocks.BackupRepoService{} var backupRepo udmrepo.BackupRepo - backupRepoService.On("Open", context.Background(), mock.Anything).Return(backupRepo, errors.New("failed to init repository")) + backupRepoService.On("Open", t.Context(), mock.Anything).Return(backupRepo, errors.New("failed to init repository")) return backupRepoService }(), expectedError: "Failed to find kopia repository", diff --git a/pkg/uploader/provider/provider_test.go b/pkg/uploader/provider/provider_test.go index ffe011404..199091e32 100644 --- a/pkg/uploader/provider/provider_test.go +++ b/pkg/uploader/provider/provider_test.go @@ -17,7 +17,6 @@ limitations under the License. 
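One nuance in the TestNewKopiaUploaderProvider hunks above: testify matches .On(...) arguments by equality, so an expectation pinned to the exact t.Context() value only matches if the code under test forwards that context unchanged; a child derived via WithCancel or WithTimeout would not match. Where that is uncertain, mock.Anything or mock.MatchedBy is the looser option. A hypothetical sketch, with mockOpener standing in for (not taken from) Velero's BackupRepoService mock:

```go
package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// mockOpener is a hypothetical testify mock with a context-taking method.
type mockOpener struct{ mock.Mock }

func (m *mockOpener) Open(ctx context.Context, repo string) error {
	args := m.Called(ctx, repo)
	return args.Error(0)
}

func TestOpenExpectation(t *testing.T) {
	m := new(mockOpener)

	// MatchedBy accepts any context, including children derived from
	// t.Context(); an expectation pinned to the exact t.Context() value
	// would not match such derived contexts.
	m.On("Open",
		mock.MatchedBy(func(context.Context) bool { return true }),
		"fake-repo",
	).Return(nil)

	ctx, cancel := context.WithCancel(t.Context())
	defer cancel()
	require.NoError(t, m.Open(ctx, "fake-repo"))
	m.AssertExpectations(t)
}
```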
package provider import ( - "context" "testing" "github.com/sirupsen/logrus" @@ -42,7 +41,7 @@ type NewUploaderProviderTestCase struct { func TestNewUploaderProvider(t *testing.T) { // Mock objects or dependencies - ctx := context.Background() + ctx := t.Context() client := fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build() repoIdentifier := "repoIdentifier" bsl := &velerov1api.BackupStorageLocation{} diff --git a/pkg/uploader/provider/restic_test.go b/pkg/uploader/provider/restic_test.go index ee3e27014..3f450f554 100644 --- a/pkg/uploader/provider/restic_test.go +++ b/pkg/uploader/provider/restic_test.go @@ -17,7 +17,6 @@ limitations under the License. package provider import ( - "context" "errors" "os" "reflect" @@ -149,10 +148,10 @@ func TestResticRunBackup(t *testing.T) { tc.volMode = uploader.PersistentVolumeFilesystem } if !tc.nilUpdater { - updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} - _, _, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, map[string]string{}, &updater) + updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: t.Context(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} + _, _, _, err = tc.rp.RunBackup(t.Context(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, map[string]string{}, &updater) } else { - _, _, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, map[string]string{}, nil) + _, _, _, err = tc.rp.RunBackup(t.Context(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, map[string]string{}, nil) } tc.rp.log.Infof("test name %v error %v", tc.name, err) @@ -222,10 +221,10 @@ func TestResticRunRestore(t *testing.T) { } var err error if !tc.nilUpdater { - updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} - _, err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, map[string]string{}, &updater) + updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: t.Context(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} + _, err = tc.rp.RunRestore(t.Context(), "", "var", tc.volMode, map[string]string{}, &updater) } else { - _, err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, map[string]string{}, nil) + _, err = tc.rp.RunRestore(t.Context(), "", "var", tc.volMode, map[string]string{}, nil) } tc.rp.log.Infof("test name %v error %v", tc.name, err) @@ -253,7 +252,7 @@ func TestClose(t *testing.T) { caCertFile: caCertFile.Name(), } // Test deleting an existing credentials file - err = rp.Close(context.Background()) + err = rp.Close(t.Context()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -275,7 +274,7 @@ func TestClose(t *testing.T) { credentialsFile: "", caCertFile: "", } - err = rp.Close(context.Background()) + err = rp.Close(t.Context()) // Test deleting an existing caCert file if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/util/csi/volume_snapshot_test.go b/pkg/util/csi/volume_snapshot_test.go index 999ebf3f0..d73e0f910 100644 --- a/pkg/util/csi/volume_snapshot_test.go +++ 
b/pkg/util/csi/volume_snapshot_test.go @@ -17,7 +17,6 @@ limitations under the License. package csi import ( - "context" "errors" "testing" "time" @@ -198,7 +197,7 @@ func TestWaitVolumeSnapshotReady(t *testing.T) { t.Run(test.name, func(t *testing.T) { fakeClient := snapshotFake.NewSimpleClientset(test.clientObj...) - vs, err := WaitVolumeSnapshotReady(context.Background(), fakeClient.SnapshotV1(), test.vsName, test.namespace, time.Millisecond, velerotest.NewLogger()) + vs, err := WaitVolumeSnapshotReady(t.Context(), fakeClient.SnapshotV1(), test.vsName, test.namespace, time.Millisecond, velerotest.NewLogger()) if err != nil { require.EqualError(t, err, test.err) } else { @@ -390,7 +389,7 @@ func TestEnsureDeleteVS(t *testing.T) { fakeSnapshotClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) } - err := EnsureDeleteVS(context.Background(), fakeSnapshotClient.SnapshotV1(), test.vsName, test.namespace, time.Millisecond) + err := EnsureDeleteVS(t.Context(), fakeSnapshotClient.SnapshotV1(), test.vsName, test.namespace, time.Millisecond) if err != nil { assert.EqualError(t, err, test.err) } else { @@ -500,7 +499,7 @@ func TestEnsureDeleteVSC(t *testing.T) { fakeSnapshotClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) } - err := EnsureDeleteVSC(context.Background(), fakeSnapshotClient.SnapshotV1(), test.vscName, time.Millisecond) + err := EnsureDeleteVSC(t.Context(), fakeSnapshotClient.SnapshotV1(), test.vscName, time.Millisecond) if test.err != "" { assert.EqualError(t, err, test.err) } else { @@ -554,7 +553,7 @@ func TestDeleteVolumeSnapshotContentIfAny(t *testing.T) { logMessage := "" - DeleteVolumeSnapshotContentIfAny(context.Background(), fakeSnapshotClient.SnapshotV1(), test.vscName, velerotest.NewSingleLogger(&logMessage)) + DeleteVolumeSnapshotContentIfAny(t.Context(), fakeSnapshotClient.SnapshotV1(), test.vscName, velerotest.NewSingleLogger(&logMessage)) if len(test.logMessage) > 0 { assert.Contains(t, logMessage, test.logMessage) @@ -618,7 +617,7 @@ func TestDeleteVolumeSnapshotIfAny(t *testing.T) { logMessage := "" - DeleteVolumeSnapshotIfAny(context.Background(), fakeSnapshotClient.SnapshotV1(), test.vsName, test.vsNamespace, velerotest.NewSingleLogger(&logMessage)) + DeleteVolumeSnapshotIfAny(t.Context(), fakeSnapshotClient.SnapshotV1(), test.vsName, test.vsNamespace, velerotest.NewSingleLogger(&logMessage)) if len(test.logMessage) > 0 { assert.Contains(t, logMessage, test.logMessage) @@ -720,7 +719,7 @@ func TestRetainVSC(t *testing.T) { fakeSnapshotClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) } - returned, err := RetainVSC(context.Background(), fakeSnapshotClient.SnapshotV1(), test.vsc) + returned, err := RetainVSC(t.Context(), fakeSnapshotClient.SnapshotV1(), test.vsc) if len(test.err) == 0 { require.NoError(t, err) @@ -814,7 +813,7 @@ func TestRemoveVSCProtect(t *testing.T) { fakeSnapshotClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) } - err := RemoveVSCProtect(context.Background(), fakeSnapshotClient.SnapshotV1(), test.vsc, test.timeout) + err := RemoveVSCProtect(t.Context(), fakeSnapshotClient.SnapshotV1(), test.vsc, test.timeout) if len(test.err) == 0 { require.NoError(t, err) @@ -823,7 +822,7 @@ func TestRemoveVSCProtect(t *testing.T) { } if test.updated != nil { - updated, err := fakeSnapshotClient.SnapshotV1().VolumeSnapshotContents().Get(context.Background(), test.vsc, metav1.GetOptions{}) + updated, err := 
fakeSnapshotClient.SnapshotV1().VolumeSnapshotContents().Get(t.Context(), test.vsc, metav1.GetOptions{}) require.NoError(t, err) assert.Equal(t, test.updated.Finalizers, updated.Finalizers) @@ -1447,7 +1446,7 @@ func TestSetVolumeSnapshotContentDeletionPolicy(t *testing.T) { require.NoError(t, err) actual := new(snapshotv1api.VolumeSnapshotContent) err := fakeClient.Get( - context.TODO(), + t.Context(), crclient.ObjectKey{Name: tc.inputVSCName}, actual, ) @@ -1501,7 +1500,7 @@ func TestDeleteVolumeSnapshots(t *testing.T) { vsList := new(snapshotv1api.VolumeSnapshotList) err := client.List( - context.TODO(), + t.Context(), vsList, &crclient.ListOptions{ Namespace: "velero", @@ -1511,7 +1510,7 @@ func TestDeleteVolumeSnapshots(t *testing.T) { vscList := new(snapshotv1api.VolumeSnapshotContentList) err = client.List( - context.TODO(), + t.Context(), vscList, ) require.NoError(t, err) diff --git a/pkg/util/kube/event_test.go b/pkg/util/kube/event_test.go index 020ef8651..645d1da2d 100644 --- a/pkg/util/kube/event_test.go +++ b/pkg/util/kube/event_test.go @@ -17,7 +17,6 @@ limitations under the License. package kube import ( - "context" "fmt" "testing" "time" @@ -155,7 +154,7 @@ func TestEvent(t *testing.T) { }, } - _, err = client.CoreV1().Pods("fake-ns").Create(context.Background(), pod, metav1.CreateOptions{}) + _, err = client.CoreV1().Pods("fake-ns").Create(t.Context(), pod, metav1.CreateOptions{}) require.NoError(t, err) for i := 0; i < tc.generateDiff; i++ { @@ -190,7 +189,7 @@ func TestEvent(t *testing.T) { recorder.Shutdown() - items, err := client.CoreV1().Events("fake-ns").List(context.Background(), metav1.ListOptions{}) + items, err := client.CoreV1().Events("fake-ns").List(t.Context(), metav1.ListOptions{}) require.NoError(t, err) if tc.expected != len(items.Items) { diff --git a/pkg/util/kube/list_watch_test.go b/pkg/util/kube/list_watch_test.go index 8f70e3751..a0ed9760e 100644 --- a/pkg/util/kube/list_watch_test.go +++ b/pkg/util/kube/list_watch_test.go @@ -17,7 +17,6 @@ limitations under the License. package kube import ( - "context" "testing" "time" @@ -45,9 +44,9 @@ func TestInternalLW(t *testing.T) { close(stop) backupList := new(velerov1api.BackupList) - err := client.List(context.Background(), backupList) + err := client.List(t.Context(), backupList) require.NoError(t, err) - _, err = client.Watch(context.Background(), backupList) + _, err = client.Watch(t.Context(), backupList) require.NoError(t, err) } diff --git a/pkg/util/kube/node_test.go b/pkg/util/kube/node_test.go index 740e7153d..9f14c380b 100644 --- a/pkg/util/kube/node_test.go +++ b/pkg/util/kube/node_test.go @@ -17,7 +17,6 @@ limitations under the License. 
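The volume_snapshot and node test hunks around this point drive error paths by prepending reactors to fake clientsets; this diff only touches their context argument. For reference, a minimal, self-contained sketch of the reactor pattern itself (the verb, resource, and error text are illustrative):

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func TestInjectedDeleteError(t *testing.T) {
	client := fake.NewSimpleClientset()

	// Prepending a reactor lets the test force an API error for a
	// specific verb/resource pair before the default object tracker runs.
	client.Fake.PrependReactor("delete", "pods",
		func(k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, errors.New("fake-delete-error")
		})

	err := client.CoreV1().Pods("velero").Delete(t.Context(), "pod-1", metav1.DeleteOptions{})
	require.EqualError(t, err, "fake-delete-error")
}
```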
package kube import ( - "context" "testing" "github.com/pkg/errors" @@ -80,7 +79,7 @@ func TestIsLinuxNode(t *testing.T) { fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build() - err := IsLinuxNode(context.TODO(), "fake-node", fakeClient) + err := IsLinuxNode(t.Context(), "fake-node", fakeClient) if err != nil { assert.EqualError(t, err, test.err) } else { @@ -128,7 +127,7 @@ func TestWithLinuxNode(t *testing.T) { fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build() - result := withOSNode(context.TODO(), fakeClient, "linux", velerotest.NewLogger()) + result := withOSNode(t.Context(), fakeClient, "linux", velerotest.NewLogger()) assert.Equal(t, test.result, result) }) } @@ -174,7 +173,7 @@ func TestGetNodeOSType(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { fakeKubeClient := kubeClientFake.NewSimpleClientset(test.kubeClientObj...) - osType, err := GetNodeOS(context.TODO(), "fake-node", fakeKubeClient.CoreV1()) + osType, err := GetNodeOS(t.Context(), "fake-node", fakeKubeClient.CoreV1()) if err != nil { assert.EqualError(t, err, test.err) } else { @@ -250,7 +249,7 @@ func TestHasNodeWithOS(t *testing.T) { fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) } - err := HasNodeWithOS(context.TODO(), test.os, fakeKubeClient.CoreV1()) + err := HasNodeWithOS(t.Context(), test.os, fakeKubeClient.CoreV1()) if test.err != "" { assert.EqualError(t, err, test.err) } else { diff --git a/pkg/util/kube/periodical_enqueue_source_test.go b/pkg/util/kube/periodical_enqueue_source_test.go index 957639aad..677ec90e1 100644 --- a/pkg/util/kube/periodical_enqueue_source_test.go +++ b/pkg/util/kube/periodical_enqueue_source_test.go @@ -40,7 +40,7 @@ import ( func TestStart(t *testing.T) { require.NoError(t, velerov1.AddToScheme(scheme.Scheme)) - ctx, cancelFunc := context.WithCancel(context.TODO()) + ctx, cancelFunc := context.WithCancel(t.Context()) client := (&fake.ClientBuilder{}).Build() queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedItemBasedRateLimiter[reconcile.Request]()) source := NewPeriodicalEnqueueSource(logrus.WithContext(ctx).WithField("controller", "PES_TEST"), client, &velerov1.ScheduleList{}, 1*time.Second, PeriodicalEnqueueSourceOption{}) @@ -72,7 +72,7 @@ func TestStart(t *testing.T) { func TestPredicate(t *testing.T) { require.NoError(t, velerov1.AddToScheme(scheme.Scheme)) - ctx, cancelFunc := context.WithCancel(context.TODO()) + ctx, cancelFunc := context.WithCancel(t.Context()) client := (&fake.ClientBuilder{}).Build() queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedItemBasedRateLimiter[reconcile.Request]()) @@ -116,7 +116,7 @@ func TestPredicate(t *testing.T) { func TestOrder(t *testing.T) { require.NoError(t, velerov1.AddToScheme(scheme.Scheme)) - ctx, cancelFunc := context.WithCancel(context.TODO()) + ctx, cancelFunc := context.WithCancel(t.Context()) client := (&fake.ClientBuilder{}).Build() queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedItemBasedRateLimiter[reconcile.Request]()) source := NewPeriodicalEnqueueSource( diff --git a/pkg/util/kube/pod_test.go b/pkg/util/kube/pod_test.go index 54630df79..b162894e1 100644 --- a/pkg/util/kube/pod_test.go +++ b/pkg/util/kube/pod_test.go @@ -134,7 +134,7 @@ func TestEnsureDeletePod(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient - err := EnsureDeletePod(context.Background(), kubeClient.CoreV1(), test.podName, test.namespace, 
time.Millisecond) + err := EnsureDeletePod(t.Context(), kubeClient.CoreV1(), test.podName, test.namespace, time.Millisecond) if err != nil { assert.EqualError(t, err, test.err) } else { @@ -377,7 +377,7 @@ func TestDeletePodIfAny(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient logMessage := "" - DeletePodIfAny(context.Background(), kubeClient.CoreV1(), test.podName, test.podNamespace, velerotest.NewSingleLogger(&logMessage)) + DeletePodIfAny(t.Context(), kubeClient.CoreV1(), test.podName, test.podNamespace, velerotest.NewSingleLogger(&logMessage)) if len(test.logMessage) > 0 { assert.Contains(t, logMessage, test.logMessage) @@ -733,7 +733,7 @@ func TestCollectPodLogs(t *testing.T) { } podLogReaderGetter = fp.GetPodLogReader - err := CollectPodLogs(context.Background(), nil, test.pod, "", test.container, fp) + err := CollectPodLogs(t.Context(), nil, test.pod, "", test.container, fp) if test.expectErr != "" { assert.EqualError(t, err, test.expectErr) } else { diff --git a/pkg/util/kube/pvc_pv_test.go b/pkg/util/kube/pvc_pv_test.go index 150757162..f52cdeb98 100644 --- a/pkg/util/kube/pvc_pv_test.go +++ b/pkg/util/kube/pvc_pv_test.go @@ -17,7 +17,6 @@ limitations under the License. package kube import ( - "context" "testing" "time" @@ -123,7 +122,7 @@ func TestWaitPVCBound(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient - pv, err := WaitPVCBound(context.Background(), kubeClient.CoreV1(), kubeClient.CoreV1(), test.pvcName, test.pvcNamespace, time.Millisecond) + pv, err := WaitPVCBound(t.Context(), kubeClient.CoreV1(), kubeClient.CoreV1(), test.pvcName, test.pvcNamespace, time.Millisecond) if err != nil { require.EqualError(t, err, test.err) @@ -287,7 +286,7 @@ func TestWaitPVCConsumed(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient - selectedNode, pvc, err := WaitPVCConsumed(context.Background(), kubeClient.CoreV1(), test.pvcName, test.pvcNamespace, kubeClient.StorageV1(), time.Millisecond, test.ignoreWaitForFirstConsumer) + selectedNode, pvc, err := WaitPVCConsumed(t.Context(), kubeClient.CoreV1(), test.pvcName, test.pvcNamespace, kubeClient.StorageV1(), time.Millisecond, test.ignoreWaitForFirstConsumer) if err != nil { require.EqualError(t, err, test.err) @@ -510,7 +509,7 @@ func TestDeletePVCIfAny(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient logMessage := "" - DeletePVAndPVCIfAny(context.Background(), kubeClient.CoreV1(), test.pvcName, test.pvcNamespace, test.ensureTimeout, velerotest.NewSingleLogger(&logMessage)) + DeletePVAndPVCIfAny(t.Context(), kubeClient.CoreV1(), test.pvcName, test.pvcNamespace, test.ensureTimeout, velerotest.NewSingleLogger(&logMessage)) if len(test.logMessage) > 0 { assert.Contains(t, logMessage, test.logMessage) @@ -572,7 +571,7 @@ func TestDeletePVIfAny(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient logMessage := "" - DeletePVIfAny(context.Background(), kubeClient.CoreV1(), test.pvName, velerotest.NewSingleLogger(&logMessage)) + DeletePVIfAny(t.Context(), kubeClient.CoreV1(), test.pvName, velerotest.NewSingleLogger(&logMessage)) if len(test.logMessage) > 0 { assert.Contains(t, logMessage, test.logMessage) @@ -698,7 +697,7 @@ func TestEnsureDeletePVC(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient - err := EnsureDeletePVC(context.Background(), kubeClient.CoreV1(), test.pvcName, test.namespace, test.timeout) + err := EnsureDeletePVC(t.Context(), kubeClient.CoreV1(), test.pvcName, test.namespace, test.timeout) if err != nil { 
assert.EqualError(t, err, test.err) } else { @@ -786,7 +785,7 @@ func TestEnsureDeletePV(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient - err := EnsurePVDeleted(context.Background(), kubeClient.CoreV1(), test.pvName, test.timeout) + err := EnsurePVDeleted(t.Context(), kubeClient.CoreV1(), test.pvName, test.timeout) if err != nil { assert.EqualError(t, err, test.err) } else { @@ -860,7 +859,7 @@ func TestRebindPVC(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient - result, err := RebindPVC(context.Background(), kubeClient.CoreV1(), test.pvc, test.pv) + result, err := RebindPVC(t.Context(), kubeClient.CoreV1(), test.pvc, test.pv) if err != nil { require.EqualError(t, err, test.err) } else { @@ -967,7 +966,7 @@ func TestResetPVBinding(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient - result, err := ResetPVBinding(context.Background(), kubeClient.CoreV1(), test.pv, test.labels, test.pvc) + result, err := ResetPVBinding(t.Context(), kubeClient.CoreV1(), test.pv, test.labels, test.pvc) if err != nil { require.EqualError(t, err, test.err) } else { @@ -1045,7 +1044,7 @@ func TestSetPVReclaimPolicy(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient - result, err := SetPVReclaimPolicy(context.Background(), kubeClient.CoreV1(), test.pv, test.policy) + result, err := SetPVReclaimPolicy(t.Context(), kubeClient.CoreV1(), test.pv, test.policy) if err != nil { require.EqualError(t, err, test.err) } else { @@ -1195,7 +1194,7 @@ func TestWaitPVBound(t *testing.T) { var kubeClient kubernetes.Interface = fakeKubeClient - pv, err := WaitPVBound(context.Background(), kubeClient.CoreV1(), test.pvName, test.pvcName, test.pvcNamespace, time.Millisecond) + pv, err := WaitPVBound(t.Context(), kubeClient.CoreV1(), test.pvName, test.pvcName, test.pvcNamespace, time.Millisecond) if err != nil { require.EqualError(t, err, test.err) diff --git a/pkg/util/kube/secrets_test.go b/pkg/util/kube/secrets_test.go index 61c6f355b..8707c9be3 100644 --- a/pkg/util/kube/secrets_test.go +++ b/pkg/util/kube/secrets_test.go @@ -17,7 +17,6 @@ limitations under the License. package kube import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -81,7 +80,7 @@ func TestGetSecretKey(t *testing.T) { fakeClient := velerotest.NewFakeControllerRuntimeClient(t) for _, secret := range tc.secrets { - require.NoError(t, fakeClient.Create(context.Background(), secret)) + require.NoError(t, fakeClient.Create(t.Context(), secret)) } data, err := GetSecretKey(fakeClient, tc.namespace, tc.selector) diff --git a/pkg/util/kube/utils_test.go b/pkg/util/kube/utils_test.go index fd0a1ab4c..ec2251744 100644 --- a/pkg/util/kube/utils_test.go +++ b/pkg/util/kube/utils_test.go @@ -17,7 +17,6 @@ limitations under the License. package kube import ( - "context" "encoding/json" "testing" "time" @@ -228,7 +227,7 @@ func TestGetVolumeDirectorySuccess(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset(objs...) // Function under test - dir, err := GetVolumeDirectory(context.Background(), logrus.StandardLogger(), tc.pod, tc.pod.Spec.Volumes[0].Name, fakeKubeClient) + dir, err := GetVolumeDirectory(t.Context(), logrus.StandardLogger(), tc.pod, tc.pod.Spec.Volumes[0].Name, fakeKubeClient) require.NoError(t, err) assert.Equal(t, tc.want, dir) @@ -277,7 +276,7 @@ func TestGetVolumeModeSuccess(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset(objs...) 
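The pod and PVC helpers exercised above (EnsureDeletePod, WaitPVCBound, EnsurePVDeleted, and the rest) are polling waits, which is why the tests pass millisecond timeouts; with t.Context() as the parent, an abandoned wait is also cancelled when its test ends. A hypothetical helper in the same spirit, not Velero's implementation:

```go
package example

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes/fake"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// waitPodGone polls until the named pod is gone or the timeout expires.
// It is a stand-in for helpers like EnsureDeletePod, not Velero's code.
func waitPodGone(ctx context.Context, pods corev1client.PodInterface, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, time.Millisecond, timeout, true,
		func(ctx context.Context) (bool, error) {
			_, err := pods.Get(ctx, name, metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				return true, nil
			}
			return false, err
		})
}

func TestWaitPodGone(t *testing.T) {
	client := fake.NewSimpleClientset() // no pods exist, so the first poll succeeds
	err := waitPodGone(t.Context(), client.CoreV1().Pods("velero"), "pod-1", 10*time.Millisecond)
	require.NoError(t, err)
}
```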
// Function under test - mode, err := GetVolumeMode(context.Background(), logrus.StandardLogger(), tc.pod, tc.pod.Spec.Volumes[0].Name, fakeKubeClient) + mode, err := GetVolumeMode(t.Context(), logrus.StandardLogger(), tc.pod, tc.pod.Spec.Volumes[0].Name, fakeKubeClient) require.NoError(t, err) assert.Equal(t, tc.want, mode)
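Closing note on the one non-test hunk in this section: pkg/restore/restore.go now calls restoreLogger.Error(err.Error()) instead of Errorf. Passing an error message as a format string mangles any literal % it contains, and the printf vet analyzer (which in Go 1.24 also reports non-constant format strings) flags the Errorf form, so that is the likely motivation. A small illustration; the call goes through a function variable only so the deliberately bad pattern does not trip go test's vet run:

```go
package example

import (
	"errors"
	"fmt"
	"io"
	"testing"

	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/require"
)

func TestFormatStringPitfall(t *testing.T) {
	err := errors.New("could not restore item ns/name: quota is 100% used")

	// This is what Errorf(err.Error()) does: the message is parsed as a
	// format string, so the stray "%" turns into "%!..." noise.
	format := fmt.Sprintf
	garbled := format(err.Error())
	require.NotEqual(t, err.Error(), garbled)
	require.Contains(t, garbled, "%!")

	// Error (without the f) logs the message verbatim.
	logger := logrus.New()
	logger.SetOutput(io.Discard)
	logger.Error(err.Error())
}
```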