diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go
index 2a5f79bdf..8fe1b3128 100644
--- a/pkg/restore/restore.go
+++ b/pkg/restore/restore.go
@@ -694,30 +694,41 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
 				continue
 			}
 
-			// restore the PV from snapshot (if applicable)
-			updatedObj, err := ctx.pvRestorer.executePVAction(obj)
-			if err != nil {
-				addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", fullPath, err))
+			// Check if the PV exists in the cluster before attempting to create
+			// a volume from the snapshot, in order to avoid orphaned volumes (GH #609)
+			pv, err := resourceClient.Get(name, metav1.GetOptions{})
+			if err != nil && !apierrors.IsNotFound(err) {
+				addToResult(&errs, namespace, fmt.Errorf("error checking existence for PV %s: %v", name, err))
 				continue
 			}
-			obj = updatedObj
 
-			if resourceWatch == nil {
-				resourceWatch, err = resourceClient.Watch(metav1.ListOptions{})
+			// PV's existence will be recorded later. Just skip the volume restore logic.
+			if pv == nil {
+				// restore the PV from snapshot (if applicable)
+				updatedObj, err := ctx.pvRestorer.executePVAction(obj)
 				if err != nil {
-					addToResult(&errs, namespace, fmt.Errorf("error watching for namespace %q, resource %q: %v", namespace, &groupResource, err))
-					return warnings, errs
+					addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", fullPath, err))
+					continue
 				}
-				ctx.resourceWatches = append(ctx.resourceWatches, resourceWatch)
-				ctx.resourceWaitGroup.Add(1)
-				go func() {
-					defer ctx.resourceWaitGroup.Done()
+				obj = updatedObj
 
-					if _, err := waitForReady(resourceWatch.ResultChan(), name, isPVReady, time.Minute, ctx.log); err != nil {
-						ctx.log.Warnf("Timeout reached waiting for persistent volume %s to become ready", name)
-						addArkError(&warnings, fmt.Errorf("timeout reached waiting for persistent volume %s to become ready", name))
+				if resourceWatch == nil {
+					resourceWatch, err = resourceClient.Watch(metav1.ListOptions{})
+					if err != nil {
+						addToResult(&errs, namespace, fmt.Errorf("error watching for namespace %q, resource %q: %v", namespace, &groupResource, err))
+						return warnings, errs
 					}
-				}()
+					ctx.resourceWatches = append(ctx.resourceWatches, resourceWatch)
+					ctx.resourceWaitGroup.Add(1)
+					go func() {
+						defer ctx.resourceWaitGroup.Done()
+
+						if _, err := waitForReady(resourceWatch.ResultChan(), name, isPVReady, time.Minute, ctx.log); err != nil {
+							ctx.log.Warnf("Timeout reached waiting for persistent volume %s to become ready", name)
+							addArkError(&warnings, fmt.Errorf("timeout reached waiting for persistent volume %s to become ready", name))
+						}
+					}()
+				}
 			}
 		}
 
diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go
index 855535e11..2bcf8fc30 100644
--- a/pkg/restore/restore_test.go
+++ b/pkg/restore/restore_test.go
@@ -607,6 +607,10 @@ func TestRestoreResourceForNamespace(t *testing.T) {
 			dynamicFactory.On("ClientForGroupVersionResource", gv, pvResource, test.namespace).Return(resourceClient, nil)
 			resourceClient.On("Watch", metav1.ListOptions{}).Return(&fakeWatch{}, nil)
 
+			// Assume the persistentvolume doesn't already exist in the cluster.
+			var empty *unstructured.Unstructured
+			resourceClient.On("Get", newTestPV().PersistentVolume.Name, metav1.GetOptions{}).Return(empty, nil)
+
 			saResource := metav1.APIResource{Name: "serviceaccounts", Namespaced: true}
 			dynamicFactory.On("ClientForGroupVersionResource", gv, saResource, test.namespace).Return(resourceClient, nil)
 
@@ -818,9 +822,10 @@ status:
 		expectPVCVolumeName           bool
 		expectedPVCAnnotationsMissing sets.String
 		expectPVCreation              bool
+		expectPVFound                 bool
 	}{
 		{
-			name:                "legacy backup, have snapshot, reclaim policy delete",
+			name:                "legacy backup, have snapshot, reclaim policy delete, no existing PV found",
 			haveSnapshot:        true,
 			legacyBackup:        true,
 			reclaimPolicy:       "Delete",
@@ -828,7 +833,7 @@ status:
 			expectPVCreation:    true,
 		},
 		{
-			name:                "non-legacy backup, have snapshot, reclaim policy delete",
+			name:                "non-legacy backup, have snapshot, reclaim policy delete, no existing PV found",
 			haveSnapshot:        true,
 			legacyBackup:        false,
 			reclaimPolicy:       "Delete",
@@ -836,7 +841,16 @@ status:
 			expectPVCreation:    true,
 		},
 		{
-			name:                "legacy backup, have snapshot, reclaim policy retain",
+			name:                "non-legacy backup, have snapshot, reclaim policy delete, existing PV found",
+			haveSnapshot:        true,
+			legacyBackup:        false,
+			reclaimPolicy:       "Delete",
+			expectPVCVolumeName: true,
+			expectPVCreation:    false,
+			expectPVFound:       true,
+		},
+		{
+			name:                "legacy backup, have snapshot, reclaim policy retain, no existing PV found",
 			haveSnapshot:        true,
 			legacyBackup:        true,
 			reclaimPolicy:       "Retain",
@@ -844,7 +858,16 @@ status:
 			expectPVCreation:    true,
 		},
 		{
-			name:                "non-legacy backup, have snapshot, reclaim policy retain",
+			name:                "legacy backup, have snapshot, reclaim policy retain, existing PV found",
+			haveSnapshot:        true,
+			legacyBackup:        true,
+			reclaimPolicy:       "Retain",
+			expectPVCVolumeName: true,
+			expectPVCreation:    false,
+			expectPVFound:       true,
+		},
+		{
+			name:                "non-legacy backup, have snapshot, reclaim policy retain, no existing PV found",
 			haveSnapshot:        true,
 			legacyBackup:        false,
 			reclaimPolicy:       "Retain",
@@ -852,19 +875,45 @@ status:
 			expectPVCreation:    true,
 		},
 		{
-			name:                          "no snapshot, reclaim policy delete",
+			name:                "non-legacy backup, have snapshot, reclaim policy retain, existing PV found",
+			haveSnapshot:        true,
+			legacyBackup:        false,
+			reclaimPolicy:       "Retain",
+			expectPVCVolumeName: true,
+			expectPVCreation:    false,
+			expectPVFound:       true,
+		},
+		{
+			name:                "non-legacy backup, have snapshot, reclaim policy retain, existing PV found",
+			haveSnapshot:        true,
+			legacyBackup:        false,
+			reclaimPolicy:       "Retain",
+			expectPVCVolumeName: true,
+			expectPVCreation:    false,
+			expectPVFound:       true,
+		},
+		{
+			name:                          "no snapshot, reclaim policy delete, no existing PV",
 			haveSnapshot:                  false,
 			reclaimPolicy:                 "Delete",
 			expectPVCVolumeName:           false,
 			expectedPVCAnnotationsMissing: sets.NewString("pv.kubernetes.io/bind-completed", "pv.kubernetes.io/bound-by-controller"),
 		},
 		{
-			name:                "no snapshot, reclaim policy retain",
+			name:                "no snapshot, reclaim policy retain, no existing PV found",
 			haveSnapshot:        false,
 			reclaimPolicy:       "Retain",
 			expectPVCVolumeName: true,
 			expectPVCreation:    true,
 		},
+		{
+			name:                "no snapshot, reclaim policy retain, existing PV found",
+			haveSnapshot:        false,
+			reclaimPolicy:       "Retain",
+			expectPVCVolumeName: true,
+			expectPVCreation:    false,
+			expectPVFound:       true,
+		},
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
@@ -951,6 +1000,17 @@ status:
 			require.NoError(t, err)
 			unstructuredPV := &unstructured.Unstructured{Object: unstructuredPVMap}
 
+			if test.expectPVFound {
+				pvClient.On("Get", unstructuredPV.GetName(), metav1.GetOptions{}).Return(unstructuredPV, nil)
+				pvClient.On("Create", mock.Anything).Return(unstructuredPV, k8serrors.NewAlreadyExists(kuberesource.PersistentVolumes, unstructuredPV.GetName()))
+			}
+
+			// Only set up the client expectation if the test has the proper prerequisites
+			if test.haveSnapshot || test.reclaimPolicy != "Delete" {
+				var empty *unstructured.Unstructured
+				pvClient.On("Get", mock.Anything, metav1.GetOptions{}).Return(empty, nil)
+			}
+
 			pvToRestore := unstructuredPV.DeepCopy()
 			restoredPV := unstructuredPV.DeepCopy()
 
@@ -985,10 +1045,15 @@ status:
 
 			warnings, errors := ctx.restoreResource("persistentvolumes", "", "foo/resources/persistentvolumes/cluster/")
 			assert.Empty(t, warnings.Ark)
-			assert.Empty(t, warnings.Cluster)
 			assert.Empty(t, warnings.Namespaces)
 			assert.Equal(t, api.RestoreResult{}, errors)
 
+			if test.expectPVFound {
+				assert.Equal(t, 1, len(warnings.Cluster))
+			} else {
+				assert.Empty(t, warnings.Cluster)
+			}
+
 			// Prep PVC restore
 			// Handle expectations
 			if !test.expectPVCVolumeName {