Merge pull request #1011 from wwitzel3/issue-609

Verify PV doesn't exist before creating new volume
Steve Kriss, 2018-11-01 08:45:13 -06:00 (committed by GitHub)
2 changed files with 62 additions and 23 deletions
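The change below follows the standard client-go existence-check idiom: Get the object, treat a NotFound error as "safe to create", and propagate any other error. A minimal self-contained sketch of that idiom — shouldRestoreVolume and the getter interface are illustrative names, not part of this commit, and the context-free Get signature is assumed to match the older dynamic client used here:

package example

import (
    "fmt"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// getter matches the Get method of the dynamic resource client used in this
// commit (pre-context signature; treat the exact shape as an assumption).
type getter interface {
    Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error)
}

// shouldRestoreVolume reports whether the PV is absent from the cluster,
// i.e. whether creating a volume from the snapshot cannot orphan anything.
func shouldRestoreVolume(client getter, name string) (bool, error) {
    _, err := client.Get(name, metav1.GetOptions{})
    switch {
    case apierrors.IsNotFound(err):
        return true, nil // no such PV; safe to create a volume from the snapshot
    case err != nil:
        return false, fmt.Errorf("error checking existence for PV %s: %v", name, err)
    default:
        return false, nil // PV already exists; skip the volume restore logic
    }
}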

pkg/restore/restore.go

@@ -674,30 +674,41 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
         continue
     }
 
-    // restore the PV from snapshot (if applicable)
-    updatedObj, err := ctx.pvRestorer.executePVAction(obj)
-    if err != nil {
-        addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", fullPath, err))
+    // Check if the PV exists in the cluster before attempting to create
+    // a volume from the snapshot, in order to avoid orphaned volumes (GH #609)
+    pv, err := resourceClient.Get(name, metav1.GetOptions{})
+    if err != nil && !apierrors.IsNotFound(err) {
+        addToResult(&errs, namespace, fmt.Errorf("error checking existence for PV %s: %v", name, err))
         continue
     }
-    obj = updatedObj
 
-    if resourceWatch == nil {
-        resourceWatch, err = resourceClient.Watch(metav1.ListOptions{})
-        if err != nil {
-            addToResult(&errs, namespace, fmt.Errorf("error watching for namespace %q, resource %q: %v", namespace, &groupResource, err))
-            return warnings, errs
-        }
-        ctx.resourceWatches = append(ctx.resourceWatches, resourceWatch)
-        ctx.resourceWaitGroup.Add(1)
-        go func() {
-            defer ctx.resourceWaitGroup.Done()
-            if _, err := waitForReady(resourceWatch.ResultChan(), name, isPVReady, time.Minute, ctx.log); err != nil {
-                ctx.log.Warnf("Timeout reached waiting for persistent volume %s to become ready", name)
-                addArkError(&warnings, fmt.Errorf("timeout reached waiting for persistent volume %s to become ready", name))
-            }
-        }()
-    }
+    // PV's existence will be recorded later. Just skip the volume restore logic.
+    if pv == nil {
+        // restore the PV from snapshot (if applicable)
+        updatedObj, err := ctx.pvRestorer.executePVAction(obj)
+        if err != nil {
+            addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", fullPath, err))
+            continue
+        }
+        obj = updatedObj
+
+        if resourceWatch == nil {
+            resourceWatch, err = resourceClient.Watch(metav1.ListOptions{})
+            if err != nil {
+                addToResult(&errs, namespace, fmt.Errorf("error watching for namespace %q, resource %q: %v", namespace, &groupResource, err))
+                return warnings, errs
+            }
+            ctx.resourceWatches = append(ctx.resourceWatches, resourceWatch)
+            ctx.resourceWaitGroup.Add(1)
+            go func() {
+                defer ctx.resourceWaitGroup.Done()
+                if _, err := waitForReady(resourceWatch.ResultChan(), name, isPVReady, time.Minute, ctx.log); err != nil {
+                    ctx.log.Warnf("Timeout reached waiting for persistent volume %s to become ready", name)
+                    addArkError(&warnings, fmt.Errorf("timeout reached waiting for persistent volume %s to become ready", name))
+                }
+            }()
+        }
+    }
 }
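The pv == nil guard only skips the snapshot/volume logic; the PV object itself still flows into the generic create step later in restoreResource, which is outside this hunk. Presumably that step downgrades an AlreadyExists response to a warning rather than a failure — the test change further down asserts exactly one cluster-scoped warning when the PV already exists. A hedged sketch of that kind of handling; createToleratingConflict and the creator interface are illustrative, not code from this repo:

package example

import (
    "fmt"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// creator matches the Create method of the resource client (assumed shape).
type creator interface {
    Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
}

// createToleratingConflict attempts the create and classifies the outcome:
// created cleanly, already present (a warning), or a hard error.
func createToleratingConflict(c creator, obj *unstructured.Unstructured) (warning, err error) {
    _, err = c.Create(obj)
    switch {
    case err == nil:
        return nil, nil // created fresh
    case apierrors.IsAlreadyExists(err):
        // Existing object: record a warning, not a restore failure.
        return fmt.Errorf("%s %q already exists in the cluster", obj.GetKind(), obj.GetName()), nil
    default:
        return nil, err // real API error
    }
}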

pkg/restore/restore_test.go

@@ -597,6 +597,9 @@ func TestRestoreResourceForNamespace(t *testing.T) {
             dynamicFactory.On("ClientForGroupVersionResource", gv, pvResource, test.namespace).Return(resourceClient, nil)
             resourceClient.On("Watch", metav1.ListOptions{}).Return(&fakeWatch{}, nil)
 
+            var empty *unstructured.Unstructured
+            resourceClient.On("Get", newTestPV().PersistentVolume.Name, metav1.GetOptions{}).Return(empty, nil)
+
             saResource := metav1.APIResource{Name: "serviceaccounts", Namespaced: true}
             dynamicFactory.On("ClientForGroupVersionResource", gv, saResource, test.namespace).Return(resourceClient, nil)
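The typed nil in var empty *unstructured.Unstructured is doing real work: a testify-style mock hands return values back through interface{} slots, and a direct type assertion on an untyped nil interface panics, while a typed nil pointer converts cleanly — presumably why the test declares empty instead of passing nil to Return. A small runnable illustration of the difference (standalone, not from this repo):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
    var slot interface{}

    // With an untyped nil in the slot, a direct assertion would panic:
    // _ = slot.(*unstructured.Unstructured)

    // A typed nil pointer converts cleanly and compares equal to nil.
    var empty *unstructured.Unstructured
    slot = empty
    pv := slot.(*unstructured.Unstructured)
    fmt.Println(pv == nil) // true
}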
@@ -798,34 +801,44 @@ status:
         expectPVCVolumeName           bool
         expectedPVCAnnotationsMissing sets.String
         expectPVCreation              bool
+        expectPVFound                 bool
     }{
         {
-            name:                "have snapshot, reclaim policy delete",
+            name:                "have snapshot, reclaim policy delete, no existing PV found",
             haveSnapshot:        true,
             reclaimPolicy:       "Delete",
             expectPVCVolumeName: true,
             expectPVCreation:    true,
         },
         {
-            name:                "have snapshot, reclaim policy retain",
+            name:                "have snapshot, reclaim policy retain, no existing PV found",
             haveSnapshot:        true,
             reclaimPolicy:       "Retain",
             expectPVCVolumeName: true,
             expectPVCreation:    true,
         },
         {
-            name:                "no snapshot, reclaim policy delete",
+            name:                "have snapshot, reclaim policy retain, existing PV found",
+            haveSnapshot:        true,
+            reclaimPolicy:       "Retain",
+            expectPVCVolumeName: true,
+            expectPVCreation:    false,
+            expectPVFound:       true,
+        },
+        {
+            name:                          "no snapshot, reclaim policy delete, no existing PV",
             haveSnapshot:                  false,
             reclaimPolicy:                 "Delete",
             expectPVCVolumeName:           false,
             expectedPVCAnnotationsMissing: sets.NewString("pv.kubernetes.io/bind-completed", "pv.kubernetes.io/bound-by-controller"),
         },
         {
-            name:                "no snapshot, reclaim policy retain",
+            name:                "no snapshot, reclaim policy retain, existing PV found",
            haveSnapshot:        false,
             reclaimPolicy:       "Retain",
             expectPVCVolumeName: true,
-            expectPVCreation:    true,
+            expectPVCreation:    false,
+            expectPVFound:       true,
         },
     }
 
     for _, test := range tests {
@@ -902,6 +915,16 @@ status:
             require.NoError(t, err)
             unstructuredPV := &unstructured.Unstructured{Object: unstructuredPVMap}
 
+            if test.expectPVFound {
+                pvClient.On("Get", unstructuredPV.GetName(), metav1.GetOptions{}).Return(unstructuredPV, nil)
+                pvClient.On("Create", mock.Anything).Return(unstructuredPV, k8serrors.NewAlreadyExists(kuberesource.PersistentVolumes, unstructuredPV.GetName()))
+            }
+
+            // Only set up the client expectation if the test has the proper prerequisites
+            if test.haveSnapshot || test.reclaimPolicy != "Delete" {
+                var empty *unstructured.Unstructured
+                pvClient.On("Get", mock.Anything, metav1.GetOptions{}).Return(empty, nil)
+            }
 
             pvToRestore := unstructuredPV.DeepCopy()
             restoredPV := unstructuredPV.DeepCopy()
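k8serrors.NewAlreadyExists builds the same *StatusError the API server would return for a conflicting create, so k8serrors.IsAlreadyExists recognizes it; it takes a schema.GroupResource plus the object name, which is what kuberesource.PersistentVolumes supplies in the mock above. A standalone equivalent — the inline GroupResource mirrors what that constant presumably holds:

package main

import (
    "fmt"

    k8serrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    // Assumed to mirror kuberesource.PersistentVolumes in this repo.
    pvGR := schema.GroupResource{Group: "", Resource: "persistentvolumes"}

    err := k8serrors.NewAlreadyExists(pvGR, "pv-1")
    fmt.Println(k8serrors.IsAlreadyExists(err)) // true
    fmt.Println(err)                            // persistentvolumes "pv-1" already exists
}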
@@ -936,10 +959,15 @@ status:
             warnings, errors := ctx.restoreResource("persistentvolumes", "", "foo/resources/persistentvolumes/cluster/")
 
             assert.Empty(t, warnings.Ark)
-            assert.Empty(t, warnings.Cluster)
             assert.Empty(t, warnings.Namespaces)
             assert.Equal(t, api.RestoreResult{}, errors)
 
+            if test.expectPVFound {
+                assert.Equal(t, 1, len(warnings.Cluster))
+            } else {
+                assert.Empty(t, warnings.Cluster)
+            }
+
             // Prep PVC restore
             // Handle expectations
             if !test.expectPVCVolumeName {