Restore result refactoring (#2234)

* move Result helper funcs to be methods

Signed-off-by: Steve Kriss <krisss@vmware.com>
This commit is contained in:
Steve Kriss
2020-02-03 11:56:57 -07:00
committed by GitHub
parent c9bc6646a5
commit 08c549a092
3 changed files with 268 additions and 72 deletions

View File

@@ -1,5 +1,5 @@
/*
Copyright 2017, 2019 the Velero contributors.
Copyright 2017, 2019, 2020 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -396,7 +396,7 @@ func (ctx *context) execute() (Result, Result) {
dir, err := archive.NewExtractor(ctx.log, ctx.fileSystem).UnzipAndExtractBackup(ctx.backupReader)
if err != nil {
ctx.log.Infof("error unzipping and extracting: %v", err)
addVeleroError(&errs, err)
errs.AddVeleroError(err)
return warnings, errs
}
defer ctx.fileSystem.RemoveAll(dir)
@@ -406,7 +406,7 @@ func (ctx *context) execute() (Result, Result) {
backupResources, err := archive.NewParser(ctx.log, ctx.fileSystem).Parse(ctx.restoreDir)
if err != nil {
addVeleroError(&errs, errors.Wrap(err, "error parsing backup contents"))
errs.AddVeleroError(errors.Wrap(err, "error parsing backup contents"))
return warnings, errs
}
@@ -445,7 +445,7 @@ func (ctx *context) execute() (Result, Result) {
logger := ctx.log.WithField("namespace", namespace)
ns := getNamespace(logger, getItemFilePath(ctx.restoreDir, "namespaces", "", namespace), targetNamespace)
if _, err := kube.EnsureNamespaceExistsAndIsReady(ns, ctx.namespaceClient, ctx.resourceTerminatingTimeout); err != nil {
addVeleroError(&errs, err)
errs.AddVeleroError(err)
continue
}
@@ -455,8 +455,8 @@ func (ctx *context) execute() (Result, Result) {
}
w, e := ctx.restoreResource(resource.String(), targetNamespace, namespace, items)
merge(&warnings, &w)
merge(&errs, &e)
warnings.Merge(&w)
errs.Merge(&e)
}
}
@@ -469,13 +469,13 @@ func (ctx *context) execute() (Result, Result) {
// Don't break on error here, since newResources will be the same as the original prioritizedResources,
// and thus addedResources will end up being empty and we'll restore nothing.
// Since we're continuing the restore, add a warning, not an error.
addVeleroError(&warnings, errors.Wrap(err, "error refreshing discovery API"))
warnings.AddVeleroError(errors.Wrap(err, "error refreshing discovery API"))
}
newResources, err := prioritizeResources(ctx.discoveryHelper, ctx.resourcePriorities, ctx.resourceIncludesExcludes, ctx.log)
if err != nil {
// If there was an error, then newResources will be nil, so we can continue on the restore.
// addedResources will end up being nil, but we should still report this failure.
addVeleroError(&warnings, errors.Wrap(err, "error sorting resources"))
warnings.AddVeleroError(errors.Wrap(err, "error sorting resources"))
}
// Filter the resources to only those added since our first restore pass.
@@ -523,7 +523,7 @@ func (ctx *context) execute() (Result, Result) {
logger := ctx.log.WithField("namespace", namespace)
ns := getNamespace(logger, getItemFilePath(ctx.restoreDir, "namespaces", "", namespace), targetNamespace)
if _, err := kube.EnsureNamespaceExistsAndIsReady(ns, ctx.namespaceClient, ctx.resourceTerminatingTimeout); err != nil {
addVeleroError(&errs, err)
errs.AddVeleroError(err)
continue
}
@@ -533,8 +533,8 @@ func (ctx *context) execute() (Result, Result) {
}
w, e := ctx.restoreResource(resource.String(), targetNamespace, namespace, items)
merge(&warnings, &w)
merge(&errs, &e)
warnings.Merge(&w)
errs.Merge(&e)
}
}
@@ -608,38 +608,6 @@ func getNamespace(logger logrus.FieldLogger, path, remappedName string) *v1.Name
}
}
// merge combines two RestoreResult objects into one
// by appending the corresponding lists to one another.
func merge(a, b *Result) {
	a.Cluster = append(a.Cluster, b.Cluster...)
	a.Velero = append(a.Velero, b.Velero...)

	// Allocate the destination namespace map lazily, only when there is
	// at least one namespaced entry to copy over.
	if len(b.Namespaces) > 0 && a.Namespaces == nil {
		a.Namespaces = make(map[string][]string)
	}
	for ns, msgs := range b.Namespaces {
		a.Namespaces[ns] = append(a.Namespaces[ns], msgs...)
	}
}
// addVeleroError appends an error to the provided RestoreResult's Velero list.
func addVeleroError(r *Result, err error) {
	// Results store plain message strings rather than error values.
	msg := err.Error()
	r.Velero = append(r.Velero, msg)
}
// addToResult appends an error to the provided RestoreResult, either within
// the cluster-scoped list (if ns == "") or within the provided namespace's
// entry.
func addToResult(r *Result, ns string, e error) {
	msg := e.Error()

	// An empty namespace means the error is cluster-scoped.
	if ns == "" {
		r.Cluster = append(r.Cluster, msg)
		return
	}

	if r.Namespaces == nil {
		r.Namespaces = make(map[string][]string)
	}
	r.Namespaces[ns] = append(r.Namespaces[ns], msg)
}
func (ctx *context) getApplicableActions(groupResource schema.GroupResource, namespace string) []resolvedAction {
var actions []resolvedAction
for _, action := range ctx.actions {
@@ -822,7 +790,7 @@ func (ctx *context) restoreResource(resource, targetNamespace, originalNamespace
obj, err := ctx.unmarshal(itemPath)
if err != nil {
addToResult(&errs, targetNamespace, fmt.Errorf("error decoding %q: %v", strings.Replace(itemPath, ctx.restoreDir+"/", "", -1), err))
errs.Add(targetNamespace, fmt.Errorf("error decoding %q: %v", strings.Replace(itemPath, ctx.restoreDir+"/", "", -1), err))
continue
}
@@ -831,8 +799,8 @@ func (ctx *context) restoreResource(resource, targetNamespace, originalNamespace
}
w, e := ctx.restoreItem(obj, groupResource, targetNamespace)
merge(&warnings, &w)
merge(&errs, &e)
warnings.Merge(&w)
errs.Merge(&e)
}
return warnings, errs
@@ -922,7 +890,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
complete, err := isCompleted(obj, groupResource)
if err != nil {
addToResult(&errs, namespace, fmt.Errorf("error checking completion of %q: %v", resourceID, err))
errs.Add(namespace, fmt.Errorf("error checking completion of %q: %v", resourceID, err))
return warnings, errs
}
if complete {
@@ -952,7 +920,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
resourceClient, err := ctx.getResourceClient(groupResource, obj, namespace)
if err != nil {
addVeleroError(&errs, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err))
errs.AddVeleroError(fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err))
return warnings, errs
}
@@ -961,7 +929,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
case hasSnapshot(name, ctx.volumeSnapshots):
shouldRenamePV, err := shouldRenamePV(ctx, obj, resourceClient)
if err != nil {
addToResult(&errs, namespace, err)
errs.Add(namespace, err)
return warnings, errs
}
@@ -971,7 +939,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
// a volume from the snapshot, in order to avoid orphaned volumes (GH #609)
shouldRestoreSnapshot, err = ctx.shouldRestore(name, resourceClient)
if err != nil {
addToResult(&errs, namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name))
errs.Add(namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name))
return warnings, errs
}
} else {
@@ -987,7 +955,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
ctx.log.Infof("Restoring persistent volume from snapshot.")
updatedObj, err := ctx.pvRestorer.executePVAction(obj)
if err != nil {
addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err))
errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err))
return warnings, errs
}
obj = updatedObj
@@ -998,7 +966,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
oldName := obj.GetName()
newName, err := ctx.pvRenamer(oldName)
if err != nil {
addToResult(&errs, namespace, errors.Wrapf(err, "error renaming PV"))
errs.Add(namespace, errors.Wrapf(err, "error renaming PV"))
return warnings, errs
}
@@ -1035,7 +1003,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
// when its PVC is restored.
updatedObj, err := ctx.pvRestorer.executePVAction(obj)
if err != nil {
addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err))
errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err))
return warnings, errs
}
obj = updatedObj
@@ -1044,7 +1012,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
// clear out non-core metadata fields & status
if obj, err = resetMetadataAndStatus(obj); err != nil {
addToResult(&errs, namespace, err)
errs.Add(namespace, err)
return warnings, errs
}
@@ -1061,7 +1029,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
Restore: ctx.restore,
})
if err != nil {
addToResult(&errs, namespace, fmt.Errorf("error preparing %s: %v", resourceID, err))
errs.Add(namespace, fmt.Errorf("error preparing %s: %v", resourceID, err))
return warnings, errs
}
@@ -1071,7 +1039,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
}
unstructuredObj, ok := executeOutput.UpdatedItem.(*unstructured.Unstructured)
if !ok {
addToResult(&errs, namespace, fmt.Errorf("%s: unexpected type %T", resourceID, executeOutput.UpdatedItem))
errs.Add(namespace, fmt.Errorf("%s: unexpected type %T", resourceID, executeOutput.UpdatedItem))
return warnings, errs
}
@@ -1086,7 +1054,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
"additionalResourceNamespace": additionalItem.Namespace,
"additionalResourceName": additionalItem.Name,
}).Warn("unable to restore additional item")
addToResult(&warnings, additionalItem.Namespace, err)
warnings.Add(additionalItem.Namespace, err)
continue
}
@@ -1094,7 +1062,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
additionalResourceID := getResourceID(additionalItem.GroupResource, additionalItem.Namespace, additionalItem.Name)
additionalObj, err := ctx.unmarshal(itemPath)
if err != nil {
addToResult(&errs, namespace, errors.Wrapf(err, "error restoring additional item %s", additionalResourceID))
errs.Add(namespace, errors.Wrapf(err, "error restoring additional item %s", additionalResourceID))
}
additionalItemNamespace := additionalItem.Namespace
@@ -1105,8 +1073,8 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
}
w, e := ctx.restoreItem(additionalObj, additionalItem.GroupResource, additionalItemNamespace)
merge(&warnings, &w)
merge(&errs, &e)
warnings.Merge(&w)
errs.Merge(&e)
}
}
@@ -1121,7 +1089,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
if groupResource == kuberesource.PersistentVolumeClaims {
pvc := new(v1.PersistentVolumeClaim)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); err != nil {
addToResult(&errs, namespace, err)
errs.Add(namespace, err)
return warnings, errs
}
@@ -1141,7 +1109,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
if newName, ok := ctx.renamedPVs[pvc.Spec.VolumeName]; ok {
ctx.log.Infof("Updating persistent volume claim %s/%s to reference renamed persistent volume (%s -> %s)", namespace, name, pvc.Spec.VolumeName, newName)
if err := unstructured.SetNestedField(obj.Object, newName, "spec", "volumeName"); err != nil {
addToResult(&errs, namespace, err)
errs.Add(namespace, err)
return warnings, errs
}
}
@@ -1165,14 +1133,14 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
if err != nil {
ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
addToResult(&warnings, namespace, err)
warnings.Add(namespace, err)
return warnings, errs
}
// Remove insubstantial metadata
fromCluster, err = resetMetadataAndStatus(fromCluster)
if err != nil {
ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
addToResult(&warnings, namespace, err)
warnings.Add(namespace, err)
return warnings, errs
}
@@ -1187,14 +1155,14 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
desired, err := mergeServiceAccounts(fromCluster, obj)
if err != nil {
ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
addToResult(&warnings, namespace, err)
warnings.Add(namespace, err)
return warnings, errs
}
patchBytes, err := generatePatch(fromCluster, desired)
if err != nil {
ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
addToResult(&warnings, namespace, err)
warnings.Add(namespace, err)
return warnings, errs
}
@@ -1205,13 +1173,13 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
_, err = resourceClient.Patch(name, patchBytes)
if err != nil {
addToResult(&warnings, namespace, err)
warnings.Add(namespace, err)
} else {
ctx.log.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
}
default:
e := errors.Errorf("could not restore, %s. Warning: the in-cluster version is different than the backed-up version.", restoreErr)
addToResult(&warnings, namespace, e)
warnings.Add(namespace, e)
}
return warnings, errs
}
@@ -1223,7 +1191,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
// Error was something other than an AlreadyExists
if restoreErr != nil {
ctx.log.Infof("error restoring %s: %v", name, restoreErr)
addToResult(&errs, namespace, fmt.Errorf("error restoring %s: %v", resourceID, restoreErr))
errs.Add(namespace, fmt.Errorf("error restoring %s: %v", resourceID, restoreErr))
return warnings, errs
}
@@ -1236,9 +1204,9 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
if groupResource == kuberesource.CustomResourceDefinitions {
available, err := ctx.crdAvailable(name, resourceClient)
if err != nil {
addToResult(&errs, namespace, errors.Wrapf(err, "error verifying custom resource definition is ready to use"))
errs.Add(namespace, errors.Wrapf(err, "error verifying custom resource definition is ready to use"))
} else if !available {
addToResult(&errs, namespace, fmt.Errorf("CRD %s is not available to use for custom resources.", name))
errs.Add(namespace, fmt.Errorf("CRD %s is not available to use for custom resources.", name))
}
}

View File

@@ -1,5 +1,5 @@
/*
Copyright 2019 the Velero contributors.
Copyright 2019, 2020 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -33,3 +33,35 @@ type Result struct {
// related to restoring namespace-scoped resources.
Namespaces map[string][]string `json:"namespaces,omitempty"`
}
// Merge combines two Result objects into one
// by appending the corresponding lists to one another.
func (r *Result) Merge(other *Result) {
	r.Cluster = append(r.Cluster, other.Cluster...)
	r.Velero = append(r.Velero, other.Velero...)

	// Allocate the receiver's namespace map lazily, only when the other
	// result actually has namespaced entries to copy.
	if len(other.Namespaces) > 0 && r.Namespaces == nil {
		r.Namespaces = make(map[string][]string)
	}
	for ns, msgs := range other.Namespaces {
		r.Namespaces[ns] = append(r.Namespaces[ns], msgs...)
	}
}
// AddVeleroError appends an error to the provided Result's Velero list.
func (r *Result) AddVeleroError(err error) {
	// Results store plain message strings rather than error values.
	msg := err.Error()
	r.Velero = append(r.Velero, msg)
}
// Add appends an error to the provided Result, either within
// the cluster-scoped list (if ns == "") or within the provided namespace's
// entry.
func (r *Result) Add(ns string, e error) {
	msg := e.Error()

	// An empty namespace means the error is cluster-scoped.
	if ns == "" {
		r.Cluster = append(r.Cluster, msg)
		return
	}

	if r.Namespaces == nil {
		r.Namespaces = make(map[string][]string)
	}
	r.Namespaces[ns] = append(r.Namespaces[ns], msg)
}

196
pkg/restore/result_test.go Normal file
View File

@@ -0,0 +1,196 @@
/*
Copyright 2020 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore
import (
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
// TestMerge verifies that Result.Merge appends the other result's
// cluster-scoped, Velero, and per-namespace entries onto the receiver's
// corresponding lists, creating the namespace map as needed.
func TestMerge(t *testing.T) {
	tests := []struct {
		name   string
		result *Result // receiver that Merge mutates
		other  *Result // result merged into the receiver
		want   *Result // expected state of the receiver afterwards
	}{
		{
			name: "when an empty result is merged into a non-empty result, the result does not change",
			result: &Result{
				Cluster: []string{"foo"},
				Namespaces: map[string][]string{
					"ns-1": {"bar"},
					"ns-2": {"baz"},
				},
			},
			other: &Result{},
			want: &Result{
				Cluster: []string{"foo"},
				Namespaces: map[string][]string{
					"ns-1": {"bar"},
					"ns-2": {"baz"},
				},
			},
		},
		{
			// Fixed typo in the case name: "an result" -> "an empty result".
			name:   "when a non-empty result is merged into an empty result, the result looks like the non-empty result",
			result: &Result{},
			other: &Result{
				Cluster: []string{"foo"},
				Namespaces: map[string][]string{
					"ns-1": {"bar"},
					"ns-2": {"baz"},
				},
			},
			want: &Result{
				Cluster: []string{"foo"},
				Namespaces: map[string][]string{
					"ns-1": {"bar"},
					"ns-2": {"baz"},
				},
			},
		},
		{
			name: "when two non-empty results are merged, the result is the union of the two",
			result: &Result{
				Cluster: []string{"cluster-err-1"},
				Namespaces: map[string][]string{
					"ns-1": {"ns-1-err-1"},
					"ns-2": {"ns-2-err-1"},
					"ns-3": {"ns-3-err-1"},
				},
			},
			other: &Result{
				Cluster: []string{"cluster-err-2"},
				Namespaces: map[string][]string{
					"ns-1": {"ns-1-err-2"},
					"ns-2": {"ns-2-err-2"},
					"ns-4": {"ns-4-err-1"},
				},
			},
			want: &Result{
				Cluster: []string{"cluster-err-1", "cluster-err-2"},
				Namespaces: map[string][]string{
					"ns-1": {"ns-1-err-1", "ns-1-err-2"},
					"ns-2": {"ns-2-err-1", "ns-2-err-2"},
					"ns-3": {"ns-3-err-1"},
					"ns-4": {"ns-4-err-1"},
				},
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			tc.result.Merge(tc.other)
			assert.Equal(t, tc.want, tc.result)
		})
	}
}
// TestAddVeleroError verifies that Result.AddVeleroError appends the error's
// message to the result's Velero list, both when the list starts empty and
// when it already has entries.
func TestAddVeleroError(t *testing.T) {
// Table of cases: the result to mutate, the error to add, and the
// expected state of the result afterwards.
tests := []struct {
name string
result *Result
err error
want *Result
}{
{
name: "when AddVeleroError is called for a result with no velero errors, the result has the new error added properly",
result: &Result{},
err: errors.New("foo"),
want: &Result{Velero: []string{"foo"}},
},
{
name: "when AddVeleroError is called for a result with existing velero errors, the result has the new error appended properly",
result: &Result{Velero: []string{"bar"}},
err: errors.New("foo"),
want: &Result{Velero: []string{"bar", "foo"}},
},
}
// Run each case as a subtest and compare the mutated result to want.
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
tc.result.AddVeleroError(tc.err)
assert.Equal(t, tc.want, tc.result)
})
}
}
// TestAdd verifies that Result.Add records an error in the cluster-scoped
// list when the namespace is empty, and in the per-namespace map otherwise,
// creating the map on first use and appending to existing entries.
func TestAdd(t *testing.T) {
// Table of cases: the result to mutate, the namespace and error to add,
// and the expected state of the result afterwards.
tests := []struct {
name string
result *Result
ns string
err error
want *Result
}{
{
name: "when Add is called for a result with no existing errors and an empty namespace, the error is added to the cluster-scoped list",
result: &Result{},
ns: "",
err: errors.New("foo"),
want: &Result{Cluster: []string{"foo"}},
},
{
name: "when Add is called for a result with some existing errors and an empty namespace, the error is added to the cluster-scoped list",
result: &Result{Cluster: []string{"bar"}},
ns: "",
err: errors.New("foo"),
want: &Result{Cluster: []string{"bar", "foo"}},
},
{
name: "when Add is called for a result with no existing errors and a non-empty namespace, the error is added to the namespace list",
result: &Result{},
ns: "ns-1",
err: errors.New("foo"),
want: &Result{
Namespaces: map[string][]string{
"ns-1": {"foo"},
},
},
},
{
name: "when Add is called for a result with some existing errors and a non-empty namespace, the error is added to the namespace list",
result: &Result{
Namespaces: map[string][]string{
"ns-1": {"bar"},
"ns-2": {"baz"},
},
},
ns: "ns-1",
err: errors.New("foo"),
want: &Result{
Namespaces: map[string][]string{
"ns-1": {"bar", "foo"},
"ns-2": {"baz"},
},
},
},
}
// Run each case as a subtest and compare the mutated result to want.
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
tc.result.Add(tc.ns, tc.err)
assert.Equal(t, tc.want, tc.result)
})
}
}