From 4accb8512abc8cf82cdfdedcc33ef458a054d9b2 Mon Sep 17 00:00:00 2001 From: KubeKween Date: Tue, 6 Aug 2019 13:17:36 -0700 Subject: [PATCH] Restore from PodVolumeBackups (#1723) * Restore from PodVolumeBackups Signed-off-by: Carlisia * Partially address code reviews Signed-off-by: Carlisia * Partially address code reviews #2 Signed-off-by: Carlisia * Clean up struct Signed-off-by: Carlisia * Fix log messages Signed-off-by: Carlisia * Fix tests Signed-off-by: Carlisia * Clean up Signed-off-by: Carlisia * Add changelog Signed-off-by: Carlisia --- changelogs/unreleased/1723-carlisia | 1 + pkg/cmd/server/plugin/plugin.go | 7 +- pkg/controller/restore_controller.go | 28 +++- pkg/controller/restore_controller_test.go | 13 +- pkg/restic/common.go | 26 ++- pkg/restic/common_test.go | 2 +- pkg/restic/restorer.go | 24 ++- pkg/restore/restic_restore_action.go | 30 +++- pkg/restore/restic_restore_action_test.go | 3 + pkg/restore/restore.go | 134 ++++++++------- pkg/restore/restore_test.go | 192 +++++++++++++++++----- 11 files changed, 321 insertions(+), 139 deletions(-) create mode 100644 changelogs/unreleased/1723-carlisia diff --git a/changelogs/unreleased/1723-carlisia b/changelogs/unreleased/1723-carlisia new file mode 100644 index 000000000..a1852a8f9 --- /dev/null +++ b/changelogs/unreleased/1723-carlisia @@ -0,0 +1 @@ +Restore restic volumes from PodVolumeBackups CRs \ No newline at end of file diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go index 6f9650f67..10f0bbcb8 100644 --- a/pkg/cmd/server/plugin/plugin.go +++ b/pkg/cmd/server/plugin/plugin.go @@ -136,7 +136,12 @@ func newResticRestoreItemAction(f client.Factory) veleroplugin.HandlerInitialize return nil, err } - return restore.NewResticRestoreAction(logger, client.CoreV1().ConfigMaps(f.Namespace())), nil + veleroClient, err := f.Client() + if err != nil { + return nil, err + } + + return restore.NewResticRestoreAction(logger, client.CoreV1().ConfigMaps(f.Namespace()), veleroClient.VeleroV1().PodVolumeBackups(f.Namespace())), nil } } diff --git a/pkg/controller/restore_controller.go b/pkg/controller/restore_controller.go index b71444ddf..b8a6fcd73 100644 --- a/pkg/controller/restore_controller.go +++ b/pkg/controller/restore_controller.go @@ -43,6 +43,7 @@ import ( "github.com/heptio/velero/pkg/metrics" "github.com/heptio/velero/pkg/persistence" "github.com/heptio/velero/pkg/plugin/clientmgmt" + "github.com/heptio/velero/pkg/restic" pkgrestore "github.com/heptio/velero/pkg/restore" "github.com/heptio/velero/pkg/util/collections" kubeutil "github.com/heptio/velero/pkg/util/kube" @@ -75,7 +76,7 @@ type restoreController struct { namespace string restoreClient velerov1client.RestoresGetter - backupClient velerov1client.BackupsGetter + podVolumeBackupClient velerov1client.PodVolumeBackupsGetter restorer pkgrestore.Restorer backupLister listers.BackupLister restoreLister listers.RestoreLister @@ -94,7 +95,7 @@ func NewRestoreController( namespace string, restoreInformer informers.RestoreInformer, restoreClient velerov1client.RestoresGetter, - backupClient velerov1client.BackupsGetter, + podVolumeBackupClient velerov1client.PodVolumeBackupsGetter, restorer pkgrestore.Restorer, backupInformer informers.BackupInformer, backupLocationInformer informers.BackupStorageLocationInformer, @@ -110,7 +111,7 @@ func NewRestoreController( genericController: newGenericController("restore", logger), namespace: namespace, restoreClient: restoreClient, - backupClient: backupClient, + podVolumeBackupClient: podVolumeBackupClient, 
restorer: restorer, backupLister: backupInformer.Lister(), restoreLister: restoreInformer.Lister(), @@ -435,13 +436,32 @@ func (c *restoreController) runValidatedRestore(restore *api.Restore, info backu } defer closeAndRemoveFile(backupFile, c.logger) + opts := restic.NewPodVolumeBackupListOptions(restore.Spec.BackupName) + podVolumeBackupList, err := c.podVolumeBackupClient.PodVolumeBackups(c.namespace).List(opts) + if err != nil { + return errors.WithStack(err) + } + volumeSnapshots, err := info.backupStore.GetBackupVolumeSnapshots(restore.Spec.BackupName) if err != nil { return errors.Wrap(err, "error fetching volume snapshots metadata") } restoreLog.Info("starting restore") - restoreWarnings, restoreErrors := c.restorer.Restore(restoreLog, restore, info.backup, volumeSnapshots, backupFile, actions, c.snapshotLocationLister, pluginManager) + + var podVolumeBackups []*velerov1api.PodVolumeBackup + for i := range podVolumeBackupList.Items { + podVolumeBackups = append(podVolumeBackups, &podVolumeBackupList.Items[i]) + } + restoreReq := pkgrestore.Request{ + Log: restoreLog, + Restore: restore, + Backup: info.backup, + PodVolumeBackups: podVolumeBackups, + VolumeSnapshots: volumeSnapshots, + BackupReader: backupFile, + } + restoreWarnings, restoreErrors := c.restorer.Restore(restoreReq, actions, c.snapshotLocationLister, pluginManager) restoreLog.Info("restore completed") if logReader, err := restoreLog.done(c.logger); err != nil { diff --git a/pkg/controller/restore_controller_test.go b/pkg/controller/restore_controller_test.go index d35a01c70..e0b9d5232 100644 --- a/pkg/controller/restore_controller_test.go +++ b/pkg/controller/restore_controller_test.go @@ -19,7 +19,6 @@ package controller import ( "bytes" "encoding/json" - "io" "io/ioutil" "testing" "time" @@ -608,7 +607,7 @@ func TestProcessQueueItem(t *testing.T) { } } - velerotest.ValidatePatch(t, actions[1], expected, decode) + velerotest.ValidatePatch(t, actions[2], expected, decode) // explicitly capturing the argument passed to Restore myself because // I want to validate the called arg as of the time of calling, but @@ -822,18 +821,14 @@ type fakeRestorer struct { } func (r *fakeRestorer) Restore( - log logrus.FieldLogger, - restore *api.Restore, - backup *api.Backup, - volumeSnapshots []*volume.Snapshot, - backupReader io.Reader, + info pkgrestore.Request, actions []velero.RestoreItemAction, snapshotLocationLister listers.VolumeSnapshotLocationLister, volumeSnapshotterGetter pkgrestore.VolumeSnapshotterGetter, ) (pkgrestore.Result, pkgrestore.Result) { - res := r.Called(log, restore, backup, backupReader, actions) + res := r.Called(info.Log, info.Restore, info.Backup, info.BackupReader, actions) - r.calledWithArg = *restore + r.calledWithArg = *info.Restore return res.Get(0).(pkgrestore.Result), res.Get(1).(pkgrestore.Result) } diff --git a/pkg/restic/common.go b/pkg/restic/common.go index 7b8971a5f..d29e8cabf 100644 --- a/pkg/restic/common.go +++ b/pkg/restic/common.go @@ -45,10 +45,12 @@ const ( volumesToBackupAnnotation = "backup.velero.io/backup-volumes" ) -// GetPodSnapshotAnnotations returns a map, of volume name -> snapshot id, +// getPodSnapshotAnnotations returns a map, of volume name -> snapshot id, // of all restic snapshots for this pod. -// Deprecated: we will stop using pod annotations to record restic snapshot IDs after they're taken. 
-func GetPodSnapshotAnnotations(obj metav1.Object) map[string]string { +// TODO(2.0) to remove +// Deprecated: we will stop using pod annotations to record restic snapshot IDs after they're taken, +// therefore we won't need to check if these annotations exist. +func getPodSnapshotAnnotations(obj metav1.Object) map[string]string { var res map[string]string insertSafe := func(k, v string) { @@ -67,6 +69,24 @@ func GetPodSnapshotAnnotations(obj metav1.Object) map[string]string { return res } +// GetVolumeBackupsForPod returns a map, of volume name -> snapshot id, +// of the PodVolumeBackups that exist for the provided pod. +func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod metav1.Object) map[string]string { + volumes := make(map[string]string) + + for _, pvb := range podVolumeBackups { + if pod.GetName() == pvb.Spec.Pod.Name { + volumes[pvb.Spec.Volume] = pvb.Status.SnapshotID + } + } + + if len(volumes) > 0 { + return volumes + } + + return getPodSnapshotAnnotations(pod) +} + // GetVolumesToBackup returns a list of volume names to backup for // the provided pod. func GetVolumesToBackup(obj metav1.Object) []string { diff --git a/pkg/restic/common_test.go b/pkg/restic/common_test.go index c0e5ada96..f6cf9c037 100644 --- a/pkg/restic/common_test.go +++ b/pkg/restic/common_test.go @@ -75,7 +75,7 @@ func TestGetPodSnapshotAnnotations(t *testing.T) { t.Run(test.name, func(t *testing.T) { pod := &corev1api.Pod{} pod.Annotations = test.annotations - assert.Equal(t, test.expected, GetPodSnapshotAnnotations(pod)) + assert.Equal(t, test.expected, getPodSnapshotAnnotations(pod)) }) } } diff --git a/pkg/restic/restorer.go b/pkg/restic/restorer.go index f06c492da..6a310096d 100644 --- a/pkg/restic/restorer.go +++ b/pkg/restic/restorer.go @@ -31,10 +31,17 @@ import ( "github.com/heptio/velero/pkg/util/boolptr" ) +type RestoreData struct { + Restore *velerov1api.Restore + Pod *corev1api.Pod + PodVolumeBackups []*velerov1api.PodVolumeBackup + SourceNamespace, BackupLocation string +} + // Restorer can execute restic restores of volumes in a pod. type Restorer interface { // RestorePodVolumes restores all annotated volumes in a pod. 
- RestorePodVolumes(restore *velerov1api.Restore, pod *corev1api.Pod, sourceNamespace, backupLocation string, log logrus.FieldLogger) []error + RestorePodVolumes(RestoreData) []error } type restorer struct { @@ -84,14 +91,13 @@ func newRestorer( return r } -func (r *restorer) RestorePodVolumes(restore *velerov1api.Restore, pod *corev1api.Pod, sourceNamespace, backupLocation string, log logrus.FieldLogger) []error { - // get volumes to restore from pod's annotations - volumesToRestore := GetPodSnapshotAnnotations(pod) +func (r *restorer) RestorePodVolumes(data RestoreData) []error { + volumesToRestore := GetVolumeBackupsForPod(data.PodVolumeBackups, data.Pod) if len(volumesToRestore) == 0 { return nil } - repo, err := r.repoEnsurer.EnsureRepo(r.ctx, restore.Namespace, sourceNamespace, backupLocation) + repo, err := r.repoEnsurer.EnsureRepo(r.ctx, data.Restore.Namespace, data.SourceNamespace, data.BackupLocation) if err != nil { return []error{err} } @@ -104,7 +110,7 @@ func (r *restorer) RestorePodVolumes(restore *velerov1api.Restore, pod *corev1ap resultsChan := make(chan *velerov1api.PodVolumeRestore) r.resultsLock.Lock() - r.results[resultsKey(pod.Namespace, pod.Name)] = resultsChan + r.results[resultsKey(data.Pod.Namespace, data.Pod.Name)] = resultsChan r.resultsLock.Unlock() var ( @@ -113,7 +119,7 @@ func (r *restorer) RestorePodVolumes(restore *velerov1api.Restore, pod *corev1ap ) for volume, snapshot := range volumesToRestore { - volumeRestore := newPodVolumeRestore(restore, pod, volume, snapshot, backupLocation, repo.Spec.ResticIdentifier) + volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, snapshot, repo.Spec.ResticIdentifier) if err := errorOnly(r.repoManager.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(volumeRestore)); err != nil { errs = append(errs, errors.WithStack(err)) @@ -136,13 +142,13 @@ ForEachVolume: } r.resultsLock.Lock() - delete(r.results, resultsKey(pod.Namespace, pod.Name)) + delete(r.results, resultsKey(data.Pod.Namespace, data.Pod.Name)) r.resultsLock.Unlock() return errs } -func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, volume, snapshot, backupLocation, repoIdentifier string) *velerov1api.PodVolumeRestore { +func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backupLocation, volume, snapshot, repoIdentifier string) *velerov1api.PodVolumeRestore { return &velerov1api.PodVolumeRestore{ ObjectMeta: metav1.ObjectMeta{ Namespace: restore.Namespace, diff --git a/pkg/restore/restic_restore_action.go b/pkg/restore/restic_restore_action.go index 6b884a087..964255634 100644 --- a/pkg/restore/restic_restore_action.go +++ b/pkg/restore/restic_restore_action.go @@ -28,8 +28,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/buildinfo" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" "github.com/heptio/velero/pkg/plugin/framework" "github.com/heptio/velero/pkg/plugin/velero" "github.com/heptio/velero/pkg/restic" @@ -43,14 +45,16 @@ const ( ) type ResticRestoreAction struct { - logger logrus.FieldLogger - client corev1client.ConfigMapInterface + logger logrus.FieldLogger + client corev1client.ConfigMapInterface + podVolumeBackupClient velerov1client.PodVolumeBackupInterface } -func NewResticRestoreAction(logger logrus.FieldLogger, 
client corev1client.ConfigMapInterface) *ResticRestoreAction { +func NewResticRestoreAction(logger logrus.FieldLogger, client corev1client.ConfigMapInterface, podVolumeBackupClient velerov1client.PodVolumeBackupInterface) *ResticRestoreAction { return &ResticRestoreAction{ - logger: logger, - client: client, + logger: logger, + client: client, + podVolumeBackupClient: podVolumeBackupClient, } } @@ -71,13 +75,23 @@ func (a *ResticRestoreAction) Execute(input *velero.RestoreItemActionExecuteInpu log := a.logger.WithField("pod", kube.NamespaceAndName(&pod)) - volumeSnapshots := restic.GetPodSnapshotAnnotations(&pod) + opts := restic.NewPodVolumeBackupListOptions(input.Restore.Spec.BackupName) + podVolumeBackupList, err := a.podVolumeBackupClient.List(opts) + if err != nil { + return nil, errors.WithStack(err) + } + + var podVolumeBackups []*velerov1api.PodVolumeBackup + for i := range podVolumeBackupList.Items { + podVolumeBackups = append(podVolumeBackups, &podVolumeBackupList.Items[i]) + } + volumeSnapshots := restic.GetVolumeBackupsForPod(podVolumeBackups, &pod) if len(volumeSnapshots) == 0 { - log.Debug("No restic snapshot ID annotations found") + log.Debug("No restic backups found for pod") return velero.NewRestoreItemActionExecuteOutput(input.Item), nil } - log.Info("Restic snapshot ID annotations found") + log.Info("Restic backups for pod found") // TODO we might want/need to get plugin config at the top of this method at some point; for now, wait // until we know we're doing a restore before getting config. diff --git a/pkg/restore/restic_restore_action_test.go b/pkg/restore/restic_restore_action_test.go index 7a842094e..bf89c55f7 100644 --- a/pkg/restore/restic_restore_action_test.go +++ b/pkg/restore/restic_restore_action_test.go @@ -31,6 +31,7 @@ import ( api "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/buildinfo" + velerofake "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" "github.com/heptio/velero/pkg/plugin/velero" "github.com/heptio/velero/pkg/util/kube" velerotest "github.com/heptio/velero/pkg/util/test" @@ -149,9 +150,11 @@ func TestResticRestoreActionExecute(t *testing.T) { } clientset := fake.NewSimpleClientset() + clientsetVelero := velerofake.NewSimpleClientset() a := NewResticRestoreAction( logrus.StandardLogger(), clientset.CoreV1().ConfigMaps("velero"), + clientsetVelero.VeleroV1().PodVolumeBackups("velero"), ) // method under test diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 70f1cb730..80a2fd401 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -43,7 +43,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/client" "github.com/heptio/velero/pkg/discovery" listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" @@ -63,14 +63,20 @@ type VolumeSnapshotterGetter interface { GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) } +type Request struct { + *velerov1api.Restore + + Log logrus.FieldLogger + Backup *velerov1api.Backup + PodVolumeBackups []*velerov1api.PodVolumeBackup + VolumeSnapshots []*volume.Snapshot + BackupReader io.Reader +} + // Restorer knows how to restore a backup. type Restorer interface { // Restore restores the backup data from backupReader, returning warnings and errors. 
- Restore(log logrus.FieldLogger, - restore *api.Restore, - backup *api.Backup, - volumeSnapshots []*volume.Snapshot, - backupReader io.Reader, + Restore(req Request, actions []velero.RestoreItemAction, snapshotLocationLister listers.VolumeSnapshotLocationLister, volumeSnapshotterGetter VolumeSnapshotterGetter, @@ -176,11 +182,7 @@ func NewKubernetesRestorer( // and using data from the provided backup/backup reader. Returns a warnings and errors RestoreResult, // respectively, summarizing info about the restore. func (kr *kubernetesRestorer) Restore( - log logrus.FieldLogger, - restore *api.Restore, - backup *api.Backup, - volumeSnapshots []*volume.Snapshot, - backupReader io.Reader, + req Request, actions []velero.RestoreItemAction, snapshotLocationLister listers.VolumeSnapshotLocationLister, volumeSnapshotterGetter VolumeSnapshotterGetter, @@ -189,7 +191,7 @@ func (kr *kubernetesRestorer) Restore( // Nothing Selector, i.e. a selector that matches nothing. We want // a selector that matches everything. This can be accomplished by // passing a non-nil empty LabelSelector. - ls := restore.Spec.LabelSelector + ls := req.Restore.Spec.LabelSelector if ls == nil { ls = &metav1.LabelSelector{} } @@ -200,16 +202,16 @@ func (kr *kubernetesRestorer) Restore( } // get resource includes-excludes - resourceIncludesExcludes := getResourceIncludesExcludes(kr.discoveryHelper, restore.Spec.IncludedResources, restore.Spec.ExcludedResources) - prioritizedResources, err := prioritizeResources(kr.discoveryHelper, kr.resourcePriorities, resourceIncludesExcludes, log) + resourceIncludesExcludes := getResourceIncludesExcludes(kr.discoveryHelper, req.Restore.Spec.IncludedResources, req.Restore.Spec.ExcludedResources) + prioritizedResources, err := prioritizeResources(kr.discoveryHelper, kr.resourcePriorities, resourceIncludesExcludes, req.Log) if err != nil { return Result{}, Result{Velero: []string{err.Error()}} } // get namespace includes-excludes namespaceIncludesExcludes := collections.NewIncludesExcludes(). - Includes(restore.Spec.IncludedNamespaces...). - Excludes(restore.Spec.ExcludedNamespaces...) + Includes(req.Restore.Spec.IncludedNamespaces...). + Excludes(req.Restore.Spec.ExcludedNamespaces...) 
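// [Editor's aside, hedged] The namespace filter assembled just above follows
// the semantics of the existing collections.IncludesExcludes helper as this
// editor understands them: excludes take precedence over includes, and an
// empty includes list matches everything. A minimal standalone sketch of that
// behavior, assuming the heptio/velero import paths used throughout this patch:
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/heptio/velero/pkg/util/collections"
//	)
//
//	func main() {
//		nsFilter := collections.NewIncludesExcludes().
//			Includes("ns-1", "ns-2").
//			Excludes("ns-2")
//
//		fmt.Println(nsFilter.ShouldInclude("ns-1")) // true: explicitly included
//		fmt.Println(nsFilter.ShouldInclude("ns-2")) // false: excludes win over includes
//		fmt.Println(nsFilter.ShouldInclude("ns-3")) // false: a non-empty includes list is exhaustive
//	}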
resolvedActions, err := resolveActions(actions, kr.discoveryHelper) if err != nil { @@ -217,10 +219,10 @@ func (kr *kubernetesRestorer) Restore( } podVolumeTimeout := kr.resticTimeout - if val := restore.Annotations[api.PodVolumeOperationTimeoutAnnotation]; val != "" { + if val := req.Restore.Annotations[velerov1api.PodVolumeOperationTimeoutAnnotation]; val != "" { parsed, err := time.ParseDuration(val) if err != nil { - log.WithError(errors.WithStack(err)).Errorf("Unable to parse pod volume timeout annotation %s, using server value.", val) + req.Log.WithError(errors.WithStack(err)).Errorf("Unable to parse pod volume timeout annotation %s, using server value.", val) } else { podVolumeTimeout = parsed } @@ -231,31 +233,31 @@ func (kr *kubernetesRestorer) Restore( var resticRestorer restic.Restorer if kr.resticRestorerFactory != nil { - resticRestorer, err = kr.resticRestorerFactory.NewRestorer(ctx, restore) + resticRestorer, err = kr.resticRestorerFactory.NewRestorer(ctx, req.Restore) if err != nil { return Result{}, Result{Velero: []string{err.Error()}} } } pvRestorer := &pvRestorer{ - logger: log, - backup: backup, - snapshotVolumes: backup.Spec.SnapshotVolumes, - restorePVs: restore.Spec.RestorePVs, - volumeSnapshots: volumeSnapshots, + logger: req.Log, + backup: req.Backup, + snapshotVolumes: req.Backup.Spec.SnapshotVolumes, + restorePVs: req.Restore.Spec.RestorePVs, + volumeSnapshots: req.VolumeSnapshots, volumeSnapshotterGetter: volumeSnapshotterGetter, snapshotLocationLister: snapshotLocationLister, } restoreCtx := &context{ - backup: backup, - backupReader: backupReader, - restore: restore, + backup: req.Backup, + backupReader: req.BackupReader, + restore: req.Restore, resourceIncludesExcludes: resourceIncludesExcludes, namespaceIncludesExcludes: namespaceIncludesExcludes, prioritizedResources: prioritizedResources, selector: selector, - log: log, + log: req.Log, dynamicFactory: kr.dynamicFactory, fileSystem: kr.fileSystem, namespaceClient: kr.namespaceClient, @@ -264,10 +266,11 @@ func (kr *kubernetesRestorer) Restore( resticRestorer: resticRestorer, pvsToProvision: sets.NewString(), pvRestorer: pvRestorer, - volumeSnapshots: volumeSnapshots, + volumeSnapshots: req.VolumeSnapshots, + podVolumeBackups: req.PodVolumeBackups, resourceTerminatingTimeout: kr.resourceTerminatingTimeout, extractor: &backupExtractor{ - log: log, + log: req.Log, fileSystem: kr.fileSystem, }, resourceClients: make(map[resourceClientKey]client.Dynamic), @@ -339,9 +342,9 @@ func resolveActions(actions []velero.RestoreItemAction, helper discovery.Helper) } type context struct { - backup *api.Backup + backup *velerov1api.Backup backupReader io.Reader - restore *api.Restore + restore *velerov1api.Restore restoreDir string resourceIncludesExcludes *collections.IncludesExcludes namespaceIncludesExcludes *collections.IncludesExcludes @@ -358,6 +361,7 @@ type context struct { pvsToProvision sets.String pvRestorer PVRestorer volumeSnapshots []*volume.Snapshot + podVolumeBackups []*velerov1api.PodVolumeBackup resourceTerminatingTimeout time.Duration extractor *backupExtractor resourceClients map[resourceClientKey]client.Dynamic @@ -391,7 +395,7 @@ func (ctx *context) restoreFromDir() (Result, Result) { warnings, errs := Result{}, Result{} // Make sure the top level "resources" dir exists: - resourcesDir := filepath.Join(ctx.restoreDir, api.ResourcesDir) + resourcesDir := filepath.Join(ctx.restoreDir, velerov1api.ResourcesDir) rde, err := ctx.fileSystem.DirExists(resourcesDir) if err != nil { addVeleroError(&errs, err) 
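Reviewer note on the lookup this restore path now depends on: GetVolumeBackupsForPod (added to pkg/restic/common.go above) matches PodVolumeBackups to a pod by pod name alone, relying on the caller to have pre-filtered the list to a single backup, and falls back to the deprecated snapshot annotations when nothing matches. A minimal, hedged sketch of the expected behavior, using the velerov1api/corev1api/metav1 aliases from this patch and hand-built illustrative objects:

	pvb := &velerov1api.PodVolumeBackup{
		Spec: velerov1api.PodVolumeBackupSpec{
			Pod:    corev1api.ObjectReference{Name: "my-pod"},
			Volume: "data",
		},
		Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-123"},
	}
	pod := &corev1api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "my-pod"}}

	// The pod name matches, so the result maps volume name -> snapshot ID:
	// map[string]string{"data": "snap-123"}
	volumes := restic.GetVolumeBackupsForPod([]*velerov1api.PodVolumeBackup{pvb}, pod)

	// With no matching PodVolumeBackups, the function falls back to reading
	// the legacy pod annotations via getPodSnapshotAnnotations.
	volumes = restic.GetVolumeBackupsForPod(nil, pod)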
@@ -430,7 +434,7 @@ func (ctx *context) restoreFromDir() (Result, Result) { resourcePath := filepath.Join(resourcesDir, rscDir.Name()) - clusterSubDir := filepath.Join(resourcePath, api.ClusterScopedDir) + clusterSubDir := filepath.Join(resourcePath, velerov1api.ClusterScopedDir) clusterSubDirExists, err := ctx.fileSystem.DirExists(clusterSubDir) if err != nil { addVeleroError(&errs, err) @@ -443,7 +447,7 @@ func (ctx *context) restoreFromDir() (Result, Result) { continue } - nsSubDir := filepath.Join(resourcePath, api.NamespaceScopedDir) + nsSubDir := filepath.Join(resourcePath, velerov1api.NamespaceScopedDir) nsSubDirExists, err := ctx.fileSystem.DirExists(nsSubDir) if err != nil { addVeleroError(&errs, err) @@ -518,9 +522,9 @@ func (ctx *context) restoreFromDir() (Result, Result) { func getItemFilePath(rootDir, groupResource, namespace, name string) string { switch namespace { case "": - return filepath.Join(rootDir, api.ResourcesDir, groupResource, api.ClusterScopedDir, name+".json") + return filepath.Join(rootDir, velerov1api.ResourcesDir, groupResource, velerov1api.ClusterScopedDir, name+".json") default: - return filepath.Join(rootDir, api.ResourcesDir, groupResource, api.NamespaceScopedDir, namespace, name+".json") + return filepath.Join(rootDir, velerov1api.ResourcesDir, groupResource, velerov1api.NamespaceScopedDir, namespace, name+".json") } } @@ -1037,7 +1041,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc // We know the object from the cluster won't have the backup/restore name labels, so // copy them from the object we attempted to restore. labels := obj.GetLabels() - addRestoreLabels(fromCluster, labels[api.RestoreNameLabel], labels[api.BackupNameLabel]) + addRestoreLabels(fromCluster, labels[velerov1api.RestoreNameLabel], labels[velerov1api.BackupNameLabel]) if !equality.Semantic.DeepEqual(fromCluster, obj) { switch groupResource { @@ -1085,30 +1089,42 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc return warnings, errs } - if groupResource == kuberesource.Pods && len(restic.GetPodSnapshotAnnotations(obj)) > 0 { - if ctx.resticRestorer == nil { - ctx.log.Warn("No restic restorer, not restoring pod's volumes") - } else { - ctx.globalWaitGroup.GoErrorSlice(func() []error { - pod := new(v1.Pod) - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil { - ctx.log.WithError(err).Error("error converting unstructured pod") - return []error{err} - } - - if errs := ctx.resticRestorer.RestorePodVolumes(ctx.restore, pod, originalNamespace, ctx.backup.Spec.StorageLocation, ctx.log); errs != nil { - ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes") - return errs - } - - return nil - }) - } + if groupResource == kuberesource.Pods && len(restic.GetVolumeBackupsForPod(ctx.podVolumeBackups, obj)) > 0 { + restorePodVolumeBackups(ctx, createdObj, originalNamespace) } return warnings, errs } +// restorePodVolumeBackups restores the PodVolumeBackups for the given restored pod +func restorePodVolumeBackups(ctx *context, createdObj *unstructured.Unstructured, originalNamespace string) { + if ctx.resticRestorer == nil { + ctx.log.Warn("No restic restorer, not restoring pod's volumes") + } else { + ctx.globalWaitGroup.GoErrorSlice(func() []error { + pod := new(v1.Pod) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil { + 
ctx.log.WithError(err).Error("error converting unstructured pod") + return []error{err} + } + + data := restic.RestoreData{ + Restore: ctx.restore, + Pod: pod, + PodVolumeBackups: ctx.podVolumeBackups, + SourceNamespace: originalNamespace, + BackupLocation: ctx.backup.Spec.StorageLocation, + } + if errs := ctx.resticRestorer.RestorePodVolumes(data); errs != nil { + ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes") + return errs + } + + return nil + }) + } +} + func hasDeleteReclaimPolicy(obj map[string]interface{}) bool { policy, _, _ := unstructured.NestedString(obj, "spec", "persistentVolumeReclaimPolicy") return policy == string(v1.PersistentVolumeReclaimDelete) @@ -1147,8 +1163,8 @@ func addRestoreLabels(obj metav1.Object, restoreName, backupName string) { labels = make(map[string]string) } - labels[api.BackupNameLabel] = label.GetValidName(backupName) - labels[api.RestoreNameLabel] = label.GetValidName(restoreName) + labels[velerov1api.BackupNameLabel] = label.GetValidName(backupName) + labels[velerov1api.RestoreNameLabel] = label.GetValidName(restoreName) obj.SetLabels(labels) } diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index c70cc7ec5..737a24a82 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -507,12 +507,16 @@ func TestRestoreResourceFiltering(t *testing.T) { } require.NoError(t, h.restorer.discoveryHelper.Refresh()) + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } warnings, errs := h.restorer.Restore( - h.log, - tc.restore, - tc.backup, - nil, // volume snapshots - tc.tarball, + data, nil, // actions nil, // snapshot location lister nil, // volume snapshotter getter @@ -566,12 +570,16 @@ func TestRestoreNamespaceMapping(t *testing.T) { } require.NoError(t, h.restorer.discoveryHelper.Refresh()) + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } warnings, errs := h.restorer.Restore( - h.log, - tc.restore, - tc.backup, - nil, // volume snapshots - tc.tarball, + data, nil, // actions nil, // snapshot location lister nil, // volume snapshotter getter @@ -644,12 +652,16 @@ func TestRestoreResourcePriorities(t *testing.T) { } require.NoError(t, h.restorer.discoveryHelper.Refresh()) + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } warnings, errs := h.restorer.Restore( - h.log, - tc.restore, - tc.backup, - nil, // volume snapshots - tc.tarball, + data, nil, // actions nil, // snapshot location lister nil, // volume snapshotter getter @@ -717,12 +729,16 @@ func TestInvalidTarballContents(t *testing.T) { } require.NoError(t, h.restorer.discoveryHelper.Refresh()) + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } warnings, errs := h.restorer.Restore( - h.log, - tc.restore, - tc.backup, - nil, // volume snapshots - tc.tarball, + data, nil, // actions nil, // snapshot location lister nil, // volume snapshotter getter @@ -928,12 +944,16 @@ func TestRestoreItems(t *testing.T) { h.addItems(t, r) } + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: 
tc.tarball, + } warnings, errs := h.restorer.Restore( - h.log, - tc.restore, - tc.backup, - nil, // volume snapshots - tc.tarball, + data, nil, // actions nil, // snapshot location lister nil, // volume snapshotter getter @@ -1118,12 +1138,16 @@ func TestRestoreActionsRunForCorrectItems(t *testing.T) { actions = append(actions, action) } + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } warnings, errs := h.restorer.Restore( - h.log, - tc.restore, - tc.backup, - nil, // volume snapshots - tc.tarball, + data, actions, nil, // snapshot location lister nil, // volume snapshotter getter @@ -1253,12 +1277,16 @@ func TestRestoreActionModifications(t *testing.T) { } } + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } warnings, errs := h.restorer.Restore( - h.log, - tc.restore, - tc.backup, - nil, // volume snapshots - tc.tarball, + data, tc.actions, nil, // snapshot location lister nil, // volume snapshotter getter @@ -1416,12 +1444,16 @@ func TestRestoreActionAdditionalItems(t *testing.T) { h.addItems(t, r) } + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } warnings, errs := h.restorer.Restore( - h.log, - tc.restore, - tc.backup, - nil, // volume snapshots - tc.tarball, + data, tc.actions, nil, // snapshot location lister nil, // volume snapshotter getter @@ -1707,6 +1739,7 @@ func TestRestorePersistentVolumes(t *testing.T) { volumeSnapshots []*volume.Snapshot volumeSnapshotLocations []*velerov1api.VolumeSnapshotLocation volumeSnapshotterGetter volumeSnapshotterGetter + podVolumeBackups []*velerov1api.PodVolumeBackup want []*test.APIResource }{ { @@ -1997,6 +2030,71 @@ func TestRestorePersistentVolumes(t *testing.T) { ), }, }, + + { + name: "include podvolumebackups, and when a PV with a reclaim policy of retain has a snapshot and exists in-cluster, neither the snapshot nor the PV are restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("old-volume"). + Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("old-volume"). + Result(), + ), + test.PVCs(), + }, + volumeSnapshots: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "pv-1", + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "snapshot-1", + }, + }, + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: "default", + }, + Spec: velerov1api.VolumeSnapshotLocationSpec{ + Provider: "provider-1", + }, + }, + }, + volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + // the volume snapshotter fake is not configured with any snapshotID -> volumeID + // mappings as a way to verify that the snapshot is not restored, since if it were + // restored, we'd get an error of "snapshot not found". 
+ "provider-1": &volumeSnapshotter{}, + }, + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").Result(), + }, + want: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("old-volume"). + Result(), + ), + }, + }, } for _, tc := range tests { @@ -2025,12 +2123,16 @@ func TestRestorePersistentVolumes(t *testing.T) { } } + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: tc.podVolumeBackups, + VolumeSnapshots: tc.volumeSnapshots, + BackupReader: tc.tarball, + } warnings, errs := h.restorer.Restore( - h.log, - tc.restore, - tc.backup, - tc.volumeSnapshots, - tc.tarball, + data, nil, // actions vslInformer.Lister(), tc.volumeSnapshotterGetter,