diff --git a/changelogs/unreleased/1577-carlisia b/changelogs/unreleased/1577-carlisia new file mode 100644 index 000000000..89070b89d --- /dev/null +++ b/changelogs/unreleased/1577-carlisia @@ -0,0 +1 @@ +Store restic PodVolumeBackups in obj storage & use that as source of truth like regular backups. \ No newline at end of file diff --git a/pkg/backup/builder.go b/pkg/backup/backup_builder.go similarity index 96% rename from pkg/backup/builder.go rename to pkg/backup/backup_builder.go index b7984946e..293641983 100644 --- a/pkg/backup/builder.go +++ b/pkg/backup/backup_builder.go @@ -31,12 +31,12 @@ type Builder struct { // NewBuilder returns a Builder for a Backup with no namespace/name. func NewBuilder() *Builder { - return NewNamedBuilder("", "") + return NewNamedBackupBuilder("", "") } -// NewNamedBuilder returns a Builder for a Backup with the specified namespace +// NewNamedBackupBuilder returns a Builder for a Backup with the specified namespace // and name. -func NewNamedBuilder(namespace, name string) *Builder { +func NewNamedBackupBuilder(namespace, name string) *Builder { return &Builder{ backup: velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/backup/backup_new_test.go b/pkg/backup/backup_new_test.go index 0c5fe48cd..6fa893cf1 100644 --- a/pkg/backup/backup_new_test.go +++ b/pkg/backup/backup_new_test.go @@ -2101,7 +2101,7 @@ func newSnapshotLocation(ns, name, provider string) *velerov1.VolumeSnapshotLoca } func defaultBackup() *Builder { - return NewNamedBuilder(velerov1.DefaultNamespace, "backup-1") + return NewNamedBackupBuilder(velerov1.DefaultNamespace, "backup-1") } func toUnstructuredOrFail(t *testing.T, obj interface{}) map[string]interface{} { diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index b581841cc..859a8985d 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -33,6 +33,7 @@ import ( kubeerrs "k8s.io/apimachinery/pkg/util/errors" api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/client" "github.com/heptio/velero/pkg/discovery" "github.com/heptio/velero/pkg/kuberesource" @@ -217,15 +218,10 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim } if groupResource == kuberesource.Pods && pod != nil { - // this function will return partial results, so process volumeSnapshots + // this function will return partial results, so process podVolumeBackups // even if there are errors. - volumeSnapshots, errs := ib.backupPodVolumes(log, pod, resticVolumesToBackup) - - // annotate the pod with the successful volume snapshots - for volume, snapshot := range volumeSnapshots { - restic.SetPodSnapshotAnnotation(metadata, volume, snapshot) - } - + podVolumeBackups, errs := ib.backupPodVolumes(log, pod, resticVolumesToBackup) + ib.backupRequest.PodVolumeBackups = podVolumeBackups backupErrs = append(backupErrs, errs...) } @@ -269,9 +265,9 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim return nil } -// backupPodVolumes triggers restic backups of the specified pod volumes, and returns a map of volume name -> snapshot ID +// backupPodVolumes triggers restic backups of the specified pod volumes, and returns a list of PodVolumeBackups // for volumes that were successfully backed up, and a slice of any errors that were encountered. 
-func (ib *defaultItemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api.Pod, volumes []string) (map[string]string, []error) { +func (ib *defaultItemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api.Pod, volumes []string) ([]*velerov1api.PodVolumeBackup, []error) { if len(volumes) == 0 { return nil, nil } diff --git a/pkg/backup/item_backupper_test.go b/pkg/backup/item_backupper_test.go index 5900a5890..ba8349f20 100644 --- a/pkg/backup/item_backupper_test.go +++ b/pkg/backup/item_backupper_test.go @@ -28,14 +28,12 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" - v1 "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/plugin/velero" - resticmocks "github.com/heptio/velero/pkg/restic/mocks" "github.com/heptio/velero/pkg/util/collections" velerotest "github.com/heptio/velero/pkg/util/test" ) @@ -102,10 +100,10 @@ func TestBackupItemNoSkips(t *testing.T) { w = &fakeTarWriter{} ) - backup.Backup = new(v1.Backup) + backup.Backup = new(velerov1api.Backup) backup.NamespaceIncludesExcludes = collections.NewIncludesExcludes() backup.ResourceIncludesExcludes = collections.NewIncludesExcludes() - backup.SnapshotLocations = []*v1.VolumeSnapshotLocation{ + backup.SnapshotLocations = []*velerov1api.VolumeSnapshotLocation{ newSnapshotLocation("velero", "default", "default"), } @@ -245,7 +243,7 @@ func TestBackupItemNoSkips(t *testing.T) { type addAnnotationAction struct{} -func (a *addAnnotationAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { +func (a *addAnnotationAction) Execute(item runtime.Unstructured, backup *velerov1api.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { // since item actions run out-of-proc, do a deep-copy here to simulate passing data // across a process boundary. copy := item.(*unstructured.Unstructured).DeepCopy() @@ -269,74 +267,6 @@ func (a *addAnnotationAction) AppliesTo() (velero.ResourceSelector, error) { panic("not implemented") } -func TestResticAnnotationsPersist(t *testing.T) { - var ( - w = &fakeTarWriter{} - obj = &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "myns", - "name": "bar", - "annotations": map[string]interface{}{ - "backup.velero.io/backup-volumes": "volume-1,volume-2", - }, - }, - }, - } - req = &Request{ - NamespaceIncludesExcludes: collections.NewIncludesExcludes(), - ResourceIncludesExcludes: collections.NewIncludesExcludes(), - ResolvedActions: []resolvedAction{ - { - BackupItemAction: &addAnnotationAction{}, - namespaceIncludesExcludes: collections.NewIncludesExcludes(), - resourceIncludesExcludes: collections.NewIncludesExcludes(), - selector: labels.Everything(), - }, - }, - } - resticBackupper = &resticmocks.Backupper{} - b = (&defaultItemBackupperFactory{}).newItemBackupper( - req, - make(map[itemKey]struct{}), - nil, - w, - &velerotest.FakeDynamicFactory{}, - velerotest.NewFakeDiscoveryHelper(true, nil), - resticBackupper, - newPVCSnapshotTracker(), - nil, - ).(*defaultItemBackupper) - ) - - resticBackupper. - On("BackupPodVolumes", mock.Anything, mock.Anything, mock.Anything). 
- Return(map[string]string{"volume-1": "snapshot-1", "volume-2": "snapshot-2"}, nil) - - // our expected backed-up object is the passed-in object, plus the annotation - // that the backup item action adds, plus the annotations that the restic - // backupper adds - expected := obj.DeepCopy() - annotations := expected.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - annotations["foo"] = "bar" - annotations["snapshot.velero.io/volume-1"] = "snapshot-1" - annotations["snapshot.velero.io/volume-2"] = "snapshot-2" - expected.SetAnnotations(annotations) - - // method under test - require.NoError(t, b.backupItem(velerotest.NewLogger(), obj, schema.ParseGroupResource("pods"))) - - // get the actual backed-up item - require.Len(t, w.data, 1) - actual, err := velerotest.GetAsMap(string(w.data[0])) - require.NoError(t, err) - - assert.EqualValues(t, expected.Object, actual) -} - type fakeTarWriter struct { closeCalled bool headers []*tar.Header diff --git a/pkg/backup/pod_volume_backup_builder.go b/pkg/backup/pod_volume_backup_builder.go new file mode 100644 index 000000000..9ab2b47e2 --- /dev/null +++ b/pkg/backup/pod_volume_backup_builder.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" +) + +// PodVolumeBackupBuilder is a helper for concisely constructing PodVolumeBackup API objects. +type PodVolumeBackupBuilder struct { + podVolumeBackup velerov1api.PodVolumeBackup +} + +// NewPodVolumeBackupBuilder returns a PodVolumeBackupBuilder for a PodVolumeBackup with no namespace/name. +func NewPodVolumeBackupBuilder() *PodVolumeBackupBuilder { + return NewNamedPodVolumeBackupBuilder("", "") +} + +// NewNamedPodVolumeBackupBuilder returns a PodVolumeBackupBuilder for a Backup with the specified namespace +// and name. +func NewNamedPodVolumeBackupBuilder(namespace, name string) *PodVolumeBackupBuilder { + return &PodVolumeBackupBuilder{ + podVolumeBackup: velerov1api.PodVolumeBackup{ + TypeMeta: metav1.TypeMeta{ + APIVersion: velerov1api.SchemeGroupVersion.String(), + Kind: "PodVolumeBackup", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + }, + } +} + +// PodVolumeBackup returns the built PodVolumeBackup API object. +func (p *PodVolumeBackupBuilder) PodVolumeBackup() *velerov1api.PodVolumeBackup { + return &p.podVolumeBackup +} + +// Namespace sets the PodVolumeBackup's namespace. +func (p *PodVolumeBackupBuilder) Namespace(namespace string) *PodVolumeBackupBuilder { + p.podVolumeBackup.Namespace = namespace + return p +} + +// Name sets the PodVolumeBackup's name. +func (p *PodVolumeBackupBuilder) Name(name string) *PodVolumeBackupBuilder { + p.podVolumeBackup.Name = name + return p +} + +// Labels sets the PodVolumeBackup's labels. 
+func (p *PodVolumeBackupBuilder) Labels(vals ...string) *PodVolumeBackupBuilder { + if p.podVolumeBackup.Labels == nil { + p.podVolumeBackup.Labels = map[string]string{} + } + + // if we don't have an even number of values, e.g. a key and a value + // for each pair, add an empty-string value at the end to serve as + // the default value for the last key. + if len(vals)%2 != 0 { + vals = append(vals, "") + } + + for i := 0; i < len(vals); i += 2 { + p.podVolumeBackup.Labels[vals[i]] = vals[i+1] + } + return p +} + +// Phase sets the PodVolumeBackup's phase. +func (p *PodVolumeBackupBuilder) Phase(phase velerov1api.PodVolumeBackupPhase) *PodVolumeBackupBuilder { + p.podVolumeBackup.Status.Phase = phase + return p +} diff --git a/pkg/backup/request.go b/pkg/backup/request.go index d405ea606..495442b55 100644 --- a/pkg/backup/request.go +++ b/pkg/backup/request.go @@ -18,5 +18,6 @@ type Request struct { ResourceHooks []resourceHook ResolvedActions []resolvedAction - VolumeSnapshots []*volume.Snapshot + VolumeSnapshots []*volume.Snapshot + PodVolumeBackups []*velerov1api.PodVolumeBackup } diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index 7d98b0264..b9466e3a0 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -557,10 +557,12 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string backupSyncControllerRunInfo := func() controllerRunInfo { backupSyncContoller := controller.NewBackupSyncController( + s.veleroClient.VeleroV1(), s.veleroClient.VeleroV1(), s.veleroClient.VeleroV1(), s.sharedInformerFactory.Velero().V1().Backups(), s.sharedInformerFactory.Velero().V1().BackupStorageLocations(), + s.sharedInformerFactory.Velero().V1().PodVolumeBackups(), s.config.backupSyncPeriod, s.namespace, s.config.defaultBackupLocation, diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index def1dac13..91247e59d 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -576,6 +576,17 @@ func persistBackup(backup *pkgbackup.Request, backupContents, backupLog *os.File errs = append(errs, errors.Wrap(err, "error closing gzip writer")) } + podVolumeBackups := new(bytes.Buffer) + gzw = gzip.NewWriter(podVolumeBackups) + defer gzw.Close() + + if err := json.NewEncoder(gzw).Encode(backup.PodVolumeBackups); err != nil { + errs = append(errs, errors.Wrap(err, "error encoding pod volume backups")) + } + if err := gzw.Close(); err != nil { + errs = append(errs, errors.Wrap(err, "error closing gzip writer")) + } + if len(errs) > 0 { // Don't upload the JSON files or backup tarball if encoding to json fails. 
backupJSON = nil @@ -583,7 +594,15 @@ func persistBackup(backup *pkgbackup.Request, backupContents, backupLog *os.File volumeSnapshots = nil } - if err := backupStore.PutBackup(backup.Name, backupJSON, backupContents, backupLog, volumeSnapshots); err != nil { + backupInfo := persistence.BackupInfo{ + Name: backup.Name, + Metadata: backupJSON, + Contents: backupContents, + Log: backupLog, + PodVolumeBackups: podVolumeBackups, + VolumeSnapshots: volumeSnapshots, + } + if err := backupStore.PutBackup(backupInfo); err != nil { errs = append(errs, err) } diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index 801ff2e18..4494e2370 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -57,7 +57,7 @@ func (b *fakeBackupper) Backup(logger logrus.FieldLogger, backup *pkgbackup.Requ } func defaultBackup() *pkgbackup.Builder { - return pkgbackup.NewNamedBuilder(velerov1api.DefaultNamespace, "backup-1") + return pkgbackup.NewNamedBackupBuilder(velerov1api.DefaultNamespace, "backup-1") } func TestProcessBackupNonProcessedItems(t *testing.T) { @@ -556,16 +556,18 @@ func TestProcessBackupCompletions(t *testing.T) { pluginManager.On("GetBackupItemActions").Return(nil, nil) pluginManager.On("CleanupClients").Return(nil) - backupper.On("Backup", mock.Anything, mock.Anything, mock.Anything, []velero.BackupItemAction(nil), pluginManager).Return(nil) - - // Ensure we have a CompletionTimestamp when uploading. - // Failures will display the bytes in buf. - completionTimestampIsPresent := func(buf *bytes.Buffer) bool { - return strings.Contains(buf.String(), `"completionTimestamp": "2006-01-02T22:04:05Z"`) - } backupStore.On("BackupExists", test.backupLocation.Spec.StorageType.ObjectStorage.Bucket, test.backup.Name).Return(test.backupExists, test.existenceCheckError) - backupStore.On("PutBackup", test.backup.Name, mock.MatchedBy(completionTimestampIsPresent), mock.Anything, mock.Anything, mock.Anything).Return(nil) + + // Ensure we have a CompletionTimestamp when uploading and that the backup name matches the backup in the object store. + // Failures will display the bytes in buf. 
+ hasNameAndCompletionTimestamp := func(info persistence.BackupInfo) bool { + buf := new(bytes.Buffer) + buf.ReadFrom(info.Metadata) + return info.Name == test.backup.Name && + strings.Contains(buf.String(), `"completionTimestamp": "2006-01-02T22:04:05Z"`) + } + backupStore.On("PutBackup", mock.MatchedBy(hasNameAndCompletionTimestamp)).Return(nil) // add the test's backup to the informer/lister store require.NotNil(t, test.backup) diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index b2bdd1adf..a84a10e29 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -43,8 +43,10 @@ type backupSyncController struct { backupClient velerov1client.BackupsGetter backupLocationClient velerov1client.BackupStorageLocationsGetter + podVolumeBackupClient velerov1client.PodVolumeBackupsGetter backupLister listers.BackupLister backupStorageLocationLister listers.BackupStorageLocationLister + podVolumeBackupLister listers.PodVolumeBackupLister namespace string defaultBackupLocation string newPluginManager func(logrus.FieldLogger) clientmgmt.Manager @@ -54,8 +56,10 @@ type backupSyncController struct { func NewBackupSyncController( backupClient velerov1client.BackupsGetter, backupLocationClient velerov1client.BackupStorageLocationsGetter, + podVolumeBackupClient velerov1client.PodVolumeBackupsGetter, backupInformer informers.BackupInformer, backupStorageLocationInformer informers.BackupStorageLocationInformer, + podVolumeBackupInformer informers.PodVolumeBackupInformer, syncPeriod time.Duration, namespace string, defaultBackupLocation string, @@ -71,10 +75,12 @@ func NewBackupSyncController( genericController: newGenericController("backup-sync", logger), backupClient: backupClient, backupLocationClient: backupLocationClient, + podVolumeBackupClient: podVolumeBackupClient, namespace: namespace, defaultBackupLocation: defaultBackupLocation, backupLister: backupInformer.Lister(), backupStorageLocationLister: backupStorageLocationInformer.Lister(), + podVolumeBackupLister: podVolumeBackupInformer.Lister(), // use variables to refer to these functions so they can be // replaced with fakes for testing. @@ -159,7 +165,7 @@ func (c *backupSyncController) run() { backupStore, err := c.newBackupStore(location, pluginManager, log) if err != nil { - log.WithError(err).Error("Error getting backup store for location") + log.WithError(err).Error("Error getting backup store for this location") continue } @@ -167,7 +173,7 @@ func (c *backupSyncController) run() { if !ok { continue } - log.Infof("Syncing contents of backup store into cluster") + log.Info("Syncing contents of backup store into cluster") res, err := backupStore.ListBackups() if err != nil { @@ -179,26 +185,16 @@ func (c *backupSyncController) run() { for backupName := range backupStoreBackups { log = log.WithField("backup", backupName) - log.Debug("Checking backup store backup to see if it needs to be synced into the cluster") + log.Debug("Checking this backup to see if it needs to be synced into the cluster") // use the controller's namespace when getting the backup because that's where we // are syncing backups to, regardless of the namespace of the cloud backup. 
backup, err := c.backupClient.Backups(c.namespace).Get(backupName, metav1.GetOptions{}) if err == nil { log.Debug("Backup already exists in cluster") - - if backup.Spec.StorageLocation != "" { - continue - } - - // pre-v0.10 backups won't initially have a .spec.storageLocation so fill it in - log.Debug("Patching backup's .spec.storageLocation because it's missing") - if err := patchStorageLocation(backup, c.backupClient.Backups(c.namespace), location.Name); err != nil { - log.WithError(err).Error("Error patching backup's .spec.storageLocation") - } - continue } + if !kuberrs.IsNotFound(err) { log.WithError(errors.WithStack(err)).Error("Error getting backup from client, proceeding with sync into cluster") } @@ -220,8 +216,8 @@ func (c *backupSyncController) run() { backup.Labels = make(map[string]string) } backup.Labels[velerov1api.StorageLocationLabel] = label.GetValidName(backup.Spec.StorageLocation) - - _, err = c.backupClient.Backups(backup.Namespace).Create(backup) + // process the regular velero backup + backup, err = c.backupClient.Backups(backup.Namespace).Create(backup) switch { case err != nil && kuberrs.IsAlreadyExists(err): log.Debug("Backup already exists in cluster") @@ -232,6 +228,40 @@ func (c *backupSyncController) run() { default: log.Debug("Synced backup into cluster") } + + // process the pod volume backups from object store, if any + podVolumeBackups, err := backupStore.GetPodVolumeBackups(backupName) + if err != nil { + log.WithError(errors.WithStack(err)).Error("Error getting pod volumes for this backup from backup store") + continue + } + + for _, podVolumeBackup := range podVolumeBackups { + log = log.WithField("podVolumeBackup", podVolumeBackup.Name) + log.Debug("Checking this pod volume backup to see if it needs to be synced into the cluster") + + for _, or := range podVolumeBackup.ObjectMeta.OwnerReferences { + if or.Name == backup.Name { + or.UID = backup.UID + } + } + + if _, ok := podVolumeBackup.Labels[velerov1api.BackupUIDLabel]; ok { + podVolumeBackup.Labels[velerov1api.BackupUIDLabel] = string(backup.UID) + } + + _, err = c.podVolumeBackupClient.PodVolumeBackups(backup.Namespace).Create(podVolumeBackup) + switch { + case err != nil && kuberrs.IsAlreadyExists(err): + log.Debug("Pod volume backup already exists in cluster") + continue + case err != nil && !kuberrs.IsAlreadyExists(err): + log.WithError(errors.WithStack(err)).Error("Error syncing pod volume backup into cluster") + continue + default: + log.Debug("Synced pod volume backup into cluster") + } + } } c.deleteOrphanedBackups(location.Name, backupStoreBackups, log) @@ -280,9 +310,9 @@ func patchStorageLocation(backup *velerov1api.Backup, client velerov1client.Back return nil } -// deleteOrphanedBackups deletes backup objects from Kubernetes that have the specified location +// deleteOrphanedBackups deletes backup objects (CRDs) from Kubernetes that have the specified location // and a phase of Completed, but no corresponding backup in object storage. 
-func (c *backupSyncController) deleteOrphanedBackups(locationName string, cloudBackupNames sets.String, log logrus.FieldLogger) { +func (c *backupSyncController) deleteOrphanedBackups(locationName string, backupStoreBackups sets.String, log logrus.FieldLogger) { locationSelector := labels.Set(map[string]string{ velerov1api.StorageLocationLabel: label.GetValidName(locationName), }).AsSelector() @@ -298,7 +328,7 @@ func (c *backupSyncController) deleteOrphanedBackups(locationName string, cloudB for _, backup := range backups { log = log.WithField("backup", backup.Name) - if backup.Status.Phase != velerov1api.BackupPhaseCompleted || cloudBackupNames.Has(backup.Name) { + if backup.Status.Phase != velerov1api.BackupPhaseCompleted || backupStoreBackups.Has(backup.Name) { continue } diff --git a/pkg/controller/backup_sync_controller_test.go b/pkg/controller/backup_sync_controller_test.go index 9b6f1535a..3465f71d6 100644 --- a/pkg/controller/backup_sync_controller_test.go +++ b/pkg/controller/backup_sync_controller_test.go @@ -32,6 +32,7 @@ import ( core "k8s.io/client-go/testing" velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + pkgbackup "github.com/heptio/velero/pkg/backup" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" "github.com/heptio/velero/pkg/label" @@ -42,6 +43,10 @@ import ( velerotest "github.com/heptio/velero/pkg/util/test" ) +func defaultPodVolumeBackup() *pkgbackup.PodVolumeBackupBuilder { + return pkgbackup.NewNamedPodVolumeBackupBuilder(velerov1api.DefaultNamespace, "pvb-1") +} + func defaultLocationsList(namespace string) []*velerov1api.BackupStorageLocation { return []*velerov1api.BackupStorageLocation{ { @@ -109,13 +114,19 @@ func defaultLocationsListWithLongerLocationName(namespace string) []*velerov1api } func TestBackupSyncControllerRun(t *testing.T) { + type cloudBackupData struct { + backup *velerov1api.Backup + podVolumeBackups []*velerov1api.PodVolumeBackup + } + tests := []struct { - name string - namespace string - locations []*velerov1api.BackupStorageLocation - cloudBackups map[string][]*velerov1api.Backup - existingBackups []*velerov1api.Backup - longLocationNameEnabled bool + name string + namespace string + locations []*velerov1api.BackupStorageLocation + cloudBuckets map[string][]*cloudBackupData + existingBackups []*velerov1api.Backup + existingPodVolumeBackups []*velerov1api.PodVolumeBackup + longLocationNameEnabled bool }{ { name: "no cloud backups", @@ -124,13 +135,19 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "normal case", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - defaultBackup().Namespace("ns-1").Name("backup-1").Backup(), - defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-1").Backup(), + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + }, }, "bucket-2": { - defaultBackup().Namespace("ns-1").Name("backup-3").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-3").Backup(), + }, }, }, }, @@ -138,14 +155,22 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "all synced backups get created in Velero server's namespace", namespace: "velero", locations: defaultLocationsList("velero"), - cloudBackups: map[string][]*velerov1api.Backup{ + 
cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - defaultBackup().Namespace("ns-1").Name("backup-1").Backup(), - defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-1").Backup(), + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + }, }, "bucket-2": { - defaultBackup().Namespace("ns-2").Name("backup-3").Backup(), - defaultBackup().Namespace("velero").Name("backup-4").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-2").Name("backup-3").Backup(), + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("velero").Name("backup-4").Backup(), + }, }, }, }, @@ -153,14 +178,22 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "new backups get synced when some cloud backups already exist in the cluster", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - defaultBackup().Namespace("ns-1").Name("backup-1").Backup(), - defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-1").Backup(), + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + }, }, "bucket-2": { - defaultBackup().Namespace("ns-1").Name("backup-3").Backup(), - defaultBackup().Namespace("ns-1").Name("backup-4").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-3").Backup(), + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-4").Backup(), + }, }, }, existingBackups: []*velerov1api.Backup{ @@ -174,9 +207,11 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "existing backups without a StorageLocation get it filled in", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - defaultBackup().Namespace("ns-1").Name("backup-1").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-1").Backup(), + }, }, }, existingBackups: []*velerov1api.Backup{ @@ -189,13 +224,19 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "backup storage location names and labels get updated", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - defaultBackup().Namespace("ns-1").Name("backup-1").StorageLocation("foo").Labels(velerov1api.StorageLocationLabel, "foo").Backup(), - defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-1").StorageLocation("foo").Labels(velerov1api.StorageLocationLabel, "foo").Backup(), + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + }, }, "bucket-2": { - defaultBackup().Namespace("ns-1").Name("backup-3").StorageLocation("bar").Labels(velerov1api.StorageLocationLabel, "bar").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-3").StorageLocation("bar").Labels(velerov1api.StorageLocationLabel, "bar").Backup(), + }, }, }, }, @@ -204,16 +245,94 @@ func TestBackupSyncControllerRun(t *testing.T) { namespace: "ns-1", locations: defaultLocationsListWithLongerLocationName("ns-1"), longLocationNameEnabled: true, - 
cloudBackups: map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - defaultBackup().Namespace("ns-1").Name("backup-1").StorageLocation("foo").Labels(velerov1api.StorageLocationLabel, "foo").Backup(), - defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-1").StorageLocation("foo").Labels(velerov1api.StorageLocationLabel, "foo").Backup(), + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + }, }, "bucket-2": { - defaultBackup().Namespace("ns-1").Name("backup-3").StorageLocation("bar").Labels(velerov1api.StorageLocationLabel, "bar").Backup(), + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-3").StorageLocation("bar").Labels(velerov1api.StorageLocationLabel, "bar").Backup(), + }, }, }, }, + { + name: "all synced backups and pod volume backups get created in Velero server's namespace", + namespace: "ns-1", + locations: defaultLocationsList("ns-1"), + cloudBuckets: map[string][]*cloudBackupData{ + "bucket-1": { + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-1").Backup(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-1").PodVolumeBackup(), + }, + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-2").PodVolumeBackup(), + }, + }, + }, + "bucket-2": { + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-3").Backup(), + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-4").Backup(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-1").PodVolumeBackup(), + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-2").PodVolumeBackup(), + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-3").PodVolumeBackup(), + }, + }, + }, + }, + }, + { + name: "new pod volume backups get synched when some pod volume backups already exist in the cluster", + namespace: "ns-1", + locations: defaultLocationsList("ns-1"), + cloudBuckets: map[string][]*cloudBackupData{ + "bucket-1": { + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-1").Backup(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-1").PodVolumeBackup(), + }, + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-2").Backup(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-3").PodVolumeBackup(), + }, + }, + }, + "bucket-2": { + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-3").Backup(), + }, + &cloudBackupData{ + backup: defaultBackup().Namespace("ns-1").Name("backup-4").Backup(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-1").PodVolumeBackup(), + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-5").PodVolumeBackup(), + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-6").PodVolumeBackup(), + }, + }, + }, + }, + existingPodVolumeBackups: []*velerov1api.PodVolumeBackup{ + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-1").PodVolumeBackup(), + defaultPodVolumeBackup().Namespace("ns-1").Name("pvb-2").PodVolumeBackup(), + }, + }, } for _, test 
:= range tests { @@ -226,10 +345,12 @@ func TestBackupSyncControllerRun(t *testing.T) { ) c := NewBackupSyncController( + client.VeleroV1(), client.VeleroV1(), client.VeleroV1(), sharedInformers.Velero().V1().Backups(), sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().PodVolumeBackups(), time.Duration(0), test.namespace, "", @@ -256,9 +377,10 @@ func TestBackupSyncControllerRun(t *testing.T) { backupStore.On("GetRevision").Return("foo", nil) var backupNames []string - for _, b := range test.cloudBackups[location.Spec.ObjectStorage.Bucket] { - backupNames = append(backupNames, b.Name) - backupStore.On("GetBackupMetadata", b.Name).Return(b, nil) + for _, bucket := range test.cloudBuckets[location.Spec.ObjectStorage.Bucket] { + backupNames = append(backupNames, bucket.backup.Name) + backupStore.On("GetBackupMetadata", bucket.backup.Name).Return(bucket.backup, nil) + backupStore.On("GetPodVolumeBackups", bucket.backup.Name).Return(bucket.podVolumeBackups, nil) } backupStore.On("ListBackups").Return(backupNames, nil) } @@ -269,11 +391,18 @@ func TestBackupSyncControllerRun(t *testing.T) { _, err := client.VeleroV1().Backups(test.namespace).Create(existingBackup) require.NoError(t, err) } + + for _, existingPodVolumeBackup := range test.existingPodVolumeBackups { + require.NoError(t, sharedInformers.Velero().V1().PodVolumeBackups().Informer().GetStore().Add(existingPodVolumeBackup)) + + _, err := client.VeleroV1().PodVolumeBackups(test.namespace).Create(existingPodVolumeBackup) + require.NoError(t, err) + } client.ClearActions() c.run() - for bucket, backups := range test.cloudBackups { + for bucket, backupDataSet := range test.cloudBuckets { // figure out which location this bucket is for; we need this for verification // purposes later var location *velerov1api.BackupStorageLocation @@ -285,14 +414,15 @@ func TestBackupSyncControllerRun(t *testing.T) { } require.NotNil(t, location) - for _, cloudBackup := range backups { - obj, err := client.VeleroV1().Backups(test.namespace).Get(cloudBackup.Name, metav1.GetOptions{}) + // process the cloud backups + for _, cloudBackupData := range backupDataSet { + obj, err := client.VeleroV1().Backups(test.namespace).Get(cloudBackupData.backup.Name, metav1.GetOptions{}) require.NoError(t, err) // did this cloud backup already exist in the cluster? var existing *velerov1api.Backup for _, obj := range test.existingBackups { - if obj.Name == cloudBackup.Name { + if obj.Name == cloudBackupData.backup.Name { existing = obj break } @@ -318,6 +448,28 @@ func TestBackupSyncControllerRun(t *testing.T) { assert.Equal(t, locationName, obj.Labels[velerov1api.StorageLocationLabel]) assert.Equal(t, true, len(obj.Labels[velerov1api.StorageLocationLabel]) <= validation.DNS1035LabelMaxLength) } + + // process the cloud pod volume backups for this backup, if any + for _, podVolumeBackup := range cloudBackupData.podVolumeBackups { + objPodVolumeBackup, err := client.VeleroV1().PodVolumeBackups(test.namespace).Get(podVolumeBackup.Name, metav1.GetOptions{}) + require.NoError(t, err) + + // did this cloud pod volume backup already exist in the cluster? 
+ var existingPodVolumeBackup *velerov1api.PodVolumeBackup + for _, objPodVolumeBackup := range test.existingPodVolumeBackups { + if objPodVolumeBackup.Name == podVolumeBackup.Name { + existingPodVolumeBackup = objPodVolumeBackup + break + } + } + + if existingPodVolumeBackup != nil { + // if this cloud pod volume backup already exists in the cluster, make sure that what we get from the + // client is the existing backup, not the cloud one. + expected := existingPodVolumeBackup.DeepCopy() + assert.Equal(t, expected, objPodVolumeBackup) + } + } } } }) @@ -415,10 +567,12 @@ func TestDeleteOrphanedBackups(t *testing.T) { ) c := NewBackupSyncController( + client.VeleroV1(), client.VeleroV1(), client.VeleroV1(), sharedInformers.Velero().V1().Backups(), sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().PodVolumeBackups(), time.Duration(0), test.namespace, "", @@ -493,10 +647,12 @@ func TestStorageLabelsInDeleteOrphanedBackups(t *testing.T) { ) c := NewBackupSyncController( + client.VeleroV1(), client.VeleroV1(), client.VeleroV1(), sharedInformers.Velero().V1().Backups(), sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().PodVolumeBackups(), time.Duration(0), test.namespace, "", @@ -646,3 +802,13 @@ func numBackups(t *testing.T, c *fake.Clientset, ns string) (int, error) { return len(existingK8SBackups.Items), nil } + +func numPodVolumeBackups(t *testing.T, c *fake.Clientset, ns string) (int, error) { + t.Helper() + existingK8SPodvolumeBackups, err := c.VeleroV1().PodVolumeBackups(ns).List(metav1.ListOptions{}) + if err != nil { + return 0, err + } + + return len(existingK8SPodvolumeBackups.Items), nil +} diff --git a/pkg/persistence/mocks/backup_store.go b/pkg/persistence/mocks/backup_store.go index 340ed9386..b0eb0d6d4 100644 --- a/pkg/persistence/mocks/backup_store.go +++ b/pkg/persistence/mocks/backup_store.go @@ -1,9 +1,10 @@ -// Code generated by mockery v1.0.0 +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ package mocks import io "io" import mock "github.com/stretchr/testify/mock" - +import persistence "github.com/heptio/velero/pkg/persistence" import v1 "github.com/heptio/velero/pkg/apis/velero/v1" import volume "github.com/heptio/velero/pkg/volume" @@ -12,6 +13,27 @@ type BackupStore struct { mock.Mock } +// BackupExists provides a mock function with given fields: bucket, backupName +func (_m *BackupStore) BackupExists(bucket string, backupName string) (bool, error) { + ret := _m.Called(bucket, backupName) + + var r0 bool + if rf, ok := ret.Get(0).(func(string, string) bool); ok { + r0 = rf(bucket, backupName) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(bucket, backupName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // DeleteBackup provides a mock function with given fields: name func (_m *BackupStore) DeleteBackup(name string) error { ret := _m.Called(name) @@ -63,27 +85,6 @@ func (_m *BackupStore) GetBackupContents(name string) (io.ReadCloser, error) { return r0, r1 } -// BackupExists provides a mock function with given fields: bucket, backupName -func (_m *BackupStore) BackupExists(bucket string, backupName string) (bool, error) { - ret := _m.Called(bucket, backupName) - - var r0 bool - if rf, ok := ret.Get(0).(func(string, string) bool); ok { - r0 = rf(bucket, backupName) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(bucket, backupName) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetBackupMetadata provides a mock function with given fields: name func (_m *BackupStore) GetBackupMetadata(name string) (*v1.Backup, error) { ret := _m.Called(name) @@ -151,18 +152,27 @@ func (_m *BackupStore) GetDownloadURL(target v1.DownloadTarget) (string, error) return r0, r1 } -// IsValid provides a mock function with given fields: -func (_m *BackupStore) IsValid() error { - ret := _m.Called() +// GetPodVolumeBackups provides a mock function with given fields: name +func (_m *BackupStore) GetPodVolumeBackups(name string) ([]*v1.PodVolumeBackup, error) { + ret := _m.Called(name) - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + var r0 []*v1.PodVolumeBackup + if rf, ok := ret.Get(0).(func(string) []*v1.PodVolumeBackup); ok { + r0 = rf(name) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*v1.PodVolumeBackup) + } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // GetRevision provides a mock function with given fields: @@ -186,6 +196,20 @@ func (_m *BackupStore) GetRevision() (string, error) { return r0, r1 } +// IsValid provides a mock function with given fields: +func (_m *BackupStore) IsValid() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ListBackups provides a mock function with given fields: func (_m *BackupStore) ListBackups() ([]string, error) { ret := _m.Called() @@ -209,13 +233,13 @@ func (_m *BackupStore) ListBackups() ([]string, error) { return r0, r1 } -// PutBackup provides a mock function with given fields: name, metadata, contents, log, volumeSnapshots -func (_m *BackupStore) PutBackup(name string, metadata io.Reader, contents io.Reader, log io.Reader, volumeSnapshots io.Reader) error { - ret := _m.Called(name, metadata, 
contents, log, volumeSnapshots) +// PutBackup provides a mock function with given fields: info +func (_m *BackupStore) PutBackup(info persistence.BackupInfo) error { + ret := _m.Called(info) var r0 error - if rf, ok := ret.Get(0).(func(string, io.Reader, io.Reader, io.Reader, io.Reader) error); ok { - r0 = rf(name, metadata, contents, log, volumeSnapshots) + if rf, ok := ret.Get(0).(func(persistence.BackupInfo) error); ok { + r0 = rf(info) } else { r0 = ret.Error(0) } diff --git a/pkg/persistence/object_store.go b/pkg/persistence/object_store.go index bf3bae50a..c9dea6860 100644 --- a/pkg/persistence/object_store.go +++ b/pkg/persistence/object_store.go @@ -35,6 +35,11 @@ import ( "github.com/heptio/velero/pkg/volume" ) +type BackupInfo struct { + Name string + Metadata, Contents, Log, PodVolumeBackups, VolumeSnapshots io.Reader +} + // BackupStore defines operations for creating, retrieving, and deleting // Velero backup and restore data in/from a persistent backup store. type BackupStore interface { @@ -43,9 +48,10 @@ type BackupStore interface { ListBackups() ([]string, error) - PutBackup(name string, metadata, contents, log, volumeSnapshots io.Reader) error + PutBackup(info BackupInfo) error GetBackupMetadata(name string) (*velerov1api.Backup, error) GetBackupVolumeSnapshots(name string) ([]*volume.Snapshot, error) + GetPodVolumeBackups(name string) ([]*velerov1api.PodVolumeBackup, error) GetBackupContents(name string) (io.ReadCloser, error) // BackupExists checks if the backup metadata file exists in object storage. @@ -166,44 +172,56 @@ func (s *objectBackupStore) ListBackups() ([]string, error) { return output, nil } -func (s *objectBackupStore) PutBackup(name string, metadata, contents, log, volumeSnapshots io.Reader) error { - if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupLogKey(name), log); err != nil { +func (s *objectBackupStore) PutBackup(info BackupInfo) error { + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupLogKey(info.Name), info.Log); err != nil { // Uploading the log file is best-effort; if it fails, we log the error but it doesn't impact the // backup's status. - s.logger.WithError(err).WithField("backup", name).Error("Error uploading log file") + s.logger.WithError(err).WithField("backup", info.Name).Error("Error uploading log file") } - if metadata == nil { + if info.Metadata == nil { // If we don't have metadata, something failed, and there's no point in continuing. An object // storage bucket that is missing the metadata file can't be restored, nor can its logs be // viewed. 
return nil } - if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupMetadataKey(name), metadata); err != nil { + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupMetadataKey(info.Name), info.Metadata); err != nil { // failure to upload metadata file is a hard-stop return err } - if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupContentsKey(name), contents); err != nil { - deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(name)) + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupContentsKey(info.Name), info.Contents); err != nil { + deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(info.Name)) return kerrors.NewAggregate([]error{err, deleteErr}) } - if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupVolumeSnapshotsKey(name), volumeSnapshots); err != nil { + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getPodVolumeBackupsKey(info.Name), info.PodVolumeBackups); err != nil { errs := []error{err} - deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupContentsKey(name)) + deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupContentsKey(info.Name)) errs = append(errs, deleteErr) - deleteErr = s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(name)) + deleteErr = s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(info.Name)) + errs = append(errs, deleteErr) + + return kerrors.NewAggregate(errs) + } + + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupVolumeSnapshotsKey(info.Name), info.VolumeSnapshots); err != nil { + errs := []error{err} + + deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupContentsKey(info.Name)) + errs = append(errs, deleteErr) + + deleteErr = s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(info.Name)) errs = append(errs, deleteErr) return kerrors.NewAggregate(errs) } if err := s.putRevision(); err != nil { - s.logger.WithField("backup", name).WithError(err).Warn("Error updating backup store revision") + s.logger.WithField("backup", info.Name).WithError(err).Warn("Error updating backup store revision") } return nil @@ -288,6 +306,40 @@ func (s *objectBackupStore) GetBackupVolumeSnapshots(name string) ([]*volume.Sna return volumeSnapshots, nil } +func (s *objectBackupStore) GetPodVolumeBackups(name string) ([]*velerov1api.PodVolumeBackup, error) { + key := s.layout.getPodVolumeBackupsKey(name) + + // if the podvolumebackups file doesn't exist, we don't want to return an error, since + // a legacy backup or a backup with no pod volumes would not have this file, so check for + // its existence before attempting to get its contents. 
+ ok, err := keyExists(s.objectStore, s.bucket, s.layout.getBackupDir(name), key) + if err != nil { + return nil, errors.WithStack(err) + } + if !ok { + return nil, nil + } + + res, err := s.objectStore.GetObject(s.bucket, key) + if err != nil { + return nil, err + } + defer res.Close() + + gzr, err := gzip.NewReader(res) + if err != nil { + return nil, errors.WithStack(err) + } + defer gzr.Close() + + var podVolumeBackups []*velerov1api.PodVolumeBackup + if err := json.NewDecoder(gzr).Decode(&podVolumeBackups); err != nil { + return nil, errors.Wrap(err, "error decoding object data") + } + + return podVolumeBackups, nil +} + func (s *objectBackupStore) GetBackupContents(name string) (io.ReadCloser, error) { return s.objectStore.GetObject(s.bucket, s.layout.getBackupContentsKey(name)) } diff --git a/pkg/persistence/object_store_layout.go b/pkg/persistence/object_store_layout.go index 55dc77f6a..f032e0d28 100644 --- a/pkg/persistence/object_store_layout.go +++ b/pkg/persistence/object_store_layout.go @@ -83,6 +83,10 @@ func (l *ObjectStoreLayout) getBackupLogKey(backup string) string { return path.Join(l.subdirs["backups"], backup, fmt.Sprintf("%s-logs.gz", backup)) } +func (l *ObjectStoreLayout) getPodVolumeBackupsKey(backup string) string { + return path.Join(l.subdirs["backups"], backup, fmt.Sprintf("%s-podvolumebackups.json.gz", backup)) +} + func (l *ObjectStoreLayout) getBackupVolumeSnapshotsKey(backup string) string { return path.Join(l.subdirs["backups"], backup, fmt.Sprintf("%s-volumesnapshots.json.gz", backup)) } diff --git a/pkg/persistence/object_store_test.go b/pkg/persistence/object_store_test.go index a7c093743..5976b39f4 100644 --- a/pkg/persistence/object_store_test.go +++ b/pkg/persistence/object_store_test.go @@ -208,54 +208,60 @@ func TestListBackups(t *testing.T) { func TestPutBackup(t *testing.T) { tests := []struct { - name string - prefix string - metadata io.Reader - contents io.Reader - log io.Reader - snapshots io.Reader - expectedErr string - expectedKeys []string + name string + prefix string + metadata io.Reader + contents io.Reader + log io.Reader + podVolumeBackup io.Reader + snapshots io.Reader + expectedErr string + expectedKeys []string }{ { - name: "normal case", - metadata: newStringReadSeeker("metadata"), - contents: newStringReadSeeker("contents"), - log: newStringReadSeeker("log"), - snapshots: newStringReadSeeker("snapshots"), - expectedErr: "", + name: "normal case", + metadata: newStringReadSeeker("metadata"), + contents: newStringReadSeeker("contents"), + log: newStringReadSeeker("log"), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + expectedErr: "", expectedKeys: []string{ "backups/backup-1/velero-backup.json", "backups/backup-1/backup-1.tar.gz", "backups/backup-1/backup-1-logs.gz", + "backups/backup-1/backup-1-podvolumebackups.json.gz", "backups/backup-1/backup-1-volumesnapshots.json.gz", "metadata/revision", }, }, { - name: "normal case with backup store prefix", - prefix: "prefix-1/", - metadata: newStringReadSeeker("metadata"), - contents: newStringReadSeeker("contents"), - log: newStringReadSeeker("log"), - snapshots: newStringReadSeeker("snapshots"), - expectedErr: "", + name: "normal case with backup store prefix", + prefix: "prefix-1/", + metadata: newStringReadSeeker("metadata"), + contents: newStringReadSeeker("contents"), + log: newStringReadSeeker("log"), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + expectedErr: 
"", expectedKeys: []string{ "prefix-1/backups/backup-1/velero-backup.json", "prefix-1/backups/backup-1/backup-1.tar.gz", "prefix-1/backups/backup-1/backup-1-logs.gz", + "prefix-1/backups/backup-1/backup-1-podvolumebackups.json.gz", "prefix-1/backups/backup-1/backup-1-volumesnapshots.json.gz", "prefix-1/metadata/revision", }, }, { - name: "error on metadata upload does not upload data", - metadata: new(errorReader), - contents: newStringReadSeeker("contents"), - log: newStringReadSeeker("log"), - snapshots: newStringReadSeeker("snapshots"), - expectedErr: "error readers return errors", - expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, + name: "error on metadata upload does not upload data", + metadata: new(errorReader), + contents: newStringReadSeeker("contents"), + log: newStringReadSeeker("log"), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + expectedErr: "error readers return errors", + expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, }, { name: "error on data upload deletes metadata", @@ -267,27 +273,30 @@ func TestPutBackup(t *testing.T) { expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, }, { - name: "error on log upload is ok", - metadata: newStringReadSeeker("foo"), - contents: newStringReadSeeker("bar"), - log: new(errorReader), - snapshots: newStringReadSeeker("snapshots"), - expectedErr: "", + name: "error on log upload is ok", + metadata: newStringReadSeeker("foo"), + contents: newStringReadSeeker("bar"), + log: new(errorReader), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + expectedErr: "", expectedKeys: []string{ "backups/backup-1/velero-backup.json", "backups/backup-1/backup-1.tar.gz", + "backups/backup-1/backup-1-podvolumebackups.json.gz", "backups/backup-1/backup-1-volumesnapshots.json.gz", "metadata/revision", }, }, { - name: "don't upload data when metadata is nil", - metadata: nil, - contents: newStringReadSeeker("contents"), - log: newStringReadSeeker("log"), - snapshots: newStringReadSeeker("snapshots"), - expectedErr: "", - expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, + name: "don't upload data when metadata is nil", + metadata: nil, + contents: newStringReadSeeker("contents"), + log: newStringReadSeeker("log"), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + expectedErr: "", + expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, }, } @@ -295,7 +304,15 @@ func TestPutBackup(t *testing.T) { t.Run(tc.name, func(t *testing.T) { harness := newObjectBackupStoreTestHarness("foo", tc.prefix) - err := harness.PutBackup("backup-1", tc.metadata, tc.contents, tc.log, tc.snapshots) + backupInfo := BackupInfo{ + Name: "backup-1", + Metadata: tc.metadata, + Contents: tc.contents, + Log: tc.log, + PodVolumeBackups: tc.podVolumeBackup, + VolumeSnapshots: tc.snapshots, + } + err := harness.PutBackup(backupInfo) velerotest.AssertErrorMatches(t, tc.expectedErr, err) assert.Len(t, harness.objectStore.Data[harness.bucket], len(tc.expectedKeys)) diff --git a/pkg/restic/backupper.go b/pkg/restic/backupper.go index 8df0faa1d..985befeae 100644 --- a/pkg/restic/backupper.go +++ b/pkg/restic/backupper.go @@ -36,7 +36,7 @@ import ( // Backupper can execute restic backups of volumes in a pod. type Backupper interface { // BackupPodVolumes backs up all annotated volumes in a pod. 
- BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) (map[string]string, []error) + BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) ([]*velerov1api.PodVolumeBackup, []error) } type backupper struct { @@ -96,7 +96,7 @@ func resultsKey(ns, name string) string { return fmt.Sprintf("%s/%s", ns, name) } -func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) (map[string]string, []error) { +func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) ([]*velerov1api.PodVolumeBackup, []error) { // get volumes to backup from pod's annotations volumesToBackup := GetVolumesToBackup(pod) if len(volumesToBackup) == 0 { @@ -120,9 +120,9 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. b.resultsLock.Unlock() var ( - errs []error - volumeSnapshots = make(map[string]string) - podVolumes = make(map[string]corev1api.Volume) + errs []error + podVolumeBackups []*velerov1api.PodVolumeBackup + podVolumes = make(map[string]corev1api.Volume) ) // put the pod's volumes in a map for efficient lookup below @@ -130,6 +130,7 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. podVolumes[podVolume.Name] = podVolume } + var numVolumeSnapshots int for _, volumeName := range volumesToBackup { volume, ok := podVolumes[volumeName] if !ok { @@ -150,17 +151,15 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. } volumeBackup := newPodVolumeBackup(backup, pod, volumeName, repo.Spec.ResticIdentifier) - - if err := errorOnly(b.repoManager.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(volumeBackup)); err != nil { + numVolumeSnapshots++ + if volumeBackup, err = b.repoManager.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(volumeBackup); err != nil { errs = append(errs, err) continue } - - volumeSnapshots[volumeName] = "" } ForEachVolume: - for i, count := 0, len(volumeSnapshots); i < count; i++ { + for i, count := 0, numVolumeSnapshots; i < count; i++ { select { case <-b.ctx.Done(): errs = append(errs, errors.New("timed out waiting for all PodVolumeBackups to complete")) @@ -169,13 +168,12 @@ ForEachVolume: switch res.Status.Phase { case velerov1api.PodVolumeBackupPhaseCompleted: if res.Status.SnapshotID == "" { // when the volume is empty there is no restic snapshot, so best to exclude it - delete(volumeSnapshots, res.Spec.Volume) break } - volumeSnapshots[res.Spec.Volume] = res.Status.SnapshotID + podVolumeBackups = append(podVolumeBackups, res) case velerov1api.PodVolumeBackupPhaseFailed: errs = append(errs, errors.Errorf("pod volume backup failed: %s", res.Status.Message)) - delete(volumeSnapshots, res.Spec.Volume) + podVolumeBackups = append(podVolumeBackups, res) } } } @@ -184,7 +182,7 @@ ForEachVolume: delete(b.results, resultsKey(pod.Namespace, pod.Name)) b.resultsLock.Unlock() - return volumeSnapshots, errs + return podVolumeBackups, errs } type pvcGetter interface { diff --git a/pkg/restic/common.go b/pkg/restic/common.go index 80ed99935..7b8971a5f 100644 --- a/pkg/restic/common.go +++ b/pkg/restic/common.go @@ -39,25 +39,15 @@ const ( InitContainer = "restic-wait" DefaultMaintenanceFrequency = 24 * time.Hour - podAnnotationPrefix = "snapshot.velero.io/" + // Deprecated. 
+ podAnnotationPrefix = "snapshot.velero.io/" + volumesToBackupAnnotation = "backup.velero.io/backup-volumes" ) -// PodHasSnapshotAnnotation returns true if the object has an annotation -// indicating that there is a restic snapshot for a volume in this pod, -// or false otherwise. -func PodHasSnapshotAnnotation(obj metav1.Object) bool { - for key := range obj.GetAnnotations() { - if strings.HasPrefix(key, podAnnotationPrefix) { - return true - } - } - - return false -} - // GetPodSnapshotAnnotations returns a map, of volume name -> snapshot id, // of all restic snapshots for this pod. +// Deprecated: we will stop using pod annotations to record restic snapshot IDs after they're taken. func GetPodSnapshotAnnotations(obj metav1.Object) map[string]string { var res map[string]string @@ -77,20 +67,6 @@ func GetPodSnapshotAnnotations(obj metav1.Object) map[string]string { return res } -// SetPodSnapshotAnnotation adds an annotation to a pod to indicate that -// the specified volume has a restic snapshot with the provided id. -func SetPodSnapshotAnnotation(obj metav1.Object, volumeName, snapshotID string) { - annotations := obj.GetAnnotations() - - if annotations == nil { - annotations = make(map[string]string) - } - - annotations[podAnnotationPrefix+volumeName] = snapshotID - - obj.SetAnnotations(annotations) -} - // GetVolumesToBackup returns a list of volume names to backup for // the provided pod. func GetVolumesToBackup(obj metav1.Object) []string { diff --git a/pkg/restic/common_test.go b/pkg/restic/common_test.go index 1dc6cbc99..c0e5ada96 100644 --- a/pkg/restic/common_test.go +++ b/pkg/restic/common_test.go @@ -33,53 +33,6 @@ import ( velerotest "github.com/heptio/velero/pkg/util/test" ) -func TestPodHasSnapshotAnnotation(t *testing.T) { - tests := []struct { - name string - annotations map[string]string - expected bool - }{ - { - name: "nil annotations", - annotations: nil, - expected: false, - }, - { - name: "empty annotations", - annotations: make(map[string]string), - expected: false, - }, - { - name: "non-empty map, no snapshot annotation", - annotations: map[string]string{"foo": "bar"}, - expected: false, - }, - { - name: "has snapshot annotation only, no suffix", - annotations: map[string]string{podAnnotationPrefix: "bar"}, - expected: true, - }, - { - name: "has snapshot annotation only, with suffix", - annotations: map[string]string{podAnnotationPrefix + "foo": "bar"}, - expected: true, - }, - { - name: "has snapshot annotation, with suffix", - annotations: map[string]string{"foo": "bar", podAnnotationPrefix + "foo": "bar"}, - expected: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - pod := &corev1api.Pod{} - pod.Annotations = test.annotations - assert.Equal(t, test.expected, PodHasSnapshotAnnotation(pod)) - }) - } -} - func TestGetPodSnapshotAnnotations(t *testing.T) { tests := []struct { name string @@ -127,48 +80,6 @@ func TestGetPodSnapshotAnnotations(t *testing.T) { } } -func TestSetPodSnapshotAnnotation(t *testing.T) { - tests := []struct { - name string - annotations map[string]string - volumeName string - snapshotID string - expected map[string]string - }{ - { - name: "set snapshot annotation on pod with no annotations", - annotations: nil, - volumeName: "foo", - snapshotID: "bar", - expected: map[string]string{podAnnotationPrefix + "foo": "bar"}, - }, - { - name: "set snapshot annotation on pod with existing annotations", - annotations: map[string]string{"existing": "annotation"}, - volumeName: "foo", - snapshotID: "bar", - 
expected: map[string]string{"existing": "annotation", podAnnotationPrefix + "foo": "bar"}, - }, - { - name: "snapshot annotation is overwritten if already exists", - annotations: map[string]string{podAnnotationPrefix + "foo": "existing"}, - volumeName: "foo", - snapshotID: "bar", - expected: map[string]string{podAnnotationPrefix + "foo": "bar"}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - pod := &corev1api.Pod{} - pod.Annotations = test.annotations - - SetPodSnapshotAnnotation(pod, test.volumeName, test.snapshotID) - assert.Equal(t, test.expected, pod.Annotations) - }) - } -} - func TestGetVolumesToBackup(t *testing.T) { tests := []struct { name string diff --git a/pkg/restic/mocks/backupper.go b/pkg/restic/mocks/backupper.go index 6065b3fb8..528b9473c 100644 --- a/pkg/restic/mocks/backupper.go +++ b/pkg/restic/mocks/backupper.go @@ -1,11 +1,14 @@ -// Code generated by mockery v1.0.0 +// Code generated by mockery v1.0.0. DO NOT EDIT. + package mocks -import corev1 "k8s.io/api/core/v1" -import logrus "github.com/sirupsen/logrus" -import mock "github.com/stretchr/testify/mock" +import ( + logrus "github.com/sirupsen/logrus" + mock "github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" -import v1 "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" +) // Backupper is an autogenerated mock type for the Backupper type type Backupper struct { @@ -13,20 +16,20 @@ type Backupper struct { } // BackupPodVolumes provides a mock function with given fields: backup, pod, log -func (_m *Backupper) BackupPodVolumes(backup *v1.Backup, pod *corev1.Pod, log logrus.FieldLogger) (map[string]string, []error) { +func (_m *Backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1.Pod, log logrus.FieldLogger) ([]*velerov1api.PodVolumeBackup, []error) { ret := _m.Called(backup, pod, log) - var r0 map[string]string - if rf, ok := ret.Get(0).(func(*v1.Backup, *corev1.Pod, logrus.FieldLogger) map[string]string); ok { + var r0 []*velerov1api.PodVolumeBackup + if rf, ok := ret.Get(0).(func(*velerov1api.Backup, *corev1.Pod, logrus.FieldLogger) []*velerov1api.PodVolumeBackup); ok { r0 = rf(backup, pod, log) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]string) + r0 = ret.Get(0).([]*velerov1api.PodVolumeBackup) } } var r1 []error - if rf, ok := ret.Get(1).(func(*v1.Backup, *corev1.Pod, logrus.FieldLogger) []error); ok { + if rf, ok := ret.Get(1).(func(*velerov1api.Backup, *corev1.Pod, logrus.FieldLogger) []error); ok { r1 = rf(backup, pod, log) } else { if ret.Get(1) != nil { diff --git a/pkg/restore/pv_restorer_test.go b/pkg/restore/pv_restorer_test.go index f930052c3..6fb6cc006 100644 --- a/pkg/restore/pv_restorer_test.go +++ b/pkg/restore/pv_restorer_test.go @@ -36,7 +36,7 @@ import ( ) func defaultBackup() *backup.Builder { - return backup.NewNamedBuilder(api.DefaultNamespace, "backup-1") + return backup.NewNamedBackupBuilder(api.DefaultNamespace, "backup-1") } func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index afdcf066d..2d51a9a3b 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -820,7 +820,7 @@ func TestRestoreItems(t *testing.T) { restore: NewNamedBuilder(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-exactly-63-characters"). Backup("the-really-long-kube-service-name-that-is-exactly-63-characters"). 
 				Restore(),
-			backup:  backup.NewNamedBuilder(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-exactly-63-characters").Backup(),
+			backup:  backup.NewNamedBackupBuilder(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-exactly-63-characters").Backup(),
 			tarball: newTarWriter(t).
 				addItems("pods", test.NewPod("ns-1", "pod-1")).
 				done(),
@@ -839,7 +839,7 @@ func TestRestoreItems(t *testing.T) {
 			restore: NewNamedBuilder(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-much-greater-than-63-characters").
 				Backup("the-really-long-kube-service-name-that-is-much-greater-than-63-characters").
 				Restore(),
-			backup:  backup.NewNamedBuilder(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-much-greater-than-63-characters").Backup(),
+			backup:  backup.NewNamedBackupBuilder(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-much-greater-than-63-characters").Backup(),
 			tarball: newTarWriter(t).
 				addItems("pods", test.NewPod("ns-1", "pod-1")).
 				done(),
diff --git a/site/docs/master/restic.md b/site/docs/master/restic.md
index a2901cc06..25114dbe9 100644
--- a/site/docs/master/restic.md
+++ b/site/docs/master/restic.md
@@ -307,14 +307,12 @@ should be taken (`backup.velero.io/backup-volumes`)
     - finds the pod volume's subdirectory within the above volume
     - runs `restic backup`
     - updates the status of the custom resource to `Completed` or `Failed`
-1. As each `PodVolumeBackup` finishes, the main Velero process captures its restic snapshot ID and adds it as an annotation
-to the copy of the pod JSON that's stored in the Velero backup. This will be used for restores, as seen in the next section.
+1. As each `PodVolumeBackup` finishes, the main Velero process adds it to the Velero backup in a file named `<backup name>-podvolumebackups.json.gz`. This file is uploaded to object storage alongside the backup tarball, and is used for restores, as described in the next section.
 
 ### Restore
 
-1. The main Velero restore process checks each pod that it's restoring for annotations specifying a restic backup
-exists for a volume in the pod (`snapshot.velero.io/`)
-1. When found, Velero first ensures a restic repository exists for the pod's namespace, by:
+1. The main Velero restore process checks each existing `PodVolumeBackup` custom resource in the cluster to determine which pod volumes have restic backups to restore from.
+1. For each `PodVolumeBackup` found, Velero first ensures a restic repository exists for the pod's namespace, by:
     - checking if a `ResticRepository` custom resource already exists
     - if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it (note that in this
       case, the actual repository should already exist in object storage, so the Velero controller will simply
@@ -343,4 +341,4 @@ on to running other init containers/the main containers.
 [3]: https://github.com/heptio/velero/releases/
 [4]: https://kubernetes.io/docs/concepts/storage/volumes/#local
 [5]: http://restic.readthedocs.io/en/latest/100_references.html#terminology
-[6]: https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation
+[6]: https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation
\ No newline at end of file
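
The docs change above states that each finished `PodVolumeBackup` is written into a gzipped JSON file that travels with the backup to object storage. The persistence code itself is not part of the hunks in this patch, so the sketch below is only an illustration of what such serialization could look like: the `writePodVolumeBackups` helper name, the `example` package, and the newline-delimited encoding are assumptions, not this PR's implementation.

```go
package example

import (
	"compress/gzip"
	"encoding/json"
	"io"

	velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
)

// writePodVolumeBackups gzips and JSON-encodes the PodVolumeBackups returned by
// BackupPodVolumes so they can be uploaded next to the backup tarball, e.g. as
// "<backup name>-podvolumebackups.json.gz".
func writePodVolumeBackups(w io.Writer, podVolumeBackups []*velerov1api.PodVolumeBackup) error {
	gz := gzip.NewWriter(w)

	enc := json.NewEncoder(gz)
	for _, pvb := range podVolumeBackups {
		// one JSON document per PodVolumeBackup, newline-delimited
		if err := enc.Encode(pvb); err != nil {
			gz.Close()
			return err
		}
	}

	// Close flushes any buffered compressed data, so its error must be checked.
	return gz.Close()
}
```

On restore, a file written this way could be read back with `gzip.NewReader` and decoded in a loop until `io.EOF`, yielding the same `PodVolumeBackup` objects the backup produced.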