diff --git a/changelogs/unreleased/1297-skriss b/changelogs/unreleased/1297-skriss
new file mode 100644
index 000000000..63a13ee54
--- /dev/null
+++ b/changelogs/unreleased/1297-skriss
@@ -0,0 +1 @@
+check for and exclude hostPath-based persistent volumes from restic backup
diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go
index b196bc22a..58019aa03 100644
--- a/pkg/cmd/server/server.go
+++ b/pkg/cmd/server/server.go
@@ -489,6 +489,8 @@ func (s *server) initRestic() error {
         s.sharedInformerFactory.Velero().V1().ResticRepositories(),
         s.veleroClient.VeleroV1(),
         s.sharedInformerFactory.Velero().V1().BackupStorageLocations(),
+        s.kubeClient.CoreV1(),
+        s.kubeClient.CoreV1(),
         s.logger,
     )
     if err != nil {
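Note on the server wiring above: NewRepositoryManager (see pkg/restic/repository_manager.go below) now also takes a PersistentVolumeClaimsGetter and a PersistentVolumesGetter, which is why s.kubeClient.CoreV1() is passed twice. A minimal sketch of why a single value satisfies both parameters, assuming a kubernetes.Interface client; the package and helper name (coreClients) are illustrative and not part of the patch:

package example

import (
    "k8s.io/client-go/kubernetes"
    corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// coreClients returns the two narrow interfaces NewRepositoryManager now expects.
// CoreV1Interface embeds both getter interfaces, so the same core v1 client value
// can be passed for each parameter.
func coreClients(kubeClient kubernetes.Interface) (corev1client.PersistentVolumeClaimsGetter, corev1client.PersistentVolumesGetter) {
    core := kubeClient.CoreV1()
    return core, core
}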
diff --git a/pkg/restic/backupper.go b/pkg/restic/backupper.go
index 1952fa9a7..5c922d8ee 100644
--- a/pkg/restic/backupper.go
+++ b/pkg/restic/backupper.go
@@ -25,6 +25,7 @@ import (
     "github.com/sirupsen/logrus"
     corev1api "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
     "k8s.io/client-go/tools/cache"

     velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
@@ -41,6 +42,8 @@ type backupper struct {
     ctx         context.Context
     repoManager *repositoryManager
     repoEnsurer *repositoryEnsurer
+    pvcClient   corev1client.PersistentVolumeClaimsGetter
+    pvClient    corev1client.PersistentVolumesGetter

     results     map[string]chan *velerov1api.PodVolumeBackup
     resultsLock sync.Mutex
@@ -51,12 +54,16 @@ func newBackupper(
     repoManager *repositoryManager,
     repoEnsurer *repositoryEnsurer,
     podVolumeBackupInformer cache.SharedIndexInformer,
+    pvcClient corev1client.PersistentVolumeClaimsGetter,
+    pvClient corev1client.PersistentVolumesGetter,
     log logrus.FieldLogger,
 ) *backupper {
     b := &backupper{
         ctx:         ctx,
         repoManager: repoManager,
         repoEnsurer: repoEnsurer,
+        pvcClient:   pvcClient,
+        pvClient:    pvClient,
         results:     make(map[string]chan *velerov1api.PodVolumeBackup),
     }

@@ -123,14 +130,20 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
     }

     for _, volumeName := range volumesToBackup {
-        if !volumeExists(podVolumes, volumeName) {
+        volume, ok := podVolumes[volumeName]
+        if !ok {
             log.Warnf("No volume named %s found in pod %s/%s, skipping", volumeName, pod.Namespace, pod.Name)
             continue
         }

         // hostPath volumes are not supported because they're not mounted into /var/lib/kubelet/pods, so our
         // daemonset pod has no way to access their data.
-        if isHostPathVolume(podVolumes, volumeName) {
+        isHostPath, err := isHostPathVolume(&volume, b.pvcClient.PersistentVolumeClaims(pod.Namespace), b.pvClient.PersistentVolumes())
+        if err != nil {
+            errs = append(errs, errors.Wrap(err, "error checking if volume is a hostPath volume"))
+            continue
+        }
+        if isHostPath {
             log.Warnf("Volume %s in pod %s/%s is a hostPath volume which is not supported for restic backup, skipping", volumeName, pod.Namespace, pod.Name)
             continue
         }
@@ -169,18 +182,40 @@ ForEachVolume:
     return volumeSnapshots, errs
 }

-func volumeExists(podVolumes map[string]corev1api.Volume, volumeName string) bool {
-    _, found := podVolumes[volumeName]
-    return found
+type pvcGetter interface {
+    Get(name string, opts metav1.GetOptions) (*corev1api.PersistentVolumeClaim, error)
 }

-func isHostPathVolume(podVolumes map[string]corev1api.Volume, volumeName string) bool {
-    volume, found := podVolumes[volumeName]
-    if !found {
-        return false
+type pvGetter interface {
+    Get(name string, opts metav1.GetOptions) (*corev1api.PersistentVolume, error)
+}
+
+// isHostPathVolume returns true if the volume is either a hostPath pod volume or a persistent
+// volume claim on a hostPath persistent volume, or false otherwise.
+func isHostPathVolume(volume *corev1api.Volume, pvcGetter pvcGetter, pvGetter pvGetter) (bool, error) {
+    if volume.HostPath != nil {
+        return true, nil
     }

-    return volume.HostPath != nil
+    if volume.PersistentVolumeClaim == nil {
+        return false, nil
+    }
+
+    pvc, err := pvcGetter.Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
+    if err != nil {
+        return false, errors.WithStack(err)
+    }
+
+    if pvc.Spec.VolumeName == "" {
+        return false, nil
+    }
+
+    pv, err := pvGetter.Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+    if err != nil {
+        return false, errors.WithStack(err)
+    }
+
+    return pv.Spec.HostPath != nil, nil
 }

 func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volumeName, repoIdentifier string) *velerov1api.PodVolumeBackup {
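For readers of the hunk above: a volume is now skipped either because it is an inline hostPath volume or because its PVC is bound to a hostPath PV; an unbound claim (Spec.VolumeName == "") is treated as non-hostPath, and a failed PVC/PV lookup is recorded as a backup error and the volume is skipped. A compact restatement of that decision order follows as a sketch only; the package, helper name, and result strings are illustrative and not part of the patch:

package example

import corev1api "k8s.io/api/core/v1"

// skipReason restates the decision order applied per annotated pod volume.
// The pvc and pv arguments stand in for the lookups done via the typed clients.
func skipReason(vol corev1api.Volume, pvc *corev1api.PersistentVolumeClaim, pv *corev1api.PersistentVolume) string {
    if vol.HostPath != nil {
        return "skip: hostPath volume defined directly in the pod spec"
    }
    if vol.PersistentVolumeClaim == nil {
        return "back up: not a hostPath volume"
    }
    if pvc == nil || pvc.Spec.VolumeName == "" {
        // an unbound claim cannot be resolved to a PV, so it is not treated as hostPath
        return "back up: claim is not bound to a PV"
    }
    if pv != nil && pv.Spec.HostPath != nil {
        return "skip: claim is bound to a hostPath PV"
    }
    return "back up: bound PV is not hostPath"
}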
"pvc-1", }, }, - "bar": {}, + } + pvcGetter := &fakePVCGetter{ + pvc: &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-1", + Name: "pvc-1", + }, + }, + } + isHostPath, err = isHostPathVolume(vol, pvcGetter, nil) + assert.Nil(t, err) + assert.False(t, isHostPath) + + // PVC that claims a non-hostPath PV + vol = &corev1api.Volume{ + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-1", + }, + }, + } + pvcGetter = &fakePVCGetter{ + pvc: &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-1", + Name: "pvc-1", + }, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "pv-1", + }, + }, + } + pvGetter := &fakePVGetter{ + pv: &corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-1", + }, + Spec: corev1api.PersistentVolumeSpec{}, + }, + } + isHostPath, err = isHostPathVolume(vol, pvcGetter, pvGetter) + assert.Nil(t, err) + assert.False(t, isHostPath) + + // PVC that claims a hostPath PV + vol = &corev1api.Volume{ + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-1", + }, + }, + } + pvcGetter = &fakePVCGetter{ + pvc: &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-1", + Name: "pvc-1", + }, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "pv-1", + }, + }, + } + pvGetter = &fakePVGetter{ + pv: &corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-1", + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeSource: corev1api.PersistentVolumeSource{ + HostPath: &corev1api.HostPathVolumeSource{}, + }, + }, + }, + } + isHostPath, err = isHostPathVolume(vol, pvcGetter, pvGetter) + assert.Nil(t, err) + assert.True(t, isHostPath) +} + +type fakePVCGetter struct { + pvc *corev1api.PersistentVolumeClaim +} + +func (g *fakePVCGetter) Get(name string, opts metav1.GetOptions) (*corev1api.PersistentVolumeClaim, error) { + if g.pvc != nil { + return g.pvc, nil } - assert.True(t, isHostPathVolume(podVolumes, "foo")) - assert.False(t, isHostPathVolume(podVolumes, "bar")) - assert.False(t, isHostPathVolume(podVolumes, "non-existent volume")) + return nil, errors.New("item not found") +} + +type fakePVGetter struct { + pv *corev1api.PersistentVolume +} + +func (g *fakePVGetter) Get(name string, opts metav1.GetOptions) (*corev1api.PersistentVolume, error) { + if g.pv != nil { + return g.pv, nil + } + + return nil, errors.New("item not found") } diff --git a/pkg/restic/repository_manager.go b/pkg/restic/repository_manager.go index 14be08833..393c35947 100644 --- a/pkg/restic/repository_manager.go +++ b/pkg/restic/repository_manager.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" @@ -90,6 +91,8 @@ type repositoryManager struct { repoEnsurer *repositoryEnsurer fileSystem filesystem.Interface ctx context.Context + pvcClient corev1client.PersistentVolumeClaimsGetter + pvClient corev1client.PersistentVolumesGetter } // NewRepositoryManager constructs a RepositoryManager. 
diff --git a/pkg/restic/repository_manager.go b/pkg/restic/repository_manager.go
index 14be08833..393c35947 100644
--- a/pkg/restic/repository_manager.go
+++ b/pkg/restic/repository_manager.go
@@ -25,6 +25,7 @@ import (
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
     corev1listers "k8s.io/client-go/listers/core/v1"
     "k8s.io/client-go/tools/cache"

@@ -90,6 +91,8 @@ type repositoryManager struct {
     repoEnsurer                  *repositoryEnsurer
     fileSystem                   filesystem.Interface
     ctx                          context.Context
+    pvcClient                    corev1client.PersistentVolumeClaimsGetter
+    pvClient                     corev1client.PersistentVolumesGetter
 }

 // NewRepositoryManager constructs a RepositoryManager.
@@ -101,6 +104,8 @@ func NewRepositoryManager(
     repoInformer velerov1informers.ResticRepositoryInformer,
     repoClient velerov1client.ResticRepositoriesGetter,
     backupLocationInformer velerov1informers.BackupStorageLocationInformer,
+    pvcClient corev1client.PersistentVolumeClaimsGetter,
+    pvClient corev1client.PersistentVolumesGetter,
     log logrus.FieldLogger,
 ) (RepositoryManager, error) {
     rm := &repositoryManager{
@@ -111,6 +116,8 @@ func NewRepositoryManager(
         repoInformerSynced:           repoInformer.Informer().HasSynced,
         backupLocationLister:         backupLocationInformer.Lister(),
         backupLocationInformerSynced: backupLocationInformer.Informer().HasSynced,
+        pvcClient:                    pvcClient,
+        pvClient:                     pvClient,
         log:                          log,
         ctx:                          ctx,

@@ -137,7 +144,7 @@ func (rm *repositoryManager) NewBackupper(ctx context.Context, backup *velerov1a
         },
     )

-    b := newBackupper(ctx, rm, rm.repoEnsurer, informer, rm.log)
+    b := newBackupper(ctx, rm, rm.repoEnsurer, informer, rm.pvcClient, rm.pvClient, rm.log)

     go informer.Run(ctx.Done())
     if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced, rm.repoInformerSynced) {
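One detail worth noting about the plumbing above: the pvcClient/pvClient stored on repositoryManager flow into newBackupper, and BackupPodVolumes then passes the typed clients returned by PersistentVolumeClaims(namespace) and PersistentVolumes() directly as the unexported pvcGetter/pvGetter interfaces. That works because, with the client-go version vendored at the time of this change (Get without a context argument), those typed interfaces already include the matching Get method. A compile-time sketch of that relationship; the package name, local interface copies, and the asGetters helper are illustrative only:

package example

import (
    corev1api "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// Local copies of the unexported interfaces declared in pkg/restic/backupper.go.
type pvcGetter interface {
    Get(name string, opts metav1.GetOptions) (*corev1api.PersistentVolumeClaim, error)
}

type pvGetter interface {
    Get(name string, opts metav1.GetOptions) (*corev1api.PersistentVolume, error)
}

// asGetters compiles only because the typed core/v1 clients satisfy the narrow
// getters, which is what lets BackupPodVolumes pass
// b.pvcClient.PersistentVolumeClaims(namespace) and b.pvClient.PersistentVolumes()
// to isHostPathVolume without adapter types.
func asGetters(core corev1client.CoreV1Interface, namespace string) (pvcGetter, pvGetter) {
    return core.PersistentVolumeClaims(namespace), core.PersistentVolumes()
}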