diff --git a/changelogs/unreleased/3866-alaypatel07 b/changelogs/unreleased/3866-alaypatel07
new file mode 100644
index 000000000..7480bc062
--- /dev/null
+++ b/changelogs/unreleased/3866-alaypatel07
@@ -0,0 +1 @@
+skip backing up projected volumes when using restic
\ No newline at end of file
diff --git a/pkg/restic/common.go b/pkg/restic/common.go
index 5798f0218..5ce15e32e 100644
--- a/pkg/restic/common.go
+++ b/pkg/restic/common.go
@@ -183,6 +183,10 @@ func GetPodVolumesUsingRestic(pod *corev1api.Pod, defaultVolumesToRestic bool) [
 		if pv.ConfigMap != nil {
 			continue
 		}
+		// don't backup volumes mounted as projected volumes; all data in those comes from kube state.
+		if pv.Projected != nil {
+			continue
+		}
 		// don't backup volumes that are included in the exclude list.
 		if contains(volsToExclude, pv.Name) {
 			continue
diff --git a/pkg/restic/common_test.go b/pkg/restic/common_test.go
index d08d1f076..b5e0f1ff9 100644
--- a/pkg/restic/common_test.go
+++ b/pkg/restic/common_test.go
@@ -507,6 +507,41 @@ func TestGetPodVolumesUsingRestic(t *testing.T) {
 			},
 			expected: []string{"resticPV1", "resticPV2", "resticPV3"},
 		},
+		{
+			name:                   "should exclude projected volumes",
+			defaultVolumesToRestic: true,
+			pod: &corev1api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
+					},
+				},
+				Spec: corev1api.PodSpec{
+					Volumes: []corev1api.Volume{
+						{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
+						{
+							Name: "projected",
+							VolumeSource: corev1api.VolumeSource{
+								Projected: &corev1api.ProjectedVolumeSource{
+									Sources: []corev1api.VolumeProjection{{
+										Secret: &corev1api.SecretProjection{
+											LocalObjectReference: corev1api.LocalObjectReference{},
+											Items:                nil,
+											Optional:             nil,
+										},
+										DownwardAPI:         nil,
+										ConfigMap:           nil,
+										ServiceAccountToken: nil,
+									}},
+									DefaultMode: nil,
+								},
+							},
+						},
+					},
+				},
+			},
+			expected: []string{"resticPV1", "resticPV2", "resticPV3"},
+		},
 	}
 
 	for _, tc := range testCases {
diff --git a/site/content/posts/2019-10-10-Velero-v1-1-Stateful-Backup-vSphere.md b/site/content/posts/2019-10-10-Velero-v1-1-Stateful-Backup-vSphere.md
index c136fa781..fd7d19b78 100644
--- a/site/content/posts/2019-10-10-Velero-v1-1-Stateful-Backup-vSphere.md
+++ b/site/content/posts/2019-10-10-Velero-v1-1-Stateful-Backup-vSphere.md
@@ -459,7 +459,7 @@ cqlsh:demodb> select * from emp;
 cqlsh:demodb>
 ```
 
-It looks like the restore has been successful. Velero v1.1 has successfully restored the Kubenetes objects for the Cassandra application, as well as restored the database and table contents.
+It looks like the restore has been successful. Velero v1.1 has successfully restored the Kubernetes objects for the Cassandra application, as well as restored the database and table contents.
 
 ## Feedback and Participation
 
diff --git a/test/e2e/kibishii_tests.go b/test/e2e/kibishii_tests.go
index cb5b976e6..70bbcb702 100644
--- a/test/e2e/kibishii_tests.go
+++ b/test/e2e/kibishii_tests.go
@@ -161,7 +161,7 @@ func runKibishiiTests(client testClient, providerName, veleroCLI, veleroNamespac
 	}
 
 	if err := client.clientGo.CoreV1().Namespaces().Delete(oneHourTimeout, kibishiiNamespace, metav1.DeleteOptions{}); err != nil {
-		return errors.Wrapf(err, "Failed to cleanup %s wrokload namespace", kibishiiNamespace)
+		return errors.Wrapf(err, "Failed to cleanup %s workload namespace", kibishiiNamespace)
 	}
 	// wait for ns delete
 	if err = waitForNamespaceDeletion(interval, timeout, client, kibishiiNamespace); err != nil {
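For readers less familiar with `GetPodVolumesUsingRestic`, the sketch below condenses the filtering logic that the new `pv.Projected` check slots into. It is a standalone, simplified illustration, not Velero's actual implementation: `shouldBackUpVolume` and its exclude-list parameter are hypothetical names, and only the config-map check, the new projected-volume check, and the exclude-list check visible in the hunk above are mirrored here.

```go
package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
)

// shouldBackUpVolume is a hypothetical helper (not part of Velero) that
// mirrors the filters shown in the diff: volumes whose contents are rebuilt
// from cluster state, or that the user has excluded, are skipped by restic.
func shouldBackUpVolume(pv corev1api.Volume, volsToExclude map[string]bool) bool {
	switch {
	case pv.ConfigMap != nil:
		// existing check: config map contents are restored from the API, not restic
		return false
	case pv.Projected != nil:
		// new check: projected volume contents (secrets, config maps,
		// downward API, service account tokens) also come from cluster state
		return false
	case volsToExclude[pv.Name]:
		// honour the volumes-to-exclude annotation (VolumesToExcludeAnnotation in the test above)
		return false
	default:
		return true
	}
}

func main() {
	vols := []corev1api.Volume{
		{Name: "resticPV1"},
		{
			Name: "projected",
			VolumeSource: corev1api.VolumeSource{
				Projected: &corev1api.ProjectedVolumeSource{},
			},
		},
	}
	for _, v := range vols {
		fmt.Printf("%s -> backup: %v\n", v.Name, shouldBackUpVolume(v, nil))
	}
	// prints:
	//   resticPV1 -> backup: true
	//   projected -> backup: false
}
```

The rationale is the same as for secrets and config maps: a projected volume is materialised by the kubelet from objects already stored in the cluster, so backing up the mounted files with restic would only duplicate data that restoring the underlying API objects recreates anyway.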