Mirror of https://github.com/vmware-tanzu/velero.git, synced 2026-01-05 04:55:22 +00:00
Merge pull request #3866 from alaypatel07/fix-projected-volume-for-restic
skip backuping projected volume when using restic

Signed-off-by: Bridget McErlean <bmcerlean@vmware.com>
committed by Bridget McErlean
parent 119529c9a2
commit 0f576fb748

changelogs/unreleased/3866-alaypatel07 (new file, 1 line)
@@ -0,0 +1 @@
+skip backuping projected volume when using restic
@@ -183,6 +183,10 @@ func GetPodVolumesUsingRestic(pod *corev1api.Pod, defaultVolumesToRestic bool) [
 		if pv.ConfigMap != nil {
 			continue
 		}
+		// don't backup volumes mounted as projected volumes, all data in those come from kube state.
+		if pv.Projected != nil {
+			continue
+		}
 		// don't backup volumes that are included in the exclude list.
 		if contains(volsToExclude, pv.Name) {
 			continue
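For illustration, here is a minimal, self-contained Go sketch of the filtering behaviour the hunk above changes. It is not Velero's actual implementation: `volumesToBackUp` and the `contains` helper below are hypothetical stand-ins, and only the checks visible in the diff (configMap, projected, and the exclude list) are modelled.

```go
// Sketch only: volumes whose contents are rebuilt from the Kubernetes API on
// restore (configMap and, with this commit, projected volumes) are skipped
// rather than backed up by restic.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// contains is a simple linear search over excluded volume names, mirroring
// the helper referenced in the diff above.
func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

// volumesToBackUp is a hypothetical stand-in for GetPodVolumesUsingRestic,
// showing only the checks visible in the hunk above.
func volumesToBackUp(pod *corev1.Pod, volsToExclude []string) []string {
	var names []string
	for _, pv := range pod.Spec.Volumes {
		if pv.ConfigMap != nil {
			continue // configMap data lives in the API server, not on disk
		}
		if pv.Projected != nil {
			continue // projected volumes are also materialised from kube state
		}
		if contains(volsToExclude, pv.Name) {
			continue // explicitly excluded via the pod annotation
		}
		names = append(names, pv.Name)
	}
	return names
}

func main() {
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				{Name: "data"}, // e.g. a PVC-backed volume that should be backed up
				{
					Name: "sa-token",
					VolumeSource: corev1.VolumeSource{
						Projected: &corev1.ProjectedVolumeSource{},
					},
				},
			},
		},
	}
	fmt.Println(volumesToBackUp(pod, nil)) // prints: [data]
}
```

The new test case below exercises exactly this behaviour: a pod with three ordinary volumes and one projected volume should yield only the three ordinary volumes.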
@@ -507,6 +507,41 @@ func TestGetPodVolumesUsingRestic(t *testing.T) {
 			},
 			expected: []string{"resticPV1", "resticPV2", "resticPV3"},
 		},
+		{
+			name:                   "should exclude projected volumes",
+			defaultVolumesToRestic: true,
+			pod: &corev1api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
+					},
+				},
+				Spec: corev1api.PodSpec{
+					Volumes: []corev1api.Volume{
+						{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
+						{
+							Name: "projected",
+							VolumeSource: corev1api.VolumeSource{
+								Projected: &corev1api.ProjectedVolumeSource{
+									Sources: []corev1api.VolumeProjection{{
+										Secret: &corev1api.SecretProjection{
+											LocalObjectReference: corev1api.LocalObjectReference{},
+											Items:                nil,
+											Optional:             nil,
+										},
+										DownwardAPI:         nil,
+										ConfigMap:           nil,
+										ServiceAccountToken: nil,
+									}},
+									DefaultMode: nil,
+								},
+							},
+						},
+					},
+				},
+			},
+			expected: []string{"resticPV1", "resticPV2", "resticPV3"},
+		},
 	}
 
 	for _, tc := range testCases {
@@ -459,7 +459,7 @@ cqlsh:demodb> select * from emp;
 cqlsh:demodb>
 ```
 
-It looks like the restore has been successful. Velero v1.1 has successfully restored the Kubenetes objects for the Cassandra application, as well as restored the database and table contents.
+It looks like the restore has been successful. Velero v1.1 has successfully restored the Kubernetes objects for the Cassandra application, as well as restored the database and table contents.
 
 ## Feedback and Participation
 
@@ -161,7 +161,7 @@ func runKibishiiTests(client testClient, providerName, veleroCLI, veleroNamespac
 	}
 
 	if err := client.clientGo.CoreV1().Namespaces().Delete(oneHourTimeout, kibishiiNamespace, metav1.DeleteOptions{}); err != nil {
-		return errors.Wrapf(err, "Failed to cleanup %s wrokload namespace", kibishiiNamespace)
+		return errors.Wrapf(err, "Failed to cleanup %s workload namespace", kibishiiNamespace)
 	}
 	// wait for ns delete
 	if err = waitForNamespaceDeletion(interval, timeout, client, kibishiiNamespace); err != nil {
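The cleanup path above deletes the kibishii namespace and then blocks on waitForNamespaceDeletion until it is fully gone. As a rough sketch only (assuming a client-go polling helper; the signature, intervals, and error handling here are illustrative and not taken from Velero's e2e code), such a wait typically looks like this:

```go
// Sketch of a wait-for-namespace-deletion helper built on client-go polling;
// names and parameters are assumptions for illustration, not Velero's code.
package main

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForNamespaceDeletion polls until Get on the namespace returns NotFound,
// i.e. the namespace and its finalizers have been fully removed.
func waitForNamespaceDeletion(interval, timeout time.Duration, client kubernetes.Interface, namespace string) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		_, err := client.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // namespace is gone; stop polling
		}
		if err != nil {
			return false, err // unexpected error; abort the wait
		}
		return false, nil // still present; keep polling
	})
}
```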