From 55a465a941bcba72b10b158ff84a740bda2a03a5 Mon Sep 17 00:00:00 2001
From: danfengl
Date: Mon, 13 Nov 2023 12:47:38 +0000
Subject: [PATCH] Add E2E test for taking CSI snapshots of PVs with retain reclaim policy

Signed-off-by: danfengl
---
 test/e2e/backup/backup.go                     | 36 +++++++-
 test/e2e/basic/namespace-mapping.go           |  2 +-
 test/e2e/e2e_suite_test.go                    |  4 +
 test/e2e/migration/migration.go               |  7 +-
 test/e2e/pv-backup/pv-backup-filter.go        |  4 +-
 .../e2e/resourcepolicies/resource_policies.go |  3 +-
 test/e2e/upgrade/upgrade.go                   |  3 +-
 test/util/csi/common.go                       |  7 +-
 test/util/k8s/common.go                       | 24 ++++--
 test/util/k8s/persistentvolumes.go            | 16 +++-
 test/util/kibishii/kibishii_utils.go          | 82 +++++++++++++++++--
 test/util/velero/velero_utils.go              | 59 +++++++++++++
 12 files changed, 216 insertions(+), 31 deletions(-)

diff --git a/test/e2e/backup/backup.go b/test/e2e/backup/backup.go
index 52dc7ad8f..923781e25 100644
--- a/test/e2e/backup/backup.go
+++ b/test/e2e/backup/backup.go
@@ -31,15 +31,33 @@ import (
 	. "github.com/vmware-tanzu/velero/test/util/velero"
 )
 
+type BackupRestoreTestConfig struct {
+	useVolumeSnapshots  bool
+	kibishiiPatchSubDir string
+	isRetainPVTest      bool
+}
+
 func BackupRestoreWithSnapshots() {
-	BackupRestoreTest(true)
+	config := BackupRestoreTestConfig{true, "", false}
+	BackupRestoreTest(config)
 }
 
 func BackupRestoreWithRestic() {
-	BackupRestoreTest(false)
+	config := BackupRestoreTestConfig{false, "", false}
+	BackupRestoreTest(config)
 }
 
-func BackupRestoreTest(useVolumeSnapshots bool) {
+func BackupRestoreRetainedPVWithSnapshots() {
+	config := BackupRestoreTestConfig{true, "overlays/sc-reclaim-policy/", true}
+	BackupRestoreTest(config)
+}
+
+func BackupRestoreRetainedPVWithRestic() {
+	config := BackupRestoreTestConfig{false, "overlays/sc-reclaim-policy/", true}
+	BackupRestoreTest(config)
+}
+
+func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
 	var (
 		backupName, restoreName, kibishiiNamespace string
@@ -48,25 +66,34 @@ func BackupRestoreTest(useVolumeSnapshots bool) {
 		veleroCfg VeleroConfig
 	)
 	provideSnapshotVolumesParmInBackup = false
+	useVolumeSnapshots := backupRestoreTestConfig.useVolumeSnapshots
 
 	BeforeEach(func() {
 		veleroCfg = VeleroCfg
+
+		veleroCfg.KibishiiDirectory = veleroCfg.KibishiiDirectory + backupRestoreTestConfig.kibishiiPatchSubDir
 		veleroCfg.UseVolumeSnapshots = useVolumeSnapshots
 		veleroCfg.UseNodeAgent = !useVolumeSnapshots
 		if useVolumeSnapshots && veleroCfg.CloudProvider == "kind" {
 			Skip("Volume snapshots not supported on kind")
 		}
+
 		var err error
 		flag.Parse()
 		UUIDgen, err = uuid.NewRandom()
 		kibishiiNamespace = "k-" + UUIDgen.String()
 		Expect(err).To(Succeed())
+		DeleteStorageClass(context.Background(), *veleroCfg.ClientToInstallVelero, KibishiiStorageClassName)
 	})
 
 	AfterEach(func() {
 		if !veleroCfg.Debug {
 			By("Clean backups after test", func() {
 				DeleteAllBackups(context.Background(), *veleroCfg.ClientToInstallVelero)
+				if backupRestoreTestConfig.isRetainPVTest {
+					CleanAllRetainedPV(context.Background(), *veleroCfg.ClientToInstallVelero)
+				}
+				DeleteStorageClass(context.Background(), *veleroCfg.ClientToInstallVelero, KibishiiStorageClassName)
 			})
 			if veleroCfg.InstallVelero {
 				ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
@@ -106,6 +133,9 @@ func BackupRestoreTest(useVolumeSnapshots bool) {
 	})
 
 	It("should successfully back up and restore to an additional BackupStorageLocation with unique credentials", func() {
+		if backupRestoreTestConfig.isRetainPVTest {
+			Skip("This scenario is already covered by the first test case")
+		}
 		if veleroCfg.AdditionalBSLProvider == "" {
 			Skip("no additional BSL provider given, not running multiple BackupStorageLocation with unique credentials tests")
 		}
diff --git a/test/e2e/basic/namespace-mapping.go b/test/e2e/basic/namespace-mapping.go
index ea2a8f53a..dbf98c1f9 100644
--- a/test/e2e/basic/namespace-mapping.go
+++ b/test/e2e/basic/namespace-mapping.go
@@ -102,7 +102,7 @@ func (n *NamespaceMapping) Verify() error {
 		n.kibishiiData.Levels = len(*n.NSIncluded) + index
 		By(fmt.Sprintf("Verify workload %s after restore ", ns), func() {
 			Expect(KibishiiVerifyAfterRestore(n.Client, ns,
-				n.Ctx, n.kibishiiData)).To(Succeed(), "Fail to verify workload after restore")
+				n.Ctx, n.kibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
 		})
 	}
 	for _, ns := range *n.NSIncluded {
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index b4cb6b22a..7bf3a2e97 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -102,6 +102,10 @@ var _ = Describe("[Basic][Restic] Velero tests on cluster using the plugin provi
 
 var _ = Describe("[Basic][Snapshot] Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", BackupRestoreWithSnapshots)
 
+var _ = Describe("[Basic][Snapshot][RetainPV] Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups, with retained PVs", BackupRestoreRetainedPVWithSnapshots)
+
+var _ = Describe("[Basic][Restic][RetainPV] Velero tests on cluster using the plugin provider for object storage and Restic for volume backups, with retained PVs", BackupRestoreRetainedPVWithRestic)
+
 var _ = Describe("[Basic][ClusterResource] Backup/restore of cluster resources", ResourcesCheckTest)
 
 var _ = Describe("[Scale][LongTime] Backup/restore of 2500 namespaces", MultiNSBackupRestore)
diff --git a/test/e2e/migration/migration.go b/test/e2e/migration/migration.go
index a1a5e895c..da808ba92 100644
--- a/test/e2e/migration/migration.go
+++ b/test/e2e/migration/migration.go
@@ -273,15 +273,16 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version)
 		}
 
 		By(fmt.Sprintf("Install Velero in cluster-B (%s) to restore workload", veleroCfg.StandbyCluster), func() {
+			// Ensure the workload in migrationNamespace still exists in cluster-A
 			ns, err := GetNamespace(context.Background(), *veleroCfg.DefaultClient, migrationNamespace)
 			Expect(ns.Name).To(Equal(migrationNamespace))
-			Expect(err).NotTo(HaveOccurred())
+			Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("get namespace in cluster-A err: %v", err))
 
+			// Ensure cluster-B is the target cluster
 			Expect(KubectlConfigUseContext(context.Background(), veleroCfg.StandbyCluster)).To(Succeed())
 			_, err = GetNamespace(context.Background(), *veleroCfg.StandbyClient, migrationNamespace)
 			Expect(err).To(HaveOccurred())
 			strings.Contains(fmt.Sprint(err), "namespaces \""+migrationNamespace+"\" not found")
-			fmt.Println(err)
 
 			veleroCfg.ClientToInstallVelero = veleroCfg.StandbyClient
@@ -335,7 +336,7 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version)
 
 		By(fmt.Sprintf("Verify workload %s after restore ", migrationNamespace), func() {
 			Expect(KibishiiVerifyAfterRestore(*veleroCfg.StandbyClient, migrationNamespace,
-				oneHourTimeout, &KibishiiData)).To(Succeed(), "Fail to verify workload after restore")
+				oneHourTimeout, &KibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
 		})
 
 		// TODO: delete backup created by case self, not all
diff --git a/test/e2e/pv-backup/pv-backup-filter.go b/test/e2e/pv-backup/pv-backup-filter.go
index d8de42dd2..556dfeb70 100644
--- a/test/e2e/pv-backup/pv-backup-filter.go
+++ b/test/e2e/pv-backup/pv-backup-filter.go
@@ -180,7 +180,7 @@ func fileContent(namespace, podName, volume string) string {
 }
 
 func fileExist(ctx context.Context, namespace, podName, volume string) error {
-	c, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
+	c, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
 	if err != nil {
 		return errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s ",
 			FILE_NAME, volume, podName, namespace))
@@ -195,7 +195,7 @@ func fileExist(ctx context.Context, namespace, podName, volume string) error {
 	}
 }
 func fileNotExist(ctx context.Context, namespace, podName, volume string) error {
-	_, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
+	_, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
 	if err != nil {
 		return nil
 	} else {
diff --git a/test/e2e/resourcepolicies/resource_policies.go b/test/e2e/resourcepolicies/resource_policies.go
index 6f98c5ebd..df96bc3d9 100644
--- a/test/e2e/resourcepolicies/resource_policies.go
+++ b/test/e2e/resourcepolicies/resource_policies.go
@@ -24,7 +24,6 @@ import (
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
-	"github.com/pkg/errors"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -164,7 +163,7 @@ func (r *ResourcePoliciesCase) Verify() error {
 			if vol.Name != volName {
 				continue
 			}
-			content, err := ReadFileFromPodVolume(r.Ctx, ns, pod.Name, "container-busybox", vol.Name, FileName)
+			content, _, err := ReadFileFromPodVolume(r.Ctx, ns, pod.Name, "container-busybox", vol.Name, FileName)
 			if i%2 == 0 {
 				Expect(err).To(HaveOccurred(), "Expected file not found") // File should not exist
 			} else {
diff --git a/test/e2e/upgrade/upgrade.go b/test/e2e/upgrade/upgrade.go
index 6fd4c40ed..c9e9af90b 100644
--- a/test/e2e/upgrade/upgrade.go
+++ b/test/e2e/upgrade/upgrade.go
@@ -29,7 +29,6 @@ import (
 	. "github.com/vmware-tanzu/velero/test"
 	. "github.com/vmware-tanzu/velero/test/util/k8s"
 	. "github.com/vmware-tanzu/velero/test/util/kibishii"
-	. "github.com/vmware-tanzu/velero/test/util/providers"
 	. "github.com/vmware-tanzu/velero/test/util/velero"
 )
 
@@ -256,7 +255,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
 
 		By(fmt.Sprintf("Verify workload %s after restore ", upgradeNamespace), func() {
 			Expect(KibishiiVerifyAfterRestore(*veleroCfg.ClientToInstallVelero, upgradeNamespace,
-				oneHourTimeout, DefaultKibishiiData)).To(Succeed(), "Fail to verify workload after restore")
+				oneHourTimeout, DefaultKibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
 		})
 	})
 })
diff --git a/test/util/csi/common.go b/test/util/csi/common.go
index e96e865b0..932646f0c 100644
--- a/test/util/csi/common.go
+++ b/test/util/csi/common.go
@@ -21,14 +21,12 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/pkg/errors"
-
 	snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
+	"github.com/pkg/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
"github.com/vmware-tanzu/velero/test/util/k8s" ) @@ -128,6 +126,7 @@ func GetCsiSnapshotHandleV1(client TestClient, backupName string) ([]string, err } return snapshotHandleList, nil } + func GetVolumeSnapshotContentNameByPod(client TestClient, podName, namespace, backupName string) (string, error) { pvcList, err := GetPvcByPVCName(context.Background(), namespace, podName) if err != nil { diff --git a/test/util/k8s/common.go b/test/util/k8s/common.go index ed579cb77..da439f24c 100644 --- a/test/util/k8s/common.go +++ b/test/util/k8s/common.go @@ -104,7 +104,6 @@ func GetPvcByPVCName(ctx context.Context, namespace, pvcName string) ([]string, Args: []string{"{print $1}"}, } cmds = append(cmds, cmd) - return common.GetListByCmdPipes(ctx, cmds) } @@ -279,15 +278,30 @@ func CreateFileToPod(ctx context.Context, namespace, podName, containerName, vol fmt.Printf("Kubectl exec cmd =%v\n", cmd) return cmd.Run() } -func ReadFileFromPodVolume(ctx context.Context, namespace, podName, containerName, volume, filename string) (string, error) { +func FileExistInPV(ctx context.Context, namespace, podName, containerName, volume, filename string) (bool, error) { + stdout, stderr, err := ReadFileFromPodVolume(ctx, namespace, podName, containerName, volume, filename) + + output := fmt.Sprintf("%s:%s", stdout, stderr) + if strings.Contains(output, fmt.Sprintf("/%s/%s: No such file or directory", volume, filename)) { + return false, nil + } else { + if err == nil { + return true, nil + } else { + return false, errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s", + filename, volume, podName, namespace)) + } + } +} +func ReadFileFromPodVolume(ctx context.Context, namespace, podName, containerName, volume, filename string) (string, string, error) { arg := []string{"exec", "-n", namespace, "-c", containerName, podName, "--", "cat", fmt.Sprintf("/%s/%s", volume, filename)} cmd := exec.CommandContext(ctx, "kubectl", arg...) 
fmt.Printf("Kubectl exec cmd =%v\n", cmd) stdout, stderr, err := veleroexec.RunCommand(cmd) - fmt.Print(stdout) - fmt.Print(stderr) - return stdout, err + fmt.Printf("stdout: %s\n", stdout) + fmt.Printf("stderr: %s\n", stderr) + return stdout, stderr, err } func RunCommand(cmdName string, arg []string) string { diff --git a/test/util/k8s/persistentvolumes.go b/test/util/k8s/persistentvolumes.go index f4c800594..441c1bd10 100644 --- a/test/util/k8s/persistentvolumes.go +++ b/test/util/k8s/persistentvolumes.go @@ -22,10 +22,9 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/util/retry" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" ) func CreatePersistentVolume(client TestClient, name string) (*corev1.PersistentVolume, error) { @@ -93,3 +92,16 @@ func ClearClaimRefForFailedPVs(ctx context.Context, client TestClient) error { return nil } + +func GetAllPVNames(ctx context.Context, client TestClient) ([]string, error) { + var pvNameList []string + pvList, err := client.ClientGo.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to List PV") + } + + for _, pvName := range pvList.Items { + pvNameList = append(pvNameList, pvName.Name) + } + return pvNameList, nil +} diff --git a/test/util/kibishii/kibishii_utils.go b/test/util/kibishii/kibishii_utils.go index f9d2b00d8..de30dccf4 100644 --- a/test/util/kibishii/kibishii_utils.go +++ b/test/util/kibishii/kibishii_utils.go @@ -51,6 +51,7 @@ type KibishiiData struct { var DefaultKibishiiWorkerCounts = 2 var DefaultKibishiiData = &KibishiiData{2, 10, 10, 1024, 1024, 0, DefaultKibishiiWorkerCounts} +var KibishiiPodNameList = []string{"kibishii-deployment-0", "kibishii-deployment-1"} var KibishiiPVCNameList = []string{"kibishii-data-kibishii-deployment-0", "kibishii-data-kibishii-deployment-1"} var KibishiiStorageClassName = "kibishii-storage-class" @@ -107,6 +108,8 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc } fmt.Printf("VeleroBackupNamespace done %s\n", time.Now().Format("2006-01-02 15:04:05")) + + // Checkpoint for a successful backup if useVolumeSnapshots { if providerName == "vsphere" { // Wait for uploads started by the Velero Plugin for vSphere to complete @@ -165,11 +168,49 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc } } + // Modify PV data right after backup. 
+	// Modify PV data right after the backup. If the PV's reclaim policy is Retain,
+	// the PV will be restored with its original resource configuration.
+	fileName := "file-" + kibishiiNamespace
+	fileBaseContent := fileName
+	fmt.Printf("Re-populate volume %s\n", time.Now().Format("2006-01-02 15:04:05"))
+	for _, pod := range KibishiiPodNameList {
+		// Clear the kibishii data first so that the verification result is accurate
+		if err := ClearKibishiiData(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data"); err != nil {
+			return errors.Wrapf(err, "failed to clear kibishii data in pod %s", pod)
+		}
+
+		fileContent := fileBaseContent + pod
+		err := CreateFileToPod(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data",
+			fileName, fileContent)
+		if err != nil {
+			return errors.Wrapf(err, "failed to create file %s", fileName)
+		}
+	}
+	fmt.Printf("Re-populate volume done %s\n", time.Now().Format("2006-01-02 15:04:05"))
+
+	pvList := []string{}
+	if strings.Contains(veleroCfg.KibishiiDirectory, "sc-reclaim-policy") {
+		// Collect the leftover PV names now for later cleanup; the lookup goes
+		// through the PVCs, which disappear together with the namespace.
+		for _, pvc := range KibishiiPVCNameList {
+			pv, err := GetPvName(oneHourTimeout, client, pvc, kibishiiNamespace)
+			if err != nil {
+				return errors.Wrapf(err, "failed to get PV name of PVC %s", pvc)
+			}
+			pvList = append(pvList, pv)
+		}
+	}
+
 	fmt.Printf("Simulating a disaster by removing namespace %s %s\n", kibishiiNamespace, time.Now().Format("2006-01-02 15:04:05"))
 	if err := DeleteNamespace(oneHourTimeout, client, kibishiiNamespace, true); err != nil {
 		return errors.Wrapf(err, "failed to delete namespace %s", kibishiiNamespace)
 	}
 
+	if strings.Contains(veleroCfg.KibishiiDirectory, "sc-reclaim-policy") {
+		// In the CSI PV-retain-policy scenario, make sure no PVs of the same name are
+		// left before restoring the backed-up resources: because the PVs' reclaim
+		// policy is Retain, they are not deleted even though the workload namespace is destroyed.
+		if err := DeletePVs(oneHourTimeout, *veleroCfg.ClientToInstallVelero, pvList); err != nil {
+			return errors.Wrapf(err, "failed to delete PVs %v", pvList)
+		}
+	}
+
 	// the snapshots of AWS may be still in pending status when do the restore, wait for a while
 	// to avoid this https://github.com/vmware-tanzu/velero/issues/1799
 	// TODO remove this after https://github.com/vmware-tanzu/velero/issues/3533 is fixed
@@ -191,10 +232,12 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc
 			return errors.New(fmt.Sprintf("PVR count %d is not as expected %d", len(pvrs), pvCount))
 		}
 	}
+
 	fmt.Printf("KibishiiVerifyAfterRestore %s\n", time.Now().Format("2006-01-02 15:04:05"))
-	if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout, DefaultKibishiiData); err != nil {
+	if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout, DefaultKibishiiData, fileName); err != nil {
 		return errors.Wrapf(err, "Error verifying kibishii after restore")
 	}
+
 	fmt.Printf("kibishii test completed successfully %s\n", time.Now().Format("2006-01-02 15:04:05"))
 	return nil
 }
@@ -309,6 +352,15 @@ func waitForKibishiiPods(ctx context.Context, client TestClient, kibishiiNamespa
 	return WaitForPods(ctx, client, kibishiiNamespace, []string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"})
 }
 
+func KibishiiGenerateData(oneHourTimeout context.Context, kibishiiNamespace string, kibishiiData *KibishiiData) error {
+	fmt.Printf("generateData %s\n", time.Now().Format("2006-01-02 15:04:05"))
+	if err := generateData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil {
+		return errors.Wrap(err, "Failed to generate data")
+	}
+	fmt.Printf("generateData done %s\n", time.Now().Format("2006-01-02 15:04:05"))
+	return nil
+}
+
 func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClient,
 	providerName, kibishiiNamespace, registryCredentialFile, veleroFeatures,
 	kibishiiDirectory string, useVolumeSnapshots bool, kibishiiData *KibishiiData) error {
@@ -338,16 +390,12 @@ func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClie
 	if kibishiiData == nil {
 		kibishiiData = DefaultKibishiiData
 	}
-	fmt.Printf("generateData %s\n", time.Now().Format("2006-01-02 15:04:05"))
-	if err := generateData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil {
-		return errors.Wrap(err, "Failed to generate data")
-	}
-	fmt.Printf("generateData done %s\n", time.Now().Format("2006-01-02 15:04:05"))
-	return nil
+	return KibishiiGenerateData(oneHourTimeout, kibishiiNamespace, kibishiiData)
 }
 
 func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, oneHourTimeout context.Context,
-	kibishiiData *KibishiiData) error {
+	kibishiiData *KibishiiData, incrementalFileName string) error {
 	if kibishiiData == nil {
 		kibishiiData = DefaultKibishiiData
 	}
@@ -357,6 +405,18 @@ func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, one
 	if err := waitForKibishiiPods(oneHourTimeout, client, kibishiiNamespace); err != nil {
 		return errors.Wrapf(err, "Failed to wait for ready status of kibishii pods in %s", kibishiiNamespace)
 	}
+	if incrementalFileName != "" {
+		// The file was written to the PVs only after the backup was taken,
+		// so a correct restore must not bring it back.
+		for _, pod := range KibishiiPodNameList {
+			exist, err := FileExistInPV(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data", incrementalFileName)
+			if err != nil {
+				return errors.Wrapf(err, "fail to check whether file %s exists", incrementalFileName)
+			}
+
+			if exist {
+				return errors.New("unexpected incremental data found after restore")
+			}
+		}
+	}
 
 	// TODO - check that namespace exists
 	fmt.Printf("running kibishii verify\n")
@@ -365,3 +425,11 @@ func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, one
 	}
 	return nil
 }
+
+func ClearKibishiiData(ctx context.Context, namespace, podName, containerName, dir string) error {
+	arg := []string{"exec", "-n", namespace, "-c", containerName, podName,
+		"--", "/bin/sh", "-c", "rm -rf /" + dir + "/*"}
+	cmd := exec.CommandContext(ctx, "kubectl", arg...)
+	fmt.Printf("Kubectl exec cmd =%v\n", cmd)
+	return cmd.Run()
+}
diff --git a/test/util/velero/velero_utils.go b/test/util/velero/velero_utils.go
index a106cf5b5..fd0d919e4 100644
--- a/test/util/velero/velero_utils.go
+++ b/test/util/velero/velero_utils.go
@@ -1561,3 +1561,62 @@ func InstallTestStorageClasses(path string) error {
 	}
 	return InstallStorageClass(ctx, tmpFile.Name())
 }
+
+func GetPvName(ctx context.Context, client TestClient, pvcName, namespace string) (string, error) {
+	pvcList, err := GetPvcByPVCName(context.Background(), namespace, pvcName)
+	if err != nil {
+		return "", err
+	}
+
+	if len(pvcList) != 1 {
+		return "", errors.Errorf("expected exactly 1 PVC named %s under namespace %s, but got %d", pvcName, namespace, len(pvcList))
+	}
+
+	pvList, err := GetPvByPvc(context.Background(), namespace, pvcList[0])
+	if err != nil {
+		return "", err
+	}
+	if len(pvList) != 1 {
+		return "", errors.Errorf("expected exactly 1 PV bound to PVC %s under namespace %s, but got %d", pvcList[0], namespace, len(pvList))
+	}
+
+	return pvList[0], nil
+}
+
+func DeletePVs(ctx context.Context, client TestClient, pvList []string) error {
+	for _, pv := range pvList {
+		args := []string{"delete", "pv", pv, "--timeout=0s"}
+		fmt.Println(args)
+		err := exec.CommandContext(ctx, "kubectl", args...).Run()
+		if err != nil {
+			return errors.Wrapf(err, "failed to delete PV %s", pv)
+		}
+	}
+	return nil
+}
+
+func CleanAllRetainedPV(ctx context.Context, client TestClient) {
+	pvNameList, err := GetAllPVNames(ctx, client)
+	if err != nil {
+		fmt.Printf("fail to list PVs: %v\n", err)
+	}
+	for _, pv := range pvNameList {
+		args := []string{"patch", "pv", pv, "-p", "{\"spec\":{\"persistentVolumeReclaimPolicy\":\"Delete\"}}"}
+		fmt.Println(args)
+		cmd := exec.CommandContext(ctx, "kubectl", args...)
+		stdout, errMsg, err := veleroexec.RunCommand(cmd)
+		if err != nil {
+			fmt.Printf("fail to patch reclaim policy of PV %s to Delete: stdout: %s, stderr: %s\n", pv, stdout, errMsg)
+		}
+
+		args = []string{"delete", "pv", pv, "--timeout=60s"}
+		fmt.Println(args)
+		cmd = exec.CommandContext(ctx, "kubectl", args...)
+		stdout, errMsg, err = veleroexec.RunCommand(cmd)
+		if err != nil {
+			fmt.Printf("fail to delete PV %s: stdout: %s, stderr: %s\n", pv, stdout, errMsg)
+		}
+	}
+}
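
Note for reviewers (not part of the patch): the sketch below shows how the new helpers are
intended to compose in the retain-PV flow, assuming the signatures introduced above
(GetPvName, DeletePVs, CleanAllRetainedPV, FileExistInPV). The function name
simulateDisasterAndRestore and the ctx/client/ns values are illustrative placeholders,
not part of the change.

	// simulateDisasterAndRestore captures the PV names before deleting the
	// workload namespace: once the namespace (and its PVCs) is gone, a
	// retained PV can no longer be looked up through its PVC.
	func simulateDisasterAndRestore(ctx context.Context, client TestClient, ns string) error {
		pvList := []string{}
		for _, pvc := range KibishiiPVCNameList {
			pv, err := GetPvName(ctx, client, pvc, ns)
			if err != nil {
				return err
			}
			pvList = append(pvList, pv)
		}

		// Simulate the disaster.
		if err := DeleteNamespace(ctx, client, ns, true); err != nil {
			return err
		}

		// With a Retain reclaim policy the PVs survive namespace deletion, so
		// remove them explicitly; otherwise the restore cannot recreate PVs of
		// the same name from the backup.
		if err := DeletePVs(ctx, client, pvList); err != nil {
			return err
		}

		// ... run the Velero restore here, then use FileExistInPV to verify
		// that the marker file written after the backup did NOT come back.

		// After the test, flip any leftover Retain PVs to Delete and remove them.
		CleanAllRetainedPV(ctx, client)
		return nil
	}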