From 14e1055a9acb5dfd671a634679b5d999ab3d5615 Mon Sep 17 00:00:00 2001 From: Xun Jiang Date: Fri, 9 May 2025 19:27:03 +0800 Subject: [PATCH] Support using image registry proxy in more cases. Signed-off-by: Xun Jiang --- test/e2e/backups/deletion.go | 13 +- test/e2e/backups/ttl.go | 14 +- test/e2e/basic/backup-volume-info/base.go | 2 +- test/e2e/basic/namespace-mapping.go | 14 +- test/e2e/basic/pvc-selected-node-changing.go | 3 +- test/e2e/basic/storage-class-changing.go | 2 +- test/e2e/bsl-mgmt/deletion.go | 14 +- test/e2e/e2e_suite_test.go | 3 + test/e2e/migration/migration.go | 2 +- .../parallel_files_download.go | 3 +- .../parallel_files_upload.go | 3 +- test/e2e/pv-backup/pv-backup-filter.go | 3 +- test/e2e/resource-filtering/base.go | 2 +- test/e2e/resource-filtering/exclude_label.go | 2 +- test/e2e/resource-filtering/label_selector.go | 2 +- .../resourcemodifiers/resource_modifiers.go | 2 +- .../e2e/resourcepolicies/resource_policies.go | 2 +- test/e2e/schedule/in_progress.go | 1 + test/e2e/schedule/ordered_resources.go | 2 +- test/e2e/upgrade/upgrade.go | 26 +- test/util/k8s/deployment.go | 46 +-- test/util/k8s/pod.go | 17 +- test/util/kibishii/kibishii_utils.go | 269 +++++++++++++++--- test/util/velero/install.go | 5 +- test/util/velero/velero_utils.go | 42 ++- 25 files changed, 387 insertions(+), 107 deletions(-) diff --git a/test/e2e/backups/deletion.go b/test/e2e/backups/deletion.go index e4bf69702..263567d33 100644 --- a/test/e2e/backups/deletion.go +++ b/test/e2e/backups/deletion.go @@ -115,8 +115,17 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc }() } - if err := KibishiiPrepareBeforeBackup(oneHourTimeout, client, providerName, ns, - registryCredentialFile, veleroFeatures, kibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData); err != nil { + if err := KibishiiPrepareBeforeBackup( + oneHourTimeout, + client, + providerName, + ns, + registryCredentialFile, + veleroFeatures, + kibishiiDirectory, + 
DefaultKibishiiData, + veleroCfg.ImageRegistryProxy, + ); err != nil { return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", ns) } err := ObjectsShouldNotBeInBucket(veleroCfg.ObjectStoreProvider, veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, veleroCfg.BSLPrefix, veleroCfg.BSLConfig, backupName, BackupObjectsPrefix, 1) diff --git a/test/e2e/backups/ttl.go b/test/e2e/backups/ttl.go index c2c2224e9..83df75eb0 100644 --- a/test/e2e/backups/ttl.go +++ b/test/e2e/backups/ttl.go @@ -100,9 +100,17 @@ func TTLTest() { }) By("Deploy sample workload of Kibishii", func() { - Expect(KibishiiPrepareBeforeBackup(ctx, client, veleroCfg.CloudProvider, - test.testNS, veleroCfg.RegistryCredentialFile, veleroCfg.Features, - veleroCfg.KibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData)).To(Succeed()) + Expect(KibishiiPrepareBeforeBackup( + ctx, + client, + veleroCfg.CloudProvider, + test.testNS, + veleroCfg.RegistryCredentialFile, + veleroCfg.Features, + veleroCfg.KibishiiDirectory, + DefaultKibishiiData, + veleroCfg.ImageRegistryProxy, + )).To(Succeed()) }) var BackupCfg BackupConfig diff --git a/test/e2e/basic/backup-volume-info/base.go b/test/e2e/basic/backup-volume-info/base.go index 0f5cd144a..c3993a34b 100644 --- a/test/e2e/basic/backup-volume-info/base.go +++ b/test/e2e/basic/backup-volume-info/base.go @@ -121,7 +121,7 @@ func (v *BackupVolumeInfo) CreateResources() error { volumeName := fmt.Sprintf("volume-info-pv-%d", i) vols = append(vols, CreateVolumes(pvc.Name, []string{volumeName})...) 
} - deployment := NewDeployment(v.CaseBaseName, createNSName, 1, labels, nil).WithVolume(vols).Result() + deployment := NewDeployment(v.CaseBaseName, createNSName, 1, labels, v.VeleroCfg.ImageRegistryProxy).WithVolume(vols).Result() deployment, err := CreateDeployment(v.Client.ClientGo, createNSName, deployment) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", createNSName)) diff --git a/test/e2e/basic/namespace-mapping.go b/test/e2e/basic/namespace-mapping.go index a2ebc24c8..a2736b3f1 100644 --- a/test/e2e/basic/namespace-mapping.go +++ b/test/e2e/basic/namespace-mapping.go @@ -91,9 +91,17 @@ func (n *NamespaceMapping) CreateResources() error { Expect(CreateNamespace(n.Ctx, n.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns)) }) By("Deploy sample workload of Kibishii", func() { - Expect(KibishiiPrepareBeforeBackup(n.Ctx, n.Client, n.VeleroCfg.CloudProvider, - ns, n.VeleroCfg.RegistryCredentialFile, n.VeleroCfg.Features, - n.VeleroCfg.KibishiiDirectory, false, n.kibishiiData)).To(Succeed()) + Expect(KibishiiPrepareBeforeBackup( + n.Ctx, + n.Client, + n.VeleroCfg.CloudProvider, + ns, + n.VeleroCfg.RegistryCredentialFile, + n.VeleroCfg.Features, + n.VeleroCfg.KibishiiDirectory, + n.kibishiiData, + n.VeleroCfg.ImageRegistryProxy, + )).To(Succeed()) }) } return nil diff --git a/test/e2e/basic/pvc-selected-node-changing.go b/test/e2e/basic/pvc-selected-node-changing.go index e200929c1..c1008e9b1 100644 --- a/test/e2e/basic/pvc-selected-node-changing.go +++ b/test/e2e/basic/pvc-selected-node-changing.go @@ -75,7 +75,8 @@ func (p *PVCSelectedNodeChanging) CreateResources() error { p.oldNodeName = nodeName fmt.Printf("Create PVC on node %s\n", p.oldNodeName) pvcAnn := map[string]string{p.ann: nodeName} - _, err := CreatePod(p.Client, p.namespace, p.podName, StorageClassName, p.pvcName, []string{p.volume}, pvcAnn, nil) + _, err := CreatePod(p.Client, p.namespace, p.podName, StorageClassName, p.pvcName, 
[]string{p.volume}, + pvcAnn, nil, p.VeleroCfg.ImageRegistryProxy) Expect(err).To(Succeed()) err = WaitForPods(p.Ctx, p.Client, p.namespace, []string{p.podName}) Expect(err).To(Succeed()) diff --git a/test/e2e/basic/storage-class-changing.go b/test/e2e/basic/storage-class-changing.go index e2e5a0296..77f6c3b7c 100644 --- a/test/e2e/basic/storage-class-changing.go +++ b/test/e2e/basic/storage-class-changing.go @@ -82,7 +82,7 @@ func (s *StorageClasssChanging) CreateResources() error { Expect(err).To(Succeed()) vols := CreateVolumes(pvc.Name, []string{s.volume}) - deployment := NewDeployment(s.CaseBaseName, s.namespace, 1, label, nil).WithVolume(vols).Result() + deployment := NewDeployment(s.CaseBaseName, s.namespace, 1, label, s.VeleroCfg.ImageRegistryProxy).WithVolume(vols).Result() deployment, err = CreateDeployment(s.Client.ClientGo, s.namespace, deployment) Expect(err).To(Succeed()) s.deploymentName = deployment.Name diff --git a/test/e2e/bsl-mgmt/deletion.go b/test/e2e/bsl-mgmt/deletion.go index fd6660056..832f9a6fd 100644 --- a/test/e2e/bsl-mgmt/deletion.go +++ b/test/e2e/bsl-mgmt/deletion.go @@ -152,9 +152,17 @@ func BslDeletionTest(useVolumeSnapshots bool) { }) By("Deploy sample workload of Kibishii", func() { - Expect(KibishiiPrepareBeforeBackup(oneHourTimeout, *veleroCfg.ClientToInstallVelero, veleroCfg.CloudProvider, - bslDeletionTestNs, veleroCfg.RegistryCredentialFile, veleroCfg.Features, - veleroCfg.KibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData)).To(Succeed()) + Expect(KibishiiPrepareBeforeBackup( + oneHourTimeout, + *veleroCfg.ClientToInstallVelero, + veleroCfg.CloudProvider, + bslDeletionTestNs, + veleroCfg.RegistryCredentialFile, + veleroCfg.Features, + veleroCfg.KibishiiDirectory, + DefaultKibishiiData, + veleroCfg.ImageRegistryProxy, + )).To(Succeed()) }) // Restic can not backup PV only, so pod need to be labeled also diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 911d0006a..4aa76eb32 100644 --- 
a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -55,6 +55,7 @@ import ( func init() { test.VeleroCfg.Options = install.Options{} + test.VeleroCfg.BackupRepoConfigMap = test.BackupRepositoryConfigName // Set to the default value flag.StringVar( &test.VeleroCfg.CloudProvider, "cloud-provider", @@ -699,6 +700,8 @@ func TestE2e(t *testing.T) { t.FailNow() } + veleroutil.UpdateImagesMatrixByProxy(test.VeleroCfg.ImageRegistryProxy) + RegisterFailHandler(Fail) testSuitePassed = RunSpecs(t, "E2e Suite") } diff --git a/test/e2e/migration/migration.go b/test/e2e/migration/migration.go index 82c794813..6c39f0734 100644 --- a/test/e2e/migration/migration.go +++ b/test/e2e/migration/migration.go @@ -195,8 +195,8 @@ func (m *migrationE2E) Backup() error { OriginVeleroCfg.RegistryCredentialFile, OriginVeleroCfg.Features, OriginVeleroCfg.KibishiiDirectory, - OriginVeleroCfg.UseVolumeSnapshots, &m.kibishiiData, + OriginVeleroCfg.ImageRegistryProxy, )).To(Succeed()) }) diff --git a/test/e2e/parallelfilesdownload/parallel_files_download.go b/test/e2e/parallelfilesdownload/parallel_files_download.go index a678c6c80..b11a9061d 100644 --- a/test/e2e/parallelfilesdownload/parallel_files_download.go +++ b/test/e2e/parallelfilesdownload/parallel_files_download.go @@ -95,7 +95,8 @@ func (p *ParallelFilesDownload) CreateResources() error { }) By(fmt.Sprintf("Create pod %s in namespace %s", p.pod, p.namespace), func() { - _, err := CreatePod(p.Client, p.namespace, p.pod, StorageClassName, p.pvc, []string{p.volume}, nil, nil) + _, err := CreatePod(p.Client, p.namespace, p.pod, StorageClassName, p.pvc, []string{p.volume}, + nil, nil, p.VeleroCfg.ImageRegistryProxy) Expect(err).To(Succeed()) err = WaitForPods(p.Ctx, p.Client, p.namespace, []string{p.pod}) Expect(err).To(Succeed()) diff --git a/test/e2e/parallelfilesupload/parallel_files_upload.go b/test/e2e/parallelfilesupload/parallel_files_upload.go index 5478e3518..2721752bd 100644 --- 
a/test/e2e/parallelfilesupload/parallel_files_upload.go +++ b/test/e2e/parallelfilesupload/parallel_files_upload.go @@ -86,7 +86,8 @@ func (p *ParallelFilesUpload) CreateResources() error { }) By(fmt.Sprintf("Create pod %s in namespace %s", p.pod, p.namespace), func() { - _, err := CreatePod(p.Client, p.namespace, p.pod, StorageClassName, p.pvc, []string{p.volume}, nil, nil) + _, err := CreatePod(p.Client, p.namespace, p.pod, StorageClassName, p.pvc, + []string{p.volume}, nil, nil, p.VeleroCfg.ImageRegistryProxy) Expect(err).To(Succeed()) err = WaitForPods(p.Ctx, p.Client, p.namespace, []string{p.pod}) Expect(err).To(Succeed()) diff --git a/test/e2e/pv-backup/pv-backup-filter.go b/test/e2e/pv-backup/pv-backup-filter.go index b58cb8a55..e71af4717 100644 --- a/test/e2e/pv-backup/pv-backup-filter.go +++ b/test/e2e/pv-backup/pv-backup-filter.go @@ -87,7 +87,8 @@ func (p *PVBackupFiltering) CreateResources() error { podName := fmt.Sprintf("pod-%d", i) pods = append(pods, podName) By(fmt.Sprintf("Create pod %s in namespace %s", podName, ns), func() { - pod, err := CreatePod(p.Client, ns, podName, StorageClassName, "", volumes, nil, nil) + pod, err := CreatePod(p.Client, ns, podName, StorageClassName, "", + volumes, nil, nil, p.VeleroCfg.ImageRegistryProxy) Expect(err).To(Succeed()) ann := map[string]string{ p.annotation: volumesToAnnotation, diff --git a/test/e2e/resource-filtering/base.go b/test/e2e/resource-filtering/base.go index 8cfce438d..19e8ba056 100644 --- a/test/e2e/resource-filtering/base.go +++ b/test/e2e/resource-filtering/base.go @@ -68,7 +68,7 @@ func (f *FilteringCase) CreateResources() error { } //Create deployment fmt.Printf("Creating deployment in namespaces ...%s\n", namespace) - deployment := NewDeployment(f.CaseBaseName, namespace, f.replica, f.labels, nil).Result() + deployment := NewDeployment(f.CaseBaseName, namespace, f.replica, f.labels, f.VeleroCfg.ImageRegistryProxy).Result() deployment, err := CreateDeployment(f.Client.ClientGo, namespace, 
deployment) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace)) diff --git a/test/e2e/resource-filtering/exclude_label.go b/test/e2e/resource-filtering/exclude_label.go index b90d63dd8..b2b2986af 100644 --- a/test/e2e/resource-filtering/exclude_label.go +++ b/test/e2e/resource-filtering/exclude_label.go @@ -88,7 +88,7 @@ func (e *ExcludeFromBackup) CreateResources() error { } //Create deployment: to be included fmt.Printf("Creating deployment in namespaces ...%s\n", namespace) - deployment := NewDeployment(e.CaseBaseName, namespace, e.replica, label2, nil).Result() + deployment := NewDeployment(e.CaseBaseName, namespace, e.replica, label2, e.VeleroCfg.ImageRegistryProxy).Result() deployment, err := CreateDeployment(e.Client.ClientGo, namespace, deployment) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace)) diff --git a/test/e2e/resource-filtering/label_selector.go b/test/e2e/resource-filtering/label_selector.go index 97e3d71ae..2d745a0fc 100644 --- a/test/e2e/resource-filtering/label_selector.go +++ b/test/e2e/resource-filtering/label_selector.go @@ -88,7 +88,7 @@ func (l *LabelSelector) CreateResources() error { //Create deployment fmt.Printf("Creating deployment in namespaces ...%s\n", namespace) - deployment := NewDeployment(l.CaseBaseName, namespace, l.replica, labels, nil).Result() + deployment := NewDeployment(l.CaseBaseName, namespace, l.replica, labels, l.VeleroCfg.ImageRegistryProxy).Result() deployment, err := CreateDeployment(l.Client.ClientGo, namespace, deployment) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace)) diff --git a/test/e2e/resourcemodifiers/resource_modifiers.go b/test/e2e/resourcemodifiers/resource_modifiers.go index e3bd9ea11..2f048698a 100644 --- a/test/e2e/resourcemodifiers/resource_modifiers.go +++ b/test/e2e/resourcemodifiers/resource_modifiers.go @@ -145,7 +145,7 @@ func (r 
*ResourceModifiersCase) Clean() error { } func (r *ResourceModifiersCase) createDeployment(namespace string) error { - deployment := NewDeployment(r.CaseBaseName, namespace, 1, map[string]string{"app": "test"}, nil).Result() + deployment := NewDeployment(r.CaseBaseName, namespace, 1, map[string]string{"app": "test"}, r.VeleroCfg.ImageRegistryProxy).Result() deployment, err := CreateDeployment(r.Client.ClientGo, namespace, deployment) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to create deloyment %s the namespace %q", deployment.Name, namespace)) diff --git a/test/e2e/resourcepolicies/resource_policies.go b/test/e2e/resourcepolicies/resource_policies.go index 8afc9d4c5..117a3ef1b 100644 --- a/test/e2e/resourcepolicies/resource_policies.go +++ b/test/e2e/resourcepolicies/resource_policies.go @@ -209,7 +209,7 @@ func (r *ResourcePoliciesCase) createPVC(index int, namespace string, volList [] } func (r *ResourcePoliciesCase) createDeploymentWithVolume(namespace string, volList []*corev1api.Volume) error { - deployment := NewDeployment(r.CaseBaseName, namespace, 1, map[string]string{"resource-policies": "resource-policies"}, nil).WithVolume(volList).Result() + deployment := NewDeployment(r.CaseBaseName, namespace, 1, map[string]string{"resource-policies": "resource-policies"}, r.VeleroCfg.ImageRegistryProxy).WithVolume(volList).Result() deployment, err := CreateDeployment(r.Client.ClientGo, namespace, deployment) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to create deloyment %s the namespace %q", deployment.Name, namespace)) diff --git a/test/e2e/schedule/in_progress.go b/test/e2e/schedule/in_progress.go index 3a148a8c1..65b024fb2 100644 --- a/test/e2e/schedule/in_progress.go +++ b/test/e2e/schedule/in_progress.go @@ -84,6 +84,7 @@ func (s *InProgressCase) CreateResources() error { []string{s.volume}, nil, s.podAnn, + s.VeleroCfg.ImageRegistryProxy, ) Expect(err).To(Succeed()) diff --git a/test/e2e/schedule/ordered_resources.go 
b/test/e2e/schedule/ordered_resources.go index 238a014c7..c562ea9d2 100644 --- a/test/e2e/schedule/ordered_resources.go +++ b/test/e2e/schedule/ordered_resources.go @@ -99,7 +99,7 @@ func (o *OrderedResources) CreateResources() error { //Create deployment deploymentName := fmt.Sprintf("deploy-%s", o.CaseBaseName) fmt.Printf("Creating deployment %s in %s namespaces ...\n", deploymentName, o.Namespace) - deployment := k8sutil.NewDeployment(deploymentName, o.Namespace, 1, label, nil).Result() + deployment := k8sutil.NewDeployment(deploymentName, o.Namespace, 1, label, o.VeleroCfg.ImageRegistryProxy).Result() _, err := k8sutil.CreateDeployment(o.Client.ClientGo, o.Namespace, deployment) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to create namespace %q with err %v", o.Namespace, err)) diff --git a/test/e2e/upgrade/upgrade.go b/test/e2e/upgrade/upgrade.go index 4757fbbe9..5ad868547 100644 --- a/test/e2e/upgrade/upgrade.go +++ b/test/e2e/upgrade/upgrade.go @@ -126,6 +126,15 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC tmpCfgForOldVeleroInstall.UpgradeFromVeleroVersion = veleroCLI2Version.VeleroVersion tmpCfgForOldVeleroInstall.VeleroCLI = veleroCLI2Version.VeleroCLI + // CLI under version v1.14.x + if veleroCLI2Version.VeleroVersion < "v1.15" { + tmpCfgForOldVeleroInstall.BackupRepoConfigMap = "" + fmt.Printf( + "CLI version %s is lower than v1.15. 
Set BackupRepoConfigMap to empty, because it's not supported", + veleroCLI2Version.VeleroVersion, + ) + } + tmpCfgForOldVeleroInstall, err = SetImagesToDefaultValues( tmpCfgForOldVeleroInstall, veleroCLI2Version.VeleroVersion, @@ -157,9 +166,17 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC }) By("Deploy sample workload of Kibishii", func() { - Expect(KibishiiPrepareBeforeBackup(oneHourTimeout, *veleroCfg.ClientToInstallVelero, tmpCfg.CloudProvider, - upgradeNamespace, tmpCfg.RegistryCredentialFile, tmpCfg.Features, - tmpCfg.KibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData)).To(Succeed()) + Expect(KibishiiPrepareBeforeBackup( + oneHourTimeout, + *veleroCfg.ClientToInstallVelero, + tmpCfg.CloudProvider, + upgradeNamespace, + tmpCfg.RegistryCredentialFile, + tmpCfg.Features, + tmpCfg.KibishiiDirectory, + DefaultKibishiiData, + tmpCfg.ImageRegistryProxy, + )).To(Succeed()) }) By(fmt.Sprintf("Backup namespace %s", upgradeNamespace), func() { @@ -239,6 +256,9 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC } }) + // Wait for 70s to make sure the backups are synced after Velero reinstall + time.Sleep(70 * time.Second) + By(fmt.Sprintf("Restore %s", upgradeNamespace), func() { Expect(VeleroRestore(oneHourTimeout, tmpCfg.VeleroCLI, tmpCfg.VeleroNamespace, restoreName, backupName, "")).To(Succeed(), func() string { diff --git a/test/util/k8s/deployment.go b/test/util/k8s/deployment.go index c41a0a393..fdc334033 100644 --- a/test/util/k8s/deployment.go +++ b/test/util/k8s/deployment.go @@ -18,6 +18,7 @@ package k8s import ( "fmt" + "path" "time" "golang.org/x/net/context" @@ -36,6 +37,7 @@ const ( PollInterval = 2 * time.Second PollTimeout = 15 * time.Minute DefaultContainerName = "container-busybox" + TestImage = "busybox:1.37.0" ) // DeploymentBuilder builds Deployment objects. 
@@ -48,29 +50,33 @@ func (d *DeploymentBuilder) Result() *appsv1api.Deployment { } // newDeployment returns a RollingUpdate Deployment with a fake container image -func NewDeployment(name, ns string, replicas int32, labels map[string]string, containers []corev1api.Container) *DeploymentBuilder { - if containers == nil { - containers = []corev1api.Container{ - { - Name: DefaultContainerName, - Image: "busybox:1.37.0", - Command: []string{"sleep", "1000000"}, - // Make pod obeys the restricted pod security standards. - SecurityContext: &corev1api.SecurityContext{ - AllowPrivilegeEscalation: boolptr.False(), - Capabilities: &corev1api.Capabilities{ - Drop: []corev1api.Capability{"ALL"}, - }, - RunAsNonRoot: boolptr.True(), - RunAsUser: func(i int64) *int64 { return &i }(65534), - RunAsGroup: func(i int64) *int64 { return &i }(65534), - SeccompProfile: &corev1api.SeccompProfile{ - Type: corev1api.SeccompProfileTypeRuntimeDefault, - }, +func NewDeployment(name, ns string, replicas int32, labels map[string]string, imageRegistryProxy string) *DeploymentBuilder { + imageAddress := TestImage + if imageRegistryProxy != "" { + imageAddress = path.Join(imageRegistryProxy, TestImage) + } + + containers := []corev1api.Container{ + { + Name: DefaultContainerName, + Image: imageAddress, + Command: []string{"sleep", "1000000"}, + // Make pod obeys the restricted pod security standards. 
+ SecurityContext: &corev1api.SecurityContext{ + AllowPrivilegeEscalation: boolptr.False(), + Capabilities: &corev1api.Capabilities{ + Drop: []corev1api.Capability{"ALL"}, + }, + RunAsNonRoot: boolptr.True(), + RunAsUser: func(i int64) *int64 { return &i }(65534), + RunAsGroup: func(i int64) *int64 { return &i }(65534), + SeccompProfile: &corev1api.SeccompProfile{ + Type: corev1api.SeccompProfileTypeRuntimeDefault, }, }, - } + }, } + return &DeploymentBuilder{ &appsv1api.Deployment{ TypeMeta: metav1.TypeMeta{ diff --git a/test/util/k8s/pod.go b/test/util/k8s/pod.go index 2a9b9ef9a..06fa807e0 100644 --- a/test/util/k8s/pod.go +++ b/test/util/k8s/pod.go @@ -19,6 +19,7 @@ package k8s import ( "context" "fmt" + "path" "github.com/pkg/errors" corev1api "k8s.io/api/core/v1" @@ -27,10 +28,22 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/boolptr" ) -func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList []string, pvcAnn, ann map[string]string) (*corev1api.Pod, error) { +func CreatePod( + client TestClient, + ns, name, sc, pvcName string, + volumeNameList []string, + pvcAnn, ann map[string]string, + imageRegistryProxy string, +) (*corev1api.Pod, error) { if pvcName != "" && len(volumeNameList) != 1 { return nil, errors.New("Volume name list should contain only 1 since PVC name is not empty") } + + imageAddress := TestImage + if imageRegistryProxy != "" { + imageAddress = path.Join(imageRegistryProxy, TestImage) + } + volumes := []corev1api.Volume{} for _, volume := range volumeNameList { var _pvcName string @@ -76,7 +89,7 @@ func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList [ Containers: []corev1api.Container{ { Name: name, - Image: "busybox:1.37.0", + Image: imageAddress, Command: []string{"sleep", "3600"}, VolumeMounts: vmList, // Make pod obeys the restricted pod security standards. 
diff --git a/test/util/kibishii/kibishii_utils.go b/test/util/kibishii/kibishii_utils.go index fb767cecd..7883d9b73 100644 --- a/test/util/kibishii/kibishii_utils.go +++ b/test/util/kibishii/kibishii_utils.go @@ -18,6 +18,8 @@ package kibishii import ( "fmt" + "html/template" + "os" "os/exec" "path" "strconv" @@ -27,7 +29,10 @@ import ( . "github.com/onsi/ginkgo/v2" "github.com/pkg/errors" "golang.org/x/net/context" + appsv1api "k8s.io/api/apps/v1" + corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/yaml" veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec" . "github.com/vmware-tanzu/velero/test" @@ -102,9 +107,17 @@ func RunKibishiiTests( } }() fmt.Printf("KibishiiPrepareBeforeBackup %s\n", time.Now().Format("2006-01-02 15:04:05")) - if err := KibishiiPrepareBeforeBackup(oneHourTimeout, client, providerName, - kibishiiNamespace, registryCredentialFile, veleroFeatures, - kibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData); err != nil { + if err := KibishiiPrepareBeforeBackup( + oneHourTimeout, + client, + providerName, + kibishiiNamespace, + registryCredentialFile, + veleroFeatures, + kibishiiDirectory, + DefaultKibishiiData, + veleroCfg.ImageRegistryProxy, + ); err != nil { return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", kibishiiNamespace) } fmt.Printf("KibishiiPrepareBeforeBackup done %s\n", time.Now().Format("2006-01-02 15:04:05")) @@ -264,8 +277,15 @@ func RunKibishiiTests( return nil } -func installKibishii(ctx context.Context, namespace string, cloudPlatform, veleroFeatures, - kibishiiDirectory string, useVolumeSnapshots bool, workerReplicas int) error { +func installKibishii( + ctx context.Context, + namespace string, + cloudPlatform, + veleroFeatures, + kibishiiDirectory string, + workerReplicas int, + imageRegistryProxy string, +) error { if strings.EqualFold(cloudPlatform, Azure) && strings.EqualFold(veleroFeatures, FeatureCSI) { cloudPlatform = AzureCSI @@ -274,6 +294,29 @@ func 
installKibishii(ctx context.Context, namespace string, cloudPlatform, veler strings.EqualFold(veleroFeatures, FeatureCSI) { cloudPlatform = AwsCSI } + + if strings.EqualFold(cloudPlatform, Vsphere) { + if strings.HasPrefix(kibishiiDirectory, "https://") { + return errors.New("vSphere needs to download the Kibishii repository first because it needs to inject some image patch file to work.") + } + + kibishiiImage := readBaseKibishiiImage(path.Join(kibishiiDirectory, "base", "kibishii.yaml")) + if err := generateKibishiiImagePatch( + path.Join(imageRegistryProxy, kibishiiImage), + path.Join(kibishiiDirectory, cloudPlatform, "worker-image-patch.yaml"), + ); err != nil { + return err + } + + jumpPadImage := readBaseJumpPadImage(path.Join(kibishiiDirectory, "base", "jump-pad.yaml")) + if err := generateJumpPadPatch( + path.Join(imageRegistryProxy, jumpPadImage), + path.Join(kibishiiDirectory, cloudPlatform, "jump-pad-image-patch.yaml"), + ); err != nil { + return err + } + } + // We use kustomize to generate YAML for Kibishii from the checked-in yaml directories kibishiiInstallCmd := exec.CommandContext(ctx, "kubectl", "apply", "-n", namespace, "-k", path.Join(kibishiiDirectory, cloudPlatform), "--timeout=90s") @@ -313,16 +356,134 @@ func installKibishii(ctx context.Context, namespace string, cloudPlatform, veler return err } +func readBaseKibishiiImage(kibishiiFilePath string) string { + bytes, err := os.ReadFile(kibishiiFilePath) + if err != nil { + return "" + } + + sts := &appsv1api.StatefulSet{} + if err := yaml.UnmarshalStrict(bytes, sts); err != nil { + return "" + } + + kibishiiImage := "" + if len(sts.Spec.Template.Spec.Containers) > 0 { + kibishiiImage = sts.Spec.Template.Spec.Containers[0].Image + } + + return kibishiiImage +} + +func readBaseJumpPadImage(jumpPadFilePath string) string { + bytes, err := os.ReadFile(jumpPadFilePath) + if err != nil { + return "" + } + + pod := &corev1api.Pod{} + if err := yaml.UnmarshalStrict(bytes, pod); err != nil { + return
"" + } + + jumpPadImage := "" + if len(pod.Spec.Containers) > 0 { + jumpPadImage = pod.Spec.Containers[0].Image + } + + return jumpPadImage +} + +type patchImageData struct { + Image string +} + +func generateKibishiiImagePatch(kibishiiImage string, patchDirectory string) error { + patchString := ` +apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +kind: StatefulSet +metadata: + name: kibishii-deployment +spec: + template: + spec: + containers: + - name: kibishii + image: {{.Image}} +` + + file, err := os.OpenFile(patchDirectory, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + defer file.Close() + + if err != nil { + return err + } + + patchTemplate, err := template.New("imagePatch").Parse(patchString) + if err != nil { + return err + } + + if err := patchTemplate.Execute(file, patchImageData{Image: kibishiiImage}); err != nil { + return err + } + + return nil +} + +func generateJumpPadPatch(jumpPadImage string, patchDirectory string) error { + patchString := ` +apiVersion: v1 +kind: Pod +metadata: + name: jump-pad +spec: + containers: + - name: jump-pad + image: {{.Image}} +` + file, err := os.OpenFile(patchDirectory, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + defer file.Close() + + if err != nil { + return err + } + + patchTemplate, err := template.New("imagePatch").Parse(patchString) + if err != nil { + return err + } + + if err := patchTemplate.Execute(file, patchImageData{Image: jumpPadImage}); err != nil { + return err + } + + return nil +} + func generateData(ctx context.Context, namespace string, kibishiiData *KibishiiData) error { timeout := 30 * time.Minute interval := 1 * time.Second - err := wait.PollImmediate(interval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) { timeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*20) defer ctxCancel() - kibishiiGenerateCmd := exec.CommandContext(timeout, "kubectl", "exec", "-n", 
namespace, "jump-pad", "--", - "/usr/local/bin/generate.sh", strconv.Itoa(kibishiiData.Levels), strconv.Itoa(kibishiiData.DirsPerLevel), - strconv.Itoa(kibishiiData.FilesPerLevel), strconv.Itoa(kibishiiData.FileLength), - strconv.Itoa(kibishiiData.BlockSize), strconv.Itoa(kibishiiData.PassNum), strconv.Itoa(kibishiiData.ExpectedNodes)) + kibishiiGenerateCmd := exec.CommandContext( + timeout, + "kubectl", + "exec", + "-n", + namespace, + "jump-pad", + "--", + "/usr/local/bin/generate.sh", + strconv.Itoa(kibishiiData.Levels), + strconv.Itoa(kibishiiData.DirsPerLevel), + strconv.Itoa(kibishiiData.FilesPerLevel), + strconv.Itoa(kibishiiData.FileLength), + strconv.Itoa(kibishiiData.BlockSize), + strconv.Itoa(kibishiiData.PassNum), + strconv.Itoa(kibishiiData.ExpectedNodes), + ) fmt.Printf("kibishiiGenerateCmd cmd =%v\n", kibishiiGenerateCmd) stdout, stderr, err := veleroexec.RunCommand(kibishiiGenerateCmd) @@ -342,26 +503,44 @@ func generateData(ctx context.Context, namespace string, kibishiiData *KibishiiD func verifyData(ctx context.Context, namespace string, kibishiiData *KibishiiData) error { timeout := 10 * time.Minute interval := 5 * time.Second - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - timeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*20) - defer ctxCancel() - kibishiiVerifyCmd := exec.CommandContext(timeout, "kubectl", "exec", "-n", namespace, "jump-pad", "--", - "/usr/local/bin/verify.sh", strconv.Itoa(kibishiiData.Levels), strconv.Itoa(kibishiiData.DirsPerLevel), - strconv.Itoa(kibishiiData.FilesPerLevel), strconv.Itoa(kibishiiData.FileLength), - strconv.Itoa(kibishiiData.BlockSize), strconv.Itoa(kibishiiData.PassNum), - strconv.Itoa(kibishiiData.ExpectedNodes)) - fmt.Printf("kibishiiVerifyCmd cmd =%v\n", kibishiiVerifyCmd) + err := wait.PollUntilContextTimeout( + ctx, + interval, + timeout, + true, + func(ctx context.Context) (bool, error) { + timeout, ctxCancel := 
context.WithTimeout(context.Background(), time.Minute*20) + defer ctxCancel() + kibishiiVerifyCmd := exec.CommandContext( + timeout, + "kubectl", + "exec", + "-n", + namespace, + "jump-pad", + "--", + "/usr/local/bin/verify.sh", + strconv.Itoa(kibishiiData.Levels), + strconv.Itoa(kibishiiData.DirsPerLevel), + strconv.Itoa(kibishiiData.FilesPerLevel), + strconv.Itoa(kibishiiData.FileLength), + strconv.Itoa(kibishiiData.BlockSize), + strconv.Itoa(kibishiiData.PassNum), + strconv.Itoa(kibishiiData.ExpectedNodes), + ) + fmt.Printf("kibishiiVerifyCmd cmd =%v\n", kibishiiVerifyCmd) - stdout, stderr, err := veleroexec.RunCommand(kibishiiVerifyCmd) - if strings.Contains(stderr, "Timeout occurred") { - return false, nil - } - if err != nil { - fmt.Printf("Kibishi verify stdout Timeout occurred: %s stderr: %s err: %s\n", stdout, stderr, err) - return false, nil - } - return true, nil - }) + stdout, stderr, err := veleroexec.RunCommand(kibishiiVerifyCmd) + if strings.Contains(stderr, "Timeout occurred") { + return false, nil + } + if err != nil { + fmt.Printf("Kibishi verify stdout Timeout occurred: %s stderr: %s err: %s\n", stdout, stderr, err) + return false, nil + } + return true, nil + }, + ) if err != nil { return errors.Wrapf(err, "Failed to verify kibishii data in namespace %s\n", namespace) @@ -371,7 +550,12 @@ func verifyData(ctx context.Context, namespace string, kibishiiData *KibishiiDat } func waitForKibishiiPods(ctx context.Context, client TestClient, kibishiiNamespace string) error { - return WaitForPods(ctx, client, kibishiiNamespace, []string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"}) + return WaitForPods( + ctx, + client, + kibishiiNamespace, + []string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"}, + ) } func KibishiiGenerateData(oneHourTimeout context.Context, kibishiiNamespace string, kibishiiData *KibishiiData) error { @@ -383,9 +567,17 @@ func 
KibishiiGenerateData(oneHourTimeout context.Context, kibishiiNamespace stri return nil } -func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClient, - providerName, kibishiiNamespace, registryCredentialFile, veleroFeatures, - kibishiiDirectory string, useVolumeSnapshots bool, kibishiiData *KibishiiData) error { +func KibishiiPrepareBeforeBackup( + oneHourTimeout context.Context, + client TestClient, + providerName, + kibishiiNamespace, + registryCredentialFile, + veleroFeatures, + kibishiiDirectory string, + kibishiiData *KibishiiData, + imageRegistryProxy string, +) error { fmt.Printf("installKibishii %s\n", time.Now().Format("2006-01-02 15:04:05")) serviceAccountName := "default" @@ -399,8 +591,15 @@ func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClie return errors.Wrapf(err, "failed to patch the service account %q under the namespace %q", serviceAccountName, kibishiiNamespace) } - if err := installKibishii(oneHourTimeout, kibishiiNamespace, providerName, veleroFeatures, - kibishiiDirectory, useVolumeSnapshots, kibishiiData.ExpectedNodes); err != nil { + if err := installKibishii( + oneHourTimeout, + kibishiiNamespace, + providerName, + veleroFeatures, + kibishiiDirectory, + kibishiiData.ExpectedNodes, + imageRegistryProxy, + ); err != nil { return errors.Wrap(err, "Failed to install Kibishii workload") } // wait for kibishii pod startup diff --git a/test/util/velero/install.go b/test/util/velero/install.go index e2e44e1b1..7940b0fe8 100644 --- a/test/util/velero/install.go +++ b/test/util/velero/install.go @@ -390,7 +390,10 @@ func installVeleroServer(ctx context.Context, cli, cloudProvider string, options if options.ItemBlockWorkerCount > 1 { args = append(args, fmt.Sprintf("--item-block-worker-count=%d", options.ItemBlockWorkerCount)) } - args = append(args, fmt.Sprintf("--backup-repository-configmap=%s", test.BackupRepositoryConfigName)) + + if options.BackupRepoConfigMap != "" { + args = append(args, 
fmt.Sprintf("--backup-repository-configmap=%s", options.BackupRepoConfigMap)) + } if err := createVeleroResources(ctx, cli, namespace, args, options); err != nil { return err diff --git a/test/util/velero/velero_utils.go b/test/util/velero/velero_utils.go index 69c84b1c2..bc7f76645 100644 --- a/test/util/velero/velero_utils.go +++ b/test/util/velero/velero_utils.go @@ -61,7 +61,7 @@ var ImagesMatrix = map[string]map[string][]string{ "v1.13": { "aws": {"velero/velero-plugin-for-aws:v1.9.2"}, "azure": {"velero/velero-plugin-for-microsoft-azure:v1.9.2"}, - "vsphere": {"velero/velero-plugin-for-vsphere:v1.5.2"}, + "vsphere": {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"}, "gcp": {"velero/velero-plugin-for-gcp:v1.9.2"}, "csi": {"velero/velero-plugin-for-csi:v0.7.1"}, "datamover": {"velero/velero-plugin-for-aws:v1.9.2"}, @@ -71,7 +71,7 @@ var ImagesMatrix = map[string]map[string][]string{ "v1.14": { "aws": {"velero/velero-plugin-for-aws:v1.10.1"}, "azure": {"velero/velero-plugin-for-microsoft-azure:v1.10.1"}, - "vsphere": {"velero/velero-plugin-for-vsphere:v1.5.2"}, + "vsphere": {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"}, "gcp": {"velero/velero-plugin-for-gcp:v1.10.1"}, "datamover": {"velero/velero-plugin-for-aws:v1.10.1"}, "velero": {"velero/velero:v1.14.1"}, @@ -80,7 +80,7 @@ var ImagesMatrix = map[string]map[string][]string{ "v1.15": { "aws": {"velero/velero-plugin-for-aws:v1.11.0"}, "azure": {"velero/velero-plugin-for-microsoft-azure:v1.11.0"}, - "vsphere": {"velero/velero-plugin-for-vsphere:v1.5.2"}, + "vsphere": {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"}, "gcp": {"velero/velero-plugin-for-gcp:v1.11.0"}, "datamover": {"velero/velero-plugin-for-aws:v1.11.0"}, "velero": {"velero/velero:v1.15.2"}, @@ -89,7 +89,7 @@ var ImagesMatrix = map[string]map[string][]string{ "v1.16": { "aws": {"velero/velero-plugin-for-aws:v1.12.0"}, "azure": {"velero/velero-plugin-for-microsoft-azure:v1.12.0"}, - "vsphere": 
{"velero/velero-plugin-for-vsphere:v1.5.2"}, + "vsphere": {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"}, "gcp": {"velero/velero-plugin-for-gcp:v1.12.0"}, "datamover": {"velero/velero-plugin-for-aws:v1.12.0"}, "velero": {"velero/velero:v1.15.0"}, @@ -98,7 +98,7 @@ var ImagesMatrix = map[string]map[string][]string{ "main": { "aws": {"velero/velero-plugin-for-aws:main"}, "azure": {"velero/velero-plugin-for-microsoft-azure:main"}, - "vsphere": {"velero/velero-plugin-for-vsphere:v1.5.2"}, + "vsphere": {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"}, "gcp": {"velero/velero-plugin-for-gcp:main"}, "datamover": {"velero/velero-plugin-for-aws:main"}, "velero": {"velero/velero:main"}, @@ -106,6 +106,17 @@ var ImagesMatrix = map[string]map[string][]string{ }, } +// UpdateImagesMatrixByProxy is used to append the proxy to the image lists. +func UpdateImagesMatrixByProxy(imageRegistryProxy string) { + if imageRegistryProxy != "" { + for i := range ImagesMatrix { + for j := range ImagesMatrix[i] { + ImagesMatrix[i][j][0] = path.Join(imageRegistryProxy, ImagesMatrix[i][j][0]) + } + } + } +} + func SetImagesToDefaultValues(config VeleroConfig, version string) (VeleroConfig, error) { fmt.Printf("Get the images for version %s\n", version) @@ -121,12 +132,6 @@ func SetImagesToDefaultValues(config VeleroConfig, version string) (VeleroConfig versionWithoutPatch) } - if config.ImageRegistryProxy != "" { - for index := range images { - images[index][0] = path.Join(config.ImageRegistryProxy, images[index][0]) - } - } - ret.VeleroImage = images[Velero][0] ret.RestoreHelperImage = images[VeleroRestoreHelper][0] @@ -157,7 +162,7 @@ func SetImagesToDefaultValues(config VeleroConfig, version string) (VeleroConfig return ret, nil } -func getPluginsByVersion(version string, cloudProvider string, needDataMoverPlugin bool, imageRegistryProxy string) ([]string, error) { +func getPluginsByVersion(version string, cloudProvider string, needDataMoverPlugin bool) ([]string, error) { 
var cloudMap map[string][]string arr := strings.Split(version, ".") if len(arr) >= 3 { @@ -172,12 +177,6 @@ func getPluginsByVersion(version string, cloudProvider string, needDataMoverPlug var plugins []string var ok bool - if imageRegistryProxy != "" { - for index := range cloudMap { - cloudMap[index][0] = path.Join(imageRegistryProxy, cloudMap[index][0]) - } - } - if slices.Contains(LocalCloudProviders, cloudProvider) { plugins, ok = cloudMap[AWS] if !ok { @@ -667,7 +666,6 @@ func GetPlugins(ctx context.Context, veleroCfg VeleroConfig, defaultBSL bool) ([ cloudProvider := veleroCfg.CloudProvider objectStoreProvider := veleroCfg.ObjectStoreProvider providerPlugins := veleroCfg.Plugins - imageRegistryProxy := veleroCfg.ImageRegistryProxy needDataMoverPlugin := false var plugins []string @@ -686,9 +684,9 @@ func GetPlugins(ctx context.Context, veleroCfg VeleroConfig, defaultBSL bool) ([ return []string{}, errors.New("AdditionalBSLProvider should be provided.") } - plugins, err = getPluginsByVersion(version, cloudProvider, false, imageRegistryProxy) + plugins, err = getPluginsByVersion(version, veleroCfg.AdditionalBSLProvider, false) if err != nil { - return nil, errors.WithMessagef(err, "Fail to get plugin by provider %s and version %s", cloudProvider, version) + return nil, errors.WithMessagef(err, "Fail to get plugin by provider %s and version %s", veleroCfg.AdditionalBSLProvider, version) } } else { plugins = append(plugins, veleroCfg.AddBSLPlugins) @@ -716,7 +714,7 @@ func GetPlugins(ctx context.Context, veleroCfg VeleroConfig, defaultBSL bool) ([ needDataMoverPlugin = true } - plugins, err = getPluginsByVersion(version, cloudProvider, needDataMoverPlugin, imageRegistryProxy) + plugins, err = getPluginsByVersion(version, cloudProvider, needDataMoverPlugin) if err != nil { return nil, errors.WithMessagef(err, "Fail to get plugin by provider %s and version %s", objectStoreProvider, version) }