Support Windows E2E.

* Please note that only the Kibishii workload supports the Windows test,
because the other workloads use the busybox image, which does not support Windows.
* Refactor CreateFileToPod to support Windows (a sketch of the OS branching follows below).
* Add skip logic for the migration test when the Velero version is below v1.16.
* Treat the "main" version as supported in the semver check.
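
As an illustration only, the OS branching implied by the CreateFileToPod refactor might look like the sketch below; the helper name, commands, and paths are assumptions, not the actual util/k8s implementation.

package sketch

// writeFileCommand is a hypothetical illustration of per-OS command selection
// for writing a file into a pod volume. Windows containers normally have no
// /bin/sh, so a cmd.exe invocation is used instead of a shell command.
// Paths and command strings are illustrative only.
func writeFileCommand(volume, fileName, content, workerOS string) []string {
	if workerOS == "windows" {
		return []string{"cmd", "/c", "echo " + content + " > C:\\" + volume + "\\" + fileName}
	}
	return []string{"/bin/sh", "-c", "echo " + content + " > /" + volume + "/" + fileName}
}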

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
Xun Jiang
2025-05-02 10:48:14 +08:00
parent 4c23f66338
commit a18fe55585
23 changed files with 379 additions and 107 deletions

View File

@@ -79,6 +79,7 @@ These configuration parameters are expected as values to the following command l
1. `--debug-velero-pod-restart`: A switch for debugging velero pod restart.
1. `--fail-fast`: A switch for failing fast when an error is encountered.
1. `--has-vsphere-plugin`: A switch to indicate whether the Velero vSphere plugin is installed for vSphere environment.
1. `--worker-os`: A switch to indicate whether the workload should run on a Windows or Linux worker OS (see the sketch below).
These configuration parameters are used to generate the Velero install options for each test suite.
@@ -131,6 +132,7 @@ Below is a mapping between `make` variables to E2E configuration flags.
1. `DEBUG_VELERO_POD_RESTART`: `-debug-velero-pod-restart`. Optional.
1. `FAIL_FAST`: `--fail-fast`. Optional.
1. `HAS_VSPHERE_PLUGIN`: `--has-vsphere-plugin`. Optional.
1. `WORKER_OS`: `--worker-os`. Optional.
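
For illustration, the sketch below (a hypothetical helper, not part of this change) shows why the worker OS matters when selecting a workload: busybox has no Windows image, which is why only the Kibishii workload is exercised on Windows. The image names are assumptions.

package sketch

// workloadImageFor is a hypothetical helper illustrating the constraint behind
// --worker-os: busybox-based workloads cannot run on Windows nodes, so a
// Windows-capable image would be required there. Image names are examples only.
func workloadImageFor(workerOS string) string {
	if workerOS == "windows" {
		return "mcr.microsoft.com/windows/nanoserver:ltsc2022"
	}
	return "busybox:latest"
}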

View File

@@ -140,7 +140,15 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
veleroCfg.ProvideSnapshotsVolumeParam = provideSnapshotVolumesParmInBackup
// Set DefaultVolumesToFsBackup to false since DefaultVolumesToFsBackup was set to true during installation
Expect(RunKibishiiTests(veleroCfg, backupName, restoreName, "", kibishiiNamespace, useVolumeSnapshots, false)).To(Succeed(),
Expect(RunKibishiiTests(
veleroCfg,
backupName,
restoreName,
"",
kibishiiNamespace,
useVolumeSnapshots,
false,
)).To(Succeed(),
"Failed to successfully backup and restore Kibishii namespace")
})
@@ -212,7 +220,17 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
}
veleroCfg.ProvideSnapshotsVolumeParam = !provideSnapshotVolumesParmInBackup
workloadNS := kibishiiNamespace + bsl
Expect(RunKibishiiTests(veleroCfg, backupName, restoreName, bsl, workloadNS, useVolumeSnapshots, !useVolumeSnapshots)).To(Succeed(),
Expect(
RunKibishiiTests(
veleroCfg,
backupName,
restoreName,
bsl,
workloadNS,
useVolumeSnapshots,
!useVolumeSnapshots,
),
).To(Succeed(),
"Failed to successfully backup and restore Kibishii namespace using BSL %s", bsl)
}
})

View File

@@ -125,6 +125,7 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
kibishiiDirectory,
DefaultKibishiiData,
veleroCfg.ImageRegistryProxy,
veleroCfg.WorkerOS,
); err != nil {
return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", ns)
}

View File

@@ -110,6 +110,7 @@ func TTLTest() {
veleroCfg.KibishiiDirectory,
DefaultKibishiiData,
veleroCfg.ImageRegistryProxy,
veleroCfg.WorkerOS,
)).To(Succeed())
})

View File

@@ -138,8 +138,16 @@ func (v *BackupVolumeInfo) CreateResources() error {
// Hitting issue https://github.com/vmware-tanzu/velero/issues/7388
// So populate data only to some of pods, leave other pods empty to verify empty PV datamover
if i%2 == 0 {
Expect(CreateFileToPod(v.Ctx, createNSName, pod.Name, DefaultContainerName, vols[i].Name,
fmt.Sprintf("file-%s", pod.Name), CreateFileContent(createNSName, pod.Name, vols[i].Name))).To(Succeed())
Expect(CreateFileToPod(
v.Ctx,
createNSName,
pod.Name,
DefaultContainerName,
vols[i].Name,
fmt.Sprintf("file-%s", pod.Name),
CreateFileContent(createNSName, pod.Name, vols[i].Name),
WorkerOSLinux,
)).To(Succeed())
}
}
}

View File

@@ -101,6 +101,7 @@ func (n *NamespaceMapping) CreateResources() error {
n.VeleroCfg.KibishiiDirectory,
n.kibishiiData,
n.VeleroCfg.ImageRegistryProxy,
n.VeleroCfg.WorkerOS,
)).To(Succeed())
})
}
@@ -111,8 +112,14 @@ func (n *NamespaceMapping) Verify() error {
for index, ns := range n.MappedNamespaceList {
n.kibishiiData.Levels = len(*n.NSIncluded) + index
By(fmt.Sprintf("Verify workload %s after restore ", ns), func() {
Expect(KibishiiVerifyAfterRestore(n.Client, ns,
n.Ctx, n.kibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
Expect(KibishiiVerifyAfterRestore(
n.Client,
ns,
n.Ctx,
n.kibishiiData,
"",
n.VeleroCfg.WorkerOS,
)).To(Succeed(), "Fail to verify workload after restore")
})
}
for _, ns := range *n.NSIncluded {

View File

@@ -31,7 +31,7 @@ import (
type MultiNSBackup struct {
TestCase
IsScalTest bool
IsScaleTest bool
NSExcluded *[]string
TimeoutDuration time.Duration
}
@@ -43,7 +43,7 @@ func (m *MultiNSBackup) Init() error {
m.RestoreName = "restore-" + m.CaseBaseName
m.NSExcluded = &[]string{}
if m.IsScalTest {
if m.IsScaleTest {
m.NamespacesTotal = 2500
m.TimeoutDuration = time.Hour * 2
m.TestMsg = &TestMSG{

View File

@@ -39,7 +39,7 @@ import (
func GetResourcesCheckTestCases() []VeleroBackupRestoreTest {
return []VeleroBackupRestoreTest{
&NSAnnotationCase{},
&MultiNSBackup{IsScalTest: false},
&MultiNSBackup{IsScaleTest: false},
&RBACCase{},
}
}

View File

@@ -162,6 +162,7 @@ func BslDeletionTest(useVolumeSnapshots bool) {
veleroCfg.KibishiiDirectory,
DefaultKibishiiData,
veleroCfg.ImageRegistryProxy,
veleroCfg.WorkerOS,
)).To(Succeed())
})

View File

@@ -356,6 +356,12 @@ func init() {
"",
"The image registry proxy, e.g. when the DockerHub access limitation is reached, can use available proxy to replace. Default is nil.",
)
flag.StringVar(
&test.VeleroCfg.WorkerOS,
"worker-os",
"linux",
"test k8s worker node OS version, should be either linux or windows.",
)
}
// Add label [SkipVanillaZfs]:
@@ -621,12 +627,12 @@ var _ = Describe(
var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("PVBackup", "OptIn"),
Label("PVBackup", "OptIn", "FSB"),
OptInPVBackupTest,
)
var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("PVBackup", "OptOut"),
Label("PVBackup", "OptOut", "FSB"),
OptOutPVBackupTest,
)
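
The --worker-os flag registered in init() above defaults to linux and documents only two meaningful values. A minimal validation guard, hypothetical and not included in this change, could look like:

package sketch

import "fmt"

// validateWorkerOS is a hypothetical guard for the --worker-os flag value;
// the change itself only registers the flag with a default of "linux" and
// performs no explicit validation here.
func validateWorkerOS(workerOS string) error {
	switch workerOS {
	case "linux", "windows":
		return nil
	default:
		return fmt.Errorf("unsupported --worker-os value %q: must be linux or windows", workerOS)
	}
}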

View File

@@ -23,9 +23,11 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"golang.org/x/mod/semver"
"github.com/vmware-tanzu/velero/test"
framework "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
util "github.com/vmware-tanzu/velero/test/util/csi"
k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
"github.com/vmware-tanzu/velero/test/util/kibishii"
@@ -160,6 +162,10 @@ func (m *migrationE2E) Backup() error {
version, err := veleroutil.GetVeleroVersion(m.Ctx, OriginVeleroCfg.VeleroCLI, true)
Expect(err).To(Succeed(), "Fail to get Velero version")
OriginVeleroCfg.VeleroVersion = version
if OriginVeleroCfg.WorkerOS == common.WorkerOSWindows &&
(version != "main" && semver.Compare(version, "v1.16") < 0) {
Skip(fmt.Sprintf("Velero CLI version %s doesn't support Windows migration test.", version))
}
if OriginVeleroCfg.SnapshotMoveData {
OriginVeleroCfg.UseNodeAgent = true
@@ -197,6 +203,7 @@ func (m *migrationE2E) Backup() error {
OriginVeleroCfg.KibishiiDirectory,
&m.kibishiiData,
OriginVeleroCfg.ImageRegistryProxy,
OriginVeleroCfg.WorkerOS,
)).To(Succeed())
})
@@ -401,6 +408,7 @@ func (m *migrationE2E) Verify() error {
m.Ctx,
&m.kibishiiData,
"",
m.VeleroCfg.WorkerOS,
)).To(Succeed(), "Fail to verify workload after restore")
})
@@ -413,56 +421,66 @@ func (m *migrationE2E) Clean() error {
})
By("Clean resource on standby cluster.", func() {
defer func() {
By("Switch to default KubeConfig context", func() {
k8sutil.KubectlConfigUseContext(
m.Ctx,
m.VeleroCfg.DefaultClusterContext,
)
})
}()
Expect(k8sutil.KubectlConfigUseContext(
m.Ctx, m.VeleroCfg.StandbyClusterContext)).To(Succeed())
m.VeleroCfg.ClientToInstallVelero = m.VeleroCfg.StandbyClient
m.VeleroCfg.ClusterToInstallVelero = m.VeleroCfg.StandbyClusterName
By("Delete StorageClasses created by E2E")
Expect(
k8sutil.DeleteStorageClass(
m.Ctx,
*m.VeleroCfg.ClientToInstallVelero,
test.StorageClassName,
),
).To(Succeed())
Expect(
k8sutil.DeleteStorageClass(
m.Ctx,
*m.VeleroCfg.ClientToInstallVelero,
test.StorageClassName2,
),
).To(Succeed())
if err := k8sutil.DeleteStorageClass(
m.Ctx,
*m.VeleroCfg.ClientToInstallVelero,
test.StorageClassName,
); err != nil {
fmt.Println("Fail to delete StorageClass1: ", err)
return
}
if err := k8sutil.DeleteStorageClass(
m.Ctx,
*m.VeleroCfg.ClientToInstallVelero,
test.StorageClassName2,
); err != nil {
fmt.Println("Fail to delete StorageClass2: ", err)
return
}
if strings.EqualFold(m.VeleroCfg.Features, test.FeatureCSI) &&
m.VeleroCfg.UseVolumeSnapshots {
By("Delete VolumeSnapshotClass created by E2E")
Expect(
k8sutil.KubectlDeleteByFile(
m.Ctx,
fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml",
m.VeleroCfg.StandbyClusterCloudProvider),
),
).To(Succeed())
if err := k8sutil.KubectlDeleteByFile(
m.Ctx,
fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml",
m.VeleroCfg.StandbyClusterCloudProvider),
); err != nil {
fmt.Println("Fail to delete VolumeSnapshotClass: ", err)
return
}
}
Expect(veleroutil.VeleroUninstall(m.Ctx, m.VeleroCfg)).To(Succeed())
if err := veleroutil.VeleroUninstall(m.Ctx, m.VeleroCfg); err != nil {
fmt.Println("Fail to uninstall Velero: ", err)
return
}
Expect(
k8sutil.DeleteNamespace(
m.Ctx,
*m.VeleroCfg.StandbyClient,
m.CaseBaseName,
true,
),
).To(Succeed())
})
By("Switch to default KubeConfig context", func() {
Expect(k8sutil.KubectlConfigUseContext(
if err := k8sutil.DeleteNamespace(
m.Ctx,
m.VeleroCfg.DefaultClusterContext,
)).To(Succeed())
*m.VeleroCfg.StandbyClient,
m.CaseBaseName,
true,
); err != nil {
fmt.Println("Fail to delete the workload namespace: ", err)
return
}
})
return nil
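
The Windows-skip gate added to Backup() above treats a "main" build as always supported and otherwise requires v1.16 or newer via golang.org/x/mod/semver. A standalone sketch that mirrors (but does not reuse) that comparison:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// supportsWindowsMigration mirrors the gate added in Backup(): development
// ("main") builds pass, released versions must be v1.16 or newer.
func supportsWindowsMigration(version string) bool {
	return version == "main" || semver.Compare(version, "v1.16") >= 0
}

func main() {
	for _, v := range []string{"main", "v1.15.2", "v1.16.0"} {
		fmt.Printf("%s -> %v\n", v, supportsWindowsMigration(v))
	}
	// Prints: main -> true, v1.15.2 -> false, v1.16.0 -> true
}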

View File

@@ -115,8 +115,16 @@ func (p *PVBackupFiltering) CreateResources() error {
Expect(WaitForPods(p.Ctx, p.Client, ns, p.podsList[index])).To(Succeed())
for i, pod := range p.podsList[index] {
for j := range p.volumesList[i] {
Expect(CreateFileToPod(p.Ctx, ns, pod, pod, p.volumesList[i][j],
FILE_NAME, CreateFileContent(ns, pod, p.volumesList[i][j]))).To(Succeed())
Expect(CreateFileToPod(
p.Ctx,
ns,
pod,
pod,
p.volumesList[i][j],
FILE_NAME,
CreateFileContent(ns, pod, p.volumesList[i][j]),
WorkerOSLinux,
)).To(Succeed())
}
}
})
@@ -142,21 +150,45 @@ func (p *PVBackupFiltering) Verify() error {
if j%2 == 0 {
if p.annotation == OPT_IN_ANN {
By(fmt.Sprintf("File should exists in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
Expect(fileExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect")
Expect(fileExist(
p.Ctx,
ns,
p.podsList[k][i],
p.volumesList[i][j],
p.VeleroCfg.WorkerOS,
)).To(Succeed(), "File not exist as expect")
})
} else {
By(fmt.Sprintf("File should not exist in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
Expect(fileNotExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect")
Expect(fileNotExist(
p.Ctx,
ns,
p.podsList[k][i],
p.volumesList[i][j],
p.VeleroCfg.WorkerOS,
)).To(Succeed(), "File exists, not as expect")
})
}
} else {
if p.annotation == OPT_OUT_ANN {
By(fmt.Sprintf("File should exists in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
Expect(fileExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect")
Expect(fileExist(
p.Ctx,
ns,
p.podsList[k][i],
p.volumesList[i][j],
p.VeleroCfg.WorkerOS,
)).To(Succeed(), "File not exist as expect")
})
} else {
By(fmt.Sprintf("File should not exist in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
Expect(fileNotExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect")
Expect(fileNotExist(
p.Ctx,
ns,
p.podsList[k][i],
p.volumesList[i][j],
p.VeleroCfg.WorkerOS,
)).To(Succeed(), "File exists, not as expect")
})
}
}
@@ -168,8 +200,14 @@ func (p *PVBackupFiltering) Verify() error {
return nil
}
func fileExist(ctx context.Context, namespace, podName, volume string) error {
c, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
func fileExist(
ctx context.Context,
namespace string,
podName string,
volume string,
workerOS string,
) error {
c, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME, workerOS)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s ",
FILE_NAME, volume, podName, namespace))
@@ -183,8 +221,14 @@ func fileExist(ctx context.Context, namespace, podName, volume string) error {
FILE_NAME, volume, podName, namespace))
}
}
func fileNotExist(ctx context.Context, namespace, podName, volume string) error {
_, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
func fileNotExist(
ctx context.Context,
namespace string,
podName string,
volume string,
workerOS string,
) error {
_, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME, workerOS)
if err != nil {
return nil
} else {

View File

@@ -28,6 +28,7 @@ import (
. "github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
)
@@ -151,7 +152,15 @@ func (r *ResourcePoliciesCase) Verify() error {
if vol.Name != volName {
continue
}
content, _, err := ReadFileFromPodVolume(r.Ctx, ns, pod.Name, "container-busybox", vol.Name, FileName)
content, _, err := ReadFileFromPodVolume(
r.Ctx,
ns,
pod.Name,
"container-busybox",
vol.Name,
FileName,
r.VeleroCfg.WorkerOS,
)
if i%2 == 0 {
Expect(err).To(HaveOccurred(), "Expected file not found") // File should not exist
} else {
@@ -231,7 +240,16 @@ func (r *ResourcePoliciesCase) writeDataIntoPods(namespace, volName string) erro
if vol.Name != volName {
continue
}
err := CreateFileToPod(r.Ctx, namespace, pod.Name, "container-busybox", vol.Name, FileName, fmt.Sprintf("ns-%s pod-%s volume-%s", namespace, pod.Name, vol.Name))
err := CreateFileToPod(
r.Ctx,
namespace,
pod.Name,
"container-busybox",
vol.Name,
FileName,
fmt.Sprintf("ns-%s pod-%s volume-%s", namespace, pod.Name, vol.Name),
common.WorkerOSLinux,
)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create file into pod %s in namespace: %q", pod.Name, namespace))
}

View File

@@ -21,4 +21,4 @@ import (
. "github.com/vmware-tanzu/velero/test/e2e/test"
)
var MultiNSBackupRestore func() = TestFunc(&basic.MultiNSBackup{IsScalTest: true})
var MultiNSBackupRestore func() = TestFunc(&basic.MultiNSBackup{IsScaleTest: true})

View File

@@ -126,15 +126,6 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
tmpCfgForOldVeleroInstall.UpgradeFromVeleroVersion = veleroCLI2Version.VeleroVersion
tmpCfgForOldVeleroInstall.VeleroCLI = veleroCLI2Version.VeleroCLI
// CLI under version v1.14.x
if veleroCLI2Version.VeleroVersion < "v1.15" {
tmpCfgForOldVeleroInstall.BackupRepoConfigMap = ""
fmt.Printf(
"CLI version %s is lower than v1.15. Set BackupRepoConfigMap to empty, because it's not supported",
veleroCLI2Version.VeleroVersion,
)
}
tmpCfgForOldVeleroInstall, err = SetImagesToDefaultValues(
tmpCfgForOldVeleroInstall,
veleroCLI2Version.VeleroVersion,
@@ -176,6 +167,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
tmpCfg.KibishiiDirectory,
DefaultKibishiiData,
tmpCfg.ImageRegistryProxy,
veleroCfg.WorkerOS,
)).To(Succeed())
})
@@ -269,8 +261,14 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
})
By(fmt.Sprintf("Verify workload %s after restore ", upgradeNamespace), func() {
Expect(KibishiiVerifyAfterRestore(*veleroCfg.ClientToInstallVelero, upgradeNamespace,
oneHourTimeout, DefaultKibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
Expect(KibishiiVerifyAfterRestore(
*veleroCfg.ClientToInstallVelero,
upgradeNamespace,
oneHourTimeout,
DefaultKibishiiData,
"",
veleroCfg.WorkerOS,
)).To(Succeed(), "Fail to verify workload after restore")
})
})
})
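
The block removed above compared version strings lexically (VeleroVersion < "v1.15"). For contrast, the version gate added to the migration test uses golang.org/x/mod/semver, which orders versions numerically; a standalone illustration of the difference:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// Lexical comparison misorders these: "v1.9" sorts after "v1.15"
	// because '9' > '1' at the fourth byte.
	fmt.Println("v1.9" < "v1.15") // false
	// semver.Compare orders them numerically: v1.9 is older than v1.15.
	fmt.Println(semver.Compare("v1.9", "v1.15") < 0) // true
}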