Merge pull request #6274 from qiuming-best/e2e-time-optimize

Adjust Sleep time in E2E
qiuming authored on 2023-05-17 15:59:03 +08:00; committed by GitHub
5 changed files with 12 additions and 6 deletions

File 1 of 5

@@ -51,7 +51,7 @@ func (b *TTL) Init() {
	b.testNS = "backup-ttl-test-" + UUIDgen.String()
	b.backupName = "backup-ttl-test-" + UUIDgen.String()
	b.restoreName = "restore-ttl-test-" + UUIDgen.String()
-	b.ttl = 20 * time.Minute
+	b.ttl = 10 * time.Minute
}
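
Nothing else changes here: the backup TTL is cut from 20 to 10 minutes, which roughly halves how long this test waits for Velero's garbage collection to expire the backup. A minimal sketch of the kind of expiry poll the shorter TTL speeds up, assuming a hypothetical getBackup lookup (the real suite drives the velero CLI, not this helper):

package main

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitBackupExpired polls until the backup disappears. getBackup reports
// whether the backup still exists; it is a stand-in for the real lookup.
// The timeout scales with the TTL, so halving b.ttl halves the worst case.
func waitBackupExpired(getBackup func(name string) (bool, error), name string, ttl time.Duration) error {
	return wait.PollImmediate(30*time.Second, ttl+5*time.Minute, func() (bool, error) {
		exists, err := getBackup(name)
		if err != nil {
			return false, err // abort on lookup errors
		}
		return !exists, nil // done once GC has removed the expired backup
	})
}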

File 2 of 5

@@ -226,7 +226,7 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version)
	// the snapshots of AWS may still be in pending status when doing the restore, wait for a while
	// to avoid this https://github.com/vmware-tanzu/velero/issues/1799
	// TODO remove this after https://github.com/vmware-tanzu/velero/issues/3533 is fixed
-	if (veleroCfg.CloudProvider == "aws" || veleroCfg.CloudProvider == "vsphere") && useVolumeSnapshots {
+	if veleroCfg.CloudProvider == "aws" && useVolumeSnapshots {
		fmt.Println("Waiting 5 minutes to make sure the snapshots are ready...")
		time.Sleep(5 * time.Minute)
	}
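
The 5-minute sleep is exactly the workaround the TODO above wants to retire: issue #3533 is about waiting for snapshot readiness instead of a fixed delay. A hedged sketch of what that could look like on AWS using aws-sdk-go's built-in waiter; the snapshot IDs would have to come from the backup under test, and none of this is Velero's actual code:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// waitSnapshotsReady blocks until the given EBS snapshots report "completed",
// returning as soon as they are ready rather than always sleeping 5 minutes.
func waitSnapshotsReady(region string, snapshotIDs []string) error {
	sess, err := session.NewSession(aws.NewConfig().WithRegion(region))
	if err != nil {
		return fmt.Errorf("create AWS session: %w", err)
	}
	return ec2.New(sess).WaitUntilSnapshotCompleted(&ec2.DescribeSnapshotsInput{
		SnapshotIds: aws.StringSlice(snapshotIDs),
	})
}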

File 3 of 5

@@ -196,7 +196,7 @@ func (t *TestCase) Restore() error {
	// the snapshots of AWS may still be in pending status when doing the restore, wait for a while
	// to avoid this https://github.com/vmware-tanzu/velero/issues/1799
	// TODO remove this after https://github.com/vmware-tanzu/velero/issues/3533 is fixed
-	if t.UseVolumeSnapshots {
+	if t.UseVolumeSnapshots && veleroCfg.CloudProvider != "vsphere" {
		fmt.Println("Waiting 5 minutes to make sure the snapshots are ready...")
		time.Sleep(5 * time.Minute)
	}

File 4 of 5

@@ -69,7 +69,13 @@ func GetNamespace(ctx context.Context, client TestClient, namespace string) (*co
func DeleteNamespace(ctx context.Context, client TestClient, namespace string, wait bool) error {
	tenMinuteTimeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*10)
	defer ctxCancel()
-	if err := client.ClientGo.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{}); err != nil {
+	var zero int64 = 0
+	policy := metav1.DeletePropagationForeground
+	if err := client.ClientGo.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{
+		GracePeriodSeconds: &zero,
+		PropagationPolicy:  &policy,
+	}); err != nil {
		return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))
	}
	if !wait {
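
With a zero grace period and foreground propagation, the Delete call itself still returns quickly; when wait is true, the caller relies on the polling that follows (truncated here) to block until the namespace is actually gone. A minimal sketch of such a wait against a plain client-go interface; the helper name and shape are illustrative, not the repo's exact code:

package main

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitNamespaceGone polls until the namespace can no longer be fetched,
// i.e. it has finished Terminating and been fully removed.
func waitNamespaceGone(ctx context.Context, client kubernetes.Interface, namespace string) error {
	return wait.PollImmediateUntilWithContext(ctx, 5*time.Second, func(ctx context.Context) (bool, error) {
		_, err := client.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // namespace is gone
		}
		// still present (likely Terminating); any other error aborts the poll
		return false, err
	})
}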

File 5 of 5

@@ -179,7 +179,7 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc
		RunDebug(context.Background(), veleroCLI, veleroNamespace, "", restoreName)
		return errors.Wrapf(err, "Restore %s failed from backup %s", restoreName, backupName)
	}
-	if !useVolumeSnapshots {
+	if !useVolumeSnapshots && providerName != "vsphere" {
		pvrs, err := GetPVR(oneHourTimeout, veleroCfg.VeleroNamespace, kibishiiNamespace)
		if err != nil {
			return errors.Wrapf(err, "failed to get PVR for namespace %s", kibishiiNamespace)
@@ -323,7 +323,7 @@ func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, one
	if err := waitForKibishiiPods(oneHourTimeout, client, kibishiiNamespace); err != nil {
		return errors.Wrapf(err, "Failed to wait for ready status of kibishii pods in %s", kibishiiNamespace)
	}
-	time.Sleep(60 * time.Second)
	// TODO - check that namespace exists
	fmt.Printf("running kibishii verify\n")
	if err := verifyData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil {