Change the CreateFileToPod function's OS parameter to the E2E pass-in value.
Some checks failed
Run the E2E test on kind / build (push) Failing after 9s
Run the E2E test on kind / setup-test-matrix (push) Successful in 2s
Run the E2E test on kind / run-e2e-test (push) Has been skipped

Fix GetResourceWithLabel's bug: labels were not applied.
Add workerOS for deployment and pod creation.
Add OS label for node selection.
Enlarge the context timeout to 10 minutes. 5 min is not enough for Windows.
Enlarge the Kibishii test context to 15 minutes for Windows.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
This commit is contained in:
Xun Jiang
2025-08-18 10:45:23 +08:00
parent 5d9ea761d4
commit 2178d36d14
22 changed files with 530 additions and 129 deletions

View File

@@ -227,9 +227,10 @@ func TestGetResourceMatchedAction(t *testing.T) {
},
}
testCases := []struct {
name string
volume *structuredVolume
expectedAction *Action
name string
volume *structuredVolume
expectedAction *Action
resourcePolicies *ResourcePolicies
}{
{
name: "match policy",
@@ -299,12 +300,36 @@ func TestGetResourceMatchedAction(t *testing.T) {
},
expectedAction: nil,
},
{
name: "nil condition always match the action",
volume: &structuredVolume{
capacity: *resource.NewQuantity(5<<30, resource.BinarySI),
storageClass: "some-class",
pvcLabels: map[string]string{
"environment": "staging",
},
},
resourcePolicies: &ResourcePolicies{
Version: "v1",
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "skip"},
Conditions: map[string]any{},
},
},
},
expectedAction: &Action{Type: "skip"},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
policies := &Policies{}
err := policies.BuildPolicy(resPolicies)
currentResourcePolicy := resPolicies
if tc.resourcePolicies != nil {
currentResourcePolicy = tc.resourcePolicies
}
err := policies.BuildPolicy(currentResourcePolicy)
if err != nil {
t.Errorf("Failed to build policy with error %v", err)
}

View File

@@ -109,7 +109,7 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
DeleteStorageClass(context.Background(), *veleroCfg.ClientToInstallVelero, KibishiiStorageClassName)
})
if InstallVelero {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*10)
defer ctxCancel()
err = VeleroUninstall(ctx, veleroCfg)
Expect(err).To(Succeed())

View File

@@ -29,6 +29,7 @@ import (
. "github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
)
@@ -77,11 +78,6 @@ func (v *BackupVolumeInfo) Init() error {
}
func (v *BackupVolumeInfo) Start() error {
if v.VeleroCfg.CloudProvider == Vsphere && (!strings.Contains(v.CaseBaseName, "fs-upload") && !strings.Contains(v.CaseBaseName, "skipped")) {
fmt.Printf("Skip snapshot case %s for vsphere environment.\n", v.CaseBaseName)
Skip("Skip snapshot case due to vsphere environment doesn't cover the CSI test, and it doesn't have a Velero native snapshot plugin.")
}
if strings.Contains(v.VeleroCfg.Features, FeatureCSI) {
if strings.Contains(v.CaseBaseName, "native-snapshot") {
fmt.Printf("Skip native snapshot case %s when the CSI feature is enabled.\n", v.CaseBaseName)
@@ -100,6 +96,12 @@ func (v *BackupVolumeInfo) CreateResources() error {
labels := map[string]string{
"volume-info": "true",
}
if v.VeleroCfg.WorkerOS == common.WorkerOSWindows {
labels["pod-security.kubernetes.io/enforce"] = "privileged"
labels["pod-security.kubernetes.io/enforce-version"] = "latest"
}
for nsNum := 0; nsNum < v.NamespacesTotal; nsNum++ {
fmt.Printf("Creating namespaces ...\n")
createNSName := v.CaseBaseName
@@ -121,7 +123,14 @@ func (v *BackupVolumeInfo) CreateResources() error {
volumeName := fmt.Sprintf("volume-info-pv-%d", i)
vols = append(vols, CreateVolumes(pvc.Name, []string{volumeName})...)
}
deployment := NewDeployment(v.CaseBaseName, createNSName, 1, labels, v.VeleroCfg.ImageRegistryProxy).WithVolume(vols).Result()
deployment := NewDeployment(
v.CaseBaseName,
createNSName,
1,
labels,
v.VeleroCfg.ImageRegistryProxy,
v.VeleroCfg.WorkerOS,
).WithVolume(vols).Result()
deployment, err := CreateDeployment(v.Client.ClientGo, createNSName, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", createNSName))
@@ -139,14 +148,13 @@ func (v *BackupVolumeInfo) CreateResources() error {
// So populate data only to some of pods, leave other pods empty to verify empty PV datamover
if i%2 == 0 {
Expect(CreateFileToPod(
v.Ctx,
createNSName,
pod.Name,
DefaultContainerName,
vols[i].Name,
fmt.Sprintf("file-%s", pod.Name),
CreateFileContent(createNSName, pod.Name, vols[i].Name),
WorkerOSLinux,
v.VeleroCfg.WorkerOS,
)).To(Succeed())
}
}

View File

@@ -10,6 +10,7 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
. "github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
. "github.com/vmware-tanzu/velero/test/util/velero"
)
@@ -64,19 +65,36 @@ func (p *PVCSelectedNodeChanging) Init() error {
func (p *PVCSelectedNodeChanging) CreateResources() error {
By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
Expect(CreateNamespace(p.Ctx, p.Client, p.namespace)).To(Succeed(),
labels := make(map[string]string)
if p.VeleroCfg.WorkerOS == common.WorkerOSWindows {
labels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
Expect(CreateNamespaceWithLabel(p.Ctx, p.Client, p.namespace, labels)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", p.namespace))
})
By(fmt.Sprintf("Create pod %s in namespace %s", p.podName, p.namespace), func() {
nodeNameList, err := GetWorkerNodes(p.Ctx)
nodeNameList, err := GetWorkerNodes(p.Ctx, p.VeleroCfg.WorkerOS)
Expect(err).To(Succeed())
for _, nodeName := range nodeNameList {
p.oldNodeName = nodeName
fmt.Printf("Create PVC on node %s\n", p.oldNodeName)
pvcAnn := map[string]string{p.ann: nodeName}
_, err := CreatePod(p.Client, p.namespace, p.podName, StorageClassName, p.pvcName, []string{p.volume},
pvcAnn, nil, p.VeleroCfg.ImageRegistryProxy)
_, err := CreatePod(
p.Client,
p.namespace,
p.podName,
StorageClassName,
p.pvcName,
[]string{p.volume},
pvcAnn,
nil,
p.VeleroCfg.ImageRegistryProxy,
p.VeleroCfg.WorkerOS,
)
Expect(err).To(Succeed())
err = WaitForPods(p.Ctx, p.Client, p.namespace, []string{p.podName})
Expect(err).To(Succeed())
@@ -85,8 +103,9 @@ func (p *PVCSelectedNodeChanging) CreateResources() error {
})
By("Prepare ConfigMap data", func() {
nodeNameList, err := GetWorkerNodes(p.Ctx)
nodeNameList, err := GetWorkerNodes(p.Ctx, p.VeleroCfg.WorkerOS)
Expect(err).To(Succeed())
// Expect Windows node or Linux node number are no less than 2.
Expect(len(nodeNameList)).To(BeNumerically(">=", 2))
for _, nodeName := range nodeNameList {
if nodeName != p.oldNodeName {

View File

@@ -10,6 +10,7 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
. "github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
. "github.com/vmware-tanzu/velero/test/util/velero"
)
@@ -73,7 +74,14 @@ func (s *StorageClasssChanging) CreateResources() error {
}
By(fmt.Sprintf("Create namespace %s", s.namespace), func() {
Expect(CreateNamespace(s.Ctx, s.Client, s.namespace)).To(Succeed(),
nsLabels := make(map[string]string)
if s.VeleroCfg.WorkerOS == common.WorkerOSWindows {
nsLabels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
Expect(CreateNamespaceWithLabel(s.Ctx, s.Client, s.namespace, nsLabels)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", s.namespace))
})
@@ -82,7 +90,14 @@ func (s *StorageClasssChanging) CreateResources() error {
Expect(err).To(Succeed())
vols := CreateVolumes(pvc.Name, []string{s.volume})
deployment := NewDeployment(s.CaseBaseName, s.namespace, 1, label, s.VeleroCfg.ImageRegistryProxy).WithVolume(vols).Result()
deployment := NewDeployment(
s.CaseBaseName,
s.namespace,
1,
label,
s.VeleroCfg.ImageRegistryProxy,
s.VeleroCfg.WorkerOS,
).WithVolume(vols).Result()
deployment, err = CreateDeployment(s.Client.ClientGo, s.namespace, deployment)
Expect(err).To(Succeed())
s.deploymentName = deployment.Name

View File

@@ -24,6 +24,7 @@ import (
. "github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
)
@@ -90,13 +91,30 @@ func (p *ParallelFilesDownload) Init() error {
func (p *ParallelFilesDownload) CreateResources() error {
By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
Expect(CreateNamespace(p.Ctx, p.Client, p.namespace)).To(Succeed(),
labels := make(map[string]string)
if p.VeleroCfg.WorkerOS == common.WorkerOSWindows {
labels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
Expect(CreateNamespaceWithLabel(p.Ctx, p.Client, p.namespace, labels)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", p.namespace))
})
By(fmt.Sprintf("Create pod %s in namespace %s", p.pod, p.namespace), func() {
_, err := CreatePod(p.Client, p.namespace, p.pod, StorageClassName, p.pvc, []string{p.volume},
nil, nil, p.VeleroCfg.ImageRegistryProxy)
_, err := CreatePod(
p.Client,
p.namespace,
p.pod,
StorageClassName,
p.pvc,
[]string{p.volume},
nil,
nil,
p.VeleroCfg.ImageRegistryProxy,
p.VeleroCfg.WorkerOS,
)
Expect(err).To(Succeed())
err = WaitForPods(p.Ctx, p.Client, p.namespace, []string{p.pod})
Expect(err).To(Succeed())

View File

@@ -24,6 +24,7 @@ import (
. "github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
)
@@ -81,13 +82,31 @@ func (p *ParallelFilesUpload) Init() error {
func (p *ParallelFilesUpload) CreateResources() error {
By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
Expect(CreateNamespace(p.Ctx, p.Client, p.namespace)).To(Succeed(),
labels := make(map[string]string)
if p.VeleroCfg.WorkerOS == common.WorkerOSWindows {
labels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
Expect(CreateNamespaceWithLabel(p.Ctx, p.Client, p.namespace, labels)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", p.namespace))
})
By(fmt.Sprintf("Create pod %s in namespace %s", p.pod, p.namespace), func() {
_, err := CreatePod(p.Client, p.namespace, p.pod, StorageClassName, p.pvc,
[]string{p.volume}, nil, nil, p.VeleroCfg.ImageRegistryProxy)
_, err := CreatePod(
p.Client,
p.namespace,
p.pod,
StorageClassName,
p.pvc,
[]string{p.volume},
nil,
nil,
p.VeleroCfg.ImageRegistryProxy,
p.VeleroCfg.WorkerOS,
)
Expect(err).To(Succeed())
err = WaitForPods(p.Ctx, p.Client, p.namespace, []string{p.pod})
Expect(err).To(Succeed())

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"strings"
"unicode"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -11,6 +12,7 @@ import (
. "github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
)
@@ -65,7 +67,14 @@ func (p *PVBackupFiltering) Init() error {
func (p *PVBackupFiltering) CreateResources() error {
for _, ns := range *p.NSIncluded {
By(fmt.Sprintf("Create namespaces %s for workload\n", ns), func() {
Expect(CreateNamespace(p.Ctx, p.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns))
labels := make(map[string]string)
if p.VeleroCfg.WorkerOS == common.WorkerOSWindows {
labels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
Expect(CreateNamespaceWithLabel(p.Ctx, p.Client, ns, labels)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns))
})
var pods []string
By(fmt.Sprintf("Deploy a few pods with several PVs in namespace %s", ns), func() {
@@ -87,8 +96,18 @@ func (p *PVBackupFiltering) CreateResources() error {
podName := fmt.Sprintf("pod-%d", i)
pods = append(pods, podName)
By(fmt.Sprintf("Create pod %s in namespace %s", podName, ns), func() {
pod, err := CreatePod(p.Client, ns, podName, StorageClassName, "",
volumes, nil, nil, p.VeleroCfg.ImageRegistryProxy)
pod, err := CreatePod(
p.Client,
ns,
podName,
StorageClassName,
"",
volumes,
nil,
nil,
p.VeleroCfg.ImageRegistryProxy,
p.VeleroCfg.WorkerOS,
)
Expect(err).To(Succeed())
ann := map[string]string{
p.annotation: volumesToAnnotation,
@@ -116,7 +135,6 @@ func (p *PVBackupFiltering) CreateResources() error {
for i, pod := range p.podsList[index] {
for j := range p.volumesList[i] {
Expect(CreateFileToPod(
p.Ctx,
ns,
pod,
pod,
@@ -207,16 +225,20 @@ func fileExist(
volume string,
workerOS string,
) error {
c, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME, workerOS)
c, _, err := ReadFileFromPodVolume(namespace, podName, podName, volume, FILE_NAME, workerOS)
if err != nil {
fmt.Printf("Fail to read file %s from volume %s of pod %s in %s: %s",
FILE_NAME, volume, podName, namespace, err.Error(),
)
return errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s ",
FILE_NAME, volume, podName, namespace))
}
c = strings.Replace(c, "\n", "", -1)
origin_content := strings.Replace(CreateFileContent(namespace, podName, volume), "\n", "", -1)
c = strings.TrimRightFunc(c, unicode.IsSpace)
origin_content := strings.TrimRightFunc(CreateFileContent(namespace, podName, volume), unicode.IsSpace)
if c == origin_content {
return nil
} else {
fmt.Printf("Content not match: \n origin: %s\n result: %s\n", origin_content, c)
return errors.New(fmt.Sprintf("UNEXPECTED: File %s does not exist in volume %s of pod %s in namespace %s.",
FILE_NAME, volume, podName, namespace))
}
@@ -228,7 +250,7 @@ func fileNotExist(
volume string,
workerOS string,
) error {
_, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME, workerOS)
_, _, err := ReadFileFromPodVolume(namespace, podName, podName, volume, FILE_NAME, workerOS)
if err != nil {
return nil
} else {

View File

@@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
)
@@ -63,12 +64,26 @@ func (f *FilteringCase) CreateResources() error {
for nsNum := 0; nsNum < f.NamespacesTotal; nsNum++ {
namespace := fmt.Sprintf("%s-%00000d", f.CaseBaseName, nsNum)
fmt.Printf("Creating resources in namespace ...%s\n", namespace)
if err := CreateNamespace(f.Ctx, f.Client, namespace); err != nil {
nsLabels := make(map[string]string)
if f.VeleroCfg.WorkerOS == common.WorkerOSWindows {
nsLabels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
if err := CreateNamespaceWithLabel(f.Ctx, f.Client, namespace, nsLabels); err != nil {
return errors.Wrapf(err, "Failed to create namespace %s", namespace)
}
//Create deployment
fmt.Printf("Creating deployment in namespaces ...%s\n", namespace)
deployment := NewDeployment(f.CaseBaseName, namespace, f.replica, f.labels, f.VeleroCfg.ImageRegistryProxy).Result()
deployment := NewDeployment(
f.CaseBaseName,
namespace,
f.replica,
f.labels,
f.VeleroCfg.ImageRegistryProxy,
f.VeleroCfg.WorkerOS,
).Result()
deployment, err := CreateDeployment(f.Client.ClientGo, namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))

View File

@@ -26,6 +26,7 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
)
@@ -83,12 +84,28 @@ func (e *ExcludeFromBackup) CreateResources() error {
velerov1api.ExcludeFromBackupLabel: "false",
}
fmt.Printf("Creating resources in namespace ...%s\n", namespace)
if err := CreateNamespace(e.Ctx, e.Client, namespace); err != nil {
nsLabels := make(map[string]string)
if e.VeleroCfg.WorkerOS == common.WorkerOSWindows {
nsLabels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
if err := CreateNamespaceWithLabel(e.Ctx, e.Client, namespace, nsLabels); err != nil {
return errors.Wrapf(err, "Failed to create namespace %s", namespace)
}
//Create deployment: to be included
fmt.Printf("Creating deployment in namespaces ...%s\n", namespace)
deployment := NewDeployment(e.CaseBaseName, namespace, e.replica, label2, e.VeleroCfg.ImageRegistryProxy).Result()
deployment := NewDeployment(
e.CaseBaseName,
namespace,
e.replica,
label2,
e.VeleroCfg.ImageRegistryProxy,
e.VeleroCfg.WorkerOS,
).Result()
deployment, err := CreateDeployment(e.Client.ClientGo, namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))

View File

@@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
)
@@ -82,13 +83,26 @@ func (l *LabelSelector) CreateResources() error {
"resourcefiltering": "false",
}
}
if l.VeleroCfg.WorkerOS == common.WorkerOSWindows {
labels["pod-security.kubernetes.io/enforce"] = "privileged"
labels["pod-security.kubernetes.io/enforce-version"] = "latest"
}
if err := CreateNamespaceWithLabel(l.Ctx, l.Client, namespace, labels); err != nil {
return errors.Wrapf(err, "Failed to create namespace %s", namespace)
}
//Create deployment
fmt.Printf("Creating deployment in namespaces ...%s\n", namespace)
deployment := NewDeployment(l.CaseBaseName, namespace, l.replica, labels, l.VeleroCfg.ImageRegistryProxy).Result()
deployment := NewDeployment(
l.CaseBaseName,
namespace,
l.replica,
labels,
l.VeleroCfg.ImageRegistryProxy,
l.VeleroCfg.WorkerOS,
).Result()
deployment, err := CreateDeployment(l.Client.ClientGo, namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))

View File

@@ -25,6 +25,7 @@ import (
"github.com/pkg/errors"
. "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
)
@@ -105,8 +106,16 @@ func (r *ResourceModifiersCase) CreateResources() error {
for nsNum := 0; nsNum < r.NamespacesTotal; nsNum++ {
namespace := fmt.Sprintf("%s-%00000d", r.CaseBaseName, nsNum)
nsLabels := make(map[string]string)
if r.VeleroCfg.WorkerOS == common.WorkerOSWindows {
nsLabels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
By(fmt.Sprintf("Create namespaces %s for workload\n", namespace), func() {
Expect(CreateNamespace(r.Ctx, r.Client, namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", namespace))
Expect(CreateNamespaceWithLabel(r.Ctx, r.Client, namespace, nsLabels)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", namespace))
})
By(fmt.Sprintf("Creating deployment in namespaces ...%s\n", namespace), func() {
@@ -145,7 +154,14 @@ func (r *ResourceModifiersCase) Clean() error {
}
func (r *ResourceModifiersCase) createDeployment(namespace string) error {
deployment := NewDeployment(r.CaseBaseName, namespace, 1, map[string]string{"app": "test"}, r.VeleroCfg.ImageRegistryProxy).Result()
deployment := NewDeployment(
r.CaseBaseName,
namespace,
1,
map[string]string{"app": "test"},
r.VeleroCfg.ImageRegistryProxy,
r.VeleroCfg.WorkerOS,
).Result()
deployment, err := CreateDeployment(r.Client.ClientGo, namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create deloyment %s the namespace %q", deployment.Name, namespace))

View File

@@ -19,6 +19,7 @@ package resourcepolicies
import (
"fmt"
"strings"
"unicode"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -111,8 +112,17 @@ func (r *ResourcePoliciesCase) CreateResources() error {
for nsNum := 0; nsNum < r.NamespacesTotal; nsNum++ {
namespace := fmt.Sprintf("%s-%00000d", r.CaseBaseName, nsNum)
nsLabels := make(map[string]string)
if r.VeleroCfg.WorkerOS == common.WorkerOSWindows {
nsLabels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
By(fmt.Sprintf("Create namespaces %s for workload\n", namespace), func() {
Expect(CreateNamespace(r.Ctx, r.Client, namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", namespace))
Expect(CreateNamespaceWithLabel(r.Ctx, r.Client, namespace, nsLabels)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", namespace))
})
volName := fmt.Sprintf("vol-%s-%00000d", r.CaseBaseName, nsNum)
@@ -153,7 +163,6 @@ func (r *ResourcePoliciesCase) Verify() error {
continue
}
content, _, err := ReadFileFromPodVolume(
r.Ctx,
ns,
pod.Name,
"container-busybox",
@@ -167,11 +176,12 @@ func (r *ResourcePoliciesCase) Verify() error {
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Fail to read file %s from volume %s of pod %s in namespace %s",
FileName, vol.Name, pod.Name, ns))
content = strings.Replace(content, "\n", "", -1)
originContent := strings.Replace(fmt.Sprintf("ns-%s pod-%s volume-%s", ns, pod.Name, vol.Name), "\n", "", -1)
content = strings.TrimRightFunc(content, unicode.IsSpace)
originContent := fmt.Sprintf("ns-%s pod-%s volume-%s", ns, pod.Name, vol.Name)
Expect(content).To(Equal(originContent), fmt.Sprintf("File %s does not exist in volume %s of pod %s in namespace %s",
FileName, vol.Name, pod.Name, ns))
Expect(content).To(Equal(originContent),
fmt.Sprintf("Content not match.\n origin: %s\n result: %s\n", originContent, content),
)
}
}
}
@@ -218,7 +228,14 @@ func (r *ResourcePoliciesCase) createPVC(index int, namespace string, volList []
}
func (r *ResourcePoliciesCase) createDeploymentWithVolume(namespace string, volList []*corev1api.Volume) error {
deployment := NewDeployment(r.CaseBaseName, namespace, 1, map[string]string{"resource-policies": "resource-policies"}, r.VeleroCfg.ImageRegistryProxy).WithVolume(volList).Result()
deployment := NewDeployment(
r.CaseBaseName,
namespace,
1,
map[string]string{"resource-policies": "resource-policies"},
r.VeleroCfg.ImageRegistryProxy,
r.VeleroCfg.WorkerOS,
).WithVolume(volList).Result()
deployment, err := CreateDeployment(r.Client.ClientGo, namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create deloyment %s the namespace %q", deployment.Name, namespace))
@@ -241,14 +258,13 @@ func (r *ResourcePoliciesCase) writeDataIntoPods(namespace, volName string) erro
continue
}
err := CreateFileToPod(
r.Ctx,
namespace,
pod.Name,
"container-busybox",
vol.Name,
FileName,
fmt.Sprintf("ns-%s pod-%s volume-%s", namespace, pod.Name, vol.Name),
common.WorkerOSLinux,
r.VeleroCfg.WorkerOS,
)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create file into pod %s in namespace: %q", pod.Name, namespace))

View File

@@ -14,6 +14,7 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/test"
framework "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
veleroutil "github.com/vmware-tanzu/velero/test/util/velero"
)
@@ -64,11 +65,19 @@ func (s *InProgressCase) Init() error {
func (s *InProgressCase) CreateResources() error {
By(fmt.Sprintf("Create namespace %s", s.namespace), func() {
labels := make(map[string]string)
if s.VeleroCfg.WorkerOS == common.WorkerOSWindows {
labels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
Expect(
k8sutil.CreateNamespace(
k8sutil.CreateNamespaceWithLabel(
s.Ctx,
s.Client,
s.namespace,
labels,
),
).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", s.namespace))
@@ -85,6 +94,7 @@ func (s *InProgressCase) CreateResources() error {
nil,
s.podAnn,
s.VeleroCfg.ImageRegistryProxy,
s.VeleroCfg.WorkerOS,
)
Expect(err).To(Succeed())

View File

@@ -32,6 +32,7 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
framework "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
veleroutil "github.com/vmware-tanzu/velero/test/util/velero"
)
@@ -92,14 +93,29 @@ func (o *OrderedResources) CreateResources() error {
"orderedresources": "true",
}
fmt.Printf("Creating resources in %s namespace ...\n", o.Namespace)
if err := k8sutil.CreateNamespace(o.Ctx, o.Client, o.Namespace); err != nil {
nsLabels := make(map[string]string)
if o.VeleroCfg.WorkerOS == common.WorkerOSWindows {
nsLabels = map[string]string{
"pod-security.kubernetes.io/enforce": "privileged",
"pod-security.kubernetes.io/enforce-version": "latest",
}
}
if err := k8sutil.CreateNamespaceWithLabel(o.Ctx, o.Client, o.Namespace, nsLabels); err != nil {
return errors.Wrapf(err, "failed to create namespace %s", o.Namespace)
}
//Create deployment
deploymentName := fmt.Sprintf("deploy-%s", o.CaseBaseName)
fmt.Printf("Creating deployment %s in %s namespaces ...\n", deploymentName, o.Namespace)
deployment := k8sutil.NewDeployment(deploymentName, o.Namespace, 1, label, o.VeleroCfg.ImageRegistryProxy).Result()
deployment := k8sutil.NewDeployment(
deploymentName,
o.Namespace,
1,
label,
o.VeleroCfg.ImageRegistryProxy,
o.VeleroCfg.WorkerOS,
).Result()
_, err := k8sutil.CreateDeployment(o.Client.ClientGo, o.Namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create namespace %q with err %v", o.Namespace, err))

View File

@@ -72,10 +72,13 @@ func GetListByCmdPipes(ctx context.Context, cmdLines []*OsCommandLine) ([]string
func GetResourceWithLabel(ctx context.Context, namespace, resourceName string, labels map[string]string) ([]string, error) {
labelStr := ""
parts := make([]string, 0, len(labels))
for key, value := range labels {
strings.Join([]string{labelStr, key + "=" + value}, ",")
parts = append(parts, key+"="+value)
}
labelStr = strings.Join(parts, ",")
cmds := []*OsCommandLine{}
cmd := &OsCommandLine{

View File

@@ -323,7 +323,6 @@ func WriteRandomDataToFileInPod(ctx context.Context, namespace, podName, contain
}
func CreateFileToPod(
ctx context.Context,
namespace string,
podName string,
containerName string,
@@ -345,8 +344,9 @@ func CreateFileToPod(
arg := []string{"exec", "-n", namespace, "-c", containerName, podName,
"--", shell, shellParameter, fmt.Sprintf("echo ns-%s pod-%s volume-%s > %s", namespace, podName, volume, filePath)}
cmd := exec.CommandContext(ctx, "kubectl", arg...)
cmd := exec.CommandContext(context.Background(), "kubectl", arg...)
fmt.Printf("Kubectl exec cmd =%v\n", cmd)
return cmd.Run()
}
@@ -359,9 +359,9 @@ func FileExistInPV(
filename string,
workerOS string,
) (bool, error) {
stdout, stderr, err := ReadFileFromPodVolume(ctx, namespace, podName, containerName, volume, filename, workerOS)
stdout, stderr, err := ReadFileFromPodVolume(namespace, podName, containerName, volume, filename, workerOS)
output := fmt.Sprintf("%s:%s", stdout, stderr)
output := fmt.Sprintf("%s:%s:%s", stdout, stderr, err)
if workerOS == common.WorkerOSWindows {
if strings.Contains(output, "The system cannot find the file specified") {
@@ -380,8 +380,8 @@ func FileExistInPV(
filename, volume, podName, namespace))
}
}
func ReadFileFromPodVolume(
ctx context.Context,
namespace string,
podName string,
containerName string,
@@ -391,16 +391,20 @@ func ReadFileFromPodVolume(
) (string, string, error) {
arg := []string{"exec", "-n", namespace, "-c", containerName, podName,
"--", "cat", fmt.Sprintf("/%s/%s", volume, filename)}
if workerOS == common.WorkerOSWindows {
arg = []string{"exec", "-n", namespace, "-c", containerName, podName,
"--", "cmd", "/c", fmt.Sprintf("type C:\\%s\\%s", volume, filename)}
"--", "cmd", "/c", "type", fmt.Sprintf("C:\\%s\\%s", volume, filename)}
}
cmd := exec.CommandContext(ctx, "kubectl", arg...)
fmt.Printf("Kubectl exec cmd =%v\n", cmd)
cmd := exec.CommandContext(context.Background(), "kubectl", arg...)
fmt.Printf("kubectl exec cmd =%v\n", cmd)
stdout, stderr, err := veleroexec.RunCommand(cmd)
fmt.Printf("stdout: %s\n", stdout)
fmt.Printf("stderr: %s\n", stderr)
fmt.Printf("err: %v\n", err)
return stdout, stderr, err
}

View File

@@ -29,6 +29,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
common "github.com/vmware-tanzu/velero/test/util/common"
)
const (
@@ -37,7 +38,8 @@ const (
PollInterval = 2 * time.Second
PollTimeout = 15 * time.Minute
DefaultContainerName = "container-busybox"
TestImage = "busybox:1.37.0"
LinuxTestImage = "busybox:1.37.0"
WindowTestImage = "mcr.microsoft.com/windows/nanoserver:ltsc2022"
)
// DeploymentBuilder builds Deployment objects.
@@ -50,30 +52,100 @@ func (d *DeploymentBuilder) Result() *appsv1api.Deployment {
}
// newDeployment returns a RollingUpdate Deployment with a fake container image
func NewDeployment(name, ns string, replicas int32, labels map[string]string, imageRegistryProxy string) *DeploymentBuilder {
imageAddress := TestImage
if imageRegistryProxy != "" {
imageAddress = path.Join(imageRegistryProxy, TestImage)
func NewDeployment(
name, ns string,
replicas int32,
labels map[string]string,
imageRegistryProxy string,
workerOS string,
) *DeploymentBuilder {
// Default to Linux environment
imageAddress := LinuxTestImage
command := []string{"sleep", "infinity"}
args := make([]string, 0)
var affinity corev1api.Affinity
var tolerations []corev1api.Toleration
if workerOS == common.WorkerOSLinux && imageRegistryProxy != "" {
imageAddress = path.Join(imageRegistryProxy, LinuxTestImage)
}
containerSecurityContext := &corev1api.SecurityContext{
AllowPrivilegeEscalation: boolptr.False(),
Capabilities: &corev1api.Capabilities{
Drop: []corev1api.Capability{"ALL"},
},
RunAsNonRoot: boolptr.True(),
RunAsUser: func(i int64) *int64 { return &i }(65534),
RunAsGroup: func(i int64) *int64 { return &i }(65534),
SeccompProfile: &corev1api.SeccompProfile{
Type: corev1api.SeccompProfileTypeRuntimeDefault,
},
}
podSecurityContext := &corev1api.PodSecurityContext{
FSGroup: func(i int64) *int64 { return &i }(65534),
FSGroupChangePolicy: func(policy corev1api.PodFSGroupChangePolicy) *corev1api.PodFSGroupChangePolicy { return &policy }(corev1api.FSGroupChangeAlways),
}
// Settings for Windows
if workerOS == common.WorkerOSWindows {
imageAddress = WindowTestImage
command = []string{"cmd"}
args = []string{"/c", "ping -t localhost > NUL"}
affinity = corev1api.Affinity{
NodeAffinity: &corev1api.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
NodeSelectorTerms: []corev1api.NodeSelectorTerm{
{
MatchExpressions: []corev1api.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Values: []string{common.WorkerOSWindows},
Operator: corev1api.NodeSelectorOpIn,
},
},
},
},
},
},
}
tolerations = []corev1api.Toleration{
{
Effect: corev1api.TaintEffectNoSchedule,
Key: "os",
Value: common.WorkerOSWindows,
},
{
Effect: corev1api.TaintEffectNoExecute,
Key: "os",
Value: common.WorkerOSWindows,
},
}
whetherToRunAsRoot := false
containerSecurityContext = &corev1api.SecurityContext{
RunAsNonRoot: &whetherToRunAsRoot,
}
containerUserName := "ContainerAdministrator"
podSecurityContext = &corev1api.PodSecurityContext{
WindowsOptions: &corev1api.WindowsSecurityContextOptions{
RunAsUserName: &containerUserName,
},
}
}
containers := []corev1api.Container{
{
Name: DefaultContainerName,
Image: imageAddress,
Command: []string{"sleep", "1000000"},
Command: command,
Args: args,
// Make pod obeys the restricted pod security standards.
SecurityContext: &corev1api.SecurityContext{
AllowPrivilegeEscalation: boolptr.False(),
Capabilities: &corev1api.Capabilities{
Drop: []corev1api.Capability{"ALL"},
},
RunAsNonRoot: boolptr.True(),
RunAsUser: func(i int64) *int64 { return &i }(65534),
RunAsGroup: func(i int64) *int64 { return &i }(65534),
SeccompProfile: &corev1api.SeccompProfile{
Type: corev1api.SeccompProfileTypeRuntimeDefault,
},
},
SecurityContext: containerSecurityContext,
},
}
@@ -100,11 +172,10 @@ func NewDeployment(name, ns string, replicas int32, labels map[string]string, im
Labels: labels,
},
Spec: corev1api.PodSpec{
SecurityContext: &corev1api.PodSecurityContext{
FSGroup: func(i int64) *int64 { return &i }(65534),
FSGroupChangePolicy: func(policy corev1api.PodFSGroupChangePolicy) *corev1api.PodFSGroupChangePolicy { return &policy }(corev1api.FSGroupChangeAlways),
},
Containers: containers,
SecurityContext: podSecurityContext,
Containers: containers,
Affinity: &affinity,
Tolerations: tolerations,
},
},
},
@@ -127,10 +198,6 @@ func (d *DeploymentBuilder) WithVolume(volumes []*corev1api.Volume) *DeploymentB
return d
}
// CreateDeploy creates the given Deployment in namespace ns, discarding the
// created object and reporting only whether the API call succeeded.
func CreateDeploy(c clientset.Interface, ns string, deployment *appsv1api.Deployment) error {
	// Delegate to CreateDeployment; callers of this helper only need the error.
	_, err := CreateDeployment(c, ns, deployment)
	return err
}
// CreateDeployment creates the given Deployment in namespace ns and returns
// the server-side object produced by the API call along with any error.
func CreateDeployment(c clientset.Interface, ns string, deployment *appsv1api.Deployment) (*appsv1api.Deployment, error) {
	created, err := c.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{})
	return created, err
}

View File

@@ -53,8 +53,12 @@ func CreateNamespaceWithLabel(ctx context.Context, client TestClient, namespace
ns := builder.ForNamespace(namespace).Result()
ns.Labels = label
// Add label to avoid PSA check.
ns.Labels["pod-security.kubernetes.io/enforce"] = "baseline"
ns.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
if _, ok := ns.Labels["pod-security.kubernetes.io/enforce"]; !ok {
ns.Labels["pod-security.kubernetes.io/enforce"] = "baseline"
}
if _, ok := ns.Labels["pod-security.kubernetes.io/enforce-version"]; !ok {
ns.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
}
_, err := client.ClientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
return nil

View File

@@ -11,8 +11,13 @@ import (
common "github.com/vmware-tanzu/velero/test/util/common"
)
func GetWorkerNodes(ctx context.Context) ([]string, error) {
getCMD := exec.CommandContext(ctx, "kubectl", "get", "node", "-o", "json")
func GetWorkerNodes(ctx context.Context, workerOS string) ([]string, error) {
getCMD := exec.CommandContext(
ctx,
"kubectl", "get", "node", "-l",
fmt.Sprintf("kubernetes.io/os=%s", workerOS),
"-o", "json",
)
fmt.Printf("kubectl get node cmd =%v\n", getCMD)
jsonBuf, err := common.CMDExecWithOutput(getCMD)

View File

@@ -18,14 +18,17 @@ package k8s
import (
"context"
"encoding/json"
"fmt"
"path"
"github.com/pkg/errors"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
common "github.com/vmware-tanzu/velero/test/util/common"
)
func CreatePod(
@@ -34,14 +37,88 @@ func CreatePod(
volumeNameList []string,
pvcAnn, ann map[string]string,
imageRegistryProxy string,
workerOS string,
) (*corev1api.Pod, error) {
if pvcName != "" && len(volumeNameList) != 1 {
return nil, errors.New("Volume name list should contain only 1 since PVC name is not empty")
}
imageAddress := TestImage
if imageRegistryProxy != "" {
imageAddress = path.Join(imageRegistryProxy, TestImage)
// Default to Linux environment
imageAddress := LinuxTestImage
command := []string{"sleep", "infinity"}
args := make([]string, 0)
var affinity corev1api.Affinity
var tolerations []corev1api.Toleration
if workerOS == common.WorkerOSLinux && imageRegistryProxy != "" {
imageAddress = path.Join(imageRegistryProxy, LinuxTestImage)
}
containerSecurityContext := &corev1api.SecurityContext{
AllowPrivilegeEscalation: boolptr.False(),
Capabilities: &corev1api.Capabilities{
Drop: []corev1api.Capability{"ALL"},
},
RunAsNonRoot: boolptr.True(),
RunAsUser: func(i int64) *int64 { return &i }(65534),
RunAsGroup: func(i int64) *int64 { return &i }(65534),
SeccompProfile: &corev1api.SeccompProfile{
Type: corev1api.SeccompProfileTypeRuntimeDefault,
},
}
podSecurityContext := &corev1api.PodSecurityContext{
FSGroup: func(i int64) *int64 { return &i }(65534),
FSGroupChangePolicy: func(policy corev1api.PodFSGroupChangePolicy) *corev1api.PodFSGroupChangePolicy { return &policy }(corev1api.FSGroupChangeAlways),
}
// Settings for Windows
if workerOS == common.WorkerOSWindows {
imageAddress = WindowTestImage
command = []string{"cmd"}
args = []string{"/c", "ping -t localhost > NUL"}
affinity = corev1api.Affinity{
NodeAffinity: &corev1api.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
NodeSelectorTerms: []corev1api.NodeSelectorTerm{
{
MatchExpressions: []corev1api.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Values: []string{common.WorkerOSWindows},
Operator: corev1api.NodeSelectorOpIn,
},
},
},
},
},
},
}
tolerations = []corev1api.Toleration{
{
Effect: corev1api.TaintEffectNoSchedule,
Key: "os",
Value: common.WorkerOSWindows,
},
{
Effect: corev1api.TaintEffectNoExecute,
Key: "os",
Value: common.WorkerOSWindows,
},
}
whetherToRunAsRoot := false
containerSecurityContext = &corev1api.SecurityContext{
RunAsNonRoot: &whetherToRunAsRoot,
}
containerUserName := "ContainerAdministrator"
podSecurityContext = &corev1api.PodSecurityContext{
WindowsOptions: &corev1api.WindowsSecurityContextOptions{
RunAsUserName: &containerUserName,
},
}
}
volumes := []corev1api.Volume{}
@@ -82,32 +159,20 @@ func CreatePod(
Annotations: ann,
},
Spec: corev1api.PodSpec{
SecurityContext: &corev1api.PodSecurityContext{
FSGroup: func(i int64) *int64 { return &i }(65534),
FSGroupChangePolicy: func(policy corev1api.PodFSGroupChangePolicy) *corev1api.PodFSGroupChangePolicy { return &policy }(corev1api.FSGroupChangeAlways),
},
SecurityContext: podSecurityContext,
Containers: []corev1api.Container{
{
Name: name,
Image: imageAddress,
Command: []string{"sleep", "3600"},
VolumeMounts: vmList,
// Make pod obeys the restricted pod security standards.
SecurityContext: &corev1api.SecurityContext{
AllowPrivilegeEscalation: boolptr.False(),
Capabilities: &corev1api.Capabilities{
Drop: []corev1api.Capability{"ALL"},
},
RunAsNonRoot: boolptr.True(),
RunAsUser: func(i int64) *int64 { return &i }(65534),
RunAsGroup: func(i int64) *int64 { return &i }(65534),
SeccompProfile: &corev1api.SeccompProfile{
Type: corev1api.SeccompProfileTypeRuntimeDefault,
},
},
Name: name,
Image: imageAddress,
Command: command,
Args: args,
VolumeMounts: vmList,
SecurityContext: containerSecurityContext,
},
},
Volumes: volumes,
Volumes: volumes,
Affinity: &affinity,
Tolerations: tolerations,
},
}
@@ -134,7 +199,25 @@ func AddAnnotationToPod(ctx context.Context, client TestClient, namespace, podNa
newPod.Annotations = newAnn
fmt.Println(newPod.Annotations)
return client.ClientGo.CoreV1().Pods(namespace).Update(ctx, newPod, metav1.UpdateOptions{})
// Strategic merge patch to add/update label
patch := map[string]any{
"metadata": map[string]any{
"annotations": newAnn,
},
}
patchBytes, err := json.Marshal(patch)
if err != nil {
fmt.Println("fail to marshal patch for pod: ", err.Error())
return nil, err
}
return client.ClientGo.CoreV1().Pods(namespace).Patch(
ctx,
newPod.Name,
types.StrategicMergePatchType,
patchBytes,
metav1.PatchOptions{},
)
}
func ListPods(ctx context.Context, client TestClient, namespace string) (*corev1api.PodList, error) {

View File

@@ -83,7 +83,7 @@ func RunKibishiiTests(
) error {
pvCount := len(KibishiiPVCNameList)
client := *veleroCfg.ClientToInstallVelero
timeOutContext, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
timeOutContext, ctxCancel := context.WithTimeout(context.Background(), time.Minute*15)
defer ctxCancel()
veleroCLI := veleroCfg.VeleroCLI
providerName := veleroCfg.CloudProvider
@@ -208,11 +208,10 @@ func RunKibishiiTests(
fmt.Printf("Re-populate volume %s\n", time.Now().Format("2006-01-02 15:04:05"))
for _, pod := range KibishiiPodNameList {
// To ensure Kibishii verification result is accurate
ClearKibishiiData(timeOutContext, kibishiiNamespace, pod, "kibishii", "data")
ClearKibishiiData(kibishiiNamespace, pod, "kibishii", "data", veleroCfg.WorkerOS)
CreateFileContent := fileBaseContent + pod
err := CreateFileToPod(
timeOutContext,
kibishiiNamespace,
pod,
"kibishii",
@@ -789,6 +788,13 @@ func KibishiiVerifyAfterRestore(
if err := waitForKibishiiPods(oneHourTimeout, client, kibishiiNamespace); err != nil {
return errors.Wrapf(err, "Failed to wait for ready status of kibishii pods in %s", kibishiiNamespace)
}
// TODO - check that namespace exists
fmt.Printf("running kibishii verify\n")
if err := verifyData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil {
return errors.Wrap(err, "Failed to verify data generated by kibishii")
}
if incrementalFileName != "" {
for _, pod := range KibishiiPodNameList {
exist, err := FileExistInPV(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data", incrementalFileName, workerOS)
@@ -801,19 +807,18 @@ func KibishiiVerifyAfterRestore(
}
}
}
// TODO - check that namespace exists
fmt.Printf("running kibishii verify\n")
if err := verifyData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil {
return errors.Wrap(err, "Failed to verify data generated by kibishii")
}
return nil
}
func ClearKibishiiData(ctx context.Context, namespace, podName, containerName, dir string) error {
func ClearKibishiiData(namespace, podName, containerName, dir, workerOS string) error {
arg := []string{"exec", "-n", namespace, "-c", containerName, podName,
"--", "/bin/sh", "-c", "rm -rf /" + dir + "/*"}
cmd := exec.CommandContext(ctx, "kubectl", arg...)
if workerOS == common.WorkerOSWindows {
arg = []string{"exec", "-n", namespace, "-c", containerName, podName,
"--", "cmd", "/c", fmt.Sprintf("del /Q C:\\%s\\*", dir)}
}
cmd := exec.CommandContext(context.Background(), "kubectl", arg...)
fmt.Printf("Kubectl exec cmd =%v\n", cmd)
return cmd.Run()
}