From 81c5b6692d13d000b7d78068fd711bc2d9f66cdd Mon Sep 17 00:00:00 2001 From: Lyndon-Li Date: Tue, 9 Sep 2025 14:00:59 +0800 Subject: [PATCH 1/6] backupPVC to different node Signed-off-by: Lyndon-Li --- changelogs/unreleased/9233-Lyndon-Li | 1 + pkg/controller/data_upload_controller.go | 9 ++ pkg/exposer/csi_snapshot.go | 30 +++++ pkg/exposer/csi_snapshot_priority_test.go | 2 + pkg/exposer/csi_snapshot_test.go | 136 ++++++++++++++++++++++ pkg/util/kube/pvc_pv.go | 16 +++ pkg/util/third_party.go | 4 + 7 files changed, 198 insertions(+) create mode 100644 changelogs/unreleased/9233-Lyndon-Li diff --git a/changelogs/unreleased/9233-Lyndon-Li b/changelogs/unreleased/9233-Lyndon-Li new file mode 100644 index 000000000..492765e63 --- /dev/null +++ b/changelogs/unreleased/9233-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #9229, add intolerateSourceNode backupPVC option \ No newline at end of file diff --git a/pkg/controller/data_upload_controller.go b/pkg/controller/data_upload_controller.go index d0467a843..46704e5b1 100644 --- a/pkg/controller/data_upload_controller.go +++ b/pkg/controller/data_upload_controller.go @@ -916,6 +916,13 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload return nil, errors.Wrapf(err, "failed to get PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC) } + pv := &corev1api.PersistentVolume{} + if err := r.client.Get(context.Background(), types.NamespacedName{ + Name: pvc.Spec.VolumeName, + }, pv); err != nil { + return nil, errors.Wrapf(err, "failed to get source PV %s", pvc.Spec.VolumeName) + } + nodeOS := kube.GetPVCAttachingNodeOS(pvc, r.kubeClient.CoreV1(), r.kubeClient.StorageV1(), log) if err := kube.HasNodeWithOS(context.Background(), nodeOS, r.kubeClient.CoreV1()); err != nil { @@ -963,6 +970,8 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload return &exposer.CSISnapshotExposeParam{ SnapshotName: du.Spec.CSISnapshot.VolumeSnapshot, SourceNamespace: du.Spec.SourceNamespace, + SourcePVCName: pvc.Name, + SourcePVName: pv.Name, StorageClass: du.Spec.CSISnapshot.StorageClass, HostingPodLabels: hostingPodLabels, HostingPodAnnotations: hostingPodAnnotation, diff --git a/pkg/exposer/csi_snapshot.go b/pkg/exposer/csi_snapshot.go index 531330f62..50b7c976f 100644 --- a/pkg/exposer/csi_snapshot.go +++ b/pkg/exposer/csi_snapshot.go @@ -35,6 +35,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/nodeagent" velerotypes "github.com/vmware-tanzu/velero/pkg/types" + "github.com/vmware-tanzu/velero/pkg/util" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/csi" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -48,6 +49,12 @@ type CSISnapshotExposeParam struct { // SourceNamespace is the original namespace of the volume that the snapshot is taken for SourceNamespace string + // SourcePVCName is the original name of the PVC that the snapshot is taken for + SourcePVCName string + + // SourcePVCName is the name of PV for SourcePVC + SourcePVName string + // AccessMode defines the mode to access the snapshot AccessMode string @@ -189,6 +196,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O backupPVCReadOnly := false spcNoRelabeling := false backupPVCAnnotations := map[string]string{} + intoleratableNodes := []string{} if value, exists := csiExposeParam.BackupPVCConfig[csiExposeParam.StorageClass]; exists { if value.StorageClass != "" { backupPVCStorageClass = value.StorageClass @@ -206,6 +214,14 @@ func (e *csiSnapshotExposer) Expose(ctx 
context.Context, ownerObject corev1api.O if len(value.Annotations) > 0 { backupPVCAnnotations = value.Annotations } + + if _, found := backupPVCAnnotations[util.VSphereCNSFastCloneAnno]; found { + if n, err := kube.GetPVAttachedNodes(ctx, csiExposeParam.SourcePVName, e.kubeClient.StorageV1()); err != nil { + curLog.WithField("source PV", csiExposeParam.SourcePVName).WithError(err).Warn("Failed to get attached node for source PV, ignore intolerable nodes") + } else { + intoleratableNodes = n + } + } } backupPVC, err := e.createBackupPVC(ctx, ownerObject, backupVS.Name, backupPVCStorageClass, csiExposeParam.AccessMode, volumeSize, backupPVCReadOnly, backupPVCAnnotations) @@ -236,6 +252,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O spcNoRelabeling, csiExposeParam.NodeOS, csiExposeParam.PriorityClassName, + intoleratableNodes, ) if err != nil { return errors.Wrap(err, "error to create backup pod") @@ -564,6 +581,7 @@ func (e *csiSnapshotExposer) createBackupPod( spcNoRelabeling bool, nodeOS string, priorityClassName string, + intoleratableNodes []string, ) (*corev1api.Pod, error) { podName := ownerObject.Name @@ -664,6 +682,18 @@ func (e *csiSnapshotExposer) createBackupPod( } var podAffinity *corev1api.Affinity + if len(intoleratableNodes) > 0 { + if affinity == nil { + affinity = &kube.LoadAffinity{} + } + + affinity.NodeSelector.MatchExpressions = append(affinity.NodeSelector.MatchExpressions, metav1.LabelSelectorRequirement{ + Key: "kubernetes.io/hostname", + Values: intoleratableNodes, + Operator: metav1.LabelSelectorOpNotIn, + }) + } + if affinity != nil { podAffinity = kube.ToSystemAffinity([]*kube.LoadAffinity{affinity}) } diff --git a/pkg/exposer/csi_snapshot_priority_test.go b/pkg/exposer/csi_snapshot_priority_test.go index 236d15acb..345d5b327 100644 --- a/pkg/exposer/csi_snapshot_priority_test.go +++ b/pkg/exposer/csi_snapshot_priority_test.go @@ -153,6 +153,7 @@ func TestCreateBackupPodWithPriorityClass(t *testing.T) { false, // spcNoRelabeling kube.NodeOSLinux, tc.expectedPriorityClass, + nil, ) require.NoError(t, err, tc.description) @@ -237,6 +238,7 @@ func TestCreateBackupPodWithMissingConfigMap(t *testing.T) { false, // spcNoRelabeling kube.NodeOSLinux, "", // empty priority class since config map is missing + nil, ) // Should succeed even when config map is missing diff --git a/pkg/exposer/csi_snapshot_test.go b/pkg/exposer/csi_snapshot_test.go index 9bb4b95b8..95e5789e7 100644 --- a/pkg/exposer/csi_snapshot_test.go +++ b/pkg/exposer/csi_snapshot_test.go @@ -39,8 +39,11 @@ import ( velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" velerotypes "github.com/vmware-tanzu/velero/pkg/types" + "github.com/vmware-tanzu/velero/pkg/util" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/kube" + + storagev1api "k8s.io/api/storage/v1" ) type reactor struct { @@ -156,6 +159,31 @@ func TestExpose(t *testing.T) { }, } + pvName := "pv-1" + volumeAttachement1 := &storagev1api.VolumeAttachment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "va1", + }, + Spec: storagev1api.VolumeAttachmentSpec{ + Source: storagev1api.VolumeAttachmentSource{ + PersistentVolumeName: &pvName, + }, + NodeName: "node-1", + }, + } + + volumeAttachement2 := &storagev1api.VolumeAttachment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "va2", + }, + Spec: storagev1api.VolumeAttachmentSpec{ + Source: storagev1api.VolumeAttachmentSource{ + PersistentVolumeName: &pvName, + }, + NodeName: 
"node-2", + }, + } + tests := []struct { name string snapshotClientObj []runtime.Object @@ -624,6 +652,114 @@ func TestExpose(t *testing.T) { expectedBackupPVCStorageClass: "fake-sc-read-only", expectedAffinity: nil, }, + { + name: "IntolerateSourceNode, get source node fail", + ownerBackup: backup, + exposeParam: CSISnapshotExposeParam{ + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + SourcePVName: pvName, + StorageClass: "fake-sc", + AccessMode: AccessModeFileSystem, + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, + BackupPVCConfig: map[string]velerotypes.BackupPVC{ + "fake-sc": { + Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"}, + }, + }, + Affinity: nil, + }, + snapshotClientObj: []runtime.Object{ + vsObject, + vscObj, + }, + kubeClientObj: []runtime.Object{ + daemonSet, + }, + kubeReactors: []reactor{ + { + verb: "list", + resource: "volumeattachments", + reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("fake-create-error") + }, + }, + }, + expectedAffinity: nil, + }, + { + name: "IntolerateSourceNode, get empty source node", + ownerBackup: backup, + exposeParam: CSISnapshotExposeParam{ + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + SourcePVName: pvName, + StorageClass: "fake-sc", + AccessMode: AccessModeFileSystem, + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, + BackupPVCConfig: map[string]velerotypes.BackupPVC{ + "fake-sc": { + Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"}, + }, + }, + Affinity: nil, + }, + snapshotClientObj: []runtime.Object{ + vsObject, + vscObj, + }, + kubeClientObj: []runtime.Object{ + daemonSet, + }, + expectedAffinity: nil, + }, + { + name: "IntolerateSourceNode, get source nodes", + ownerBackup: backup, + exposeParam: CSISnapshotExposeParam{ + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + SourcePVName: pvName, + StorageClass: "fake-sc", + AccessMode: AccessModeFileSystem, + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, + BackupPVCConfig: map[string]velerotypes.BackupPVC{ + "fake-sc": { + Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"}, + }, + }, + Affinity: nil, + }, + snapshotClientObj: []runtime.Object{ + vsObject, + vscObj, + }, + kubeClientObj: []runtime.Object{ + daemonSet, + volumeAttachement1, + volumeAttachement2, + }, + expectedAffinity: &corev1api.Affinity{ + NodeAffinity: &corev1api.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{ + NodeSelectorTerms: []corev1api.NodeSelectorTerm{ + { + MatchExpressions: []corev1api.NodeSelectorRequirement{ + { + Key: "kubernetes.io/hostname", + Operator: corev1api.NodeSelectorOpNotIn, + Values: []string{"node-1", "node-2"}, + }, + }, + }, + }, + }, + }, + }, + }, } for _, test := range tests { diff --git a/pkg/util/kube/pvc_pv.go b/pkg/util/kube/pvc_pv.go index 634d79127..e18d33c77 100644 --- a/pkg/util/kube/pvc_pv.go +++ b/pkg/util/kube/pvc_pv.go @@ -554,3 +554,19 @@ func GetPVAttachedNode(ctx context.Context, pv string, storageClient storagev1.S return "", nil } + +func GetPVAttachedNodes(ctx context.Context, pv string, storageClient storagev1.StorageV1Interface) ([]string, error) { + vaList, err := storageClient.VolumeAttachments().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "error listing volumeattachment") + } + + nodes := []string{} + for _, va := range vaList.Items { + if 
va.Spec.Source.PersistentVolumeName != nil && *va.Spec.Source.PersistentVolumeName == pv { + nodes = append(nodes, va.Spec.NodeName) + } + } + + return nodes, nil +} diff --git a/pkg/util/third_party.go b/pkg/util/third_party.go index e85dc4a24..400c7a898 100644 --- a/pkg/util/third_party.go +++ b/pkg/util/third_party.go @@ -28,3 +28,7 @@ var ThirdPartyTolerations = []string{ "kubernetes.azure.com/scalesetpriority", "CriticalAddonsOnly", } + +const ( + VSphereCNSFastCloneAnno = "csi.vsphere.volume/fast-provisioning" +) From 25a7ef0e875ecca3d4a1ca86606b508f47f14580 Mon Sep 17 00:00:00 2001 From: Lyndon-Li Date: Thu, 11 Sep 2025 14:49:41 +0800 Subject: [PATCH 2/6] backupPVC to different node Signed-off-by: Lyndon-Li --- changelogs/unreleased/9233-Lyndon-Li | 2 +- pkg/exposer/csi_snapshot.go | 5 +++-- pkg/exposer/csi_snapshot_test.go | 14 ++++++++++++-- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/changelogs/unreleased/9233-Lyndon-Li b/changelogs/unreleased/9233-Lyndon-Li index 492765e63..f6dd7213a 100644 --- a/changelogs/unreleased/9233-Lyndon-Li +++ b/changelogs/unreleased/9233-Lyndon-Li @@ -1 +1 @@ -Fix issue #9229, add intolerateSourceNode backupPVC option \ No newline at end of file +Fix issue #9229, don't attach backupPVC to the source node \ No newline at end of file diff --git a/pkg/exposer/csi_snapshot.go b/pkg/exposer/csi_snapshot.go index 50b7c976f..781739ff5 100644 --- a/pkg/exposer/csi_snapshot.go +++ b/pkg/exposer/csi_snapshot.go @@ -52,7 +52,7 @@ type CSISnapshotExposeParam struct { // SourcePVCName is the original name of the PVC that the snapshot is taken for SourcePVCName string - // SourcePVCName is the name of PV for SourcePVC + // SourcePVName is the name of PV for SourcePVC SourcePVName string // AccessMode defines the mode to access the snapshot @@ -217,7 +217,8 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O if _, found := backupPVCAnnotations[util.VSphereCNSFastCloneAnno]; found { if n, err := kube.GetPVAttachedNodes(ctx, csiExposeParam.SourcePVName, e.kubeClient.StorageV1()); err != nil { - curLog.WithField("source PV", csiExposeParam.SourcePVName).WithError(err).Warn("Failed to get attached node for source PV, ignore intolerable nodes") + curLog.WithField("source PV", csiExposeParam.SourcePVName).WithError(err).Warnf("Failed to get attached node for source PV, ignore %s annotation", util.VSphereCNSFastCloneAnno) + delete(backupPVCAnnotations, util.VSphereCNSFastCloneAnno) } else { intoleratableNodes = n } diff --git a/pkg/exposer/csi_snapshot_test.go b/pkg/exposer/csi_snapshot_test.go index 95e5789e7..7e8e6d883 100644 --- a/pkg/exposer/csi_snapshot_test.go +++ b/pkg/exposer/csi_snapshot_test.go @@ -197,6 +197,7 @@ func TestExpose(t *testing.T) { expectedReadOnlyPVC bool expectedBackupPVCStorageClass string expectedAffinity *corev1api.Affinity + expectedPVCAnnotation map[string]string }{ { name: "wait vs ready fail", @@ -686,7 +687,8 @@ func TestExpose(t *testing.T) { }, }, }, - expectedAffinity: nil, + expectedAffinity: nil, + expectedPVCAnnotation: nil, }, { name: "IntolerateSourceNode, get empty source node", @@ -713,7 +715,8 @@ func TestExpose(t *testing.T) { kubeClientObj: []runtime.Object{ daemonSet, }, - expectedAffinity: nil, + expectedAffinity: nil, + expectedPVCAnnotation: map[string]string{util.VSphereCNSFastCloneAnno: "true"}, }, { name: "IntolerateSourceNode, get source nodes", @@ -759,6 +762,7 @@ func TestExpose(t *testing.T) { }, }, }, + expectedPVCAnnotation: 
map[string]string{util.VSphereCNSFastCloneAnno: "true"}, }, } @@ -841,6 +845,12 @@ func TestExpose(t *testing.T) { if test.expectedAffinity != nil { assert.Equal(t, test.expectedAffinity, backupPod.Spec.Affinity) } + + if test.expectedPVCAnnotation != nil { + assert.Equal(t, test.expectedPVCAnnotation, backupPVC.Annotations) + } else { + assert.Empty(t, backupPVC.Annotations) + } } else { assert.EqualError(t, err, test.err) } From 4ade8cf8a2ebcf80cab841e39756042930a0ceea Mon Sep 17 00:00:00 2001 From: Scott Seago Date: Mon, 22 Sep 2025 11:50:05 -0400 Subject: [PATCH 3/6] Add option for privileged fs-backup pod Signed-off-by: Scott Seago --- changelogs/unreleased/9295-sseago | 1 + pkg/cmd/cli/nodeagent/server.go | 6 +++-- .../pod_volume_backup_controller.go | 5 +++- .../pod_volume_backup_controller_test.go | 3 ++- .../pod_volume_restore_controller.go | 5 +++- .../pod_volume_restore_controller_test.go | 2 +- pkg/exposer/pod_volume.go | 12 ++++++++-- pkg/exposer/pod_volume_test.go | 23 +++++++++++++++++++ pkg/types/node_agent.go | 3 +++ .../docs/main/customize-installation.md | 2 ++ 10 files changed, 54 insertions(+), 8 deletions(-) create mode 100644 changelogs/unreleased/9295-sseago diff --git a/changelogs/unreleased/9295-sseago b/changelogs/unreleased/9295-sseago new file mode 100644 index 000000000..92f44c7ba --- /dev/null +++ b/changelogs/unreleased/9295-sseago @@ -0,0 +1 @@ +Add option for privileged fs-backup pod diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index 873e03beb..d3563c0f5 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -308,6 +308,8 @@ func (s *nodeAgentServer) run() { s.logger.Infof("Using customized backupPVC config %v", backupPVCConfig) } + privilegedFsBackup := s.dataPathConfigs != nil && s.dataPathConfigs.PrivilegedFsBackup + podResources := corev1api.ResourceRequirements{} if s.dataPathConfigs != nil && s.dataPathConfigs.PodResources != nil { if res, err := kube.ParseResourceRequirements(s.dataPathConfigs.PodResources.CPURequest, s.dataPathConfigs.PodResources.MemoryRequest, s.dataPathConfigs.PodResources.CPULimit, s.dataPathConfigs.PodResources.MemoryLimit); err != nil { @@ -327,12 +329,12 @@ func (s *nodeAgentServer) run() { } } - pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass) + pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass, privilegedFsBackup) if err := pvbReconciler.SetupWithManager(s.mgr); err != nil { s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup) } - pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass) + pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup) if err := pvrReconciler.SetupWithManager(s.mgr); err != nil { 
s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller") } diff --git a/pkg/controller/pod_volume_backup_controller.go b/pkg/controller/pod_volume_backup_controller.go index 3a446379f..625ec8337 100644 --- a/pkg/controller/pod_volume_backup_controller.go +++ b/pkg/controller/pod_volume_backup_controller.go @@ -60,7 +60,7 @@ const ( // NewPodVolumeBackupReconciler creates the PodVolumeBackupReconciler instance func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager, counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements, - metrics *metrics.ServerMetrics, logger logrus.FieldLogger, dataMovePriorityClass string) *PodVolumeBackupReconciler { + metrics *metrics.ServerMetrics, logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeBackupReconciler { return &PodVolumeBackupReconciler{ client: client, mgr: mgr, @@ -77,6 +77,7 @@ func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kub exposer: exposer.NewPodVolumeExposer(kubeClient, logger), cancelledPVB: make(map[string]time.Time), dataMovePriorityClass: dataMovePriorityClass, + privileged: privileged, } } @@ -97,6 +98,7 @@ type PodVolumeBackupReconciler struct { resourceTimeout time.Duration cancelledPVB map[string]time.Time dataMovePriorityClass string + privileged bool } // +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete @@ -837,6 +839,7 @@ func (r *PodVolumeBackupReconciler) setupExposeParam(pvb *velerov1api.PodVolumeB Resources: r.podResources, // Priority class name for the data mover pod, retrieved from node-agent-configmap PriorityClassName: r.dataMovePriorityClass, + Privileged: r.privileged, } } diff --git a/pkg/controller/pod_volume_backup_controller_test.go b/pkg/controller/pod_volume_backup_controller_test.go index 51e75edb2..a76b32b58 100644 --- a/pkg/controller/pod_volume_backup_controller_test.go +++ b/pkg/controller/pod_volume_backup_controller_test.go @@ -151,7 +151,8 @@ func initPVBReconcilerWithError(needError ...error) (*PodVolumeBackupReconciler, corev1api.ResourceRequirements{}, metrics.NewServerMetrics(), velerotest.NewLogger(), - "", // dataMovePriorityClass + "", // dataMovePriorityClass + false, // privileged ), nil } diff --git a/pkg/controller/pod_volume_restore_controller.go b/pkg/controller/pod_volume_restore_controller.go index ce0d312a0..0ed06b980 100644 --- a/pkg/controller/pod_volume_restore_controller.go +++ b/pkg/controller/pod_volume_restore_controller.go @@ -56,7 +56,7 @@ import ( func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager, counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements, - logger logrus.FieldLogger, dataMovePriorityClass string) *PodVolumeRestoreReconciler { + logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeRestoreReconciler { return &PodVolumeRestoreReconciler{ client: client, mgr: mgr, @@ -72,6 +72,7 @@ func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, ku exposer: exposer.NewPodVolumeExposer(kubeClient, logger), cancelledPVR: make(map[string]time.Time), dataMovePriorityClass: dataMovePriorityClass, + privileged: privileged, 
} } @@ -90,6 +91,7 @@ type PodVolumeRestoreReconciler struct { resourceTimeout time.Duration cancelledPVR map[string]time.Time dataMovePriorityClass string + privileged bool } // +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores,verbs=get;list;watch;create;update;patch;delete @@ -896,6 +898,7 @@ func (r *PodVolumeRestoreReconciler) setupExposeParam(pvr *velerov1api.PodVolume Resources: r.podResources, // Priority class name for the data mover pod, retrieved from node-agent-configmap PriorityClassName: r.dataMovePriorityClass, + Privileged: r.privileged, } } diff --git a/pkg/controller/pod_volume_restore_controller_test.go b/pkg/controller/pod_volume_restore_controller_test.go index 409672c32..e993815b5 100644 --- a/pkg/controller/pod_volume_restore_controller_test.go +++ b/pkg/controller/pod_volume_restore_controller_test.go @@ -617,7 +617,7 @@ func initPodVolumeRestoreReconcilerWithError(objects []runtime.Object, cliObj [] dataPathMgr := datapath.NewManager(1) - return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, corev1api.ResourceRequirements{}, velerotest.NewLogger(), ""), nil + return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, corev1api.ResourceRequirements{}, velerotest.NewLogger(), "", false), nil } func TestPodVolumeRestoreReconcile(t *testing.T) { diff --git a/pkg/exposer/pod_volume.go b/pkg/exposer/pod_volume.go index ea1fb2d1f..6747ceeed 100644 --- a/pkg/exposer/pod_volume.go +++ b/pkg/exposer/pod_volume.go @@ -73,6 +73,9 @@ type PodVolumeExposeParam struct { // PriorityClassName is the priority class name for the data mover pod PriorityClassName string + + // Privileged indicates whether to create the pod with a privileged container + Privileged bool } // PodVolumeExposer is the interfaces for a pod volume exposer @@ -153,7 +156,7 @@ func (e *podVolumeExposer) Expose(ctx context.Context, ownerObject corev1api.Obj curLog.WithField("path", path).Infof("Host path is retrieved for pod %s, volume %s", param.ClientPodName, param.ClientPodVolume) - hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName) + hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName, param.Privileged) if err != nil { return errors.Wrapf(err, "error to create hosting pod") } @@ -269,7 +272,7 @@ func (e *podVolumeExposer) CleanUp(ctx context.Context, ownerObject corev1api.Ob } func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject corev1api.ObjectReference, exposeType string, hostPath string, - operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string) (*corev1api.Pod, error) { + operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string, privileged bool) (*corev1api.Pod, error) { hostingPodName := 
ownerObject.Name containerName := string(ownerObject.UID) @@ -327,6 +330,7 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor args = append(args, podInfo.logLevelArgs...) var securityCtx *corev1api.PodSecurityContext + var containerSecurityCtx *corev1api.SecurityContext nodeSelector := map[string]string{} podOS := corev1api.PodOS{} if nodeOS == kube.NodeOSWindows { @@ -359,6 +363,9 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor securityCtx = &corev1api.PodSecurityContext{ RunAsUser: &userID, } + containerSecurityCtx = &corev1api.SecurityContext{ + Privileged: &privileged, + } nodeSelector[kube.NodeOSLabel] = kube.NodeOSLinux podOS.Name = kube.NodeOSLinux @@ -394,6 +401,7 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor Env: podInfo.env, EnvFrom: podInfo.envFrom, Resources: resources, + SecurityContext: containerSecurityCtx, }, }, PriorityClassName: priorityClassName, diff --git a/pkg/exposer/pod_volume_test.go b/pkg/exposer/pod_volume_test.go index f36fda4f4..3172733f4 100644 --- a/pkg/exposer/pod_volume_test.go +++ b/pkg/exposer/pod_volume_test.go @@ -190,6 +190,29 @@ func TestPodVolumeExpose(t *testing.T) { return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil }, }, + { + name: "succeed with privileged pod", + ownerBackup: backup, + exposeParam: PodVolumeExposeParam{ + ClientNamespace: "fake-ns", + ClientPodName: "fake-client-pod", + ClientPodVolume: "fake-client-volume", + Privileged: true, + }, + kubeClientObj: []runtime.Object{ + podWithNode, + node, + daemonSet, + }, + funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) { + return datapath.AccessPoint{ + ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", + }, nil + }, + funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) { + return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil + }, + }, } for _, test := range tests { diff --git a/pkg/types/node_agent.go b/pkg/types/node_agent.go index 778aefcf1..b335df275 100644 --- a/pkg/types/node_agent.go +++ b/pkg/types/node_agent.go @@ -84,4 +84,7 @@ type NodeAgentConfigs struct { // PriorityClassName is the priority class name for data mover pods created by the node agent PriorityClassName string `json:"priorityClassName,omitempty"` + + // PrivilegedFsBackup determines whether to create fs-backup pods as privileged pods + PrivilegedFsBackup bool `json:"privilegedFsBackup,omitempty"` } diff --git a/site/content/docs/main/customize-installation.md b/site/content/docs/main/customize-installation.md index 2ac23e5cd..d62945a35 100644 --- a/site/content/docs/main/customize-installation.md +++ b/site/content/docs/main/customize-installation.md @@ -23,6 +23,8 @@ By default, `velero install` does not install Velero's [File System Backup][3]. If you've already run `velero install` without the `--use-node-agent` flag, you can run the same command again, including the `--use-node-agent` flag, to add the file system backup to your existing install. +Note that for some use cases (including installation on OpenShift clusters) the fs-backup pods must run in a Privileged security context. This is configured through the node-agent configmap (see below) by setting `privilegedFsBackup` to `true` in the configmap. 
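As a rough illustration of how that flag is consumed, the following is a minimal Go sketch of decoding the node-agent configmap payload into the `NodeAgentConfigs` type this series extends. The struct is trimmed to the new field and the JSON value is an assumed example, not output from a real cluster; the single-data-key expectation it mentions is enforced later in this series.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// NodeAgentConfigs mirrors only the field of pkg/types.NodeAgentConfigs
// added by this patch; the full struct carries more options.
type NodeAgentConfigs struct {
	PrivilegedFsBackup bool `json:"privilegedFsBackup,omitempty"`
}

func main() {
	// The node-agent configmap data is expected to hold a single key whose
	// value is a JSON document such as this assumed example.
	data := `{"privilegedFsBackup": true}`

	cfg := NodeAgentConfigs{}
	if err := json.Unmarshal([]byte(data), &cfg); err != nil {
		panic(err)
	}

	// When true, the PVB/PVR reconcilers pass Privileged=true to the pod
	// volume exposer, which sets a privileged SecurityContext on the
	// hosting pod's container.
	fmt.Println(cfg.PrivilegedFsBackup) // true
}
```
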
+ ## CSI Snapshot Data Movement Velero node-agent is required by [CSI Snapshot Data Movement][12] when Velero built-in data mover is used. By default, `velero install` does not install Velero's node-agent. To enable it, specify the `--use-node-agent` flag. From cabb04575e5c5055e83b3ff87d2ec079d56d6933 Mon Sep 17 00:00:00 2001 From: Xun Jiang Date: Sat, 27 Sep 2025 23:33:37 +0800 Subject: [PATCH 4/6] Fix the push action invalid variable ref issue. Signed-off-by: Xun Jiang --- .github/workflows/push.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 8e1bc1219..a9f19b09f 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -12,7 +12,7 @@ jobs: get-go-version: uses: ./.github/workflows/get-go-version.yaml with: - ref: ${ github.ref } + ref: ${{ github.ref }} build: name: Build From f8938e7feddfd9810f3d14c44e55b34b4020cab4 Mon Sep 17 00:00:00 2001 From: Xun Jiang/Bruce Jiang <59276555+blackpiglet@users.noreply.github.com> Date: Tue, 30 Sep 2025 03:08:05 +0800 Subject: [PATCH 5/6] VerifyJSONConfigs verify every elements in Data. (#9302) Add error message in the velero install CLI output if VerifyJSONConfigs fail. Only allow one element in node-agent-configmap's Data. Signed-off-by: Xun Jiang --- changelogs/unreleased/9302-blackpiglet | 1 + pkg/cmd/cli/install/install.go | 10 ++++------ pkg/nodeagent/node_agent.go | 4 ++++ pkg/nodeagent/node_agent_test.go | 9 +++++++++ pkg/util/kube/utils.go | 11 ++++++----- 5 files changed, 24 insertions(+), 11 deletions(-) create mode 100644 changelogs/unreleased/9302-blackpiglet diff --git a/changelogs/unreleased/9302-blackpiglet b/changelogs/unreleased/9302-blackpiglet new file mode 100644 index 000000000..63576a535 --- /dev/null +++ b/changelogs/unreleased/9302-blackpiglet @@ -0,0 +1 @@ +VerifyJSONConfigs verify every elements in Data. diff --git a/pkg/cmd/cli/install/install.go b/pkg/cmd/cli/install/install.go index c7a7dfe7a..6698010bd 100644 --- a/pkg/cmd/cli/install/install.go +++ b/pkg/cmd/cli/install/install.go @@ -545,24 +545,22 @@ func (o *Options) Validate(c *cobra.Command, args []string, f client.Factory) er return fmt.Errorf("fail to create go-client %w", err) } - // If either Linux or Windows node-agent is installed, and the node-agent-configmap - // is specified, need to validate the ConfigMap. 
- if (o.UseNodeAgent || o.UseNodeAgentWindows) && len(o.NodeAgentConfigMap) > 0 { + if len(o.NodeAgentConfigMap) > 0 { if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.NodeAgentConfigMap, &velerotypes.NodeAgentConfigs{}); err != nil { - return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid", o.NodeAgentConfigMap) + return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid: %w", o.NodeAgentConfigMap, err) } } if len(o.RepoMaintenanceJobConfigMap) > 0 { if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.RepoMaintenanceJobConfigMap, &velerotypes.JobConfigs{}); err != nil { - return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid", o.RepoMaintenanceJobConfigMap) + return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid: %w", o.RepoMaintenanceJobConfigMap, err) } } if len(o.BackupRepoConfigMap) > 0 { config := make(map[string]any) if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.BackupRepoConfigMap, &config); err != nil { - return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid", o.BackupRepoConfigMap) + return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid: %w", o.BackupRepoConfigMap, err) } } diff --git a/pkg/nodeagent/node_agent.go b/pkg/nodeagent/node_agent.go index 7268b589a..a5de2465c 100644 --- a/pkg/nodeagent/node_agent.go +++ b/pkg/nodeagent/node_agent.go @@ -143,6 +143,10 @@ func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Int return nil, errors.Errorf("data is not available in config map %s", configName) } + if len(cm.Data) > 1 { + return nil, errors.Errorf("more than one keys are found in ConfigMap %s's data. only expect one", configName) + } + jsonString := "" for _, v := range cm.Data { jsonString = v diff --git a/pkg/nodeagent/node_agent_test.go b/pkg/nodeagent/node_agent_test.go index bdc1085b4..cb46ee569 100644 --- a/pkg/nodeagent/node_agent_test.go +++ b/pkg/nodeagent/node_agent_test.go @@ -249,6 +249,7 @@ func TestGetConfigs(t *testing.T) { cmWithValidData := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"loadConcurrency\":{\"globalConfig\": 5}}").Result() cmWithPriorityClass := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"priorityClassName\": \"high-priority\"}").Result() cmWithPriorityClassAndOther := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"priorityClassName\": \"low-priority\", \"loadConcurrency\":{\"globalConfig\": 3}}").Result() + cmWithMultipleKeysInData := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key-1", "{}", "fake-key-2", "{}").Result() tests := []struct { name string @@ -331,6 +332,14 @@ func TestGetConfigs(t *testing.T) { }, }, }, + { + name: "ConfigMap's Data has more than one key", + namespace: "fake-ns", + kubeClientObj: []runtime.Object{ + cmWithMultipleKeysInData, + }, + expectErr: "more than one keys are found in ConfigMap node-agent-config's data. only expect one", + }, } for _, test := range tests { diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go index 002070376..5e5e97603 100644 --- a/pkg/util/kube/utils.go +++ b/pkg/util/kube/utils.go @@ -371,15 +371,16 @@ func VerifyJSONConfigs(ctx context.Context, namespace string, crClient client.Cl return errors.Errorf("data is not available in ConfigMap %s", configName) } + // Verify all the keys in ConfigMap's data. 
jsonString := "" for _, v := range cm.Data { jsonString = v - } - configs := configType - err = json.Unmarshal([]byte(jsonString), configs) - if err != nil { - return errors.Wrapf(err, "error to unmarshall data from ConfigMap %s", configName) + configs := configType + err = json.Unmarshal([]byte(jsonString), configs) + if err != nil { + return errors.Wrapf(err, "error to unmarshall data from ConfigMap %s", configName) + } } return nil From e6aab8ca93ba1cd2ff6ed85c65248806d570fe7c Mon Sep 17 00:00:00 2001 From: lyndon-li <98304688+Lyndon-Li@users.noreply.github.com> Date: Fri, 10 Oct 2025 02:13:43 +0800 Subject: [PATCH 6/6] add events to diagnose (#9296) Signed-off-by: Lyndon-Li --- changelogs/unreleased/9296-Lyndon-Li | 1 + pkg/exposer/csi_snapshot.go | 11 ++- pkg/exposer/csi_snapshot_test.go | 75 +++++++++++++++++++ pkg/exposer/generic_restore.go | 9 ++- pkg/exposer/generic_restore_test.go | 58 +++++++++++++++ pkg/exposer/pod_volume.go | 7 +- pkg/exposer/pod_volume_test.go | 44 +++++++++++ pkg/util/csi/volume_snapshot.go | 10 ++- pkg/util/csi/volume_snapshot_test.go | 73 ++++++++++++++++++- pkg/util/kube/pod.go | 10 ++- pkg/util/kube/pod_test.go | 105 ++++++++++++++++++++++++++- pkg/util/kube/pvc_pv.go | 14 +++- pkg/util/kube/pvc_pv_test.go | 77 +++++++++++++++++++- 13 files changed, 479 insertions(+), 15 deletions(-) create mode 100644 changelogs/unreleased/9296-Lyndon-Li diff --git a/changelogs/unreleased/9296-Lyndon-Li b/changelogs/unreleased/9296-Lyndon-Li new file mode 100644 index 000000000..696943ede --- /dev/null +++ b/changelogs/unreleased/9296-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #9267, add events to data mover prepare diagnostic \ No newline at end of file diff --git a/pkg/exposer/csi_snapshot.go b/pkg/exposer/csi_snapshot.go index 781739ff5..d20638d7a 100644 --- a/pkg/exposer/csi_snapshot.go +++ b/pkg/exposer/csi_snapshot.go @@ -381,8 +381,13 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor diag += fmt.Sprintf("error getting backup vs %s, err: %v\n", backupVSName, err) } + events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + diag += fmt.Sprintf("error listing events, err: %v\n", err) + } + if pod != nil { - diag += kube.DiagnosePod(pod) + diag += kube.DiagnosePod(pod, events) if pod.Spec.NodeName != "" { if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil { @@ -392,7 +397,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor } if pvc != nil { - diag += kube.DiagnosePVC(pvc) + diag += kube.DiagnosePVC(pvc, events) if pvc.Spec.VolumeName != "" { if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil { @@ -404,7 +409,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor } if vs != nil { - diag += csi.DiagnoseVS(vs) + diag += csi.DiagnoseVS(vs, events) if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil && *vs.Status.BoundVolumeSnapshotContentName != "" { if vsc, err := e.csiSnapshotClient.VolumeSnapshotContents().Get(ctx, *vs.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}); err != nil { diff --git a/pkg/exposer/csi_snapshot_test.go b/pkg/exposer/csi_snapshot_test.go index 7e8e6d883..d419b6126 100644 --- a/pkg/exposer/csi_snapshot_test.go +++ b/pkg/exposer/csi_snapshot_test.go @@ -1288,6 +1288,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) { 
ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-backup", + UID: "fake-pod-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: backup.APIVersion, @@ -1313,6 +1314,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-backup", + UID: "fake-pod-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: backup.APIVersion, @@ -1341,6 +1343,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-backup", + UID: "fake-pvc-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: backup.APIVersion, @@ -1359,6 +1362,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-backup", + UID: "fake-pvc-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: backup.APIVersion, @@ -1404,6 +1408,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-backup", + UID: "fake-vs-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: backup.APIVersion, @@ -1419,6 +1424,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-backup", + UID: "fake-vs-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: backup.APIVersion, @@ -1436,6 +1442,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-backup", + UID: "fake-vs-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: backup.APIVersion, @@ -1633,6 +1640,74 @@ PVC velero/fake-backup, phase Pending, binding to fake-pv PV fake-pv, phase Pending, reason , message fake-pv-message VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle +end diagnose CSI exposer`, + }, + { + name: "with events", + ownerBackup: backup, + kubeClientObj: []runtime.Object{ + &backupPodWithNodeName, + &backupPVCWithVolumeName, + &backupPV, + &nodeAgentPod, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"}, + Reason: "reason-1", + Message: "message-1", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Reason: "reason-2", + Message: "message-2", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"}, + Reason: "reason-3", + Message: "message-3", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"}, + Reason: "reason-4", + Message: "message-4", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-5"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Reason: 
"reason-5", + Message: "message-5", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-6"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Reason: "reason-6", + Message: "message-6", + }, + }, + snapshotClientObj: []runtime.Object{ + &backupVSWithVSC, + &backupVSC, + }, + expected: `begin diagnose CSI exposer +Pod velero/fake-backup, phase Pending, node name fake-node +Pod condition Initialized, status True, reason , message fake-pod-message +Pod event reason reason-2, message message-2 +Pod event reason reason-6, message message-6 +PVC velero/fake-backup, phase Pending, binding to fake-pv +PVC event reason reason-3, message message-3 +PV fake-pv, phase Pending, reason , message fake-pv-message +VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message +VS event reason reason-4, message message-4 +VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle end diagnose CSI exposer`, }, } diff --git a/pkg/exposer/generic_restore.go b/pkg/exposer/generic_restore.go index 26019d5d4..8691eedfc 100644 --- a/pkg/exposer/generic_restore.go +++ b/pkg/exposer/generic_restore.go @@ -287,8 +287,13 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject diag += fmt.Sprintf("error getting restore pvc %s, err: %v\n", restorePVCName, err) } + events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + diag += fmt.Sprintf("error listing events, err: %v\n", err) + } + if pod != nil { - diag += kube.DiagnosePod(pod) + diag += kube.DiagnosePod(pod, events) if pod.Spec.NodeName != "" { if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil { @@ -298,7 +303,7 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject } if pvc != nil { - diag += kube.DiagnosePVC(pvc) + diag += kube.DiagnosePVC(pvc, events) if pvc.Spec.VolumeName != "" { if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil { diff --git a/pkg/exposer/generic_restore_test.go b/pkg/exposer/generic_restore_test.go index b5679889b..2e528d6a2 100644 --- a/pkg/exposer/generic_restore_test.go +++ b/pkg/exposer/generic_restore_test.go @@ -549,6 +549,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-restore", + UID: "fake-pod-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: restore.APIVersion, @@ -574,6 +575,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-restore", + UID: "fake-pod-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: restore.APIVersion, @@ -602,6 +604,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-restore", + UID: "fake-pvc-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: restore.APIVersion, @@ -620,6 +623,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-restore", + UID: "fake-pvc-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: restore.APIVersion, @@ -758,6 +762,60 @@ Pod velero/fake-restore, phase Pending, node name fake-node Pod condition Initialized, status True, 
reason , message fake-pod-message PVC velero/fake-restore, phase Pending, binding to fake-pv PV fake-pv, phase Pending, reason , message fake-pv-message +end diagnose restore exposer`, + }, + { + name: "with events", + ownerRestore: restore, + kubeClientObj: []runtime.Object{ + &restorePodWithNodeName, + &restorePVCWithVolumeName, + &restorePV, + &nodeAgentPod, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"}, + Reason: "reason-1", + Message: "message-1", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Reason: "reason-2", + Message: "message-2", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"}, + Reason: "reason-3", + Message: "message-3", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-4"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Reason: "reason-4", + Message: "message-4", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-5"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Reason: "reason-5", + Message: "message-5", + }, + }, + expected: `begin diagnose restore exposer +Pod velero/fake-restore, phase Pending, node name fake-node +Pod condition Initialized, status True, reason , message fake-pod-message +Pod event reason reason-2, message message-2 +Pod event reason reason-5, message message-5 +PVC velero/fake-restore, phase Pending, binding to fake-pv +PVC event reason reason-3, message message-3 +PV fake-pv, phase Pending, reason , message fake-pv-message end diagnose restore exposer`, }, } diff --git a/pkg/exposer/pod_volume.go b/pkg/exposer/pod_volume.go index 6747ceeed..591600eb3 100644 --- a/pkg/exposer/pod_volume.go +++ b/pkg/exposer/pod_volume.go @@ -251,8 +251,13 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev diag += fmt.Sprintf("error getting hosting pod %s, err: %v\n", hostingPodName, err) } + events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + diag += fmt.Sprintf("error listing events, err: %v\n", err) + } + if pod != nil { - diag += kube.DiagnosePod(pod) + diag += kube.DiagnosePod(pod, events) if pod.Spec.NodeName != "" { if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil { diff --git a/pkg/exposer/pod_volume_test.go b/pkg/exposer/pod_volume_test.go index 3172733f4..f48e9376b 100644 --- a/pkg/exposer/pod_volume_test.go +++ b/pkg/exposer/pod_volume_test.go @@ -466,6 +466,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-backup", + UID: "fake-pod-uid", OwnerReferences: []metav1.OwnerReference{ { APIVersion: backup.APIVersion, @@ -491,6 +492,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1.DefaultNamespace, Name: "fake-backup", + UID: "fake-pod-uid", 
OwnerReferences: []metav1.OwnerReference{ { APIVersion: backup.APIVersion, @@ -587,6 +589,48 @@ end diagnose pod volume exposer`, expected: `begin diagnose pod volume exposer Pod velero/fake-backup, phase Pending, node name fake-node Pod condition Initialized, status True, reason , message fake-pod-message +end diagnose pod volume exposer`, + }, + { + name: "with events", + ownerBackup: backup, + kubeClientObj: []runtime.Object{ + &backupPodWithNodeName, + &nodeAgentPod, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"}, + Reason: "reason-1", + Message: "message-1", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Reason: "reason-2", + Message: "message-2", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-3"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Reason: "reason-3", + Message: "message-3", + }, + &corev1api.Event{ + ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"}, + Type: corev1api.EventTypeWarning, + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Reason: "reason-4", + Message: "message-4", + }, + }, + expected: `begin diagnose pod volume exposer +Pod velero/fake-backup, phase Pending, node name fake-node +Pod condition Initialized, status True, reason , message fake-pod-message +Pod event reason reason-2, message message-2 +Pod event reason reason-4, message message-4 end diagnose pod volume exposer`, }, } diff --git a/pkg/util/csi/volume_snapshot.go b/pkg/util/csi/volume_snapshot.go index 8e59dd69f..57e6f2e1d 100644 --- a/pkg/util/csi/volume_snapshot.go +++ b/pkg/util/csi/volume_snapshot.go @@ -689,7 +689,7 @@ func WaitUntilVSCHandleIsReady( return vsc, nil } -func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string { +func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot, events *corev1api.EventList) string { vscName := "" readyToUse := false errMessage := "" @@ -710,6 +710,14 @@ func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string { diag := fmt.Sprintf("VS %s/%s, bind to %s, readyToUse %v, errMessage %s\n", vs.Namespace, vs.Name, vscName, readyToUse, errMessage) + if events != nil { + for _, e := range events.Items { + if e.InvolvedObject.UID == vs.UID && e.Type == corev1api.EventTypeWarning { + diag += fmt.Sprintf("VS event reason %s, message %s\n", e.Reason, e.Message) + } + } + } + return diag } diff --git a/pkg/util/csi/volume_snapshot_test.go b/pkg/util/csi/volume_snapshot_test.go index 91c9a1ea3..2f735559c 100644 --- a/pkg/util/csi/volume_snapshot_test.go +++ b/pkg/util/csi/volume_snapshot_test.go @@ -1699,6 +1699,7 @@ func TestDiagnoseVS(t *testing.T) { testCases := []struct { name string vs *snapshotv1api.VolumeSnapshot + events *corev1api.EventList expected string }{ { @@ -1781,11 +1782,81 @@ func TestDiagnoseVS(t *testing.T) { }, expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage fake-message\n", }, + { + name: "VS with VSC and empty event", + vs: &snapshotv1api.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-vs", + Namespace: "fake-ns", + }, + Status: &snapshotv1api.VolumeSnapshotStatus{ + BoundVolumeSnapshotContentName: &vscName, + ReadyToUse: 
&readyToUse, + Error: &snapshotv1api.VolumeSnapshotError{}, + }, + }, + events: &corev1api.EventList{}, + expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \n", + }, + { + name: "VS with VSC and events", + vs: &snapshotv1api.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-vs", + Namespace: "fake-ns", + UID: "fake-vs-uid", + }, + Status: &snapshotv1api.VolumeSnapshotStatus{ + BoundVolumeSnapshotContentName: &vscName, + ReadyToUse: &readyToUse, + Error: &snapshotv1api.VolumeSnapshotError{}, + }, + }, + events: &corev1api.EventList{Items: []corev1api.Event{ + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-1", + Message: "message-1", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-2", + Message: "message-2", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-3", + Message: "message-3", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"}, + Type: corev1api.EventTypeNormal, + Reason: "reason-4", + Message: "message-4", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"}, + Type: corev1api.EventTypeNormal, + Reason: "reason-5", + Message: "message-5", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-6", + Message: "message-6", + }, + }}, + expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \nVS event reason reason-3, message message-3\nVS event reason reason-6, message message-6\n", + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - diag := DiagnoseVS(tc.vs) + diag := DiagnoseVS(tc.vs, tc.events) assert.Equal(t, tc.expected, diag) }) } diff --git a/pkg/util/kube/pod.go b/pkg/util/kube/pod.go index 6b9cf7d58..86aa2e47b 100644 --- a/pkg/util/kube/pod.go +++ b/pkg/util/kube/pod.go @@ -268,13 +268,21 @@ func ToSystemAffinity(loadAffinities []*LoadAffinity) *corev1api.Affinity { return nil } -func DiagnosePod(pod *corev1api.Pod) string { +func DiagnosePod(pod *corev1api.Pod, events *corev1api.EventList) string { diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s\n", pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName) for _, condition := range pod.Status.Conditions { diag += fmt.Sprintf("Pod condition %s, status %s, reason %s, message %s\n", condition.Type, condition.Status, condition.Reason, condition.Message) } + if events != nil { + for _, e := range events.Items { + if e.InvolvedObject.UID == pod.UID && e.Type == corev1api.EventTypeWarning { + diag += fmt.Sprintf("Pod event reason %s, message %s\n", e.Reason, e.Message) + } + } + } + return diag } diff --git a/pkg/util/kube/pod_test.go b/pkg/util/kube/pod_test.go index f01d5ab35..ba930019e 100644 --- a/pkg/util/kube/pod_test.go +++ b/pkg/util/kube/pod_test.go @@ -896,10 +896,11 @@ func TestDiagnosePod(t *testing.T) { testCases := []struct { name string pod *corev1api.Pod + events *corev1api.EventList expected string }{ { - name: "pod with all info", + name: "pod with all info but event", pod: &corev1api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "fake-pod", @@ -928,11 +929,111 @@ func TestDiagnosePod(t *testing.T) { }, expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, 
status False, reason fake-reason-2, message fake-message-2\n", }, + { + name: "pod with all info and empty event list", + pod: &corev1api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pod", + Namespace: "fake-ns", + }, + Spec: corev1api.PodSpec{ + NodeName: "fake-node", + }, + Status: corev1api.PodStatus{ + Phase: corev1api.PodPending, + Conditions: []corev1api.PodCondition{ + { + Type: corev1api.PodInitialized, + Status: corev1api.ConditionTrue, + Reason: "fake-reason-1", + Message: "fake-message-1", + }, + { + Type: corev1api.PodScheduled, + Status: corev1api.ConditionFalse, + Reason: "fake-reason-2", + Message: "fake-message-2", + }, + }, + }, + }, + events: &corev1api.EventList{}, + expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n", + }, + { + name: "pod with all info and events", + pod: &corev1api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pod", + Namespace: "fake-ns", + UID: "fake-pod-uid", + }, + Spec: corev1api.PodSpec{ + NodeName: "fake-node", + }, + Status: corev1api.PodStatus{ + Phase: corev1api.PodPending, + Conditions: []corev1api.PodCondition{ + { + Type: corev1api.PodInitialized, + Status: corev1api.ConditionTrue, + Reason: "fake-reason-1", + Message: "fake-message-1", + }, + { + Type: corev1api.PodScheduled, + Status: corev1api.ConditionFalse, + Reason: "fake-reason-2", + Message: "fake-message-2", + }, + }, + }, + }, + events: &corev1api.EventList{Items: []corev1api.Event{ + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-1", + Message: "message-1", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-2", + Message: "message-2", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-3", + Message: "message-3", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Type: corev1api.EventTypeNormal, + Reason: "reason-4", + Message: "message-4", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Type: corev1api.EventTypeNormal, + Reason: "reason-5", + Message: "message-5", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-6", + Message: "message-6", + }, + }}, + expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\nPod event reason reason-3, message message-3\nPod event reason reason-6, message message-6\n", + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - diag := DiagnosePod(tc.pod) + diag := DiagnosePod(tc.pod, tc.events) assert.Equal(t, tc.expected, diag) }) } diff --git a/pkg/util/kube/pvc_pv.go b/pkg/util/kube/pvc_pv.go index e18d33c77..786cef2a5 100644 --- a/pkg/util/kube/pvc_pv.go +++ b/pkg/util/kube/pvc_pv.go @@ -463,8 +463,18 @@ func GetPVCForPodVolume(vol *corev1api.Volume, pod *corev1api.Pod, crClient crcl return pvc, nil } -func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim) string { - return fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, 
pvc.Spec.VolumeName) +func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim, events *corev1api.EventList) string { + diag := fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName) + + if events != nil { + for _, e := range events.Items { + if e.InvolvedObject.UID == pvc.UID && e.Type == corev1api.EventTypeWarning { + diag += fmt.Sprintf("PVC event reason %s, message %s\n", e.Reason, e.Message) + } + } + } + + return diag } func DiagnosePV(pv *corev1api.PersistentVolume) string { diff --git a/pkg/util/kube/pvc_pv_test.go b/pkg/util/kube/pvc_pv_test.go index f52cdeb98..d94efa62e 100644 --- a/pkg/util/kube/pvc_pv_test.go +++ b/pkg/util/kube/pvc_pv_test.go @@ -1593,10 +1593,11 @@ func TestDiagnosePVC(t *testing.T) { testCases := []struct { name string pvc *corev1api.PersistentVolumeClaim + events *corev1api.EventList expected string }{ { - name: "pvc with all info", + name: "pvc with all info but events", pvc: &corev1api.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "fake-pvc", @@ -1611,11 +1612,83 @@ func TestDiagnosePVC(t *testing.T) { }, expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n", }, + { + name: "pvc with all info and empty events", + pvc: &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pvc", + Namespace: "fake-ns", + }, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "fake-pv", + }, + Status: corev1api.PersistentVolumeClaimStatus{ + Phase: corev1api.ClaimPending, + }, + }, + events: &corev1api.EventList{}, + expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n", + }, + { + name: "pvc with all info and events", + pvc: &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pvc", + Namespace: "fake-ns", + UID: "fake-pvc-uid", + }, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "fake-pv", + }, + Status: corev1api.PersistentVolumeClaimStatus{ + Phase: corev1api.ClaimPending, + }, + }, + events: &corev1api.EventList{Items: []corev1api.Event{ + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-1", + Message: "message-1", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-2", + Message: "message-2", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-3", + Message: "message-3", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"}, + Type: corev1api.EventTypeNormal, + Reason: "reason-4", + Message: "message-4", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"}, + Type: corev1api.EventTypeNormal, + Reason: "reason-5", + Message: "message-5", + }, + { + InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"}, + Type: corev1api.EventTypeWarning, + Reason: "reason-6", + Message: "message-6", + }, + }}, + expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\nPVC event reason reason-3, message message-3\nPVC event reason reason-6, message message-6\n", + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - diag := DiagnosePVC(tc.pvc) + diag := DiagnosePVC(tc.pvc, tc.events) assert.Equal(t, tc.expected, diag) }) }
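
Taken together, the diagnose changes in the last patch follow one pattern: each exposer lists the events in its own namespace once, and DiagnosePod, DiagnosePVC, and DiagnoseVS append only the warning events whose InvolvedObject UID matches the object being diagnosed. The standalone Go sketch below restates just that filter; the helper name and sample events are illustrative, not part of the Velero code.

```go
package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// warningEventsFor is a hypothetical helper mirroring the filtering done by
// DiagnosePod, DiagnosePVC, and DiagnoseVS in this series: keep only warning
// events whose InvolvedObject UID matches the diagnosed object.
func warningEventsFor(events *corev1api.EventList, uid types.UID, kind string) string {
	diag := ""
	if events == nil {
		return diag
	}
	for _, e := range events.Items {
		if e.InvolvedObject.UID == uid && e.Type == corev1api.EventTypeWarning {
			diag += fmt.Sprintf("%s event reason %s, message %s\n", kind, e.Reason, e.Message)
		}
	}
	return diag
}

func main() {
	// Assumed sample events: one warning and one normal event for the same pod.
	events := &corev1api.EventList{Items: []corev1api.Event{
		{InvolvedObject: corev1api.ObjectReference{UID: "pod-uid"}, Type: corev1api.EventTypeWarning, Reason: "FailedScheduling", Message: "0/3 nodes are available"},
		{InvolvedObject: corev1api.ObjectReference{UID: "pod-uid"}, Type: corev1api.EventTypeNormal, Reason: "Scheduled", Message: "assigned to node"},
	}}

	// Only the warning event for the matching UID is reported.
	fmt.Print(warningEventsFor(events, "pod-uid", "Pod"))
}
```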