From dc3da29f3ee666162527f149fbcbe9dd6ba3fa61 Mon Sep 17 00:00:00 2001
From: Shubham Pampattiwar
Date: Tue, 18 Nov 2025 15:40:30 -0800
Subject: [PATCH] Apply volume policies to VolumeGroupSnapshot PVC filtering

VolumeGroupSnapshot grouping queried all PVCs with a matching group
label directly from the cluster, without respecting volume policies.
This caused errors when the labeled PVCs included both CSI and non-CSI
volumes, or volumes from different CSI drivers that volume policies had
already excluded.

This change filters PVCs by volume policy before VGS grouping, so that
only PVCs that should be snapshotted are included in the group. A
warning is logged for each PVC that carries the group label but is
excluded from the VGS by a volume policy.
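For context, the affected configuration looks like the following. This is a
minimal sketch mirroring the test fixtures added below; the ConfigMap and
Backup names are illustrative, and the policy document is the same `nfs: {}`
skip policy the tests use:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// examplePolicyBackup builds a resource-policy ConfigMap that skips NFS
// volumes and a Backup that references it. With this fix, a labeled
// NFS-backed PVC in a VolumeGroupSnapshot group is skipped (with a
// warning) instead of breaking CSI driver resolution for the group.
func examplePolicyBackup() (*corev1.ConfigMap, *velerov1.Backup) {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "volume-policy", Namespace: "velero"},
		Data: map[string]string{
			"volume-policy": `version: v1
volumePolicies:
- conditions:
    nfs: {}
  action:
    type: skip
`,
		},
	}
	backup := &velerov1.Backup{
		ObjectMeta: metav1.ObjectMeta{Name: "test-backup", Namespace: "velero"},
		Spec: velerov1.BackupSpec{
			ResourcePolicy: &corev1.TypedLocalObjectReference{
				Kind: "ConfigMap",
				Name: "volume-policy",
			},
		},
	}
	return cm, backup
}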
Fixes #9344

Signed-off-by: Shubham Pampattiwar
---
 pkg/backup/actions/csi/pvc_action.go      |  60 ++++-
 pkg/backup/actions/csi/pvc_action_test.go | 274 ++++++++++++++++++++++
 2 files changed, 332 insertions(+), 2 deletions(-)

diff --git a/pkg/backup/actions/csi/pvc_action.go b/pkg/backup/actions/csi/pvc_action.go
index c91d7a2b4..62c51accc 100644
--- a/pkg/backup/actions/csi/pvc_action.go
+++ b/pkg/backup/actions/csi/pvc_action.go
@@ -621,8 +621,30 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
 		return nil, errors.Wrapf(err, "failed to list PVCs in VolumeGroupSnapshot group %q in namespace %q", group, pvc.Namespace)
 	}
 
+	// Filter PVCs by volume policy
+	filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to filter PVCs by volume policy for VolumeGroupSnapshot group %q", group)
+	}
+
+	// Warn if any PVCs were filtered out
+	if len(filteredPVCs) < len(groupedPVCs) {
+		for _, originalPVC := range groupedPVCs {
+			found := false
+			for _, filteredPVC := range filteredPVCs {
+				if originalPVC.Name == filteredPVC.Name {
+					found = true
+					break
+				}
+			}
+			if !found {
+				p.log.Warnf("PVC %s/%s has VolumeGroupSnapshot label %s=%s but is excluded by volume policy", originalPVC.Namespace, originalPVC.Name, vgsLabelKey, group)
+			}
+		}
+	}
+
 	// Determine the CSI driver for the grouped PVCs
-	driver, err := p.determineCSIDriver(groupedPVCs)
+	driver, err := p.determineCSIDriver(filteredPVCs)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to determine CSI driver for PVCs in VolumeGroupSnapshot group %q", group)
 	}
@@ -643,7 +665,7 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
 	}
 
 	// Wait for all the VS objects associated with the VGS to have status and VGS Name (VS readiness is checked in legacy flow) and get the PVC-to-VS map
-	vsMap, err := p.waitForVGSAssociatedVS(ctx, groupedPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
+	vsMap, err := p.waitForVGSAssociatedVS(ctx, filteredPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
 	if err != nil {
 		return nil, errors.Wrapf(err, "timeout waiting for VolumeSnapshots to have status created via VolumeGroupSnapshot %s", newVGS.Name)
 	}
@@ -734,6 +756,40 @@ func (p *pvcBackupItemAction) listGroupedPVCs(ctx context.Context, namespace, la
 	return pvcList.Items, nil
 }
 
+func (p *pvcBackupItemAction) filterPVCsByVolumePolicy(
+	pvcs []corev1api.PersistentVolumeClaim,
+	backup *velerov1api.Backup,
+) ([]corev1api.PersistentVolumeClaim, error) {
+	var filteredPVCs []corev1api.PersistentVolumeClaim
+
+	for _, pvc := range pvcs {
+		// Convert PVC to unstructured for ShouldPerformSnapshotWithBackup
+		pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to convert PVC %s/%s to unstructured", pvc.Namespace, pvc.Name)
+		}
+		unstructuredPVC := &unstructured.Unstructured{Object: pvcMap}
+
+		// Check if this PVC should be snapshotted according to volume policies
+		shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup(
+			unstructuredPVC,
+			kuberesource.PersistentVolumeClaims,
+			*backup,
+			p.crClient,
+			p.log,
+		)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to check volume policy for PVC %s/%s", pvc.Namespace, pvc.Name)
+		}
+
+		if shouldSnapshot {
+			filteredPVCs = append(filteredPVCs, pvc)
+		}
+	}
+
+	return filteredPVCs, nil
+}
+
 func (p *pvcBackupItemAction) determineCSIDriver(
 	pvcs []corev1api.PersistentVolumeClaim,
 ) (string, error) {
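Independent of the Velero types, the contract the new helper implements is
simple enough to state as a self-contained toy program (illustrative only;
the real predicate is volumehelper.ShouldPerformSnapshotWithBackup, shown
above, and the real warning is logged at the call site):

package main

import "fmt"

// claim is a stand-in for corev1api.PersistentVolumeClaim; illustrative only.
type claim struct{ namespace, name, source string }

// filterByPolicy mirrors the shape of filterPVCsByVolumePolicy above: keep
// the claims the policy allows, and return the rest so the caller can warn.
func filterByPolicy(pvcs []claim, allowed func(claim) bool) (kept, excluded []claim) {
	for _, pvc := range pvcs {
		if allowed(pvc) {
			kept = append(kept, pvc)
		} else {
			excluded = append(excluded, pvc)
		}
	}
	return kept, excluded
}

func main() {
	group := []claim{
		{"ns-1", "pvc-csi", "csi"},
		{"ns-1", "pvc-nfs", "nfs"},
	}
	// Toy policy equivalent to the `nfs: {}` skip policy used in the tests
	// below: NFS-backed claims are excluded from the snapshot group.
	kept, excluded := filterByPolicy(group, func(c claim) bool { return c.source != "nfs" })
	for _, c := range excluded {
		fmt.Printf("warning: PVC %s/%s excluded from VolumeGroupSnapshot by volume policy\n", c.namespace, c.name)
	}
	fmt.Printf("grouping %d of %d labeled PVCs\n", len(kept), len(group))
}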
diff --git a/pkg/backup/actions/csi/pvc_action_test.go b/pkg/backup/actions/csi/pvc_action_test.go
index 795e3c038..8589a740e 100644
--- a/pkg/backup/actions/csi/pvc_action_test.go
+++ b/pkg/backup/actions/csi/pvc_action_test.go
@@ -586,6 +586,280 @@ func TestListGroupedPVCs(t *testing.T) {
 	}
 }
 
+func TestFilterPVCsByVolumePolicy(t *testing.T) {
+	tests := []struct {
+		name            string
+		pvcs            []corev1api.PersistentVolumeClaim
+		pvs             []corev1api.PersistentVolume
+		volumePolicyStr string
+		expectCount     int
+		expectError     bool
+	}{
+		{
+			name: "All PVCs should be included when no volume policy",
+			pvcs: []corev1api.PersistentVolumeClaim{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pvc-1", Namespace: "ns-1"},
+					Spec: corev1api.PersistentVolumeClaimSpec{
+						VolumeName:       "pv-1",
+						StorageClassName: pointer.String("sc-1"),
+					},
+					Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pvc-2", Namespace: "ns-1"},
+					Spec: corev1api.PersistentVolumeClaimSpec{
+						VolumeName:       "pv-2",
+						StorageClassName: pointer.String("sc-1"),
+					},
+					Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+				},
+			},
+			pvs: []corev1api.PersistentVolume{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pv-1"},
+					Spec: corev1api.PersistentVolumeSpec{
+						PersistentVolumeSource: corev1api.PersistentVolumeSource{
+							CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
+						},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pv-2"},
+					Spec: corev1api.PersistentVolumeSpec{
+						PersistentVolumeSource: corev1api.PersistentVolumeSource{
+							CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
+						},
+					},
+				},
+			},
+			expectCount: 2,
+		},
+		{
+			name: "Filter out NFS PVC by volume policy",
+			pvcs: []corev1api.PersistentVolumeClaim{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pvc-csi", Namespace: "ns-1"},
+					Spec: corev1api.PersistentVolumeClaimSpec{
+						VolumeName:       "pv-csi",
+						StorageClassName: pointer.String("sc-1"),
+					},
+					Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs", Namespace: "ns-1"},
+					Spec: corev1api.PersistentVolumeClaimSpec{
+						VolumeName:       "pv-nfs",
+						StorageClassName: pointer.String("sc-nfs"),
+					},
+					Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+				},
+			},
+			pvs: []corev1api.PersistentVolume{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pv-csi"},
+					Spec: corev1api.PersistentVolumeSpec{
+						PersistentVolumeSource: corev1api.PersistentVolumeSource{
+							CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver"},
+						},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
+					Spec: corev1api.PersistentVolumeSpec{
+						PersistentVolumeSource: corev1api.PersistentVolumeSource{
+							NFS: &corev1api.NFSVolumeSource{
+								Server: "nfs-server",
+								Path:   "/export",
+							},
+						},
+					},
+				},
+			},
+			volumePolicyStr: `
+version: v1
+volumePolicies:
+- conditions:
+    nfs: {}
+  action:
+    type: skip
+`,
+			expectCount: 1,
+		},
+		{
+			name: "All PVCs filtered out by volume policy",
+			pvcs: []corev1api.PersistentVolumeClaim{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-1", Namespace: "ns-1"},
+					Spec: corev1api.PersistentVolumeClaimSpec{
+						VolumeName:       "pv-nfs-1",
+						StorageClassName: pointer.String("sc-nfs"),
+					},
+					Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-2", Namespace: "ns-1"},
+					Spec: corev1api.PersistentVolumeClaimSpec{
+						VolumeName:       "pv-nfs-2",
+						StorageClassName: pointer.String("sc-nfs"),
+					},
+					Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+				},
+			},
+			pvs: []corev1api.PersistentVolume{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-1"},
+					Spec: corev1api.PersistentVolumeSpec{
+						PersistentVolumeSource: corev1api.PersistentVolumeSource{
+							NFS: &corev1api.NFSVolumeSource{
+								Server: "nfs-server",
+								Path:   "/export/1",
+							},
+						},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-2"},
+					Spec: corev1api.PersistentVolumeSpec{
+						PersistentVolumeSource: corev1api.PersistentVolumeSource{
+							NFS: &corev1api.NFSVolumeSource{
+								Server: "nfs-server",
+								Path:   "/export/2",
+							},
+						},
+					},
+				},
+			},
+			volumePolicyStr: `
+version: v1
+volumePolicies:
+- conditions:
+    nfs: {}
+  action:
+    type: skip
+`,
+			expectCount: 0,
+		},
+		{
+			name: "Filter out non-CSI PVCs from mixed driver group",
+			pvcs: []corev1api.PersistentVolumeClaim{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "pvc-linstor",
+						Namespace: "ns-1",
+						Labels:    map[string]string{"app.kubernetes.io/instance": "myapp"},
+					},
+					Spec: corev1api.PersistentVolumeClaimSpec{
+						VolumeName:       "pv-linstor",
+						StorageClassName: pointer.String("sc-linstor"),
+					},
+					Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "pvc-nfs",
+						Namespace: "ns-1",
+						Labels:    map[string]string{"app.kubernetes.io/instance": "myapp"},
+					},
+					Spec: corev1api.PersistentVolumeClaimSpec{
+						VolumeName:       "pv-nfs",
+						StorageClassName: pointer.String("sc-nfs"),
+					},
+					Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+				},
+			},
+			pvs: []corev1api.PersistentVolume{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pv-linstor"},
+					Spec: corev1api.PersistentVolumeSpec{
+						PersistentVolumeSource: corev1api.PersistentVolumeSource{
+							CSI: &corev1api.CSIPersistentVolumeSource{Driver: "linstor.csi.linbit.com"},
+						},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
+					Spec: corev1api.PersistentVolumeSpec{
+						PersistentVolumeSource: corev1api.PersistentVolumeSource{
+							NFS: &corev1api.NFSVolumeSource{
+								Server: "nfs-server",
+								Path:   "/export",
+							},
+						},
+					},
+				},
+			},
+			volumePolicyStr: `
+version: v1
+volumePolicies:
+- conditions:
+    nfs: {}
+  action:
+    type: skip
+`,
+			expectCount: 1,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			objs := []runtime.Object{}
+			for i := range tt.pvs {
+				objs = append(objs, &tt.pvs[i])
+			}
+
+			client := velerotest.NewFakeControllerRuntimeClient(t, objs...)
+ + backup := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + Spec: velerov1api.BackupSpec{}, + } + + // Add volume policy ConfigMap if specified + if tt.volumePolicyStr != "" { + cm := &corev1api.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "volume-policy", + Namespace: "velero", + }, + Data: map[string]string{ + "volume-policy": tt.volumePolicyStr, + }, + } + require.NoError(t, client.Create(context.Background(), cm)) + + backup.Spec.ResourcePolicy = &corev1api.TypedLocalObjectReference{ + Kind: "ConfigMap", + Name: "volume-policy", + } + } + + action := &pvcBackupItemAction{ + log: velerotest.NewLogger(), + crClient: client, + } + + result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup) + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Len(t, result, tt.expectCount) + + // For mixed driver scenarios, verify filtered result can determine single CSI driver + if tt.name == "Filter out non-CSI PVCs from mixed driver group" && len(result) > 0 { + driver, err := action.determineCSIDriver(result) + require.NoError(t, err, "After filtering, determineCSIDriver should not fail with multiple drivers error") + require.Equal(t, "linstor.csi.linbit.com", driver, "Should have the Linstor driver after filtering out NFS") + } + } + }) + } +} + func TestDetermineCSIDriver(t *testing.T) { tests := []struct { name string