From 987edf50372b1aaa0ef1349f70dca644051a32a5 Mon Sep 17 00:00:00 2001 From: Shubham Pampattiwar Date: Mon, 15 Dec 2025 13:55:32 -0800 Subject: [PATCH] Add global VolumeHelper caching in CSI PVC BIA plugin Address review feedback by caching a single VolumeHelper instance per backup in the plugin process, instead of creating a new one on each ShouldPerformSnapshot call. Changes: - Add volumeHelper, cachedForBackup, and mu fields to pvcBackupItemAction struct for caching the VolumeHelper per backup - Add getOrCreateVolumeHelper() method for thread-safe lazy initialization - Update Execute() to use cached VolumeHelper via ShouldPerformSnapshotWithVolumeHelper() - Update filterPVCsByVolumePolicy() to accept VolumeHelper parameter - Add ShouldPerformSnapshotWithVolumeHelper() that accepts optional VolumeHelper for reuse across multiple calls - Add NewVolumeHelperForBackup() factory function for BIA plugins - Add comprehensive unit tests for both nil and non-nil VolumeHelper paths This completes the fix for issue #9179 by ensuring the PVC-to-Pod cache is built once per backup and reused across all PVC processing, avoiding O(N*M) complexity. 
Fixes #9179 Signed-off-by: Shubham Pampattiwar --- pkg/backup/actions/csi/pvc_action.go | 61 ++- pkg/backup/actions/csi/pvc_action_test.go | 238 +++++++++- .../volumehelper/volume_policy_helper.go | 121 +++++ .../volumehelper/volume_policy_helper_test.go | 440 ++++++++++++++++++ 4 files changed, 855 insertions(+), 5 deletions(-) create mode 100644 pkg/plugin/utils/volumehelper/volume_policy_helper_test.go diff --git a/pkg/backup/actions/csi/pvc_action.go b/pkg/backup/actions/csi/pvc_action.go index 62c51accc..c5a83c07e 100644 --- a/pkg/backup/actions/csi/pvc_action.go +++ b/pkg/backup/actions/csi/pvc_action.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "strconv" + "sync" "time" "k8s.io/client-go/util/retry" @@ -44,6 +45,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" + internalvolumehelper "github.com/vmware-tanzu/velero/internal/volumehelper" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" veleroclient "github.com/vmware-tanzu/velero/pkg/client" @@ -72,6 +74,16 @@ const ( type pvcBackupItemAction struct { log logrus.FieldLogger crClient crclient.Client + + // volumeHelper caches the VolumeHelper instance for the current backup. + // This avoids the O(N*M) performance issue when there are many PVCs and pods. + // See issue #9179 for details. + volumeHelper internalvolumehelper.VolumeHelper + // cachedForBackup tracks which backup the volumeHelper was built for. + // If the backup UID changes, we need to rebuild the cache. + cachedForBackup types.UID + // mu protects volumeHelper and cachedForBackup for concurrent access. + mu sync.Mutex } // AppliesTo returns information indicating that the PVCBackupItemAction @@ -97,6 +109,31 @@ func (p *pvcBackupItemAction) validateBackup(backup velerov1api.Backup) (valid b return true } +// getOrCreateVolumeHelper returns a cached VolumeHelper for the given backup. 
+// If the backup UID has changed or no VolumeHelper exists, a new one is created. +// This avoids the O(N*M) performance issue when there are many PVCs and pods. +// See issue #9179 for details. +func (p *pvcBackupItemAction) getOrCreateVolumeHelper(backup *velerov1api.Backup) (internalvolumehelper.VolumeHelper, error) { + p.mu.Lock() + defer p.mu.Unlock() + + // Check if we already have a VolumeHelper for this backup + if p.volumeHelper != nil && p.cachedForBackup == backup.UID { + return p.volumeHelper, nil + } + + // Build a new VolumeHelper with cache for this backup + p.log.Infof("Building VolumeHelper with PVC-to-Pod cache for backup %s/%s", backup.Namespace, backup.Name) + vh, err := volumehelper.NewVolumeHelperForBackup(*backup, p.crClient, p.log, nil) + if err != nil { + return nil, errors.Wrap(err, "failed to create VolumeHelper for backup") + } + + p.volumeHelper = vh + p.cachedForBackup = backup.UID + return vh, nil +} + func (p *pvcBackupItemAction) validatePVCandPV( pvc corev1api.PersistentVolumeClaim, item runtime.Unstructured, @@ -248,12 +285,19 @@ func (p *pvcBackupItemAction) Execute( return item, nil, "", nil, nil } - shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup( + // Get or create the cached VolumeHelper for this backup + vh, err := p.getOrCreateVolumeHelper(backup) + if err != nil { + return nil, nil, "", nil, err + } + + shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper( item, kuberesource.PersistentVolumeClaims, *backup, p.crClient, p.log, + vh, ) if err != nil { return nil, nil, "", nil, err @@ -621,8 +665,14 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference( return nil, errors.Wrapf(err, "failed to list PVCs in VolumeGroupSnapshot group %q in namespace %q", group, pvc.Namespace) } + // Get the cached VolumeHelper for filtering PVCs by volume policy + vh, err := p.getOrCreateVolumeHelper(backup) + if err != nil { + return nil, errors.Wrapf(err, "failed to get VolumeHelper for 
filtering PVCs in group %q", group) + } + // Filter PVCs by volume policy - filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup) + filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup, vh) if err != nil { return nil, errors.Wrapf(err, "failed to filter PVCs by volume policy for VolumeGroupSnapshot group %q", group) } @@ -759,11 +809,12 @@ func (p *pvcBackupItemAction) listGroupedPVCs(ctx context.Context, namespace, la func (p *pvcBackupItemAction) filterPVCsByVolumePolicy( pvcs []corev1api.PersistentVolumeClaim, backup *velerov1api.Backup, + vh internalvolumehelper.VolumeHelper, ) ([]corev1api.PersistentVolumeClaim, error) { var filteredPVCs []corev1api.PersistentVolumeClaim for _, pvc := range pvcs { - // Convert PVC to unstructured for ShouldPerformSnapshotWithBackup + // Convert PVC to unstructured for ShouldPerformSnapshotWithVolumeHelper pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc) if err != nil { return nil, errors.Wrapf(err, "failed to convert PVC %s/%s to unstructured", pvc.Namespace, pvc.Name) @@ -771,12 +822,14 @@ func (p *pvcBackupItemAction) filterPVCsByVolumePolicy( unstructuredPVC := &unstructured.Unstructured{Object: pvcMap} // Check if this PVC should be snapshotted according to volume policies - shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup( + // Uses the cached VolumeHelper for better performance with many PVCs/pods + shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper( unstructuredPVC, kuberesource.PersistentVolumeClaims, *backup, p.crClient, p.log, + vh, ) if err != nil { return nil, errors.Wrapf(err, "failed to check volume policy for PVC %s/%s", pvc.Namespace, pvc.Name) diff --git a/pkg/backup/actions/csi/pvc_action_test.go b/pkg/backup/actions/csi/pvc_action_test.go index 6280b3ebd..eeb6d16e2 100644 --- a/pkg/backup/actions/csi/pvc_action_test.go +++ b/pkg/backup/actions/csi/pvc_action_test.go @@ -842,7 +842,9 @@ volumePolicies: crClient: 
client, } - result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup) + // Pass nil for VolumeHelper in tests - it will fall back to creating a new one per call + // This is the expected behavior for testing and third-party plugins + result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup, nil) if tt.expectError { require.Error(t, err) } else { @@ -860,6 +862,111 @@ volumePolicies: } } +// TestFilterPVCsByVolumePolicyWithVolumeHelper tests filterPVCsByVolumePolicy when a +// pre-created VolumeHelper is passed (non-nil). This exercises the cached path used +// by the CSI PVC BIA plugin for better performance. +func TestFilterPVCsByVolumePolicyWithVolumeHelper(t *testing.T) { + // Create test PVCs and PVs + pvcs := []corev1api.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pvc-csi", Namespace: "ns-1"}, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "pv-csi", + StorageClassName: pointer.String("sc-csi"), + }, + Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs", Namespace: "ns-1"}, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "pv-nfs", + StorageClassName: pointer.String("sc-nfs"), + }, + Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound}, + }, + } + + pvs := []corev1api.PersistentVolume{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pv-csi"}, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeSource: corev1api.PersistentVolumeSource{ + CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver"}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"}, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeSource: corev1api.PersistentVolumeSource{ + NFS: &corev1api.NFSVolumeSource{ + Server: "nfs-server", + Path: "/export", + }, + }, + }, + }, + } + + // Create fake client with PVs + objs := []runtime.Object{} + for i := range pvs { + objs = append(objs, &pvs[i]) + } + client := 
velerotest.NewFakeControllerRuntimeClient(t, objs...) + + // Create backup with volume policy that skips NFS volumes + volumePolicyStr := ` +version: v1 +volumePolicies: +- conditions: + nfs: {} + action: + type: skip +` + cm := &corev1api.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "volume-policy", + Namespace: "velero", + }, + Data: map[string]string{ + "volume-policy": volumePolicyStr, + }, + } + require.NoError(t, client.Create(t.Context(), cm)) + + backup := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + Spec: velerov1api.BackupSpec{ + ResourcePolicy: &corev1api.TypedLocalObjectReference{ + Kind: "ConfigMap", + Name: "volume-policy", + }, + }, + } + + action := &pvcBackupItemAction{ + log: velerotest.NewLogger(), + crClient: client, + } + + // Create a VolumeHelper using the same method the plugin would use + vh, err := action.getOrCreateVolumeHelper(backup) + require.NoError(t, err) + require.NotNil(t, vh) + + // Test with the pre-created VolumeHelper (non-nil path) + result, err := action.filterPVCsByVolumePolicy(pvcs, backup, vh) + require.NoError(t, err) + + // Should filter out the NFS PVC, leaving only the CSI PVC + require.Len(t, result, 1) + require.Equal(t, "pvc-csi", result[0].Name) +} + func TestDetermineCSIDriver(t *testing.T) { tests := []struct { name string @@ -1959,3 +2066,132 @@ func TestPVCRequestSize(t *testing.T) { }) } } + +// TestGetOrCreateVolumeHelper tests the VolumeHelper caching behavior +func TestGetOrCreateVolumeHelper(t *testing.T) { + tests := []struct { + name string + setup func() (*pvcBackupItemAction, *velerov1api.Backup, *velerov1api.Backup) + wantSameCache bool + }{ + { + name: "Returns same VolumeHelper for same backup UID", + setup: func() (*pvcBackupItemAction, *velerov1api.Backup, *velerov1api.Backup) { + client := velerotest.NewFakeControllerRuntimeClient(t) + action := &pvcBackupItemAction{ + log: velerotest.NewLogger(), + crClient: client, + } + 
backup := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + UID: types.UID("test-uid-1"), + }, + } + return action, backup, backup // Same backup instance + }, + wantSameCache: true, + }, + { + name: "Returns new VolumeHelper for different backup UID", + setup: func() (*pvcBackupItemAction, *velerov1api.Backup, *velerov1api.Backup) { + client := velerotest.NewFakeControllerRuntimeClient(t) + action := &pvcBackupItemAction{ + log: velerotest.NewLogger(), + crClient: client, + } + backup1 := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup-1", + Namespace: "velero", + UID: types.UID("test-uid-1"), + }, + } + backup2 := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup-2", + Namespace: "velero", + UID: types.UID("test-uid-2"), + }, + } + return action, backup1, backup2 // Different backup instances + }, + wantSameCache: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + action, backup1, backup2 := tt.setup() + + // Get VolumeHelper for first backup + vh1, err := action.getOrCreateVolumeHelper(backup1) + require.NoError(t, err) + require.NotNil(t, vh1) + + // Get VolumeHelper for second backup + vh2, err := action.getOrCreateVolumeHelper(backup2) + require.NoError(t, err) + require.NotNil(t, vh2) + + if tt.wantSameCache { + // Same backup UID should return same VolumeHelper pointer + require.Same(t, vh1, vh2, "Expected same VolumeHelper instance for same backup UID") + } else { + // Different backup UID should return different VolumeHelper pointer + require.NotSame(t, vh1, vh2, "Expected different VolumeHelper instance for different backup UID") + } + }) + } +} + +// TestGetOrCreateVolumeHelperConcurrency tests thread-safety of VolumeHelper caching +func TestGetOrCreateVolumeHelperConcurrency(t *testing.T) { + client := velerotest.NewFakeControllerRuntimeClient(t) + action := &pvcBackupItemAction{ + log: velerotest.NewLogger(), + 
crClient: client, + } + backup := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + UID: types.UID("test-uid"), + }, + } + + // Run multiple goroutines concurrently to get VolumeHelper + const numGoroutines = 10 + results := make(chan interface{}, numGoroutines) + errors := make(chan error, numGoroutines) + + for i := 0; i < numGoroutines; i++ { + go func() { + vh, err := action.getOrCreateVolumeHelper(backup) + if err != nil { + errors <- err + return + } + results <- vh + }() + } + + // Collect all results + var volumeHelpers []interface{} + for i := 0; i < numGoroutines; i++ { + select { + case vh := <-results: + volumeHelpers = append(volumeHelpers, vh) + case err := <-errors: + t.Fatalf("Unexpected error: %v", err) + } + } + + // All goroutines should get the same VolumeHelper instance + require.Len(t, volumeHelpers, numGoroutines) + firstVH := volumeHelpers[0] + for i := 1; i < len(volumeHelpers); i++ { + require.Same(t, firstVH, volumeHelpers[i], "All goroutines should get the same VolumeHelper instance") + } +} diff --git a/pkg/plugin/utils/volumehelper/volume_policy_helper.go b/pkg/plugin/utils/volumehelper/volume_policy_helper.go index 0a7960faa..715db15d9 100644 --- a/pkg/plugin/utils/volumehelper/volume_policy_helper.go +++ b/pkg/plugin/utils/volumehelper/volume_policy_helper.go @@ -17,7 +17,10 @@ limitations under the License. 
package volumehelper import ( + "context" + "github.com/sirupsen/logrus" + corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" crclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -40,6 +43,35 @@ func ShouldPerformSnapshotWithBackup( crClient crclient.Client, logger logrus.FieldLogger, ) (bool, error) { + return ShouldPerformSnapshotWithVolumeHelper( + unstructured, + groupResource, + backup, + crClient, + logger, + nil, // no cached VolumeHelper, will create one + ) +} + +// ShouldPerformSnapshotWithVolumeHelper is like ShouldPerformSnapshotWithBackup +// but accepts an optional VolumeHelper. If vh is non-nil, it will be used directly, +// avoiding the overhead of creating a new VolumeHelper on each call. +// This is useful for BIA plugins that process multiple PVCs during a single backup +// and want to reuse the same VolumeHelper (with its internal cache) across calls. +func ShouldPerformSnapshotWithVolumeHelper( + unstructured runtime.Unstructured, + groupResource schema.GroupResource, + backup velerov1api.Backup, + crClient crclient.Client, + logger logrus.FieldLogger, + vh volumehelper.VolumeHelper, +) (bool, error) { + // If a VolumeHelper is provided, use it directly + if vh != nil { + return vh.ShouldPerformSnapshot(unstructured, groupResource) + } + + // Otherwise, create a new VolumeHelper (original behavior for third-party plugins) resourcePolicies, err := resourcepolicies.GetResourcePoliciesFromBackup( backup, crClient, @@ -60,3 +92,92 @@ func ShouldPerformSnapshotWithBackup( return volumeHelperImpl.ShouldPerformSnapshot(unstructured, groupResource) } + +// NewVolumeHelperForBackup creates a VolumeHelper for the given backup with a PVC-to-Pod cache. +// The cache is built for the provided namespaces list to avoid O(N*M) complexity when there +// are many PVCs and pods. See issue #9179 for details. 
+// +// This function is intended for BIA plugins to create a VolumeHelper once and reuse it +// across multiple Execute() calls for the same backup. +// +// If namespaces is nil or empty, the function will resolve the namespace list from the backup spec. +// If backup.Spec.IncludedNamespaces is empty (meaning all namespaces), it will list all namespaces +// from the cluster. +func NewVolumeHelperForBackup( + backup velerov1api.Backup, + crClient crclient.Client, + logger logrus.FieldLogger, + namespaces []string, +) (volumehelper.VolumeHelper, error) { + // If no namespaces provided, resolve from backup spec + if len(namespaces) == 0 { + var err error + namespaces, err = resolveNamespacesForBackup(backup, crClient) + if err != nil { + logger.WithError(err).Warn("Failed to resolve namespaces for cache, proceeding without cache") + namespaces = nil + } + } + + resourcePolicies, err := resourcepolicies.GetResourcePoliciesFromBackup( + backup, + crClient, + logger, + ) + if err != nil { + return nil, err + } + + return volumehelper.NewVolumeHelperImplWithNamespaces( + resourcePolicies, + backup.Spec.SnapshotVolumes, + logger, + crClient, + boolptr.IsSetToTrue(backup.Spec.DefaultVolumesToFsBackup), + true, + namespaces, + ) +} + +// resolveNamespacesForBackup determines which namespaces will be backed up. +// If IncludedNamespaces is specified, it returns those (excluding any in ExcludedNamespaces). +// If IncludedNamespaces is empty (meaning all namespaces), it lists all namespaces from the cluster. 
+func resolveNamespacesForBackup(backup velerov1api.Backup, crClient crclient.Client) ([]string, error) { + // If specific namespaces are included, use those + if len(backup.Spec.IncludedNamespaces) > 0 { + // Filter out excluded namespaces + excludeSet := make(map[string]bool) + for _, ns := range backup.Spec.ExcludedNamespaces { + excludeSet[ns] = true + } + + var namespaces []string + for _, ns := range backup.Spec.IncludedNamespaces { + if !excludeSet[ns] { + namespaces = append(namespaces, ns) + } + } + return namespaces, nil + } + + // IncludedNamespaces is empty, meaning all namespaces - list from cluster + nsList := &corev1api.NamespaceList{} + if err := crClient.List(context.Background(), nsList); err != nil { + return nil, err + } + + // Filter out excluded namespaces + excludeSet := make(map[string]bool) + for _, ns := range backup.Spec.ExcludedNamespaces { + excludeSet[ns] = true + } + + var namespaces []string + for _, ns := range nsList.Items { + if !excludeSet[ns.Name] { + namespaces = append(namespaces, ns.Name) + } + } + + return namespaces, nil +} diff --git a/pkg/plugin/utils/volumehelper/volume_policy_helper_test.go b/pkg/plugin/utils/volumehelper/volume_policy_helper_test.go new file mode 100644 index 000000000..b4d835a76 --- /dev/null +++ b/pkg/plugin/utils/volumehelper/volume_policy_helper_test.go @@ -0,0 +1,440 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package volumehelper + +import ( + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/kuberesource" + velerotest "github.com/vmware-tanzu/velero/pkg/test" +) + +func TestShouldPerformSnapshotWithBackup(t *testing.T) { + tests := []struct { + name string + pvc *corev1api.PersistentVolumeClaim + pv *corev1api.PersistentVolume + backup *velerov1api.Backup + wantSnapshot bool + wantError bool + }{ + { + name: "Returns true when snapshotVolumes not set", + pvc: &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc", + Namespace: "default", + }, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "test-pv", + }, + Status: corev1api.PersistentVolumeClaimStatus{ + Phase: corev1api.ClaimBound, + }, + }, + pv: &corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv", + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeSource: corev1api.PersistentVolumeSource{ + CSI: &corev1api.CSIPersistentVolumeSource{ + Driver: "test-driver", + }, + }, + ClaimRef: &corev1api.ObjectReference{ + Namespace: "default", + Name: "test-pvc", + }, + }, + }, + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + }, + wantSnapshot: true, + wantError: false, + }, + { + name: "Returns false when snapshotVolumes is false", + pvc: &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc", + Namespace: "default", + }, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "test-pv", + }, + Status: corev1api.PersistentVolumeClaimStatus{ + Phase: corev1api.ClaimBound, + }, + }, + pv: &corev1api.PersistentVolume{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "test-pv", + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeSource: corev1api.PersistentVolumeSource{ + CSI: &corev1api.CSIPersistentVolumeSource{ + Driver: "test-driver", + }, + }, + ClaimRef: &corev1api.ObjectReference{ + Namespace: "default", + Name: "test-pvc", + }, + }, + }, + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + Spec: velerov1api.BackupSpec{ + SnapshotVolumes: boolPtr(false), + }, + }, + wantSnapshot: false, + wantError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake client with PV and PVC + client := velerotest.NewFakeControllerRuntimeClient(t, tt.pv, tt.pvc) + + // Convert PVC to unstructured + pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.pvc) + require.NoError(t, err) + unstructuredPVC := &unstructured.Unstructured{Object: pvcMap} + + logger := logrus.New() + + // Call the function under test - this is the wrapper for third-party plugins + result, err := ShouldPerformSnapshotWithBackup( + unstructuredPVC, + kuberesource.PersistentVolumeClaims, + *tt.backup, + client, + logger, + ) + + if tt.wantError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.wantSnapshot, result) + } + }) + } +} + +func boolPtr(b bool) *bool { + return &b +} + +func TestShouldPerformSnapshotWithVolumeHelper(t *testing.T) { + tests := []struct { + name string + pvc *corev1api.PersistentVolumeClaim + pv *corev1api.PersistentVolume + backup *velerov1api.Backup + wantSnapshot bool + wantError bool + }{ + { + name: "Returns true with nil VolumeHelper when snapshotVolumes not set", + pvc: &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc", + Namespace: "default", + }, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "test-pv", + }, + Status: corev1api.PersistentVolumeClaimStatus{ + Phase: corev1api.ClaimBound, + 
}, + }, + pv: &corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv", + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeSource: corev1api.PersistentVolumeSource{ + CSI: &corev1api.CSIPersistentVolumeSource{ + Driver: "test-driver", + }, + }, + ClaimRef: &corev1api.ObjectReference{ + Namespace: "default", + Name: "test-pvc", + }, + }, + }, + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + }, + wantSnapshot: true, + wantError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake client with PV + client := velerotest.NewFakeControllerRuntimeClient(t, tt.pv, tt.pvc) + + // Convert PVC to unstructured + pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.pvc) + require.NoError(t, err) + unstructuredPVC := &unstructured.Unstructured{Object: pvcMap} + + logger := logrus.New() + + // Call the function under test with nil VolumeHelper + // This exercises the fallback path that creates a new VolumeHelper per call + result, err := ShouldPerformSnapshotWithVolumeHelper( + unstructuredPVC, + kuberesource.PersistentVolumeClaims, + *tt.backup, + client, + logger, + nil, // Pass nil for VolumeHelper - exercises fallback path + ) + + if tt.wantError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.wantSnapshot, result) + } + }) + } +} + +// TestShouldPerformSnapshotWithNonNilVolumeHelper tests the ShouldPerformSnapshotWithVolumeHelper +// function when a pre-created VolumeHelper is passed. This exercises the cached path used +// by BIA plugins for better performance. 
+func TestShouldPerformSnapshotWithNonNilVolumeHelper(t *testing.T) { + pvc := &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc", + Namespace: "default", + }, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "test-pv", + }, + Status: corev1api.PersistentVolumeClaimStatus{ + Phase: corev1api.ClaimBound, + }, + } + + pv := &corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv", + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeSource: corev1api.PersistentVolumeSource{ + CSI: &corev1api.CSIPersistentVolumeSource{ + Driver: "test-driver", + }, + }, + ClaimRef: &corev1api.ObjectReference{ + Namespace: "default", + Name: "test-pvc", + }, + }, + } + + backup := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + Spec: velerov1api.BackupSpec{ + IncludedNamespaces: []string{"default"}, + }, + } + + // Create fake client with PV and PVC + client := velerotest.NewFakeControllerRuntimeClient(t, pv, pvc) + + logger := logrus.New() + + // Create VolumeHelper using the factory function + vh, err := NewVolumeHelperForBackup(*backup, client, logger, []string{"default"}) + require.NoError(t, err) + require.NotNil(t, vh) + + // Convert PVC to unstructured + pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pvc) + require.NoError(t, err) + unstructuredPVC := &unstructured.Unstructured{Object: pvcMap} + + // Call with non-nil VolumeHelper - exercises the cached path + result, err := ShouldPerformSnapshotWithVolumeHelper( + unstructuredPVC, + kuberesource.PersistentVolumeClaims, + *backup, + client, + logger, + vh, // Pass non-nil VolumeHelper - exercises cached path + ) + + require.NoError(t, err) + require.True(t, result, "Should return true for snapshot when snapshotVolumes not set") +} + +func TestNewVolumeHelperForBackup(t *testing.T) { + tests := []struct { + name string + backup *velerov1api.Backup + namespaces []string + 
wantError bool + }{ + { + name: "Creates VolumeHelper with explicit namespaces", + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + Spec: velerov1api.BackupSpec{ + IncludedNamespaces: []string{"ns1", "ns2"}, + }, + }, + namespaces: []string{"ns1", "ns2"}, + wantError: false, + }, + { + name: "Creates VolumeHelper with namespaces from backup spec", + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + Spec: velerov1api.BackupSpec{ + IncludedNamespaces: []string{"ns1", "ns2"}, + }, + }, + namespaces: nil, // Will be resolved from backup spec + wantError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := velerotest.NewFakeControllerRuntimeClient(t) + logger := logrus.New() + + vh, err := NewVolumeHelperForBackup(*tt.backup, client, logger, tt.namespaces) + + if tt.wantError { + require.Error(t, err) + require.Nil(t, vh) + } else { + require.NoError(t, err) + require.NotNil(t, vh) + } + }) + } +} + +func TestResolveNamespacesForBackup(t *testing.T) { + tests := []struct { + name string + backup *velerov1api.Backup + existingNS []string + expectedResult []string + }{ + { + name: "Returns included namespaces", + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + Spec: velerov1api.BackupSpec{ + IncludedNamespaces: []string{"ns1", "ns2", "ns3"}, + }, + }, + existingNS: []string{"ns1", "ns2", "ns3", "ns4"}, + expectedResult: []string{"ns1", "ns2", "ns3"}, + }, + { + name: "Excludes specified namespaces from included list", + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + Spec: velerov1api.BackupSpec{ + IncludedNamespaces: []string{"ns1", "ns2", "ns3"}, + ExcludedNamespaces: []string{"ns2"}, + }, + }, + existingNS: []string{"ns1", "ns2", "ns3", "ns4"}, + expectedResult: 
[]string{"ns1", "ns3"}, + }, + { + name: "Returns all namespaces when IncludedNamespaces is empty", + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "velero", + }, + Spec: velerov1api.BackupSpec{}, + }, + existingNS: []string{"default", "kube-system", "app-ns"}, + expectedResult: []string{"default", "kube-system", "app-ns"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake client with namespaces + var objects []runtime.Object + for _, ns := range tt.existingNS { + objects = append(objects, &corev1api.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns, + }, + }) + } + client := velerotest.NewFakeControllerRuntimeClient(t, objects...) + + result, err := resolveNamespacesForBackup(*tt.backup, client) + require.NoError(t, err) + require.ElementsMatch(t, tt.expectedResult, result) + }) + } +}