Extend Volume Policies feature to support more actions

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

fix volume policy action execution

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

remove unused files

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

add changelog file

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

fix CI linter errors

fix linter errors

address pr review comments

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

fix via make update cmd

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

address PR feedback and add tests

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

fix codespell

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

fix ci linter checks

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

remove volsToExclude processing from volume policy logic and add tests

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

fix ci linter issue

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
This commit is contained in:
Shubham Pampattiwar
2024-04-09 13:46:10 -07:00
parent 22b94654a4
commit 8d2bef2486
14 changed files with 1348 additions and 63 deletions

View File

@@ -0,0 +1 @@
Implementation for Extending VolumePolicies to support more actions

2
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/vmware-tanzu/velero
go 1.22
go 1.22.0
require (
cloud.google.com/go/storage v1.40.0

View File

@@ -27,8 +27,13 @@ type VolumeActionType string
const (
// currently only support configmap type of resource config
ConfigmapRefType string = "configmap"
Skip VolumeActionType = "skip"
ConfigmapRefType string = "configmap"
// skip action implies the volume would be skipped from the backup operation
Skip VolumeActionType = "skip"
// fs-backup action implies that the volume would be backed up via file system copy method using the uploader(kopia/restic) configured by the user
FSBackup VolumeActionType = "fs-backup"
// snapshot action can have 3 different meaning based on velero configuration and backup spec - cloud provider based snapshots, local csi snapshots and datamover snapshots
Snapshot VolumeActionType = "snapshot"
)
// Action defined as one action for a specific way of backup
@@ -40,16 +45,16 @@ type Action struct {
}
// volumePolicy defined policy to conditions to match Volumes and related action to handle matched Volumes
type volumePolicy struct {
type VolumePolicy struct {
// Conditions defined list of conditions to match Volumes
Conditions map[string]interface{} `yaml:"conditions"`
Action Action `yaml:"action"`
}
// resourcePolicies currently defined slice of volume policies to handle backup
type resourcePolicies struct {
type ResourcePolicies struct {
Version string `yaml:"version"`
VolumePolicies []volumePolicy `yaml:"volumePolicies"`
VolumePolicies []VolumePolicy `yaml:"volumePolicies"`
// we may support other resource policies in the future, and they could be added separately
// OtherResourcePolicies []OtherResourcePolicy
}
@@ -60,8 +65,8 @@ type Policies struct {
// OtherPolicies
}
func unmarshalResourcePolicies(yamlData *string) (*resourcePolicies, error) {
resPolicies := &resourcePolicies{}
func unmarshalResourcePolicies(yamlData *string) (*ResourcePolicies, error) {
resPolicies := &ResourcePolicies{}
err := decodeStruct(strings.NewReader(*yamlData), resPolicies)
if err != nil {
return nil, fmt.Errorf("failed to decode yaml data into resource policies %v", err)
@@ -69,7 +74,7 @@ func unmarshalResourcePolicies(yamlData *string) (*resourcePolicies, error) {
return resPolicies, nil
}
func (p *Policies) buildPolicy(resPolicies *resourcePolicies) error {
func (p *Policies) BuildPolicy(resPolicies *ResourcePolicies) error {
for _, vp := range resPolicies.VolumePolicies {
con, err := unmarshalVolConditions(vp.Conditions)
if err != nil {
@@ -162,7 +167,7 @@ func GetResourcePoliciesFromConfig(cm *v1.ConfigMap) (*Policies, error) {
}
policies := &Policies{}
if err := policies.buildPolicy(resPolicies); err != nil {
if err := policies.BuildPolicy(resPolicies); err != nil {
return nil, errors.WithStack(err)
}

View File

@@ -121,9 +121,9 @@ func TestLoadResourcePolicies(t *testing.T) {
}
func TestGetResourceMatchedAction(t *testing.T) {
resPolicies := &resourcePolicies{
resPolicies := &ResourcePolicies{
Version: "v1",
VolumePolicies: []volumePolicy{
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "skip"},
Conditions: map[string]interface{}{
@@ -136,7 +136,7 @@ func TestGetResourceMatchedAction(t *testing.T) {
},
},
{
Action: Action{Type: "volume-snapshot"},
Action: Action{Type: "snapshot"},
Conditions: map[string]interface{}{
"capacity": "10,100Gi",
"storageClass": []string{"gp2", "ebs-sc"},
@@ -147,7 +147,7 @@ func TestGetResourceMatchedAction(t *testing.T) {
},
},
{
Action: Action{Type: "file-system-backup"},
Action: Action{Type: "fs-backup"},
Conditions: map[string]interface{}{
"storageClass": []string{"gp2", "ebs-sc"},
"csi": interface{}(
@@ -179,7 +179,7 @@ func TestGetResourceMatchedAction(t *testing.T) {
storageClass: "ebs-sc",
csi: &csiVolumeSource{Driver: "aws.efs.csi.driver"},
},
expectedAction: &Action{Type: "volume-snapshot"},
expectedAction: &Action{Type: "snapshot"},
},
{
name: "dismatch all policies",
@@ -195,7 +195,7 @@ func TestGetResourceMatchedAction(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
policies := &Policies{}
err := policies.buildPolicy(resPolicies)
err := policies.BuildPolicy(resPolicies)
if err != nil {
t.Errorf("Failed to build policy with error %v", err)
}
@@ -237,9 +237,9 @@ func TestGetResourcePoliciesFromConfig(t *testing.T) {
// Check that the returned resourcePolicies object contains the expected data
assert.Equal(t, "v1", resPolicies.version)
assert.Len(t, resPolicies.volumePolicies, 1)
policies := resourcePolicies{
policies := ResourcePolicies{
Version: "v1",
VolumePolicies: []volumePolicy{
VolumePolicies: []VolumePolicy{
{
Conditions: map[string]interface{}{
"capacity": "0,10Gi",
@@ -251,7 +251,7 @@ func TestGetResourcePoliciesFromConfig(t *testing.T) {
},
}
p := &Policies{}
err = p.buildPolicy(&policies)
err = p.BuildPolicy(&policies)
if err != nil {
t.Fatalf("failed to build policy with error %v", err)
}
@@ -424,7 +424,7 @@ volumePolicies:
}
assert.Nil(t, err)
policies := &Policies{}
err = policies.buildPolicy(resPolicies)
err = policies.BuildPolicy(resPolicies)
assert.Nil(t, err)
action, err := policies.GetMatchAction(tc.vol)
assert.Nil(t, err)

View File

@@ -82,7 +82,11 @@ func decodeStruct(r io.Reader, s interface{}) error {
// validate check action format
func (a *Action) validate() error {
// validate Type
if a.Type != Skip {
valid := false
if a.Type == Skip || a.Type == Snapshot || a.Type == FSBackup {
valid = true
}
if !valid {
return fmt.Errorf("invalid action type %s", a.Type)
}

View File

@@ -84,14 +84,14 @@ func TestCapacityConditionValidate(t *testing.T) {
func TestValidate(t *testing.T) {
testCases := []struct {
name string
res *resourcePolicies
res *ResourcePolicies
wantErr bool
}{
{
name: "unknown key in yaml",
res: &resourcePolicies{
res: &ResourcePolicies{
Version: "v1",
VolumePolicies: []volumePolicy{
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "skip"},
Conditions: map[string]interface{}{
@@ -110,9 +110,9 @@ func TestValidate(t *testing.T) {
},
{
name: "error format of capacity",
res: &resourcePolicies{
res: &ResourcePolicies{
Version: "v1",
VolumePolicies: []volumePolicy{
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "skip"},
Conditions: map[string]interface{}{
@@ -130,9 +130,9 @@ func TestValidate(t *testing.T) {
},
{
name: "error format of storageClass",
res: &resourcePolicies{
res: &ResourcePolicies{
Version: "v1",
VolumePolicies: []volumePolicy{
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "skip"},
Conditions: map[string]interface{}{
@@ -150,9 +150,9 @@ func TestValidate(t *testing.T) {
},
{
name: "error format of csi",
res: &resourcePolicies{
res: &ResourcePolicies{
Version: "v1",
VolumePolicies: []volumePolicy{
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "skip"},
Conditions: map[string]interface{}{
@@ -167,9 +167,9 @@ func TestValidate(t *testing.T) {
},
{
name: "unsupported version",
res: &resourcePolicies{
res: &ResourcePolicies{
Version: "v2",
VolumePolicies: []volumePolicy{
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "skip"},
Conditions: map[string]interface{}{
@@ -186,9 +186,9 @@ func TestValidate(t *testing.T) {
},
{
name: "unsupported action",
res: &resourcePolicies{
res: &ResourcePolicies{
Version: "v1",
VolumePolicies: []volumePolicy{
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "unsupported"},
Conditions: map[string]interface{}{
@@ -205,9 +205,9 @@ func TestValidate(t *testing.T) {
},
{
name: "error format of nfs",
res: &resourcePolicies{
res: &ResourcePolicies{
Version: "v1",
VolumePolicies: []volumePolicy{
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "skip"},
Conditions: map[string]interface{}{
@@ -221,10 +221,10 @@ func TestValidate(t *testing.T) {
wantErr: true,
},
{
name: "supported formart volume policies",
res: &resourcePolicies{
name: "supported format volume policies",
res: &ResourcePolicies{
Version: "v1",
VolumePolicies: []volumePolicy{
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "skip"},
Conditions: map[string]interface{}{
@@ -245,11 +245,86 @@ func TestValidate(t *testing.T) {
},
wantErr: false,
},
{
name: "supported format volume policies, action type snapshot",
res: &ResourcePolicies{
Version: "v1",
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "snapshot"},
Conditions: map[string]interface{}{
"capacity": "0,10Gi",
"storageClass": []string{"gp2", "ebs-sc"},
"csi": interface{}(
map[string]interface{}{
"driver": "aws.efs.csi.driver",
}),
"nfs": interface{}(
map[string]interface{}{
"server": "192.168.20.90",
"path": "/mnt/data/",
}),
},
},
},
},
wantErr: false,
},
{
name: "supported format volume policies, action type fs-backup",
res: &ResourcePolicies{
Version: "v1",
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: "fs-backup"},
Conditions: map[string]interface{}{
"capacity": "0,10Gi",
"storageClass": []string{"gp2", "ebs-sc"},
"csi": interface{}(
map[string]interface{}{
"driver": "aws.efs.csi.driver",
}),
"nfs": interface{}(
map[string]interface{}{
"server": "192.168.20.90",
"path": "/mnt/data/",
}),
},
},
},
},
wantErr: false,
},
{
name: "supported format volume policies, action type fs-backup and snapshot",
res: &ResourcePolicies{
Version: "v1",
VolumePolicies: []VolumePolicy{
{
Action: Action{Type: Snapshot},
Conditions: map[string]interface{}{
"storageClass": []string{"gp2"},
},
},
{
Action: Action{Type: FSBackup},
Conditions: map[string]interface{}{
"nfs": interface{}(
map[string]interface{}{
"server": "192.168.20.90",
"path": "/mnt/data/",
}),
},
},
},
},
wantErr: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
policies := &Policies{}
err1 := policies.buildPolicy(tc.res)
err1 := policies.BuildPolicy(tc.res)
err2 := policies.Validate()
if tc.wantErr {

View File

@@ -0,0 +1,197 @@
package volumehelper
import (
"fmt"
"strings"
"github.com/vmware-tanzu/velero/internal/resourcepolicies"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/sirupsen/logrus"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/vmware-tanzu/velero/pkg/kuberesource"
pdvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
)
type VolumeHelper interface {
GetVolumesForFSBackup(pod *corev1api.Pod, defaultVolumesToFsBackup, backupExcludePVC bool, kbclient kbclient.Client) ([]string, []string, error)
ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource, kbclient kbclient.Client) (bool, error)
}
type VolumeHelperImpl struct {
VolumePolicy *resourcepolicies.Policies
SnapshotVolumes *bool
Logger logrus.FieldLogger
}
// ShouldPerformSnapshot reports whether the given PV or PVC object should be backed up
// via a snapshot: the object must match a volume policy whose action is "snapshot" AND
// the backup's SnapshotVolumes flag must be set to true. With no volume policy configured
// it always returns false.
func (v *VolumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource, kbclient kbclient.Client) (bool, error) {
	// Resolve the PV to evaluate: a PVC is translated to its bound PV, a PV is decoded directly.
	pvc := new(corev1api.PersistentVolumeClaim)
	pv := new(corev1api.PersistentVolume)
	var err error
	switch groupResource {
	case kuberesource.PersistentVolumeClaims:
		if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pvc); err != nil {
			return false, err
		}
		if pv, err = kubeutil.GetPVForPVC(pvc, kbclient); err != nil {
			return false, err
		}
	case kuberesource.PersistentVolumes:
		if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pv); err != nil {
			return false, err
		}
	}
	// Without a volume policy there is nothing to match against.
	if v.VolumePolicy == nil {
		return false, nil
	}
	action, err := v.VolumePolicy.GetMatchAction(pv)
	if err != nil {
		return false, err
	}
	// The snapshot action only applies when the backup CR also has SnapshotVolumes enabled.
	if action != nil && action.Type == resourcepolicies.Snapshot && boolptr.IsSetToTrue(v.SnapshotVolumes) {
		v.Logger.Infof(fmt.Sprintf("performing snapshot action for pv %s as it satisfies the volume policy criteria and snapshotVolumes is set to true", pv.Name))
		return true, nil
	}
	v.Logger.Infof(fmt.Sprintf("skipping snapshot action for pv %s possibly due to not satisfying the volume policy criteria or snapshotVolumes is not true", pv.Name))
	return false, nil
}
// GetVolumesForFSBackup returns the pod volumes to back up via file-system backup and the
// volumes to exclude from it. When a volume policy is configured, volumes are segregated by
// the policy's fs-backup action; volumes with no matching action fall back to the legacy
// annotation-based approach. Without a volume policy, only the legacy approach is used.
func (v *VolumeHelperImpl) GetVolumesForFSBackup(pod *corev1api.Pod, defaultVolumesToFsBackup, backupExcludePVC bool, kbclient kbclient.Client) ([]string, []string, error) {
	// Check if there is a fs-backup/snapshot volume policy specified by the user, if yes then use the volume policy approach to
	// get the list volumes for fs-backup else go via the legacy annotation based approach
	var fsBackupVolumePolicyVols, nonFsBackupVolumePolicyVols, volsToProcessByLegacyApproach = make([]string, 0), make([]string, 0), make([]string, 0)
	var err error
	if v.VolumePolicy != nil {
		// Get the list of volumes to back up using pod volume backup for the given pod matching fs-backup volume policy action
		v.Logger.Infof("Volume Policy specified by the user, using volume policy approach to segregate pod volumes for fs-backup")
		// GetVolumesMatchingFSBackupAction return 3 list of Volumes:
		// fsBackupVolumePolicyVols: Volumes that have a matching fs-backup action from the volume policy specified by the user
		// nonFsBackupVolumePolicyVols: Volumes that have an action matching from the volume policy specified by the user, but it is not fs-backup action
		// volsToProcessByLegacyApproach: Volumes that did not have any matching action i.e. action was nil from the volume policy specified by the user, these volumes will be processed via the legacy annotations based approach (fallback option)
		fsBackupVolumePolicyVols, nonFsBackupVolumePolicyVols, volsToProcessByLegacyApproach, err = v.GetVolumesMatchingFSBackupAction(pod, v.VolumePolicy, backupExcludePVC, kbclient)
		if err != nil {
			return fsBackupVolumePolicyVols, nonFsBackupVolumePolicyVols, err
		}
		// if volsToProcessByLegacyApproach is empty then no need to use the legacy approach as a fallback option, return from here
		if len(volsToProcessByLegacyApproach) == 0 {
			return fsBackupVolumePolicyVols, nonFsBackupVolumePolicyVols, nil
		}
		// process legacy annotation based approach, this is done when:
		// 1. volume policy is specified by the user
		// 2. And there are some volumes for which the volume policy approach did not get any supported matching actions
		v.Logger.Infof("volume policy specified by the user but there are volumes with no matching action, using legacy approach based on annotations as a fallback for those volumes")
		includedVolumesFromLegacyFallBack, optedOutVolumesFromLegacyFallBack := pdvolumeutil.GetVolumesByPod(pod, defaultVolumesToFsBackup, backupExcludePVC, volsToProcessByLegacyApproach)
		// merge the volumePolicy approach and legacy Fallback lists
		fsBackupVolumePolicyVols = append(fsBackupVolumePolicyVols, includedVolumesFromLegacyFallBack...)
		nonFsBackupVolumePolicyVols = append(nonFsBackupVolumePolicyVols, optedOutVolumesFromLegacyFallBack...)
		return fsBackupVolumePolicyVols, nonFsBackupVolumePolicyVols, nil
	}
	// Normal legacy workflow
	// Get the list of volumes to back up using pod volume backup from the pod's annotations.
	// We will also pass the list of volume that did not have any supported volume policy action matched in legacy approach so that
	// those volumes get processed via legacy annotation based approach, this is a fallback option on annotation based legacy approach
	v.Logger.Infof("fs-backup or snapshot Volume Policy not specified by the user, using legacy approach based on annotations")
	includedVolumes, optedOutVolumes := pdvolumeutil.GetVolumesByPod(pod, defaultVolumesToFsBackup, backupExcludePVC, volsToProcessByLegacyApproach)
	return includedVolumes, optedOutVolumes, nil
}
// GetVolumesMatchingFSBackupAction segregates the pod's backup-eligible PVC volumes by the
// volume policy action matched for their backing PV. It returns, in order:
//   - volumes whose matched action is fs-backup,
//   - volumes that matched some other (non-fs-backup) action,
//   - volumes that matched no action at all (to be handled by the legacy annotation fallback),
//
// and an error if any PVC/PV lookup or policy match fails.
func (v *VolumeHelperImpl) GetVolumesMatchingFSBackupAction(pod *corev1api.Pod, volumePolicies *resourcepolicies.Policies, backupExcludePVC bool, kbclient kbclient.Client) ([]string, []string, []string, error) {
	fsBackupVols := []string{}
	otherActionVols := []string{}
	noActionVols := []string{}
	for i, vol := range pod.Spec.Volumes {
		// Skip volumes that are never backed up (host paths, secrets, configmaps, etc.).
		if !v.ShouldIncludeVolumeInBackup(vol, backupExcludePVC) {
			continue
		}
		// Only PVC-backed volumes can be matched against a volume policy.
		if vol.PersistentVolumeClaim == nil {
			continue
		}
		// fetch the associated PVC first
		pvc, err := kubeutil.GetPVCForPodVolume(&pod.Spec.Volumes[i], pod, kbclient)
		if err != nil {
			return fsBackupVols, otherActionVols, noActionVols, err
		}
		// now fetch the PV and call GetMatchAction on it
		pv, err := kubeutil.GetPVForPVC(pvc, kbclient)
		if err != nil {
			return fsBackupVols, otherActionVols, noActionVols, err
		}
		// now get the action for pv
		action, err := volumePolicies.GetMatchAction(pv)
		if err != nil {
			return fsBackupVols, otherActionVols, noActionVols, err
		}
		// Volumes with no matched action are recorded so that they are processed via the
		// legacy annotation-based fallback; fs-backup matches and other-action matches
		// are tracked separately.
		switch {
		case action == nil:
			noActionVols = append(noActionVols, vol.Name)
		case action.Type == resourcepolicies.FSBackup:
			fsBackupVols = append(fsBackupVols, vol.Name)
		default:
			otherActionVols = append(otherActionVols, vol.Name)
		}
	}
	return fsBackupVols, otherActionVols, noActionVols, nil
}
// ShouldIncludeVolumeInBackup reports whether a pod volume is eligible for backup at all.
// Volume sources whose data either cannot be reached by the node agent or is backed up
// through other means are rejected.
func (v *VolumeHelperImpl) ShouldIncludeVolumeInBackup(vol corev1api.Volume, backupExcludePVC bool) bool {
	switch {
	case vol.HostPath != nil:
		// hostpath volumes are not mounted into /var/lib/kubelet/pods and are therefore
		// not accessible to the node agent daemon set.
		return false
	case vol.Secret != nil:
		// Secrets are backed up separately.
		return false
	case vol.ConfigMap != nil:
		// ConfigMaps are backed up separately.
		return false
	case vol.Projected != nil:
		// Projected volume data comes entirely from kube state.
		return false
	case vol.DownwardAPI != nil:
		// DownwardAPI volume data comes entirely from kube state.
		return false
	case vol.PersistentVolumeClaim != nil && backupExcludePVC:
		// PVCs are excluded from this backup by configuration.
		return false
	case strings.HasPrefix(vol.Name, "default-token"):
		// Default service account token mounts are excluded.
		return false
	}
	return true
}

View File

@@ -0,0 +1,701 @@
package volumehelper
import (
"testing"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/utils/pointer"
"github.com/vmware-tanzu/velero/internal/resourcepolicies"
"github.com/vmware-tanzu/velero/pkg/kuberesource"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
)
// TestVolumeHelperImpl_ShouldPerformSnapshot verifies that ShouldPerformSnapshot combines the
// volume policy's snapshot action with the backup's SnapshotVolumes flag, and that a PVC whose
// object cannot be resolved to a PV yields an error.
func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
	// PV using the gp2-csi storage class: matches the policy conditions used below.
	PVObjectGP2 := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "v1",
			"kind":       "PersistentVolume",
			"metadata": map[string]interface{}{
				"name": "example-pv",
			},
			"spec": map[string]interface{}{
				"capacity": map[string]interface{}{
					"storage": "1Gi",
				},
				"volumeMode":                    "Filesystem",
				"accessModes":                   []interface{}{"ReadWriteOnce"},
				"persistentVolumeReclaimPolicy": "Retain",
				"storageClassName":              "gp2-csi",
				"hostPath": map[string]interface{}{
					"path": "/mnt/data",
				},
			},
		},
	}
	// PV using gp3-csi: deliberately does NOT match the gp2-csi policy conditions.
	PVObjectGP3 := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "v1",
			"kind":       "PersistentVolume",
			"metadata": map[string]interface{}{
				"name": "example-pv",
			},
			"spec": map[string]interface{}{
				"capacity": map[string]interface{}{
					"storage": "1Gi",
				},
				"volumeMode":                    "Filesystem",
				"accessModes":                   []interface{}{"ReadWriteOnce"},
				"persistentVolumeReclaimPolicy": "Retain",
				"storageClassName":              "gp3-csi",
				"hostPath": map[string]interface{}{
					"path": "/mnt/data",
				},
			},
		},
	}
	testCases := []struct {
		name                string
		obj                 runtime.Unstructured
		groupResource       schema.GroupResource
		resourcePolicies    resourcepolicies.ResourcePolicies
		snapshotVolumesFlag *bool
		shouldSnapshot      bool
		expectedErr         bool
	}{
		{
			name:          "Given PV object matches volume policy snapshot action snapshotVolumes flags is true returns true and no error",
			obj:           PVObjectGP2,
			groupResource: kuberesource.PersistentVolumes,
			resourcePolicies: resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]interface{}{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Snapshot,
						},
					},
				},
			},
			snapshotVolumesFlag: pointer.Bool(true),
			shouldSnapshot:      true,
			expectedErr:         false,
		},
		{
			// Policy matches but the backup disables volume snapshots — must not snapshot.
			name:          "Given PV object matches volume policy snapshot action snapshotVolumes flags is false returns false and no error",
			obj:           PVObjectGP2,
			groupResource: kuberesource.PersistentVolumes,
			resourcePolicies: resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]interface{}{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Snapshot,
						},
					},
				},
			},
			snapshotVolumesFlag: pointer.Bool(false),
			shouldSnapshot:      false,
			expectedErr:         false,
		},
		{
			// Snapshotting enabled but the PV's storage class does not match the policy.
			name:          "Given PV object matches volume policy snapshot action snapshotVolumes flags is true returns false and no error",
			obj:           PVObjectGP3,
			groupResource: kuberesource.PersistentVolumes,
			resourcePolicies: resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]interface{}{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Snapshot,
						},
					},
				},
			},
			snapshotVolumesFlag: pointer.Bool(true),
			shouldSnapshot:      false,
			expectedErr:         false,
		},
		{
			// PVC input whose PVC does not exist in the fake client — the PV lookup fails.
			name: "Given PVC object matches volume policy snapshot action snapshotVolumes flags is true return false and error case PVC not found",
			obj: &unstructured.Unstructured{
				Object: map[string]interface{}{
					"apiVersion": "v1",
					"kind":       "PersistentVolumeClaim",
					"metadata": map[string]interface{}{
						"name":      "example-pvc",
						"namespace": "default",
					},
					"spec": map[string]interface{}{
						"accessModes": []string{"ReadWriteOnce"},
						"resources": map[string]interface{}{
							"requests": map[string]interface{}{
								"storage": "1Gi",
							},
						},
						"storageClassName": "gp2-csi",
					},
				},
			},
			groupResource: kuberesource.PersistentVolumeClaims,
			resourcePolicies: resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]interface{}{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Snapshot,
						},
					},
				},
			},
			snapshotVolumesFlag: pointer.Bool(true),
			shouldSnapshot:      false,
			expectedErr:         true,
		},
	}
	// Only this PV is registered in the fake client; the PVC referenced by the last
	// test case is intentionally absent so its lookup errors out.
	mockedPV := &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-pv",
		},
		Spec: corev1.PersistentVolumeSpec{
			Capacity: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("1Gi"),
			},
			AccessModes:                   []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimRetain,
			StorageClassName:              "gp2-csi",
			ClaimRef: &corev1.ObjectReference{
				Name:      "example-pvc",
				Namespace: "default",
			},
		},
	}
	objs := []runtime.Object{mockedPV}
	fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			policies := tc.resourcePolicies
			p := &resourcepolicies.Policies{}
			err := p.BuildPolicy(&policies)
			if err != nil {
				t.Fatalf("failed to build policy with error %v", err)
			}
			vh := &VolumeHelperImpl{
				VolumePolicy:    p,
				SnapshotVolumes: tc.snapshotVolumesFlag,
				Logger:          velerotest.NewLogger(),
			}
			ActualShouldSnapshot, actualError := vh.ShouldPerformSnapshot(tc.obj, tc.groupResource, fakeClient)
			if tc.expectedErr {
				assert.NotNil(t, actualError, "Want error; Got nil error")
				return
			}
			assert.Equalf(t, ActualShouldSnapshot, tc.shouldSnapshot, "Want shouldSnapshot as %v; Got shouldSnapshot as %v", tc.shouldSnapshot, ActualShouldSnapshot)
		})
	}
}
// TestVolumeHelperImpl_ShouldIncludeVolumeInBackup verifies that each excluded volume source
// (host path, secret, configmap, projected, downwardAPI, PVC-with-exclusion, default token)
// is rejected from backup eligibility. The volume policy configured in the loop is incidental:
// ShouldIncludeVolumeInBackup does not consult it for these cases.
func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) {
	testCases := []struct {
		name             string
		vol              corev1.Volume
		backupExcludePVC bool
		shouldInclude    bool
	}{
		{
			name: "volume has host path so do not include",
			vol: corev1.Volume{
				Name: "sample-volume",
				VolumeSource: corev1.VolumeSource{
					HostPath: &corev1.HostPathVolumeSource{
						Path: "some-path",
					},
				},
			},
			backupExcludePVC: false,
			shouldInclude:    false,
		},
		{
			name: "volume has secret mounted so do not include",
			vol: corev1.Volume{
				Name: "sample-volume",
				VolumeSource: corev1.VolumeSource{
					Secret: &corev1.SecretVolumeSource{
						SecretName: "sample-secret",
						Items: []corev1.KeyToPath{
							{
								Key:  "username",
								Path: "my-username",
							},
						},
					},
				},
			},
			backupExcludePVC: false,
			shouldInclude:    false,
		},
		{
			name: "volume has configmap so do not include",
			vol: corev1.Volume{
				Name: "sample-volume",
				VolumeSource: corev1.VolumeSource{
					ConfigMap: &corev1.ConfigMapVolumeSource{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: "sample-cm",
						},
					},
				},
			},
			backupExcludePVC: false,
			shouldInclude:    false,
		},
		{
			name: "volume is mounted as project volume so do not include",
			vol: corev1.Volume{
				Name: "sample-volume",
				VolumeSource: corev1.VolumeSource{
					Projected: &corev1.ProjectedVolumeSource{
						Sources: []corev1.VolumeProjection{},
					},
				},
			},
			backupExcludePVC: false,
			shouldInclude:    false,
		},
		{
			name: "volume has downwardAPI so do not include",
			vol: corev1.Volume{
				Name: "sample-volume",
				VolumeSource: corev1.VolumeSource{
					DownwardAPI: &corev1.DownwardAPIVolumeSource{
						Items: []corev1.DownwardAPIVolumeFile{
							{
								Path: "labels",
								FieldRef: &corev1.ObjectFieldSelector{
									FieldPath: "metadata.labels",
								},
							},
						},
					},
				},
			},
			backupExcludePVC: false,
			shouldInclude:    false,
		},
		{
			name: "volume has pvc and backupExcludePVC is true so do not include",
			vol: corev1.Volume{
				Name: "sample-volume",
				VolumeSource: corev1.VolumeSource{
					PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
						ClaimName: "sample-pvc",
					},
				},
			},
			backupExcludePVC: true,
			shouldInclude:    false,
		},
		{
			name: "volume name has prefix default-token so do not include",
			vol: corev1.Volume{
				Name: "default-token-vol-name",
				VolumeSource: corev1.VolumeSource{
					PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
						ClaimName: "sample-pvc",
					},
				},
			},
			backupExcludePVC: false,
			shouldInclude:    false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Build a valid (but unused-for-these-cases) volume policy so the helper is
			// constructed the same way production code constructs it.
			resourcePolicies := resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]interface{}{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Snapshot,
						},
					},
				},
			}
			policies := resourcePolicies
			p := &resourcepolicies.Policies{}
			err := p.BuildPolicy(&policies)
			if err != nil {
				t.Fatalf("failed to build policy with error %v", err)
			}
			vh := &VolumeHelperImpl{
				VolumePolicy:    p,
				SnapshotVolumes: pointer.Bool(true),
				Logger:          velerotest.NewLogger(),
			}
			actualShouldInclude := vh.ShouldIncludeVolumeInBackup(tc.vol, tc.backupExcludePVC)
			assert.Equalf(t, actualShouldInclude, tc.shouldInclude, "Want shouldInclude as %v; Got actualShouldInclude as %v", tc.shouldInclude, actualShouldInclude)
		})
	}
}
// Storage class names shared by the fixtures below (pointers are needed for PVC specs).
var (
	gp2csi = "gp2-csi"
	gp3csi = "gp3-csi"
)

// Shared fixtures for the fs-backup segregation tests: a pod with two PVC-backed volumes,
// the two PVCs, their bound PVs (gp2-csi and gp3-csi), and three volume-policy variants
// covering fs-backup + snapshot, fs-backup only, and a non-matching storage class.
var (
	samplePod = &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "sample-pod",
			Namespace: "sample-ns",
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{
					Name:  "sample-container",
					Image: "sample-image",
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      "sample-vm",
							MountPath: "/etc/pod-info",
						},
					},
				},
			},
			Volumes: []corev1.Volume{
				{
					Name: "sample-volume-1",
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: "sample-pvc-1",
						},
					},
				},
				{
					Name: "sample-volume-2",
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: "sample-pvc-2",
						},
					},
				},
			},
		},
	}

	// PVC bound to sample-pv-1 (gp2-csi).
	samplePVC1 = &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "sample-pvc-1",
			Namespace: "sample-ns",
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{},
			},
			StorageClassName: &gp2csi,
			VolumeName:       "sample-pv-1",
		},
		Status: corev1.PersistentVolumeClaimStatus{
			Phase:       corev1.ClaimBound,
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Capacity:    corev1.ResourceList{},
		},
	}

	// PVC bound to sample-pv-2 (gp3-csi).
	samplePVC2 = &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "sample-pvc-2",
			Namespace: "sample-ns",
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{},
			},
			StorageClassName: &gp3csi,
			VolumeName:       "sample-pv-2",
		},
		Status: corev1.PersistentVolumeClaimStatus{
			Phase:       corev1.ClaimBound,
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Capacity:    corev1.ResourceList{},
		},
	}

	// PV for sample-pvc-1, storage class gp2-csi.
	samplePV1 = &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "sample-pv-1",
		},
		Spec: corev1.PersistentVolumeSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Capacity:    corev1.ResourceList{},
			ClaimRef: &corev1.ObjectReference{
				Kind:            "PersistentVolumeClaim",
				Name:            "sample-pvc-1",
				Namespace:       "sample-ns",
				ResourceVersion: "1027",
				UID:             "7d28e566-ade7-4ed6-9e15-2e44d2fbcc08",
			},
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				CSI: &corev1.CSIPersistentVolumeSource{
					Driver: "ebs.csi.aws.com",
					FSType: "ext4",
					VolumeAttributes: map[string]string{
						"storage.kubernetes.io/csiProvisionerIdentity": "1582049697841-8081-hostpath.csi.k8s.io",
					},
					VolumeHandle: "e61f2b48-527a-11ea-b54f-cab6317018f1",
				},
			},
			PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimDelete,
			StorageClassName:              gp2csi,
		},
		Status: corev1.PersistentVolumeStatus{
			Phase: corev1.VolumeBound,
		},
	}

	// PV for sample-pvc-2, storage class gp3-csi.
	samplePV2 = &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "sample-pv-2",
		},
		Spec: corev1.PersistentVolumeSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Capacity:    corev1.ResourceList{},
			ClaimRef: &corev1.ObjectReference{
				Kind:            "PersistentVolumeClaim",
				Name:            "sample-pvc-2",
				Namespace:       "sample-ns",
				ResourceVersion: "1027",
				UID:             "7d28e566-ade7-4ed6-9e15-2e44d2fbcc08",
			},
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				CSI: &corev1.CSIPersistentVolumeSource{
					Driver: "ebs.csi.aws.com",
					FSType: "ext4",
					VolumeAttributes: map[string]string{
						"storage.kubernetes.io/csiProvisionerIdentity": "1582049697841-8081-hostpath.csi.k8s.io",
					},
					VolumeHandle: "e61f2b48-527a-11ea-b54f-cab6317018f1",
				},
			},
			PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimDelete,
			StorageClassName:              gp3csi,
		},
		Status: corev1.PersistentVolumeStatus{
			Phase: corev1.VolumeBound,
		},
	}

	// fs-backup for gp2-csi plus snapshot for gp3-csi: every sample volume matches some action.
	resourcePolicies1 = resourcepolicies.ResourcePolicies{
		Version: "v1",
		VolumePolicies: []resourcepolicies.VolumePolicy{
			{
				Conditions: map[string]interface{}{
					"storageClass": []string{"gp2-csi"},
				},
				Action: resourcepolicies.Action{
					Type: resourcepolicies.FSBackup,
				},
			},
			{
				Conditions: map[string]interface{}{
					"storageClass": []string{"gp3-csi"},
				},
				Action: resourcepolicies.Action{
					Type: resourcepolicies.Snapshot,
				},
			},
		},
	}

	// fs-backup for gp2-csi only: gp3-csi volumes match no action.
	resourcePolicies2 = resourcepolicies.ResourcePolicies{
		Version: "v1",
		VolumePolicies: []resourcepolicies.VolumePolicy{
			{
				Conditions: map[string]interface{}{
					"storageClass": []string{"gp2-csi"},
				},
				Action: resourcepolicies.Action{
					Type: resourcepolicies.FSBackup,
				},
			},
		},
	}

	// fs-backup for gp4-csi: no sample volume matches any action.
	resourcePolicies3 = resourcepolicies.ResourcePolicies{
		Version: "v1",
		VolumePolicies: []resourcepolicies.VolumePolicy{
			{
				Conditions: map[string]interface{}{
					"storageClass": []string{"gp4-csi"},
				},
				Action: resourcepolicies.Action{
					Type: resourcepolicies.FSBackup,
				},
			},
		},
	}
)
// TestVolumeHelperImpl_GetVolumesMatchingFSBackupAction verifies that a pod's volumes are
// partitioned into fs-backup action matching volumes, volumes matching some other supported
// action, and volumes matching no action at all, and that setting backupExcludePVC causes
// all three returned lists to be empty.
func TestVolumeHelperImpl_GetVolumesMatchingFSBackupAction(t *testing.T) {
	testCases := []struct {
		name                          string
		backupExcludePVC              bool
		resourcepoliciesApplied       resourcepolicies.ResourcePolicies
		FSBackupActionMatchingVols    []string
		FSBackupNonActionMatchingVols []string
		NoActionMatchingVols          []string
		expectedErr                   bool
	}{
		{
			name:                          "For a given pod with 2 volumes and volume policy we get one fs-backup action matching volume, one fs-back action non-matching volume but has some matching action and zero no action matching volumes",
			backupExcludePVC:              false,
			resourcepoliciesApplied:       resourcePolicies1,
			FSBackupActionMatchingVols:    []string{"sample-volume-1"},
			FSBackupNonActionMatchingVols: []string{"sample-volume-2"},
			NoActionMatchingVols:          []string{},
			expectedErr:                   false,
		},
		{
			name:                          "For a given pod with 2 volumes and volume policy we get one fs-backup action matching volume, zero fs-backup action non-matching volume and one no action matching volumes",
			backupExcludePVC:              false,
			resourcepoliciesApplied:       resourcePolicies2,
			FSBackupActionMatchingVols:    []string{"sample-volume-1"},
			FSBackupNonActionMatchingVols: []string{},
			NoActionMatchingVols:          []string{"sample-volume-2"},
			expectedErr:                   false,
		},
		{
			name:                          "For a given pod with 2 volumes and volume policy we get one fs-backup action matching volume, one fs-backup action non-matching volume and one no action matching volumes but backupExcludePVC is true so all returned list should be empty",
			backupExcludePVC:              true,
			resourcepoliciesApplied:       resourcePolicies2,
			FSBackupActionMatchingVols:    []string{},
			FSBackupNonActionMatchingVols: []string{},
			NoActionMatchingVols:          []string{},
			expectedErr:                   false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			policies := tc.resourcepoliciesApplied
			p := &resourcepolicies.Policies{}
			err := p.BuildPolicy(&policies)
			if err != nil {
				t.Fatalf("failed to build policy with error %v", err)
			}
			vh := &VolumeHelperImpl{
				VolumePolicy:    p,
				SnapshotVolumes: pointer.Bool(true),
				Logger:          velerotest.NewLogger(),
			}
			objs := []runtime.Object{samplePod, samplePVC1, samplePVC2, samplePV1, samplePV2}
			fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)
			gotFSBackupActionMatchingVols, gotFSBackupNonActionMatchingVols, gotNoActionMatchingVols, actualError := vh.GetVolumesMatchingFSBackupAction(samplePod, vh.VolumePolicy, tc.backupExcludePVC, fakeClient)
			if tc.expectedErr {
				assert.Error(t, actualError, "Want error; Got nil error")
				return
			}
			assert.NoErrorf(t, actualError, "Want: nil error; Got: %v", actualError)
			// testify's Equal* helpers take the expected value first and the actual
			// value second; the original had them reversed, which makes failure
			// output label expected/actual the wrong way around.
			assert.Equalf(t, tc.FSBackupActionMatchingVols, gotFSBackupActionMatchingVols, "Want FSBackupActionMatchingVols as %v; Got gotFSBackupActionMatchingVols as %v", tc.FSBackupActionMatchingVols, gotFSBackupActionMatchingVols)
			assert.Equalf(t, tc.FSBackupNonActionMatchingVols, gotFSBackupNonActionMatchingVols, "Want FSBackupNonActionMatchingVols as %v; Got gotFSBackupNonActionMatchingVols as %v", tc.FSBackupNonActionMatchingVols, gotFSBackupNonActionMatchingVols)
			assert.Equalf(t, tc.NoActionMatchingVols, gotNoActionMatchingVols, "Want NoActionMatchingVols as %v; Got gotNoActionMatchingVols as %v", tc.NoActionMatchingVols, gotNoActionMatchingVols)
		})
	}
}
// TestVolumeHelperImpl_GetVolumesForFSBackup checks which of a pod's volumes are selected
// for fs-backup (and which are opted out) under different volume policies, with the legacy
// annotation-based approach used as the fallback for volumes that match no policy action.
func TestVolumeHelperImpl_GetVolumesForFSBackup(t *testing.T) {
	testCases := []struct {
		name                     string
		backupExcludePVC         bool
		defaultVolumesToFsBackup bool
		resourcepoliciesApplied  resourcepolicies.ResourcePolicies
		includedVolumes          []string
		optedOutVolumes          []string
		expectedErr              bool
	}{
		{
			name:                     "For a given pod with 2 volumes and volume policy we get one fs-backup action matching volume, one fs-back action non-matching volume but matches snapshot action so no volumes for legacy fallback process, defaultvolumestofsbackup is false but no effect",
			backupExcludePVC:         false,
			defaultVolumesToFsBackup: false,
			resourcepoliciesApplied:  resourcePolicies1,
			includedVolumes:          []string{"sample-volume-1"},
			optedOutVolumes:          []string{"sample-volume-2"},
		},
		{
			name:                     "For a given pod with 2 volumes and volume policy we get one fs-backup action matching volume, one fs-back action non-matching volume but matches snapshot action so no volumes for legacy fallback process, defaultvolumestofsbackup is true but no effect",
			backupExcludePVC:         false,
			defaultVolumesToFsBackup: true,
			resourcepoliciesApplied:  resourcePolicies1,
			includedVolumes:          []string{"sample-volume-1"},
			optedOutVolumes:          []string{"sample-volume-2"},
		},
		{
			name:                     "For a given pod with 2 volumes and volume policy we get no volume matching fs-backup action defaultvolumesToFSBackup is false, no annotations, using legacy as fallback for non-action matching volumes",
			backupExcludePVC:         false,
			defaultVolumesToFsBackup: false,
			resourcepoliciesApplied:  resourcePolicies3,
			includedVolumes:          []string{},
			optedOutVolumes:          []string{},
		},
		{
			name:                     "For a given pod with 2 volumes and volume policy we get no volume matching fs-backup action defaultvolumesToFSBackup is true, no annotations, using legacy as fallback for non-action matching volumes",
			backupExcludePVC:         false,
			defaultVolumesToFsBackup: true,
			resourcepoliciesApplied:  resourcePolicies3,
			includedVolumes:          []string{"sample-volume-1", "sample-volume-2"},
			optedOutVolumes:          []string{},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			appliedPolicies := tc.resourcepoliciesApplied
			builtPolicy := &resourcepolicies.Policies{}
			if buildErr := builtPolicy.BuildPolicy(&appliedPolicies); buildErr != nil {
				t.Fatalf("failed to build policy with error %v", buildErr)
			}
			volHelper := &VolumeHelperImpl{
				VolumePolicy:    builtPolicy,
				SnapshotVolumes: pointer.Bool(true),
				Logger:          velerotest.NewLogger(),
			}
			runtimeObjs := []runtime.Object{samplePod, samplePVC1, samplePVC2, samplePV1, samplePV2}
			fakeKbClient := velerotest.NewFakeControllerRuntimeClient(t, runtimeObjs...)
			gotIncludedVolumes, gotOptedOutVolumes, actualError := volHelper.GetVolumesForFSBackup(
				samplePod,
				tc.defaultVolumesToFsBackup,
				tc.backupExcludePVC,
				fakeKbClient,
			)
			if tc.expectedErr {
				assert.NotNil(t, actualError, "Want error; Got nil error")
				return
			}
			assert.Nilf(t, actualError, "Want: nil error; Got: %v", actualError)
			assert.Equalf(t, tc.includedVolumes, gotIncludedVolumes, "Want includedVolumes as %v; Got gotIncludedVolumes as %v", tc.includedVolumes, gotIncludedVolumes)
			assert.Equalf(t, tc.optedOutVolumes, gotOptedOutVolumes, "Want optedOutVolumes as %v; Got gotOptedOutVolumes as %v", tc.optedOutVolumes, gotOptedOutVolumes)
		})
	}
}

View File

@@ -24,6 +24,8 @@ import (
"strings"
"time"
"github.com/vmware-tanzu/velero/internal/volumehelper"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1api "k8s.io/api/core/v1"
@@ -52,7 +54,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/podvolume"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
csiutil "github.com/vmware-tanzu/velero/pkg/util/csi"
pdvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
)
const (
@@ -187,6 +188,16 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
ib.trackSkippedPV(obj, groupResource, podVolumeApproach, fmt.Sprintf("opted out due to annotation in pod %s", podName), log)
}
// Instantiate volumepolicyhelper struct here
vh := &volumehelper.VolumeHelperImpl{
SnapshotVolumes: ib.backupRequest.Spec.SnapshotVolumes,
Logger: logger,
}
if ib.backupRequest.ResPolicies != nil {
vh.VolumePolicy = ib.backupRequest.ResPolicies
}
if groupResource == kuberesource.Pods {
// pod needs to be initialized for the unstructured converter
pod = new(corev1api.Pod)
@@ -195,14 +206,13 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
// nil it on error since it's not valid
pod = nil
} else {
// Get the list of volumes to back up using pod volume backup from the pod's annotations. Remove from this list
// Get the list of volumes to back up using pod volume backup from the pod's annotations or volume policy approach. Remove from this list
// any volumes that use a PVC that we've already backed up (this would be in a read-write-many scenario,
// where it's been backed up from another pod), since we don't need >1 backup per PVC.
includedVolumes, optedOutVolumes := pdvolumeutil.GetVolumesByPod(
pod,
boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToFsBackup),
!ib.backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()),
)
includedVolumes, optedOutVolumes, err := vh.GetVolumesForFSBackup(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToFsBackup), !ib.backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()), ib.kbClient)
if err != nil {
backupErrs = append(backupErrs, errors.WithStack(err))
}
for _, volume := range includedVolumes {
// track the volumes that are PVCs using the PVC snapshot tracker, so that when we backup PVCs/PVs
@@ -229,7 +239,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
// the group version of the object.
versionPath := resourceVersion(obj)
updatedObj, additionalItemFiles, err := ib.executeActions(log, obj, groupResource, name, namespace, metadata, finalize)
updatedObj, additionalItemFiles, err := ib.executeActions(log, obj, groupResource, name, namespace, metadata, finalize, vh)
if err != nil {
backupErrs = append(backupErrs, err)
@@ -255,7 +265,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
backupErrs = append(backupErrs, err)
}
if err := ib.takePVSnapshot(obj, log); err != nil {
if err := ib.takePVSnapshot(obj, log, vh); err != nil {
backupErrs = append(backupErrs, err)
}
}
@@ -351,6 +361,7 @@ func (ib *itemBackupper) executeActions(
name, namespace string,
metadata metav1.Object,
finalize bool,
vh *volumehelper.VolumeHelperImpl,
) (runtime.Unstructured, []FileForArchive, error) {
var itemFiles []FileForArchive
for _, action := range ib.backupRequest.ResolvedActions {
@@ -374,6 +385,19 @@ func (ib *itemBackupper) executeActions(
continue
}
if groupResource == kuberesource.PersistentVolumeClaims && actionName == csiBIAPluginName && vh.VolumePolicy != nil {
snapshotVolume, err := vh.ShouldPerformSnapshot(obj, kuberesource.PersistentVolumeClaims, ib.kbClient)
if err != nil {
return nil, itemFiles, errors.WithStack(err)
}
if !snapshotVolume {
log.Info(fmt.Sprintf("skipping csi volume snapshot for PVC %s as it does not fit the volume policy criteria specified by the user for snapshot action", namespace+"/"+name))
ib.trackSkippedPV(obj, kuberesource.PersistentVolumeClaims, volumeSnapshotApproach, "does not satisfy the criteria for volume policy based snapshot action", log)
continue
}
}
updatedItem, additionalItemIdentifiers, operationID, postOperationItems, err := action.Execute(obj, ib.backupRequest.Backup)
if err != nil {
return nil, itemFiles, errors.Wrapf(err, "error executing custom action (groupResource=%s, namespace=%s, name=%s)", groupResource.String(), namespace, name)
@@ -504,15 +528,9 @@ const (
// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
// backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud
// disk type and IOPS (if applicable) to be able to restore to current state later.
func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.FieldLogger) error {
func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.FieldLogger, vh *volumehelper.VolumeHelperImpl) error {
log.Info("Executing takePVSnapshot")
if boolptr.IsSetToFalse(ib.backupRequest.Spec.SnapshotVolumes) {
log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.")
ib.trackSkippedPV(obj, kuberesource.PersistentVolumes, volumeSnapshotApproach, "backup has volume snapshots disabled", log)
return nil
}
pv := new(corev1api.PersistentVolume)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pv); err != nil {
return errors.WithStack(err)
@@ -520,6 +538,26 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
log = log.WithField("persistentVolume", pv.Name)
if vh.VolumePolicy != nil {
snapshotVolume, err := vh.ShouldPerformSnapshot(obj, kuberesource.PersistentVolumes, ib.kbClient)
if err != nil {
return err
}
if !snapshotVolume {
log.Info(fmt.Sprintf("skipping volume snapshot for PV %s as it does not fit the volume policy criteria specified by the user for snapshot action", pv.Name))
ib.trackSkippedPV(obj, kuberesource.PersistentVolumes, volumeSnapshotApproach, "does not satisfy the criteria for volume policy based snapshot action", log)
return nil
}
}
if boolptr.IsSetToFalse(ib.backupRequest.Spec.SnapshotVolumes) {
log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.")
ib.trackSkippedPV(obj, kuberesource.PersistentVolumes, volumeSnapshotApproach, "backup has volume snapshots disabled", log)
return nil
}
// If this PV is claimed, see if we've already taken a (pod volume backup) snapshot of the contents
// of this PV. If so, don't take a snapshot.
if pv.Spec.ClaimRef != nil {

View File

@@ -758,7 +758,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
if _, ok := enabledRuntimeControllers[controller.Backup]; ok {
backupper, err := backup.NewKubernetesBackupper(
s.mgr.GetClient(),
s.crClient,
s.discoveryHelper,
client.NewDynamicFactory(s.dynamicClient),
podexec.NewPodCommandExecutor(s.kubeClientConfig, s.kubeClient.CoreV1().RESTClient()),

View File

@@ -386,3 +386,21 @@ func GetPVForPVC(
}
return pv, nil
}
// GetPVCForPodVolume returns the PersistentVolumeClaim referenced by the given
// pod volume. It returns an error if the volume has no PVC source, or if the
// referenced PVC cannot be fetched via the provided client.
func GetPVCForPodVolume(vol *corev1api.Volume, pod *corev1api.Pod, crClient crclient.Client) (*corev1api.PersistentVolumeClaim, error) {
	// Only volumes backed by a PVC can be resolved; anything else is an error.
	if vol.PersistentVolumeClaim == nil {
		return nil, errors.Errorf("volume %s/%s has no PVC associated with it", pod.Namespace, vol.Name)
	}
	pvc := &corev1api.PersistentVolumeClaim{}
	// The claim is looked up in the pod's namespace, since a pod can only
	// reference PVCs in its own namespace.
	err := crClient.Get(
		context.TODO(),
		crclient.ObjectKey{Name: vol.PersistentVolumeClaim.ClaimName, Namespace: pod.Namespace},
		pvc,
	)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get PVC %s for Volume %s/%s",
			vol.PersistentVolumeClaim.ClaimName, pod.Namespace, vol.Name)
	}
	return pvc, nil
}

View File

@@ -1256,3 +1256,125 @@ func TestGetPVForPVC(t *testing.T) {
})
}
}
// TestGetPVCForPodVolume verifies PVC resolution for a pod volume: a bound PVC is found,
// a missing PVC yields an error, and a volume with no PVC source yields an error.
func TestGetPVCForPodVolume(t *testing.T) {
	sampleVol := &corev1api.Volume{
		Name: "sample-volume",
		VolumeSource: corev1api.VolumeSource{
			PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
				ClaimName: "sample-pvc",
			},
		},
	}
	sampleVol2 := &corev1api.Volume{
		Name: "sample-volume",
		VolumeSource: corev1api.VolumeSource{
			PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
				ClaimName: "sample-pvc-1",
			},
		},
	}
	sampleVol3 := &corev1api.Volume{
		Name:         "sample-volume",
		VolumeSource: corev1api.VolumeSource{},
	}
	samplePod := &corev1api.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "sample-pod",
			Namespace: "sample-ns",
		},
		Spec: corev1api.PodSpec{
			Containers: []corev1api.Container{
				{
					Name:  "sample-container",
					Image: "sample-image",
					VolumeMounts: []corev1api.VolumeMount{
						{
							Name:      "sample-vm",
							MountPath: "/etc/pod-info",
						},
					},
				},
			},
			Volumes: []corev1api.Volume{
				{
					Name: "sample-volume",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "sample-pvc",
						},
					},
				},
			},
		},
	}
	matchingPVC := &corev1api.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "sample-pvc",
			Namespace: "sample-ns",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.VolumeResourceRequirements{
				Requests: v1.ResourceList{},
			},
			StorageClassName: &csiStorageClass,
			VolumeName:       "test-csi-7d28e566-ade7-4ed6-9e15-2e44d2fbcc08",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase:       v1.ClaimBound,
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Capacity:    v1.ResourceList{},
		},
	}
	testCases := []struct {
		name          string
		vol           *corev1api.Volume
		pod           *corev1api.Pod
		expectedPVC   *corev1api.PersistentVolumeClaim
		expectedError bool
	}{
		{
			name:          "should find PVC for volume",
			vol:           sampleVol,
			pod:           samplePod,
			expectedPVC:   matchingPVC,
			expectedError: false,
		},
		{
			name:          "should not find PVC for volume not found error case",
			vol:           sampleVol2,
			pod:           samplePod,
			expectedPVC:   nil,
			expectedError: true,
		},
		{
			name:          "should not find PVC vol has no PVC, error case",
			vol:           sampleVol3,
			pod:           samplePod,
			expectedPVC:   nil,
			expectedError: true,
		},
	}
	objs := []runtime.Object{matchingPVC}
	fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Pass the per-case pod (tc.pod) rather than the fixed samplePod fixture,
			// so each table entry exercises its own inputs.
			actualPVC, actualError := GetPVCForPodVolume(tc.vol, tc.pod, fakeClient)
			if tc.expectedError {
				assert.Error(t, actualError, "Want error; Got nil error")
				assert.Nilf(t, actualPVC, "Want PVC: nil; Got PVC: %v", actualPVC)
				return
			}
			assert.NoErrorf(t, actualError, "Want: nil error; Got: %v", actualError)
			// testify's Equal* helpers take the expected value first, then the actual.
			assert.Equalf(t, tc.expectedPVC.Name, actualPVC.Name, "Want PVC with name %q; Got PVC with name %q", tc.expectedPVC.Name, actualPVC.Name)
		})
	}
}

View File

@@ -30,7 +30,7 @@ import (
)
// GetVolumesByPod returns a list of volume names to backup for the provided pod.
func GetVolumesByPod(pod *corev1api.Pod, defaultVolumesToFsBackup, backupExcludePVC bool) ([]string, []string) {
func GetVolumesByPod(pod *corev1api.Pod, defaultVolumesToFsBackup, backupExcludePVC bool, volsToProcessByLegacyApproach []string) ([]string, []string) {
// tracks the volumes that have been explicitly opted out of backup via the annotation in the pod
optedOutVolumes := make([]string, 0)
@@ -38,9 +38,13 @@ func GetVolumesByPod(pod *corev1api.Pod, defaultVolumesToFsBackup, backupExclude
return GetVolumesToBackup(pod), optedOutVolumes
}
volsToExclude := getVolumesToExclude(pod)
volsToExclude := GetVolumesToExclude(pod)
podVolumes := []string{}
for _, pv := range pod.Spec.Volumes {
// Identify volume to process
// For normal case all the pod volume will be processed
// For case when volsToProcessByLegacyApproach is non-empty then only those volume will be processed
volsToProcess := GetVolumesToProcess(pod.Spec.Volumes, volsToProcessByLegacyApproach)
for _, pv := range volsToProcess {
// cannot backup hostpath volumes as they are not mounted into /var/lib/kubelet/pods
// and therefore not accessible to the node agent daemon set.
if pv.HostPath != nil {
@@ -96,7 +100,7 @@ func GetVolumesToBackup(obj metav1.Object) []string {
return strings.Split(backupsValue, ",")
}
func getVolumesToExclude(obj metav1.Object) []string {
func GetVolumesToExclude(obj metav1.Object) []string {
annotations := obj.GetAnnotations()
if annotations == nil {
return nil
@@ -112,7 +116,7 @@ func IsPVCDefaultToFSBackup(pvcNamespace, pvcName string, crClient crclient.Clie
}
for index := range pods {
vols, _ := GetVolumesByPod(&pods[index], defaultVolumesToFsBackup, false)
vols, _ := GetVolumesByPod(&pods[index], defaultVolumesToFsBackup, false, []string{})
if len(vols) > 0 {
volName, err := getPodVolumeNameForPVC(pods[index], pvcName)
if err != nil {
@@ -160,3 +164,29 @@ func getPodsUsingPVC(
return podsUsingPVC, nil
}
// GetVolumesToProcess returns the subset of the pod's volumes that should still be
// handled by the legacy (annotation-based) approach. When volsToProcessByLegacyApproach
// is empty every volume is returned unchanged; otherwise only the volumes whose names
// appear in that list — i.e. those not already matched to a supported action by the
// volume policy approach — are returned.
func GetVolumesToProcess(volumes []corev1api.Volume, volsToProcessByLegacyApproach []string) []corev1api.Volume {
	volsToProcess := make([]corev1api.Volume, 0)

	// No volumes associated with the pod: nothing to process.
	if len(volumes) == 0 {
		return volsToProcess
	}

	// Normal case: no legacy restriction list, so every pod volume is processed.
	if len(volsToProcessByLegacyApproach) == 0 {
		return volumes
	}

	// Legacy fallback case: keep only the explicitly named volumes.
	for _, vol := range volumes {
		if util.Contains(volsToProcessByLegacyApproach, vol.Name) {
			volsToProcess = append(volsToProcess, vol)
		}
	}
	return volsToProcess
}

View File

@@ -369,7 +369,7 @@ func TestGetVolumesByPod(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
actualIncluded, actualOptedOut := GetVolumesByPod(tc.pod, tc.defaultVolumesToFsBackup, tc.backupExcludePVC)
actualIncluded, actualOptedOut := GetVolumesByPod(tc.pod, tc.defaultVolumesToFsBackup, tc.backupExcludePVC, []string{})
sort.Strings(tc.expected.included)
sort.Strings(actualIncluded)
@@ -792,3 +792,97 @@ func TestGetPodsUsingPVC(t *testing.T) {
})
}
}
// TestGetVolumesToProcess checks that GetVolumesToProcess returns all pod volumes when no
// legacy-approach list is supplied, only the named volumes when one is supplied, and an
// empty list when the pod has no volumes.
func TestGetVolumesToProcess(t *testing.T) {
	testCases := []struct {
		name                          string
		volumes                       []corev1api.Volume
		volsToProcessByLegacyApproach []string
		expectedVolumes               []corev1api.Volume
	}{
		{
			name: "pod has 2 volumes empty volsToProcessByLegacyApproach list return 2 volumes",
			volumes: []corev1api.Volume{
				{
					Name: "sample-volume-1",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "sample-pvc-1",
						},
					},
				},
				{
					Name: "sample-volume-2",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "sample-pvc-2",
						},
					},
				},
			},
			volsToProcessByLegacyApproach: []string{},
			expectedVolumes: []corev1api.Volume{
				{
					Name: "sample-volume-1",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "sample-pvc-1",
						},
					},
				},
				{
					Name: "sample-volume-2",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "sample-pvc-2",
						},
					},
				},
			},
		},
		{
			name: "pod has 2 volumes non-empty volsToProcessByLegacyApproach list returns 1 volumes",
			volumes: []corev1api.Volume{
				{
					Name: "sample-volume-1",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "sample-pvc-1",
						},
					},
				},
				{
					Name: "sample-volume-2",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "sample-pvc-2",
						},
					},
				},
			},
			volsToProcessByLegacyApproach: []string{"sample-volume-2"},
			expectedVolumes: []corev1api.Volume{
				{
					Name: "sample-volume-2",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "sample-pvc-2",
						},
					},
				},
			},
		},
		{
			name:                          "empty case, return empty list",
			volumes:                       []corev1api.Volume{},
			volsToProcessByLegacyApproach: []string{},
			expectedVolumes:               []corev1api.Volume{},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			actualVolumes := GetVolumesToProcess(tc.volumes, tc.volsToProcessByLegacyApproach)
			// Use Equalf so the format string and arguments are treated as an explicit
			// format rather than relying on Equal's msgAndArgs fallback, matching the
			// convention used by the other tests in this file.
			assert.Equalf(t, tc.expectedVolumes, actualVolumes, "Want Volumes List %v; Got Volumes List %v", tc.expectedVolumes, actualVolumes)
		})
	}
}