From c62a486765d2cab7b7339af8173f3acbb4a46161 Mon Sep 17 00:00:00 2001 From: Xun Jiang Date: Fri, 22 Aug 2025 15:01:32 +0800 Subject: [PATCH] Add ConfigMap parameters validation for install CLI and server start. Signed-off-by: Xun Jiang --- changelogs/unreleased/9200-blackpiglet | 1 + pkg/cmd/cli/install/install.go | 30 ++++++- pkg/cmd/cli/nodeagent/server.go | 26 +++--- pkg/cmd/cli/nodeagent/server_test.go | 67 +++++++-------- pkg/cmd/server/server.go | 19 ++++- pkg/cmd/server/server_test.go | 24 +++++- pkg/controller/data_download_controller.go | 5 +- .../data_download_controller_test.go | 7 +- pkg/controller/data_upload_controller.go | 5 +- pkg/controller/data_upload_controller_test.go | 5 +- pkg/exposer/csi_snapshot.go | 3 +- pkg/exposer/csi_snapshot_test.go | 12 +-- pkg/exposer/generic_restore.go | 3 +- pkg/nodeagent/node_agent.go | 66 +-------------- pkg/nodeagent/node_agent_test.go | 15 ++-- pkg/repository/maintenance/maintenance.go | 37 +++----- .../maintenance/maintenance_test.go | 30 +++---- pkg/types/node_agent.go | 84 +++++++++++++++++++ pkg/types/repo_maintenance.go | 34 ++++++++ pkg/util/kube/utils.go | 27 ++++++ pkg/util/kube/utils_test.go | 53 +++++++++++- 21 files changed, 370 insertions(+), 183 deletions(-) create mode 100644 changelogs/unreleased/9200-blackpiglet create mode 100644 pkg/types/node_agent.go create mode 100644 pkg/types/repo_maintenance.go diff --git a/changelogs/unreleased/9200-blackpiglet b/changelogs/unreleased/9200-blackpiglet new file mode 100644 index 000000000..4c13079d9 --- /dev/null +++ b/changelogs/unreleased/9200-blackpiglet @@ -0,0 +1 @@ +Add ConfigMap parameters validation for install CLI and server start. diff --git a/pkg/cmd/cli/install/install.go b/pkg/cmd/cli/install/install.go index 7079ec79b..c7a7dfe7a 100644 --- a/pkg/cmd/cli/install/install.go +++ b/pkg/cmd/cli/install/install.go @@ -23,8 +23,6 @@ import ( "strings" "time" - "github.com/vmware-tanzu/velero/pkg/uploader" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -37,6 +35,8 @@ import ( "github.com/vmware-tanzu/velero/pkg/cmd/util/flag" "github.com/vmware-tanzu/velero/pkg/cmd/util/output" "github.com/vmware-tanzu/velero/pkg/install" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" + "github.com/vmware-tanzu/velero/pkg/uploader" kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -540,5 +540,31 @@ func (o *Options) Validate(c *cobra.Command, args []string, f client.Factory) er return errors.New("--pod-volume-operation-timeout must be non-negative") } + crClient, err := f.KubebuilderClient() + if err != nil { + return fmt.Errorf("fail to create go-client %w", err) + } + + // If either Linux or Windows node-agent is installed, and the node-agent-configmap + // is specified, need to validate the ConfigMap. 
+ if (o.UseNodeAgent || o.UseNodeAgentWindows) && len(o.NodeAgentConfigMap) > 0 { + if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.NodeAgentConfigMap, &velerotypes.NodeAgentConfigs{}); err != nil { + return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid", o.NodeAgentConfigMap) + } + } + + if len(o.RepoMaintenanceJobConfigMap) > 0 { + if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.RepoMaintenanceJobConfigMap, &velerotypes.JobConfigs{}); err != nil { + return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid", o.RepoMaintenanceJobConfigMap) + } + } + + if len(o.BackupRepoConfigMap) > 0 { + config := make(map[string]any) + if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.BackupRepoConfigMap, &config); err != nil { + return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid", o.BackupRepoConfigMap) + } + } + return nil } diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index 0ff5b2f1c..873e03beb 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -26,6 +26,7 @@ import ( "time" "github.com/bombsimon/logrusr/v3" + snapshotv1client "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" @@ -39,6 +40,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes" + cacheutil "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" "k8s.io/utils/clock" ctrl "sigs.k8s.io/controller-runtime" @@ -46,8 +48,6 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" - snapshotv1client "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned" - velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" "github.com/vmware-tanzu/velero/pkg/buildinfo" @@ -60,11 +60,10 @@ import ( "github.com/vmware-tanzu/velero/pkg/exposer" "github.com/vmware-tanzu/velero/pkg/metrics" "github.com/vmware-tanzu/velero/pkg/nodeagent" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/kube" "github.com/vmware-tanzu/velero/pkg/util/logging" - - cacheutil "k8s.io/client-go/tools/cache" ) var ( @@ -140,7 +139,7 @@ type nodeAgentServer struct { kubeClient kubernetes.Interface csiSnapshotClient *snapshotv1client.Clientset dataPathMgr *datapath.Manager - dataPathConfigs *nodeagent.Configs + dataPathConfigs *velerotypes.NodeAgentConfigs vgdpCounter *exposer.VgdpCounter } @@ -252,7 +251,9 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi return nil, err } - s.getDataPathConfigs() + if err := s.getDataPathConfigs(); err != nil { + return nil, err + } s.dataPathMgr = datapath.NewManager(s.getDataPathConcurrentNum(defaultDataPathConcurrentNum)) return s, nil @@ -301,7 +302,7 @@ func (s *nodeAgentServer) run() { s.logger.Infof("Using customized loadAffinity %v", loadAffinity) } - var backupPVCConfig map[string]nodeagent.BackupPVC + var backupPVCConfig map[string]velerotypes.BackupPVC if s.dataPathConfigs != nil && s.dataPathConfigs.BackupPVCConfig != nil { backupPVCConfig = s.dataPathConfigs.BackupPVCConfig s.logger.Infof("Using customized backupPVC config %v", 
backupPVCConfig) @@ -361,7 +362,7 @@ func (s *nodeAgentServer) run() { s.logger.WithError(err).Fatal("Unable to create the data upload controller") } - var restorePVCConfig nodeagent.RestorePVC + var restorePVCConfig velerotypes.RestorePVC if s.dataPathConfigs != nil && s.dataPathConfigs.RestorePVCConfig != nil { restorePVCConfig = *s.dataPathConfigs.RestorePVCConfig s.logger.Infof("Using customized restorePVC config %v", restorePVCConfig) @@ -546,19 +547,20 @@ func (s *nodeAgentServer) markLegacyPVRsFailed(client ctrlclient.Client) { var getConfigsFunc = nodeagent.GetConfigs -func (s *nodeAgentServer) getDataPathConfigs() { +func (s *nodeAgentServer) getDataPathConfigs() error { if s.config.nodeAgentConfig == "" { s.logger.Info("No node-agent configMap is specified") - return + return nil } configs, err := getConfigsFunc(s.ctx, s.namespace, s.kubeClient, s.config.nodeAgentConfig) if err != nil { - s.logger.WithError(err).Warnf("Failed to get node agent configs from configMap %s, ignore it", s.config.nodeAgentConfig) - return + s.logger.WithError(err).Errorf("Failed to get node agent configs from configMap %s, ignore it", s.config.nodeAgentConfig) + return err } s.dataPathConfigs = configs + return nil } func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int { diff --git a/pkg/cmd/cli/nodeagent/server_test.go b/pkg/cmd/cli/nodeagent/server_test.go index 1ecd3ea0d..cb1750e6b 100644 --- a/pkg/cmd/cli/nodeagent/server_test.go +++ b/pkg/cmd/cli/nodeagent/server_test.go @@ -33,6 +33,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/nodeagent" testutil "github.com/vmware-tanzu/velero/pkg/test" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" ) func Test_validatePodVolumesHostPath(t *testing.T) { @@ -130,17 +131,17 @@ func Test_validatePodVolumesHostPath(t *testing.T) { } func Test_getDataPathConfigs(t *testing.T) { - configs := &nodeagent.Configs{ - LoadConcurrency: &nodeagent.LoadConcurrency{ + configs := &velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: -1, }, } tests := []struct { name string - getFunc func(context.Context, string, kubernetes.Interface, string) (*nodeagent.Configs, error) + getFunc func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) configMapName string - expectConfigs *nodeagent.Configs + expectConfigs *velerotypes.NodeAgentConfigs expectLog string }{ { @@ -150,7 +151,7 @@ func Test_getDataPathConfigs(t *testing.T) { { name: "failed to get configs", configMapName: "node-agent-config", - getFunc: func(context.Context, string, kubernetes.Interface, string) (*nodeagent.Configs, error) { + getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) { return nil, errors.New("fake-get-error") }, expectLog: "Failed to get node agent configs from configMap node-agent-config, ignore it", @@ -158,7 +159,7 @@ func Test_getDataPathConfigs(t *testing.T) { { name: "configs cm not found", configMapName: "node-agent-config", - getFunc: func(context.Context, string, kubernetes.Interface, string) (*nodeagent.Configs, error) { + getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) { return nil, errors.New("fake-not-found-error") }, expectLog: "Failed to get node agent configs from configMap node-agent-config, ignore it", @@ -167,7 +168,7 @@ func Test_getDataPathConfigs(t *testing.T) { { name: "succeed", configMapName: 
"node-agent-config", - getFunc: func(context.Context, string, kubernetes.Interface, string) (*nodeagent.Configs, error) { + getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) { return configs, nil }, expectConfigs: configs, @@ -226,7 +227,7 @@ func Test_getDataPathConcurrentNum(t *testing.T) { tests := []struct { name string - configs nodeagent.Configs + configs velerotypes.NodeAgentConfigs setKubeClient bool kubeClientObj []runtime.Object expectNum int @@ -239,8 +240,8 @@ func Test_getDataPathConcurrentNum(t *testing.T) { }, { name: "global number is invalid", - configs: nodeagent.Configs{ - LoadConcurrency: &nodeagent.LoadConcurrency{ + configs: velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: -1, }, }, @@ -249,8 +250,8 @@ func Test_getDataPathConcurrentNum(t *testing.T) { }, { name: "global number is valid", - configs: nodeagent.Configs{ - LoadConcurrency: &nodeagent.LoadConcurrency{ + configs: velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: globalNum, }, }, @@ -258,10 +259,10 @@ func Test_getDataPathConcurrentNum(t *testing.T) { }, { name: "node is not found", - configs: nodeagent.Configs{ - LoadConcurrency: &nodeagent.LoadConcurrency{ + configs: velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: globalNum, - PerNodeConfig: []nodeagent.RuledConfigs{ + PerNodeConfig: []velerotypes.RuledConfigs{ { Number: 100, }, @@ -274,10 +275,10 @@ func Test_getDataPathConcurrentNum(t *testing.T) { }, { name: "failed to get selector", - configs: nodeagent.Configs{ - LoadConcurrency: &nodeagent.LoadConcurrency{ + configs: velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: globalNum, - PerNodeConfig: []nodeagent.RuledConfigs{ + PerNodeConfig: []velerotypes.RuledConfigs{ { NodeSelector: invalidLabelSelector, Number: 100, @@ -292,10 +293,10 @@ func Test_getDataPathConcurrentNum(t *testing.T) { }, { name: "rule number is invalid", - configs: nodeagent.Configs{ - LoadConcurrency: &nodeagent.LoadConcurrency{ + configs: velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: globalNum, - PerNodeConfig: []nodeagent.RuledConfigs{ + PerNodeConfig: []velerotypes.RuledConfigs{ { NodeSelector: validLabelSelector1, Number: -1, @@ -310,10 +311,10 @@ func Test_getDataPathConcurrentNum(t *testing.T) { }, { name: "label doesn't match", - configs: nodeagent.Configs{ - LoadConcurrency: &nodeagent.LoadConcurrency{ + configs: velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: globalNum, - PerNodeConfig: []nodeagent.RuledConfigs{ + PerNodeConfig: []velerotypes.RuledConfigs{ { NodeSelector: validLabelSelector1, Number: -1, @@ -328,10 +329,10 @@ func Test_getDataPathConcurrentNum(t *testing.T) { }, { name: "match one rule", - configs: nodeagent.Configs{ - LoadConcurrency: &nodeagent.LoadConcurrency{ + configs: velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: globalNum, - PerNodeConfig: []nodeagent.RuledConfigs{ + PerNodeConfig: []velerotypes.RuledConfigs{ { NodeSelector: validLabelSelector1, Number: 66, @@ -346,10 +347,10 @@ func Test_getDataPathConcurrentNum(t *testing.T) { }, { name: "match multiple rules", - configs: nodeagent.Configs{ - LoadConcurrency: &nodeagent.LoadConcurrency{ + configs: velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ 
GlobalConfig: globalNum, - PerNodeConfig: []nodeagent.RuledConfigs{ + PerNodeConfig: []velerotypes.RuledConfigs{ { NodeSelector: validLabelSelector1, Number: 66, @@ -368,10 +369,10 @@ func Test_getDataPathConcurrentNum(t *testing.T) { }, { name: "match multiple rules 2", - configs: nodeagent.Configs{ - LoadConcurrency: &nodeagent.LoadConcurrency{ + configs: velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: globalNum, - PerNodeConfig: []nodeagent.RuledConfigs{ + PerNodeConfig: []velerotypes.RuledConfigs{ { NodeSelector: validLabelSelector1, Number: 36, diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index e7ade6d31..15c4afd95 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -26,9 +26,8 @@ import ( "strings" "time" - volumegroupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" - logrusr "github.com/bombsimon/logrusr/v3" + volumegroupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -83,6 +82,7 @@ import ( repokey "github.com/vmware-tanzu/velero/pkg/repository/keys" repomanager "github.com/vmware-tanzu/velero/pkg/repository/manager" "github.com/vmware-tanzu/velero/pkg/restore" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/uploader" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -209,6 +209,21 @@ func newServer(f client.Factory, config *config.Config, logger *logrus.Logger) ( // Therefore, we must explicitly call it on the error paths in this function. ctx, cancelFunc := context.WithCancel(context.Background()) + if len(config.BackupRepoConfig) > 0 { + repoConfig := make(map[string]any) + if err := kube.VerifyJSONConfigs(ctx, f.Namespace(), crClient, config.BackupRepoConfig, &repoConfig); err != nil { + cancelFunc() + return nil, err + } + } + + if len(config.RepoMaintenanceJobConfig) > 0 { + if err := kube.VerifyJSONConfigs(ctx, f.Namespace(), crClient, config.RepoMaintenanceJobConfig, &velerotypes.JobConfigs{}); err != nil { + cancelFunc() + return nil, err + } + } + clientConfig, err := f.ClientConfig() if err != nil { cancelFunc() diff --git a/pkg/cmd/server/server_test.go b/pkg/cmd/server/server_test.go index ebf0b478b..1ea9d0022 100644 --- a/pkg/cmd/server/server_test.go +++ b/pkg/cmd/server/server_test.go @@ -33,6 +33,7 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" + "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/client/mocks" "github.com/vmware-tanzu/velero/pkg/cmd/server/config" "github.com/vmware-tanzu/velero/pkg/constant" @@ -242,7 +243,28 @@ func Test_newServer(t *testing.T) { ClientBurst: 1, ClientPageSize: 100, }, logger) - assert.Error(t, err) + require.Error(t, err) + + invalidCM := builder.ForConfigMap("velero", "invalid").Data("invalid", "{\"a\": \"b}").Result() + crClient := velerotest.NewFakeControllerRuntimeClient(t, invalidCM) + + factory.On("KubeClient").Return(crClient, nil). + On("Client").Return(nil, nil). 
+ On("DynamicClient").Return(nil, errors.New("error")) + _, err = newServer(factory, &config.Config{ + UploaderType: uploader.KopiaType, + BackupRepoConfig: "invalid", + }, logger) + require.Error(t, err) + + factory.On("KubeClient").Return(crClient, nil). + On("Client").Return(nil, nil). + On("DynamicClient").Return(nil, errors.New("error")) + _, err = newServer(factory, &config.Config{ + UploaderType: uploader.KopiaType, + RepoMaintenanceJobConfig: "invalid", + }, logger) + require.Error(t, err) } func Test_namespaceExists(t *testing.T) { diff --git a/pkg/controller/data_download_controller.go b/pkg/controller/data_download_controller.go index a8a2fc633..23328d387 100644 --- a/pkg/controller/data_download_controller.go +++ b/pkg/controller/data_download_controller.go @@ -49,6 +49,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/exposer" "github.com/vmware-tanzu/velero/pkg/metrics" "github.com/vmware-tanzu/velero/pkg/nodeagent" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/uploader" "github.com/vmware-tanzu/velero/pkg/util" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -66,7 +67,7 @@ type DataDownloadReconciler struct { dataPathMgr *datapath.Manager vgdpCounter *exposer.VgdpCounter loadAffinity []*kube.LoadAffinity - restorePVCConfig nodeagent.RestorePVC + restorePVCConfig velerotypes.RestorePVC podResources corev1api.ResourceRequirements preparingTimeout time.Duration metrics *metrics.ServerMetrics @@ -81,7 +82,7 @@ func NewDataDownloadReconciler( dataPathMgr *datapath.Manager, counter *exposer.VgdpCounter, loadAffinity []*kube.LoadAffinity, - restorePVCConfig nodeagent.RestorePVC, + restorePVCConfig velerotypes.RestorePVC, podResources corev1api.ResourceRequirements, nodeName string, preparingTimeout time.Duration, diff --git a/pkg/controller/data_download_controller_test.go b/pkg/controller/data_download_controller_test.go index 048a1027c..d260056e6 100644 --- a/pkg/controller/data_download_controller_test.go +++ b/pkg/controller/data_download_controller_test.go @@ -46,13 +46,12 @@ import ( "github.com/vmware-tanzu/velero/pkg/datapath" datapathmockes "github.com/vmware-tanzu/velero/pkg/datapath/mocks" "github.com/vmware-tanzu/velero/pkg/exposer" + exposermockes "github.com/vmware-tanzu/velero/pkg/exposer/mocks" "github.com/vmware-tanzu/velero/pkg/metrics" - "github.com/vmware-tanzu/velero/pkg/nodeagent" velerotest "github.com/vmware-tanzu/velero/pkg/test" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/uploader" "github.com/vmware-tanzu/velero/pkg/util/kube" - - exposermockes "github.com/vmware-tanzu/velero/pkg/exposer/mocks" ) const dataDownloadName string = "datadownload-1" @@ -130,7 +129,7 @@ func initDataDownloadReconcilerWithError(t *testing.T, objects []any, needError dataPathMgr := datapath.NewManager(1) - return NewDataDownloadReconciler(&fakeClient, nil, fakeKubeClient, dataPathMgr, nil, nil, nodeagent.RestorePVC{}, corev1api.ResourceRequirements{}, "test-node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics(), ""), nil + return NewDataDownloadReconciler(&fakeClient, nil, fakeKubeClient, dataPathMgr, nil, nil, velerotypes.RestorePVC{}, corev1api.ResourceRequirements{}, "test-node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics(), ""), nil } func TestDataDownloadReconcile(t *testing.T) { diff --git a/pkg/controller/data_upload_controller.go b/pkg/controller/data_upload_controller.go index 502385c14..d0467a843 100644 --- 
a/pkg/controller/data_upload_controller.go +++ b/pkg/controller/data_upload_controller.go @@ -50,6 +50,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/exposer" "github.com/vmware-tanzu/velero/pkg/metrics" "github.com/vmware-tanzu/velero/pkg/nodeagent" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/uploader" "github.com/vmware-tanzu/velero/pkg/util" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -76,7 +77,7 @@ type DataUploadReconciler struct { dataPathMgr *datapath.Manager vgdpCounter *exposer.VgdpCounter loadAffinity []*kube.LoadAffinity - backupPVCConfig map[string]nodeagent.BackupPVC + backupPVCConfig map[string]velerotypes.BackupPVC podResources corev1api.ResourceRequirements preparingTimeout time.Duration metrics *metrics.ServerMetrics @@ -92,7 +93,7 @@ func NewDataUploadReconciler( dataPathMgr *datapath.Manager, counter *exposer.VgdpCounter, loadAffinity []*kube.LoadAffinity, - backupPVCConfig map[string]nodeagent.BackupPVC, + backupPVCConfig map[string]velerotypes.BackupPVC, podResources corev1api.ResourceRequirements, clock clocks.WithTickerAndDelayedExecution, nodeName string, diff --git a/pkg/controller/data_upload_controller_test.go b/pkg/controller/data_upload_controller_test.go index 157ccd8d6..037726325 100644 --- a/pkg/controller/data_upload_controller_test.go +++ b/pkg/controller/data_upload_controller_test.go @@ -22,8 +22,6 @@ import ( "testing" "time" - "github.com/vmware-tanzu/velero/pkg/nodeagent" - snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" snapshotFake "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/fake" "github.com/pkg/errors" @@ -56,6 +54,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/exposer" "github.com/vmware-tanzu/velero/pkg/metrics" velerotest "github.com/vmware-tanzu/velero/pkg/test" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/uploader" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -242,7 +241,7 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci dataPathMgr, nil, nil, - map[string]nodeagent.BackupPVC{}, + map[string]velerotypes.BackupPVC{}, corev1api.ResourceRequirements{}, testclocks.NewFakeClock(now), "test-node", diff --git a/pkg/exposer/csi_snapshot.go b/pkg/exposer/csi_snapshot.go index e404d5dcf..a886c60c3 100644 --- a/pkg/exposer/csi_snapshot.go +++ b/pkg/exposer/csi_snapshot.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/vmware-tanzu/velero/pkg/nodeagent" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/csi" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -75,7 +76,7 @@ type CSISnapshotExposeParam struct { Affinity []*kube.LoadAffinity // BackupPVCConfig is the config for backupPVC (intermediate PVC) of snapshot data movement - BackupPVCConfig map[string]nodeagent.BackupPVC + BackupPVCConfig map[string]velerotypes.BackupPVC // Resources defines the resource requirements of the hosting pod Resources corev1api.ResourceRequirements diff --git a/pkg/exposer/csi_snapshot_test.go b/pkg/exposer/csi_snapshot_test.go index d57d7699b..5dd745065 100644 --- a/pkg/exposer/csi_snapshot_test.go +++ b/pkg/exposer/csi_snapshot_test.go @@ -37,8 +37,8 @@ import ( clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake" velerov1 
"github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/nodeagent" velerotest "github.com/vmware-tanzu/velero/pkg/test" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -423,7 +423,7 @@ func TestExpose(t *testing.T) { AccessMode: AccessModeFileSystem, OperationTimeout: time.Millisecond, ExposeTimeout: time.Millisecond, - BackupPVCConfig: map[string]nodeagent.BackupPVC{ + BackupPVCConfig: map[string]velerotypes.BackupPVC{ "fake-sc": { StorageClass: "fake-sc-read-only", ReadOnly: true, @@ -449,7 +449,7 @@ func TestExpose(t *testing.T) { AccessMode: AccessModeFileSystem, OperationTimeout: time.Millisecond, ExposeTimeout: time.Millisecond, - BackupPVCConfig: map[string]nodeagent.BackupPVC{ + BackupPVCConfig: map[string]velerotypes.BackupPVC{ "fake-sc": { StorageClass: "fake-sc-read-only", ReadOnly: true, @@ -476,7 +476,7 @@ func TestExpose(t *testing.T) { AccessMode: AccessModeFileSystem, OperationTimeout: time.Millisecond, ExposeTimeout: time.Millisecond, - BackupPVCConfig: map[string]nodeagent.BackupPVC{ + BackupPVCConfig: map[string]velerotypes.BackupPVC{ "fake-sc": { StorageClass: "fake-sc-read-only", }, @@ -551,7 +551,7 @@ func TestExpose(t *testing.T) { AccessMode: AccessModeFileSystem, OperationTimeout: time.Millisecond, ExposeTimeout: time.Millisecond, - BackupPVCConfig: map[string]nodeagent.BackupPVC{ + BackupPVCConfig: map[string]velerotypes.BackupPVC{ "fake-sc": { StorageClass: "fake-sc-read-only", }, @@ -607,7 +607,7 @@ func TestExpose(t *testing.T) { AccessMode: AccessModeFileSystem, OperationTimeout: time.Millisecond, ExposeTimeout: time.Millisecond, - BackupPVCConfig: map[string]nodeagent.BackupPVC{ + BackupPVCConfig: map[string]velerotypes.BackupPVC{ "fake-sc": { StorageClass: "fake-sc-read-only", }, diff --git a/pkg/exposer/generic_restore.go b/pkg/exposer/generic_restore.go index c8ac7fcce..26019d5d4 100644 --- a/pkg/exposer/generic_restore.go +++ b/pkg/exposer/generic_restore.go @@ -31,6 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/vmware-tanzu/velero/pkg/nodeagent" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -65,7 +66,7 @@ type GenericRestoreExposeParam struct { NodeOS string // RestorePVCConfig is the config for restorePVC (intermediate PVC) of generic restore - RestorePVCConfig nodeagent.RestorePVC + RestorePVCConfig velerotypes.RestorePVC // LoadAffinity specifies the node affinity of the backup pod LoadAffinity []*kube.LoadAffinity diff --git a/pkg/nodeagent/node_agent.go b/pkg/nodeagent/node_agent.go index d83a9eba3..7268b589a 100644 --- a/pkg/nodeagent/node_agent.go +++ b/pkg/nodeagent/node_agent.go @@ -29,6 +29,7 @@ import ( "k8s.io/client-go/kubernetes" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -56,67 +57,6 @@ var ( ErrNodeAgentTolerationNotFound = errors.New("node-agent toleration not found") ) -type LoadConcurrency struct { - // GlobalConfig specifies the concurrency number to all nodes for which per-node config is not specified - GlobalConfig int `json:"globalConfig,omitempty"` - - // PerNodeConfig specifies the concurrency number to nodes matched by rules - PerNodeConfig []RuledConfigs `json:"perNodeConfig,omitempty"` - - // PrepareQueueLength specifies the 
max number of loads that are under expose - PrepareQueueLength int `json:"prepareQueueLength,omitempty"` -} - -type LoadAffinity struct { - // NodeSelector specifies the label selector to match nodes - NodeSelector metav1.LabelSelector `json:"nodeSelector"` -} - -type RuledConfigs struct { - // NodeSelector specifies the label selector to match nodes - NodeSelector metav1.LabelSelector `json:"nodeSelector"` - - // Number specifies the number value associated to the matched nodes - Number int `json:"number"` -} - -type BackupPVC struct { - // StorageClass is the name of storage class to be used by the backupPVC - StorageClass string `json:"storageClass,omitempty"` - - // ReadOnly sets the backupPVC's access mode as read only - ReadOnly bool `json:"readOnly,omitempty"` - - // SPCNoRelabeling sets Spec.SecurityContext.SELinux.Type to "spc_t" for the pod mounting the backupPVC - // ignored if ReadOnly is false - SPCNoRelabeling bool `json:"spcNoRelabeling,omitempty"` -} - -type RestorePVC struct { - // IgnoreDelayBinding indicates to ignore delay binding the restorePVC when it is in WaitForFirstConsumer mode - IgnoreDelayBinding bool `json:"ignoreDelayBinding,omitempty"` -} - -type Configs struct { - // LoadConcurrency is the config for data path load concurrency per node. - LoadConcurrency *LoadConcurrency `json:"loadConcurrency,omitempty"` - - // LoadAffinity is the config for data path load affinity. - LoadAffinity []*kube.LoadAffinity `json:"loadAffinity,omitempty"` - - // BackupPVCConfig is the config for backupPVC (intermediate PVC) of snapshot data movement - BackupPVCConfig map[string]BackupPVC `json:"backupPVC,omitempty"` - - // RestoreVCConfig is the config for restorePVC (intermediate PVC) of generic restore - RestorePVCConfig *RestorePVC `json:"restorePVC,omitempty"` - - // PodResources is the resource config for various types of pods launched by node-agent, i.e., data mover pods. 
- PodResources *kube.PodResources `json:"podResources,omitempty"` - - // PriorityClassName is the priority class name for data mover pods created by the node agent - PriorityClassName string `json:"priorityClassName,omitempty"` -} - func IsRunningOnLinux(ctx context.Context, kubeClient kubernetes.Interface, namespace string) error { return isRunning(ctx, kubeClient, namespace, daemonSet) } @@ -193,7 +133,7 @@ func GetPodSpec(ctx context.Context, kubeClient kubernetes.Interface, namespace return &ds.Spec.Template.Spec, nil } -func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Interface, configName string) (*Configs, error) { +func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Interface, configName string) (*velerotypes.NodeAgentConfigs, error) { cm, err := kubeClient.CoreV1().ConfigMaps(namespace).Get(ctx, configName, metav1.GetOptions{}) if err != nil { return nil, errors.Wrapf(err, "error to get node agent configs %s", configName) @@ -208,7 +148,7 @@ func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Int jsonString = v } - configs := &Configs{} + configs := &velerotypes.NodeAgentConfigs{} err = json.Unmarshal([]byte(jsonString), configs) if err != nil { return nil, errors.Wrapf(err, "error to unmarshall configs from %s", configName) diff --git a/pkg/nodeagent/node_agent_test.go b/pkg/nodeagent/node_agent_test.go index aaf851d6e..bdc1085b4 100644 --- a/pkg/nodeagent/node_agent_test.go +++ b/pkg/nodeagent/node_agent_test.go @@ -31,6 +31,7 @@ import ( clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/vmware-tanzu/velero/pkg/builder" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -254,7 +255,7 @@ func TestGetConfigs(t *testing.T) { kubeClientObj []runtime.Object namespace string kubeReactors []reactor - expectResult *Configs + expectResult *velerotypes.NodeAgentConfigs expectErr string }{ { @@ -293,7 +294,7 @@ func TestGetConfigs(t *testing.T) { kubeClientObj: []runtime.Object{ cmWithoutCocurrentData, }, - expectResult: &Configs{}, + expectResult: &velerotypes.NodeAgentConfigs{}, }, { name: "success", @@ -301,8 +302,8 @@ func TestGetConfigs(t *testing.T) { kubeClientObj: []runtime.Object{ cmWithValidData, }, - expectResult: &Configs{ - LoadConcurrency: &LoadConcurrency{ + expectResult: &velerotypes.NodeAgentConfigs{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: 5, }, }, @@ -313,7 +314,7 @@ func TestGetConfigs(t *testing.T) { kubeClientObj: []runtime.Object{ cmWithPriorityClass, }, - expectResult: &Configs{ + expectResult: &velerotypes.NodeAgentConfigs{ PriorityClassName: "high-priority", }, }, @@ -323,9 +324,9 @@ func TestGetConfigs(t *testing.T) { kubeClientObj: []runtime.Object{ cmWithPriorityClassAndOther, }, - expectResult: &Configs{ + expectResult: &velerotypes.NodeAgentConfigs{ PriorityClassName: "low-priority", - LoadConcurrency: &LoadConcurrency{ + LoadConcurrency: &velerotypes.LoadConcurrency{ GlobalConfig: 3, }, }, diff --git a/pkg/repository/maintenance/maintenance.go b/pkg/repository/maintenance/maintenance.go index 9bc61c058..16a94535f 100644 --- a/pkg/repository/maintenance/maintenance.go +++ b/pkg/repository/maintenance/maintenance.go @@ -27,6 +27,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" + appsv1api "k8s.io/api/apps/v1" batchv1api "k8s.io/api/batch/v1" corev1api "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -36,14 +37,11 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/util" "github.com/vmware-tanzu/velero/pkg/util/kube" - - appsv1api "k8s.io/api/apps/v1" - - veleroutil "github.com/vmware-tanzu/velero/pkg/util/velero" - "github.com/vmware-tanzu/velero/pkg/util/logging" + veleroutil "github.com/vmware-tanzu/velero/pkg/util/velero" ) const ( @@ -58,21 +56,6 @@ const ( DefaultMaintenanceJobMemLimit = "0" ) -type JobConfigs struct { - // LoadAffinities is the config for repository maintenance job load affinity. - LoadAffinities []*kube.LoadAffinity `json:"loadAffinity,omitempty"` - - // PodResources is the config for the CPU and memory resources setting. - PodResources *kube.PodResources `json:"podResources,omitempty"` - - // KeepLatestMaintenanceJobs is the number of latest maintenance jobs to keep for the repository. - KeepLatestMaintenanceJobs *int `json:"keepLatestMaintenanceJobs,omitempty"` - - // PriorityClassName is the priority class name for the maintenance job pod - // Note: This is only read from the global configuration, not per-repository - PriorityClassName string `json:"priorityClassName,omitempty"` -} - func GenerateJobName(repo string) string { millisecond := time.Now().UTC().UnixMilli() // millisecond @@ -215,7 +198,7 @@ func getJobConfig( veleroNamespace string, repoMaintenanceJobConfig string, repo *velerov1api.BackupRepository, -) (*JobConfigs, error) { +) (*velerotypes.JobConfigs, error) { var cm corev1api.ConfigMap if err := client.Get( ctx, @@ -248,10 +231,10 @@ func getJobConfig( repoJobConfigKey := repo.Spec.VolumeNamespace + "-" + repo.Spec.BackupStorageLocation + "-" + repo.Spec.RepositoryType - var result *JobConfigs + var result *velerotypes.JobConfigs if _, ok := cm.Data[repoJobConfigKey]; ok { logger.Debugf("Find the repo maintenance config %s for repo %s", repoJobConfigKey, repo.Name) - result = new(JobConfigs) + result = new(velerotypes.JobConfigs) if err := json.Unmarshal([]byte(cm.Data[repoJobConfigKey]), result); err != nil { return nil, errors.Wrapf( err, @@ -265,10 +248,10 @@ func getJobConfig( logger.Debugf("Find the global repo maintenance config for repo %s", repo.Name) if result == nil { - result = new(JobConfigs) + result = new(velerotypes.JobConfigs) } - globalResult := new(JobConfigs) + globalResult := new(velerotypes.JobConfigs) if err := json.Unmarshal([]byte(cm.Data[GlobalKeyForRepoMaintenanceJobCM]), globalResult); err != nil { return nil, errors.Wrapf( @@ -466,7 +449,7 @@ func StartNewJob( return maintenanceJob.Name, nil } -func getPriorityClassName(ctx context.Context, cli client.Client, config *JobConfigs, logger logrus.FieldLogger) string { +func getPriorityClassName(ctx context.Context, cli client.Client, config *velerotypes.JobConfigs, logger logrus.FieldLogger) string { // Use the priority class name from the global job configuration if available // Note: Priority class is only read from global config, not per-repository if config != nil && config.PriorityClassName != "" { @@ -491,7 +474,7 @@ func buildJob( ctx context.Context, repo *velerov1api.BackupRepository, bslName string, - config *JobConfigs, + config *velerotypes.JobConfigs, logLevel logrus.Level, logFormat *logging.FormatFlag, logger logrus.FieldLogger, diff --git a/pkg/repository/maintenance/maintenance_test.go b/pkg/repository/maintenance/maintenance_test.go index f0a8568be..3dd12e5fa 100644 --- a/pkg/repository/maintenance/maintenance_test.go 
+++ b/pkg/repository/maintenance/maintenance_test.go @@ -27,6 +27,7 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + appsv1api "k8s.io/api/apps/v1" batchv1api "k8s.io/api/batch/v1" corev1api "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" @@ -41,11 +42,10 @@ import ( "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/repository/provider" velerotest "github.com/vmware-tanzu/velero/pkg/test" + velerotypes "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/kube" "github.com/vmware-tanzu/velero/pkg/util/logging" - - appsv1api "k8s.io/api/apps/v1" ) func TestGenerateJobName1(t *testing.T) { @@ -408,7 +408,7 @@ func TestGetJobConfig(t *testing.T) { testCases := []struct { name string repoJobConfig *corev1api.ConfigMap - expectedConfig *JobConfigs + expectedConfig *velerotypes.JobConfigs expectedError error }{ { @@ -441,7 +441,7 @@ func TestGetJobConfig(t *testing.T) { "test-default-kopia": "{\"podResources\":{\"cpuRequest\":\"100m\",\"cpuLimit\":\"200m\",\"memoryRequest\":\"100Mi\",\"memoryLimit\":\"200Mi\"},\"loadAffinity\":[{\"nodeSelector\":{\"matchExpressions\":[{\"key\":\"cloud.google.com/machine-family\",\"operator\":\"In\",\"values\":[\"e2\"]}]}}]}", }, }, - expectedConfig: &JobConfigs{ + expectedConfig: &velerotypes.JobConfigs{ PodResources: &kube.PodResources{ CPURequest: "100m", CPULimit: "200m", @@ -475,7 +475,7 @@ func TestGetJobConfig(t *testing.T) { GlobalKeyForRepoMaintenanceJobCM: "{\"podResources\":{\"cpuRequest\":\"50m\",\"cpuLimit\":\"100m\",\"memoryRequest\":\"50Mi\",\"memoryLimit\":\"100Mi\"},\"loadAffinity\":[{\"nodeSelector\":{\"matchExpressions\":[{\"key\":\"cloud.google.com/machine-family\",\"operator\":\"In\",\"values\":[\"n2\"]}]}}]}", }, }, - expectedConfig: &JobConfigs{ + expectedConfig: &velerotypes.JobConfigs{ PodResources: &kube.PodResources{ CPURequest: "50m", CPULimit: "100m", @@ -510,7 +510,7 @@ func TestGetJobConfig(t *testing.T) { "test-default-kopia": "{\"podResources\":{\"cpuRequest\":\"100m\",\"cpuLimit\":\"200m\",\"memoryRequest\":\"100Mi\",\"memoryLimit\":\"200Mi\"},\"loadAffinity\":[{\"nodeSelector\":{\"matchExpressions\":[{\"key\":\"cloud.google.com/machine-family\",\"operator\":\"In\",\"values\":[\"e2\"]}]}}]}", }, }, - expectedConfig: &JobConfigs{ + expectedConfig: &velerotypes.JobConfigs{ KeepLatestMaintenanceJobs: &keepLatestMaintenanceJobs, PodResources: &kube.PodResources{ CPURequest: "100m", @@ -930,7 +930,7 @@ func TestBuildJob(t *testing.T) { testCases := []struct { name string - m *JobConfigs + m *velerotypes.JobConfigs deploy *appsv1api.Deployment logLevel logrus.Level logFormat *logging.FormatFlag @@ -946,7 +946,7 @@ func TestBuildJob(t *testing.T) { }{ { name: "Valid maintenance job without third party labels", - m: &JobConfigs{ + m: &velerotypes.JobConfigs{ PodResources: &kube.PodResources{ CPURequest: "100m", MemoryRequest: "128Mi", @@ -998,7 +998,7 @@ func TestBuildJob(t *testing.T) { }, { name: "Valid maintenance job with third party labels", - m: &JobConfigs{ + m: &velerotypes.JobConfigs{ PodResources: &kube.PodResources{ CPURequest: "100m", MemoryRequest: "128Mi", @@ -1047,7 +1047,7 @@ func TestBuildJob(t *testing.T) { }, { name: "Error getting Velero server deployment", - m: &JobConfigs{ + m: &velerotypes.JobConfigs{ PodResources: &kube.PodResources{ CPURequest: "100m", MemoryRequest: "128Mi", @@ -1307,7 +1307,7 @@ func mockBackupRepo() 
*velerov1api.BackupRepository { func TestGetPriorityClassName(t *testing.T) { testCases := []struct { name string - config *JobConfigs + config *velerotypes.JobConfigs priorityClassExists bool expectedValue string expectedLogContains string @@ -1315,7 +1315,7 @@ func TestGetPriorityClassName(t *testing.T) { }{ { name: "empty priority class name should return empty string", - config: &JobConfigs{PriorityClassName: ""}, + config: &velerotypes.JobConfigs{PriorityClassName: ""}, expectedValue: "", expectedLogContains: "", }, @@ -1327,7 +1327,7 @@ func TestGetPriorityClassName(t *testing.T) { }, { name: "existing priority class should log info and return name", - config: &JobConfigs{PriorityClassName: "high-priority"}, + config: &velerotypes.JobConfigs{PriorityClassName: "high-priority"}, priorityClassExists: true, expectedValue: "high-priority", expectedLogContains: "Validated priority class \\\"high-priority\\\" exists in cluster", @@ -1335,7 +1335,7 @@ func TestGetPriorityClassName(t *testing.T) { }, { name: "non-existing priority class should log warning and still return name", - config: &JobConfigs{PriorityClassName: "missing-priority"}, + config: &velerotypes.JobConfigs{PriorityClassName: "missing-priority"}, priorityClassExists: false, expectedValue: "missing-priority", expectedLogContains: "Priority class \\\"missing-priority\\\" not found in cluster", @@ -1465,7 +1465,7 @@ func TestBuildJobWithPriorityClassName(t *testing.T) { require.NoError(t, err) // Create minimal job configs and resources - jobConfig := &JobConfigs{ + jobConfig := &velerotypes.JobConfigs{ PriorityClassName: tc.priorityClassName, } logLevel := logrus.InfoLevel diff --git a/pkg/types/node_agent.go b/pkg/types/node_agent.go new file mode 100644 index 000000000..d0696dc95 --- /dev/null +++ b/pkg/types/node_agent.go @@ -0,0 +1,84 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package types + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/vmware-tanzu/velero/pkg/util/kube" +) + +type LoadConcurrency struct { + // GlobalConfig specifies the concurrency number to all nodes for which per-node config is not specified + GlobalConfig int `json:"globalConfig,omitempty"` + + // PerNodeConfig specifies the concurrency number to nodes matched by rules + PerNodeConfig []RuledConfigs `json:"perNodeConfig,omitempty"` + + // PrepareQueueLength specifies the max number of loads that are under expose + PrepareQueueLength int `json:"prepareQueueLength,omitempty"` +} + +type LoadAffinity struct { + // NodeSelector specifies the label selector to match nodes + NodeSelector metav1.LabelSelector `json:"nodeSelector"` +} + +type RuledConfigs struct { + // NodeSelector specifies the label selector to match nodes + NodeSelector metav1.LabelSelector `json:"nodeSelector"` + + // Number specifies the number value associated to the matched nodes + Number int `json:"number"` +} + +type BackupPVC struct { + // StorageClass is the name of storage class to be used by the backupPVC + StorageClass string `json:"storageClass,omitempty"` + + // ReadOnly sets the backupPVC's access mode as read only + ReadOnly bool `json:"readOnly,omitempty"` + + // SPCNoRelabeling sets Spec.SecurityContext.SELinux.Type to "spc_t" for the pod mounting the backupPVC + // ignored if ReadOnly is false + SPCNoRelabeling bool `json:"spcNoRelabeling,omitempty"` +} + +type RestorePVC struct { + // IgnoreDelayBinding indicates to ignore delay binding the restorePVC when it is in WaitForFirstConsumer mode + IgnoreDelayBinding bool `json:"ignoreDelayBinding,omitempty"` +} + +type NodeAgentConfigs struct { + // LoadConcurrency is the config for data path load concurrency per node. + LoadConcurrency *LoadConcurrency `json:"loadConcurrency,omitempty"` + + // LoadAffinity is the config for data path load affinity. + LoadAffinity []*kube.LoadAffinity `json:"loadAffinity,omitempty"` + + // BackupPVCConfig is the config for backupPVC (intermediate PVC) of snapshot data movement + BackupPVCConfig map[string]BackupPVC `json:"backupPVC,omitempty"` + + // RestoreVCConfig is the config for restorePVC (intermediate PVC) of generic restore + RestorePVCConfig *RestorePVC `json:"restorePVC,omitempty"` + + // PodResources is the resource config for various types of pods launched by node-agent, i.e., data mover pods. + PodResources *kube.PodResources `json:"podResources,omitempty"` + + // PriorityClassName is the priority class name for data mover pods created by the node agent + PriorityClassName string `json:"priorityClassName,omitempty"` +} diff --git a/pkg/types/repo_maintenance.go b/pkg/types/repo_maintenance.go new file mode 100644 index 000000000..40454b248 --- /dev/null +++ b/pkg/types/repo_maintenance.go @@ -0,0 +1,34 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package types + +import "github.com/vmware-tanzu/velero/pkg/util/kube" + +type JobConfigs struct { + // LoadAffinities is the config for repository maintenance job load affinity. + LoadAffinities []*kube.LoadAffinity `json:"loadAffinity,omitempty"` + + // PodResources is the config for the CPU and memory resources setting. + PodResources *kube.PodResources `json:"podResources,omitempty"` + + // KeepLatestMaintenanceJobs is the number of latest maintenance jobs to keep for the repository. + KeepLatestMaintenanceJobs *int `json:"keepLatestMaintenanceJobs,omitempty"` + + // PriorityClassName is the priority class name for the maintenance job pod + // Note: This is only read from the global configuration, not per-repository + PriorityClassName string `json:"priorityClassName,omitempty"` +} diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go index 1720f614f..002070376 100644 --- a/pkg/util/kube/utils.go +++ b/pkg/util/kube/utils.go @@ -18,6 +18,7 @@ package kube import ( "context" + "encoding/json" "fmt" "strings" "time" @@ -34,6 +35,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/label" @@ -357,3 +359,28 @@ func HasBackupLabel(o *metav1.ObjectMeta, backupName string) bool { } return o.Labels[velerov1api.BackupNameLabel] == label.GetValidName(backupName) } + +func VerifyJSONConfigs(ctx context.Context, namespace string, crClient client.Client, configName string, configType any) error { + cm := new(corev1api.ConfigMap) + err := crClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configName}, cm) + if err != nil { + return errors.Wrapf(err, "fail to find ConfigMap %s", configName) + } + + if cm.Data == nil { + return errors.Errorf("data is not available in ConfigMap %s", configName) + } + + jsonString := "" + for _, v := range cm.Data { + jsonString = v + } + + configs := configType + err = json.Unmarshal([]byte(jsonString), configs) + if err != nil { + return errors.Wrapf(err, "error to unmarshall data from ConfigMap %s", configName) + } + + return nil +} diff --git a/pkg/util/kube/utils_test.go b/pkg/util/kube/utils_test.go index ec2251744..df23903a0 100644 --- a/pkg/util/kube/utils_test.go +++ b/pkg/util/kube/utils_test.go @@ -33,12 +33,11 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/fake" "github.com/vmware-tanzu/velero/pkg/builder" velerotest "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/uploader" - - "k8s.io/client-go/kubernetes/fake" ) func TestNamespaceAndName(t *testing.T) { @@ -681,3 +680,53 @@ func TestHasBackupLabel(t *testing.T) { assert.Equal(t, tc.expected, actual) } } + +func TestVerifyJsonConfigs(t *testing.T) { + testCases := []struct { + name string + configMapName string + configMap *corev1api.ConfigMap + configType any + expectedErr string + }{ + { + name: "ConfigMap not exist", + configMapName: "non-exist", + expectedErr: "fail to find ConfigMap non-exist: configmaps \"non-exist\" not found", + }, + { + name: "ConfigMap doesn't have data", + configMapName: "no-data", + expectedErr: "data is not available in ConfigMap no-data", + configMap: builder.ForConfigMap("velero", "no-data").Result(), + }, + { + name: "ConfigMap data is invalid", + configMapName: "invalid", 
+ expectedErr: "error to unmarshall data from ConfigMap invalid: unexpected end of JSON input", + configMap: builder.ForConfigMap("velero", "invalid").Data("global", "{\"podResources\": {\"cpuRequest\": \"100m\", \"cpuLimit\": \"200m\", \"memoryRequest\": \"100Mi\", \"memoryLimit\": \"200Mi\"}, \"keepLatestMaintenanceJobs\": 1}", "other", "{\"podResources\": {\"cpuRequest\": \"100m\", \"cpuLimit\": \"200m\", \"memoryRequest\": \"100Mi\", \"memoryLimit\": \"200Mi\"}, \"keepLatestMaintenanceJobs: 1}").Result(), + }, + { + name: "Normal case", + configMapName: "normal", + configMap: builder.ForConfigMap("velero", "normal").Data("global", "{\"podResources\": {\"cpuRequest\": \"100m\", \"cpuLimit\": \"200m\", \"memoryRequest\": \"100Mi\", \"memoryLimit\": \"200Mi\"}, \"keepLatestMaintenanceJobs\": 1}", "other", "{\"podResources\": {\"cpuRequest\": \"100m\", \"cpuLimit\": \"200m\", \"memoryRequest\": \"100Mi\", \"memoryLimit\": \"200Mi\"}, \"keepLatestMaintenanceJobs\": 1}").Result(), + configType: make(map[string]any), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + objects := make([]runtime.Object, 0) + if tc.configMap != nil { + objects = append(objects, tc.configMap) + } + fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objects...) + err := VerifyJSONConfigs(t.Context(), "velero", fakeClient, tc.configMapName, tc.configMap) + if len(tc.expectedErr) > 0 { + require.EqualError(t, err, tc.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +}