Merge pull request #9147 from blackpiglet/9136_fix

Remove the repository maintenance job parameters from velero server.
Xun Jiang/Bruce Jiang
2025-08-08 12:38:08 +08:00
committed by GitHub
8 changed files with 61 additions and 235 deletions

View File

@@ -0,0 +1 @@
+ Remove the repository maintenance job parameters from velero server.
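The flags removed here are replaced by the repository maintenance job ConfigMap the server already accepts via `--repo-maintenance-job-configmap`. A minimal sketch of reading one entry of its JSON payload, assuming trimmed stand-ins for `kube.PodResources` and `maintenance.JobConfigs` (field names taken from the test fixtures later in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for kube.PodResources (assumption: only the fields used here).
type PodResources struct {
	CPURequest    string `json:"cpuRequest,omitempty"`
	MemoryRequest string `json:"memoryRequest,omitempty"`
	CPULimit      string `json:"cpuLimit,omitempty"`
	MemoryLimit   string `json:"memoryLimit,omitempty"`
}

// Stand-in for maintenance.JobConfigs, trimmed to the fields this PR touches.
type JobConfigs struct {
	KeepLatestMaintenanceJobs *int          `json:"keepLatestMaintenanceJobs,omitempty"`
	PodResources              *PodResources `json:"podResources,omitempty"`
}

func main() {
	// One entry of the ConfigMap's data, e.g. under the "global" key.
	data := `{"keepLatestMaintenanceJobs":1,"podResources":{"cpuRequest":"50m","cpuLimit":"100m","memoryRequest":"50Mi","memoryLimit":"100Mi"}}`

	var cfg JobConfigs
	if err := json.Unmarshal([]byte(data), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(*cfg.KeepLatestMaintenanceJobs, cfg.PodResources.CPULimit) // 1 100m
}
```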

View File

@@ -16,7 +16,6 @@ import (
podvolumeconfigs "github.com/vmware-tanzu/velero/pkg/podvolume/configs"
"github.com/vmware-tanzu/velero/pkg/types"
"github.com/vmware-tanzu/velero/pkg/uploader"
- "github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/pkg/util/logging"
)
@@ -47,12 +46,6 @@ const (
defaultMaxConcurrentK8SConnections = 30
defaultDisableInformerCache = false
- DefaultKeepLatestMaintenanceJobs = 3
- DefaultMaintenanceJobCPURequest = "0"
- DefaultMaintenanceJobCPULimit = "0"
- DefaultMaintenanceJobMemRequest = "0"
- DefaultMaintenanceJobMemLimit = "0"
DefaultItemBlockWorkerCount = 1
)
@@ -179,8 +172,6 @@ type Config struct {
CredentialsDirectory string
BackupRepoConfig string
RepoMaintenanceJobConfig string
- PodResources kube.PodResources
- KeepLatestMaintenanceJobs int
ItemBlockWorkerCount int
}
@@ -213,14 +204,7 @@ func GetDefaultConfig() *Config {
DisableInformerCache: defaultDisableInformerCache,
ScheduleSkipImmediately: false,
CredentialsDirectory: credentials.DefaultStoreDirectory(),
- PodResources: kube.PodResources{
- CPURequest: DefaultMaintenanceJobCPULimit,
- CPULimit: DefaultMaintenanceJobCPURequest,
- MemoryRequest: DefaultMaintenanceJobMemRequest,
- MemoryLimit: DefaultMaintenanceJobMemLimit,
- },
- KeepLatestMaintenanceJobs: DefaultKeepLatestMaintenanceJobs,
- ItemBlockWorkerCount: DefaultItemBlockWorkerCount,
+ ItemBlockWorkerCount: DefaultItemBlockWorkerCount,
}
return config
@@ -258,36 +242,6 @@ func (c *Config) BindFlags(flags *pflag.FlagSet) {
flags.BoolVar(&c.ScheduleSkipImmediately, "schedule-skip-immediately", c.ScheduleSkipImmediately, "Skip the first scheduled backup immediately after creating a schedule. Default is false (don't skip).")
flags.Var(&c.DefaultVolumeSnapshotLocations, "default-volume-snapshot-locations", "List of unique volume providers and default volume snapshot location (provider1:location-01,provider2:location-02,...)")
- flags.IntVar(
- &c.KeepLatestMaintenanceJobs,
- "keep-latest-maintenance-jobs",
- c.KeepLatestMaintenanceJobs,
- "Number of latest maintenance jobs to keep each repository. Optional.",
- )
- flags.StringVar(
- &c.PodResources.CPURequest,
- "maintenance-job-cpu-request",
- c.PodResources.CPURequest,
- "CPU request for maintenance job. Default is no limit.",
- )
- flags.StringVar(
- &c.PodResources.MemoryRequest,
- "maintenance-job-mem-request",
- c.PodResources.MemoryRequest,
- "Memory request for maintenance job. Default is no limit.",
- )
- flags.StringVar(
- &c.PodResources.CPULimit,
- "maintenance-job-cpu-limit",
- c.PodResources.CPULimit,
- "CPU limit for maintenance job. Default is no limit.",
- )
- flags.StringVar(
- &c.PodResources.MemoryLimit,
- "maintenance-job-mem-limit",
- c.PodResources.MemoryLimit,
- "Memory limit for maintenance job. Default is no limit.",
- )
flags.StringVar(
&c.BackupRepoConfig,
"backup-repository-configmap",

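With the five bindings above gone, the server no longer recognizes `--keep-latest-maintenance-jobs`, `--maintenance-job-cpu-request`, `--maintenance-job-mem-request`, `--maintenance-job-cpu-limit`, or `--maintenance-job-mem-limit`. A toy sketch (hypothetical flag set, not Velero's actual startup code) of what a deployment still passing one of them will hit:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Hypothetical stand-in for the server's flag set: the removed
	// maintenance flags are simply no longer registered.
	flags := pflag.NewFlagSet("velero-server", pflag.ContinueOnError)
	flags.String("backup-repository-configmap", "", "still supported")

	err := flags.Parse([]string{"--maintenance-job-cpu-request=100m"})
	fmt.Println(err) // unknown flag: --maintenance-job-cpu-request
}
```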
View File

@@ -9,11 +9,11 @@ import (
func TestGetDefaultConfig(t *testing.T) {
config := GetDefaultConfig()
- assert.Equal(t, "0", config.PodResources.CPULimit)
+ assert.Equal(t, 1, config.ItemBlockWorkerCount)
}
func TestBindFlags(t *testing.T) {
config := GetDefaultConfig()
config.BindFlags(pflag.CommandLine)
- assert.Equal(t, "0", config.PodResources.CPULimit)
+ assert.Equal(t, 1, config.ItemBlockWorkerCount)
}

View File

@@ -738,9 +738,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
s.repoManager,
s.config.RepoMaintenanceFrequency,
s.config.BackupRepoConfig,
- s.config.KeepLatestMaintenanceJobs,
s.config.RepoMaintenanceJobConfig,
- s.config.PodResources,
s.logLevel,
s.config.LogFormat,
).SetupWithManager(s.mgr); err != nil {

View File

@@ -57,17 +57,15 @@ const (
type BackupRepoReconciler struct {
client.Client
- namespace string
- logger logrus.FieldLogger
- clock clocks.WithTickerAndDelayedExecution
- maintenanceFrequency time.Duration
- backupRepoConfig string
- repositoryManager repomanager.Manager
- keepLatestMaintenanceJobs int
- repoMaintenanceConfig string
- maintenanceJobResources kube.PodResources
- logLevel logrus.Level
- logFormat *logging.FormatFlag
+ namespace string
+ logger logrus.FieldLogger
+ clock clocks.WithTickerAndDelayedExecution
+ maintenanceFrequency time.Duration
+ backupRepoConfig string
+ repositoryManager repomanager.Manager
+ repoMaintenanceConfig string
+ logLevel logrus.Level
+ logFormat *logging.FormatFlag
}
func NewBackupRepoReconciler(
@@ -77,9 +75,7 @@ func NewBackupRepoReconciler(
repositoryManager repomanager.Manager,
maintenanceFrequency time.Duration,
backupRepoConfig string,
- keepLatestMaintenanceJobs int,
repoMaintenanceConfig string,
- maintenanceJobResources kube.PodResources,
logLevel logrus.Level,
logFormat *logging.FormatFlag,
) *BackupRepoReconciler {
@@ -91,9 +87,7 @@ func NewBackupRepoReconciler(
maintenanceFrequency,
backupRepoConfig,
repositoryManager,
- keepLatestMaintenanceJobs,
repoMaintenanceConfig,
- maintenanceJobResources,
logLevel,
logFormat,
}
@@ -275,15 +269,13 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, errors.Wrap(err, "error check and run repo maintenance jobs")
}
- // Get the configured number of maintenance jobs to keep from ConfigMap, fallback to CLI parameter
- keepJobs := r.keepLatestMaintenanceJobs
- if configuredKeep, err := maintenance.GetKeepLatestMaintenanceJobs(ctx, r.Client, log, r.namespace, r.repoMaintenanceConfig, backupRepo); err != nil {
+ // Get the configured number of maintenance jobs to keep from ConfigMap
+ keepJobs, err := maintenance.GetKeepLatestMaintenanceJobs(ctx, r.Client, log, r.namespace, r.repoMaintenanceConfig, backupRepo)
+ if err != nil {
log.WithError(err).Warn("Failed to get keepLatestMaintenanceJobs from ConfigMap, using CLI parameter value")
- } else if configuredKeep > 0 {
- keepJobs = configuredKeep
}
- if err := maintenance.DeleteOldJobs(r.Client, req.Name, keepJobs); err != nil {
+ if err := maintenance.DeleteOldJobs(r.Client, req.Name, keepJobs, log); err != nil {
log.WithError(err).Warn("Failed to delete old maintenance jobs")
}
}
@@ -496,7 +488,7 @@ func (r *BackupRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *vel
log.Info("Running maintenance on backup repository")
- job, err := funcStartMaintenanceJob(r.Client, ctx, req, r.repoMaintenanceConfig, r.maintenanceJobResources, r.logLevel, r.logFormat, log)
+ job, err := funcStartMaintenanceJob(r.Client, ctx, req, r.repoMaintenanceConfig, r.logLevel, r.logFormat, log)
if err != nil {
log.WithError(err).Warn("Starting repo maintenance failed")
return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
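The resolution order this hunk leaves behind: the ConfigMap value wins when present, otherwise the package default applies; there is no server flag to fall back to anymore. A toy sketch of that logic with stand-in types (not the actual Velero functions):

```go
package main

import "fmt"

// Stand-in for maintenance.JobConfigs, trimmed to the one field used here.
type JobConfigs struct {
	KeepLatestMaintenanceJobs *int
}

// Mirrors DefaultKeepLatestMaintenanceJobs in the maintenance package.
const defaultKeepLatestMaintenanceJobs = 3

// keepLatestJobs sketches the shape of GetKeepLatestMaintenanceJobs:
// use the configured count when set, the package default otherwise.
func keepLatestJobs(cfg *JobConfigs) int {
	if cfg != nil && cfg.KeepLatestMaintenanceJobs != nil {
		return *cfg.KeepLatestMaintenanceJobs
	}
	return defaultKeepLatestMaintenanceJobs
}

func main() {
	five := 5
	fmt.Println(keepLatestJobs(&JobConfigs{KeepLatestMaintenanceJobs: &five})) // 5
	fmt.Println(keepLatestJobs(nil))                                           // 3
}
```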

View File

@@ -39,7 +39,6 @@ import (
repomokes "github.com/vmware-tanzu/velero/pkg/repository/mocks"
repotypes "github.com/vmware-tanzu/velero/pkg/repository/types"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
- "github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/pkg/util/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -63,9 +62,7 @@ func mockBackupRepoReconciler(t *testing.T, mockOn string, arg any, ret ...any)
mgr,
testMaintenanceFrequency,
"fake-repo-config",
- 3,
"",
- kube.PodResources{},
logrus.InfoLevel,
nil,
)
@@ -176,11 +173,11 @@ func TestCheckNotReadyRepo(t *testing.T) {
})
}
- func startMaintenanceJobFail(client.Client, context.Context, *velerov1api.BackupRepository, string, kube.PodResources, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error) {
+ func startMaintenanceJobFail(client.Client, context.Context, *velerov1api.BackupRepository, string, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error) {
return "", errors.New("fake-start-error")
}
- func startMaintenanceJobSucceed(client.Client, context.Context, *velerov1api.BackupRepository, string, kube.PodResources, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error) {
+ func startMaintenanceJobSucceed(client.Client, context.Context, *velerov1api.BackupRepository, string, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error) {
return "fake-job-name", nil
}
return "fake-job-name", nil
}
@@ -243,7 +240,7 @@ func TestRunMaintenanceIfDue(t *testing.T) {
tests := []struct {
name string
repo *velerov1api.BackupRepository
- startJobFunc func(client.Client, context.Context, *velerov1api.BackupRepository, string, kube.PodResources, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error)
+ startJobFunc func(client.Client, context.Context, *velerov1api.BackupRepository, string, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error)
startJobFunc func(client.Client, context.Context, *velerov1api.BackupRepository, string, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error)
waitJobFunc func(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error)
expectedMaintenanceTime time.Time
expectedHistory []velerov1api.BackupRepositoryMaintenanceStatus
@@ -584,9 +581,7 @@ func TestGetRepositoryMaintenanceFrequency(t *testing.T) {
&mgr,
test.userDefinedFreq,
"",
- 3,
"",
- kube.PodResources{},
logrus.InfoLevel,
nil,
)
@@ -718,11 +713,10 @@ func TestNeedInvalidBackupRepo(t *testing.T) {
nil,
time.Duration(0),
"",
- 3,
"",
- kube.PodResources{},
logrus.InfoLevel,
- nil)
+ nil,
+ )
need := reconciler.needInvalidBackupRepo(test.oldBSL, test.newBSL)
assert.Equal(t, test.expect, need)
@@ -1474,96 +1468,10 @@ func TestGetLastMaintenanceTimeFromHistory(t *testing.T) {
}
}
- // This test verify the BackupRepository controller will keep no more jobs
- // than the number of test case's keptJobNumber.
- func TestDeleteOldMaintenanceJob(t *testing.T) {
- now := time.Now().Round(time.Second)
- tests := []struct {
- name string
- repo *velerov1api.BackupRepository
- keptJobNumber int // The BackupRepository controller's keepLatestMaintenanceJobs parameter
- expectNil bool
- maintenanceJobs []batchv1api.Job
- bsl *velerov1api.BackupStorageLocation
- }{
- {
- name: "test maintenance job cleaning when repo is ready",
- repo: &velerov1api.BackupRepository{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: velerov1api.DefaultNamespace,
- Name: "repo",
- },
- Spec: velerov1api.BackupRepositorySpec{
- MaintenanceFrequency: metav1.Duration{Duration: testMaintenanceFrequency},
- BackupStorageLocation: "default",
- },
- Status: velerov1api.BackupRepositoryStatus{
- LastMaintenanceTime: &metav1.Time{Time: time.Now()},
- RecentMaintenance: []velerov1api.BackupRepositoryMaintenanceStatus{
- {
- StartTimestamp: &metav1.Time{Time: now.Add(-time.Minute)},
- CompleteTimestamp: &metav1.Time{Time: now},
- Result: velerov1api.BackupRepositoryMaintenanceSucceeded,
- },
- }, Phase: velerov1api.BackupRepositoryPhaseReady,
- },
- },
- keptJobNumber: 1,
- expectNil: true,
- maintenanceJobs: []batchv1api.Job{
- *builder.ForJob("velero", "job-01").ObjectMeta(builder.WithLabels(repomaintenance.RepositoryNameLabel, "repo")).Succeeded(1).Result(),
- *builder.ForJob("velero", "job-02").ObjectMeta(builder.WithLabels(repomaintenance.RepositoryNameLabel, "repo")).Succeeded(1).Result(),
- },
- bsl: builder.ForBackupStorageLocation("velero", "default").Result(),
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- crClient := velerotest.NewFakeControllerRuntimeClient(t, test.repo, test.bsl)
- for _, job := range test.maintenanceJobs {
- require.NoError(t, crClient.Create(t.Context(), &job))
- }
- repoLocker := repository.NewRepoLocker()
- mgr := repomanager.NewManager("", crClient, repoLocker, nil, nil, nil)
- reconciler := NewBackupRepoReconciler(
- velerov1api.DefaultNamespace,
- velerotest.NewLogger(),
- crClient,
- mgr,
- time.Duration(0),
- "",
- test.keptJobNumber,
- "",
- kube.PodResources{},
- logrus.InfoLevel,
- nil,
- )
- _, err := reconciler.Reconcile(t.Context(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.repo.Namespace, Name: "repo"}})
- if test.expectNil {
- require.NoError(t, err)
- } else {
- require.Error(t, err)
- }
- if len(test.maintenanceJobs) > 0 {
- jobList := new(batchv1api.JobList)
- require.NoError(t, reconciler.Client.List(t.Context(), jobList, &client.ListOptions{Namespace: "velero"}))
- assert.Len(t, jobList.Items, 1)
- }
- })
- }
- }
func TestDeleteOldMaintenanceJobWithConfigMap(t *testing.T) {
tests := []struct {
name string
repo *velerov1api.BackupRepository
- serverKeepJobs int
expectedKeptJobs int
maintenanceJobs []batchv1api.Job
bsl *velerov1api.BackupStorageLocation
@@ -1586,7 +1494,6 @@ func TestDeleteOldMaintenanceJobWithConfigMap(t *testing.T) {
Phase: velerov1api.BackupRepositoryPhaseReady,
},
},
- serverKeepJobs: 3,
expectedKeptJobs: 5,
maintenanceJobs: []batchv1api.Job{
*builder.ForJob("velero", "job-01").ObjectMeta(builder.WithLabels(repomaintenance.RepositoryNameLabel, "repo")).Succeeded(1).Result(),
@@ -1624,7 +1531,6 @@ func TestDeleteOldMaintenanceJobWithConfigMap(t *testing.T) {
Phase: velerov1api.BackupRepositoryPhaseReady,
},
},
- serverKeepJobs: 3,
expectedKeptJobs: 2,
maintenanceJobs: []batchv1api.Job{
*builder.ForJob("velero", "job-01").ObjectMeta(builder.WithLabels(repomaintenance.RepositoryNameLabel, "repo")).Succeeded(1).Result(),
@@ -1643,34 +1549,6 @@ func TestDeleteOldMaintenanceJobWithConfigMap(t *testing.T) {
},
},
},
- {
- name: "test fallback to CLI parameter when no ConfigMap",
- repo: &velerov1api.BackupRepository{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: velerov1api.DefaultNamespace,
- Name: "repo",
- },
- Spec: velerov1api.BackupRepositorySpec{
- MaintenanceFrequency: metav1.Duration{Duration: testMaintenanceFrequency},
- BackupStorageLocation: "default",
- VolumeNamespace: "test-ns",
- RepositoryType: "restic",
- },
- Status: velerov1api.BackupRepositoryStatus{
- Phase: velerov1api.BackupRepositoryPhaseReady,
- },
- },
- serverKeepJobs: 2,
- expectedKeptJobs: 2,
- maintenanceJobs: []batchv1api.Job{
- *builder.ForJob("velero", "job-01").ObjectMeta(builder.WithLabels(repomaintenance.RepositoryNameLabel, "repo")).Succeeded(1).Result(),
- *builder.ForJob("velero", "job-02").ObjectMeta(builder.WithLabels(repomaintenance.RepositoryNameLabel, "repo")).Succeeded(1).Result(),
- *builder.ForJob("velero", "job-03").ObjectMeta(builder.WithLabels(repomaintenance.RepositoryNameLabel, "repo")).Succeeded(1).Result(),
- *builder.ForJob("velero", "job-04").ObjectMeta(builder.WithLabels(repomaintenance.RepositoryNameLabel, "repo")).Succeeded(1).Result(),
- },
- bsl: builder.ForBackupStorageLocation("velero", "default").Result(),
- repoMaintenanceJob: nil, // No ConfigMap
- },
}
for _, test := range tests {
@@ -1700,9 +1578,7 @@ func TestDeleteOldMaintenanceJobWithConfigMap(t *testing.T) {
mgr,
time.Duration(0),
"",
- test.serverKeepJobs,
repoMaintenanceConfigName,
- kube.PodResources{},
logrus.InfoLevel,
nil,
)
@@ -1759,9 +1635,7 @@ func TestInitializeRepoWithRepositoryTypes(t *testing.T) {
mgr,
testMaintenanceFrequency,
"",
- 3,
"",
- kube.PodResources{},
logrus.InfoLevel,
nil,
)
@@ -1812,9 +1686,7 @@ func TestInitializeRepoWithRepositoryTypes(t *testing.T) {
mgr,
testMaintenanceFrequency,
"",
- 3,
"",
- kube.PodResources{},
logrus.InfoLevel,
nil,
)
@@ -1864,9 +1736,7 @@ func TestInitializeRepoWithRepositoryTypes(t *testing.T) {
mgr,
testMaintenanceFrequency,
"",
- 3,
"",
- kube.PodResources{},
logrus.InfoLevel,
nil,
)

View File

@@ -50,6 +50,12 @@ const (
RepositoryNameLabel = "velero.io/repo-name"
GlobalKeyForRepoMaintenanceJobCM = "global"
TerminationLogIndicator = "Repo maintenance error: "
+ DefaultKeepLatestMaintenanceJobs = 3
+ DefaultMaintenanceJobCPURequest = "0"
+ DefaultMaintenanceJobCPULimit = "0"
+ DefaultMaintenanceJobMemRequest = "0"
+ DefaultMaintenanceJobMemLimit = "0"
)
type JobConfigs struct {
@@ -79,7 +85,8 @@ func GenerateJobName(repo string) string {
}
// DeleteOldJobs deletes old maintenance jobs and keeps the latest N jobs
- func DeleteOldJobs(cli client.Client, repo string, keep int) error {
+ func DeleteOldJobs(cli client.Client, repo string, keep int, logger logrus.FieldLogger) error {
+ logger.Infof("Start to delete old maintenance jobs. %d jobs will be kept.", keep)
// Get the maintenance job list by label
jobList := &batchv1api.JobList{}
err := cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
@@ -289,11 +296,14 @@ func getJobConfig(
}
}
logger.Debugf("Didn't find content for repository %s in cm %s", repo.Name, repoMaintenanceJobConfig)
return result, nil
}
// GetKeepLatestMaintenanceJobs returns the configured number of maintenance jobs to keep from the JobConfigs.
- // If not configured in the ConfigMap, it returns 0 to indicate using the fallback value.
+ // Because the CLI configured Job kept number is deprecated,
+ // if not configured in the ConfigMap, it returns default value to indicate using the fallback value.
func GetKeepLatestMaintenanceJobs(
ctx context.Context,
client client.Client,
@@ -303,19 +313,19 @@ func GetKeepLatestMaintenanceJobs(
repo *velerov1api.BackupRepository,
) (int, error) {
if repoMaintenanceJobConfig == "" {
- return 0, nil
+ return DefaultKeepLatestMaintenanceJobs, nil
}
config, err := getJobConfig(ctx, client, logger, veleroNamespace, repoMaintenanceJobConfig, repo)
if err != nil {
- return 0, err
+ return DefaultKeepLatestMaintenanceJobs, err
}
if config != nil && config.KeepLatestMaintenanceJobs != nil {
return *config.KeepLatestMaintenanceJobs, nil
}
- return 0, nil
+ return DefaultKeepLatestMaintenanceJobs, nil
}
// WaitJobComplete waits the completion of the specified maintenance job and return the BackupRepositoryMaintenanceStatus
@@ -402,8 +412,15 @@ func WaitAllJobsComplete(ctx context.Context, cli client.Client, repo *velerov1a
}
// StartNewJob creates a new maintenance job
- func StartNewJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRepository, repoMaintenanceJobConfig string,
- podResources kube.PodResources, logLevel logrus.Level, logFormat *logging.FormatFlag, logger logrus.FieldLogger) (string, error) {
+ func StartNewJob(
+ cli client.Client,
+ ctx context.Context,
+ repo *velerov1api.BackupRepository,
+ repoMaintenanceJobConfig string,
+ logLevel logrus.Level,
+ logFormat *logging.FormatFlag,
+ logger logrus.FieldLogger,
+ ) (string, error) {
bsl := &velerov1api.BackupStorageLocation{}
if err := cli.Get(ctx, client.ObjectKey{Namespace: repo.Namespace, Name: repo.Spec.BackupStorageLocation}, bsl); err != nil {
return "", errors.WithStack(err)
@@ -433,7 +450,7 @@ func StartNewJob(cli client.Client, ctx context.Context, repo *velerov1api.Backu
log.Info("Starting maintenance repo")
- maintenanceJob, err := buildJob(cli, ctx, repo, bsl.Name, jobConfig, podResources, logLevel, logFormat, log)
+ maintenanceJob, err := buildJob(cli, ctx, repo, bsl.Name, jobConfig, logLevel, logFormat, log)
if err != nil {
return "", errors.Wrap(err, "error to build maintenance job")
}
@@ -475,7 +492,6 @@ func buildJob(
repo *velerov1api.BackupRepository,
bslName string,
config *JobConfigs,
- podResources kube.PodResources,
logLevel logrus.Level,
logFormat *logging.FormatFlag,
logger logrus.FieldLogger,
@@ -514,10 +530,10 @@ func buildJob(
image := veleroutil.GetVeleroServerImage(deployment)
// Set resource limits and requests
- cpuRequest := podResources.CPURequest
- memRequest := podResources.MemoryRequest
- cpuLimit := podResources.CPULimit
- memLimit := podResources.MemoryLimit
+ cpuRequest := DefaultMaintenanceJobCPURequest
+ memRequest := DefaultMaintenanceJobMemRequest
+ cpuLimit := DefaultMaintenanceJobCPULimit
+ memLimit := DefaultMaintenanceJobMemLimit
if config != nil && config.PodResources != nil {
cpuRequest = config.PodResources.CPURequest
memRequest = config.PodResources.MemoryRequest
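The net effect of this hunk: maintenance jobs start from the package defaults ("0", meaning no request/limit is set) and only a `podResources` entry in the ConfigMap overrides them. A minimal sketch of that override under the same "0 means unset" assumption, with a stand-in PodResources type:

```go
package main

import "fmt"

// Stand-in for kube.PodResources.
type PodResources struct {
	CPURequest, MemoryRequest, CPULimit, MemoryLimit string
}

// resolveResources sketches the buildJob logic above: begin with the
// package defaults and let a ConfigMap podResources entry replace them.
func resolveResources(configured *PodResources) PodResources {
	res := PodResources{CPURequest: "0", MemoryRequest: "0", CPULimit: "0", MemoryLimit: "0"}
	if configured != nil {
		res = *configured
	}
	return res
}

func main() {
	fmt.Println(resolveResources(nil)) // {0 0 0 0} -- no requests or limits set
	fmt.Println(resolveResources(&PodResources{
		CPURequest: "50m", MemoryRequest: "50Mi", CPULimit: "100m", MemoryLimit: "100Mi",
	}))
}
```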

View File

@@ -118,7 +118,7 @@ func TestDeleteOldJobs(t *testing.T) {
cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
// Call the function
- err := DeleteOldJobs(cli, repo, keep)
+ err := DeleteOldJobs(cli, repo, keep, velerotest.NewLogger())
require.NoError(t, err)
// Get the remaining jobs
@@ -388,6 +388,7 @@ func TestGetResultFromJob(t *testing.T) {
}
func TestGetJobConfig(t *testing.T) {
+ keepLatestMaintenanceJobs := 1
ctx := t.Context()
logger := logrus.New()
veleroNamespace := "velero"
@@ -505,11 +506,12 @@ func TestGetJobConfig(t *testing.T) {
Name: repoMaintenanceJobConfig,
},
Data: map[string]string{
- GlobalKeyForRepoMaintenanceJobCM: "{\"podResources\":{\"cpuRequest\":\"50m\",\"cpuLimit\":\"100m\",\"memoryRequest\":\"50Mi\",\"memoryLimit\":\"100Mi\"},\"loadAffinity\":[{\"nodeSelector\":{\"matchExpressions\":[{\"key\":\"cloud.google.com/machine-family\",\"operator\":\"In\",\"values\":[\"n2\"]}]}}]}",
+ GlobalKeyForRepoMaintenanceJobCM: "{\"keepLatestMaintenanceJobs\":1,\"podResources\":{\"cpuRequest\":\"50m\",\"cpuLimit\":\"100m\",\"memoryRequest\":\"50Mi\",\"memoryLimit\":\"100Mi\"},\"loadAffinity\":[{\"nodeSelector\":{\"matchExpressions\":[{\"key\":\"cloud.google.com/machine-family\",\"operator\":\"In\",\"values\":[\"n2\"]}]}}]}",
"test-default-kopia": "{\"podResources\":{\"cpuRequest\":\"100m\",\"cpuLimit\":\"200m\",\"memoryRequest\":\"100Mi\",\"memoryLimit\":\"200Mi\"},\"loadAffinity\":[{\"nodeSelector\":{\"matchExpressions\":[{\"key\":\"cloud.google.com/machine-family\",\"operator\":\"In\",\"values\":[\"e2\"]}]}}]}",
},
},
expectedConfig: &JobConfigs{
+ KeepLatestMaintenanceJobs: &keepLatestMaintenanceJobs,
PodResources: &kube.PodResources{
CPURequest: "100m",
CPULimit: "200m",
@@ -1099,7 +1101,6 @@ func TestBuildJob(t *testing.T) {
param.BackupRepo,
param.BackupLocation.Name,
tc.m,
- *tc.m.PodResources,
tc.logLevel,
tc.logFormat,
logrus.New(),
@@ -1179,7 +1180,7 @@ func TestGetKeepLatestMaintenanceJobs(t *testing.T) {
repoMaintenanceJobConfig: "",
configMap: nil,
repo: mockBackupRepo(),
- expectedValue: 0,
+ expectedValue: 3,
expectError: false,
},
{
@@ -1187,7 +1188,7 @@ func TestGetKeepLatestMaintenanceJobs(t *testing.T) {
repoMaintenanceJobConfig: "non-existent-config",
configMap: nil,
repo: mockBackupRepo(),
- expectedValue: 0,
+ expectedValue: 3,
expectError: false,
},
{
@@ -1236,7 +1237,7 @@ func TestGetKeepLatestMaintenanceJobs(t *testing.T) {
},
},
repo: mockBackupRepo(),
- expectedValue: 0,
+ expectedValue: 3,
expectError: false,
},
{
@@ -1252,7 +1253,7 @@ func TestGetKeepLatestMaintenanceJobs(t *testing.T) {
},
},
repo: mockBackupRepo(),
- expectedValue: 0,
+ expectedValue: 3,
expectError: true,
},
}
@@ -1467,18 +1468,12 @@ func TestBuildJobWithPriorityClassName(t *testing.T) {
jobConfig := &JobConfigs{
PriorityClassName: tc.priorityClassName,
}
- podResources := kube.PodResources{
- CPURequest: "100m",
- MemoryRequest: "128Mi",
- CPULimit: "200m",
- MemoryLimit: "256Mi",
- }
logLevel := logrus.InfoLevel
logFormat := logging.NewFormatFlag()
logFormat.Set("text")
// Call buildJob
- job, err := buildJob(client, t.Context(), repo, "default", jobConfig, podResources, logLevel, logFormat, logrus.New())
+ job, err := buildJob(client, t.Context(), repo, "default", jobConfig, logLevel, logFormat, logrus.New())
require.NoError(t, err)
// Verify the priority class name is set correctly
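The fixture above carries both a "global" entry and a repository-specific one ("test-default-kopia"), and the expected config takes `keepLatestMaintenanceJobs` from the global entry while the pod resources come from the repo-specific one. A hedged sketch of the field-level merge that fixture implies (stand-in types; the real lookup lives in getJobConfig):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type PodResources struct {
	CPURequest string `json:"cpuRequest,omitempty"`
	CPULimit   string `json:"cpuLimit,omitempty"`
}

type JobConfigs struct {
	KeepLatestMaintenanceJobs *int          `json:"keepLatestMaintenanceJobs,omitempty"`
	PodResources              *PodResources `json:"podResources,omitempty"`
}

// lookup sketches the merge TestGetJobConfig's expectedConfig implies:
// start from the "global" entry, then let fields set in the repo-specific
// entry override it.
func lookup(data map[string]string, repoKey string) (*JobConfigs, error) {
	result := &JobConfigs{}
	for _, key := range []string{"global", repoKey} {
		raw, ok := data[key]
		if !ok {
			continue
		}
		var cfg JobConfigs
		if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
			return nil, err
		}
		if cfg.KeepLatestMaintenanceJobs != nil {
			result.KeepLatestMaintenanceJobs = cfg.KeepLatestMaintenanceJobs
		}
		if cfg.PodResources != nil {
			result.PodResources = cfg.PodResources
		}
	}
	return result, nil
}

func main() {
	data := map[string]string{
		"global":             `{"keepLatestMaintenanceJobs":1,"podResources":{"cpuRequest":"50m","cpuLimit":"100m"}}`,
		"test-default-kopia": `{"podResources":{"cpuRequest":"100m","cpuLimit":"200m"}}`,
	}
	cfg, err := lookup(data, "test-default-kopia")
	if err != nil {
		panic(err)
	}
	fmt.Println(*cfg.KeepLatestMaintenanceJobs, cfg.PodResources.CPURequest) // 1 100m
}
```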