From 5ebb055c57eafeddd88229e6747f83b88e063cbc Mon Sep 17 00:00:00 2001
From: Xun Jiang/Bruce Jiang <59276555+blackpiglet@users.noreply.github.com>
Date: Fri, 27 Jun 2025 08:49:26 +0800
Subject: [PATCH] Add UT for maintenance's DeleteOldJobs function. (#9013)

Unify the k8s.io/api/batch/v1 import alias to batchv1api.

Signed-off-by: Xun Jiang
---
 .golangci.yaml                             |  2 +
 pkg/builder/job_builder.go                 | 62 ++++++++++++
 .../backup_repository_controller.go        | 16 +++-
 .../backup_repository_controller_test.go   | 96 ++++++++++++++++++-
 pkg/repository/maintenance/maintenance.go  | 22 ++---
 .../maintenance/maintenance_test.go        | 50 +++++-----
 pkg/test/fake_controller_runtime_client.go |  2 +
 7 files changed, 207 insertions(+), 43 deletions(-)
 create mode 100644 pkg/builder/job_builder.go

diff --git a/.golangci.yaml b/.golangci.yaml
index d1bd80fd6..d899273a0 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -149,6 +149,8 @@ linters:
         pkg: k8s.io/apimachinery/pkg/apis/meta/v1
       - alias: storagev1api
         pkg: k8s.io/api/storage/v1
+      - alias: batchv1api
+        pkg: k8s.io/api/batch/v1
   lll:
     # max line length, lines longer will be reported. Default is 120.
diff --git a/pkg/builder/job_builder.go b/pkg/builder/job_builder.go
new file mode 100644
index 000000000..548fc1992
--- /dev/null
+++ b/pkg/builder/job_builder.go
@@ -0,0 +1,62 @@
+/*
+Copyright The Velero Contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+	batchv1api "k8s.io/api/batch/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type JobBuilder struct {
+	object *batchv1api.Job
+}
+
+// ForJob is the constructor for a JobBuilder.
+func ForJob(ns, name string) *JobBuilder {
+	return &JobBuilder{
+		object: &batchv1api.Job{
+			TypeMeta: metav1.TypeMeta{
+				APIVersion: batchv1api.SchemeGroupVersion.String(),
+				Kind:       "Job",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: ns,
+				Name:      name,
+			},
+		},
+	}
+}
+
+// Result returns the built Job.
+func (j *JobBuilder) Result() *batchv1api.Job {
+	return j.object
+}
+
+// ObjectMeta applies functional options to the Job's ObjectMeta.
+func (j *JobBuilder) ObjectMeta(opts ...ObjectMetaOpt) *JobBuilder {
+	for _, opt := range opts {
+		opt(j.object)
+	}
+
+	return j
+}
+
+// Succeeded sets Succeeded on the Job's Status.
+func (j *JobBuilder) Succeeded(succeeded int) *JobBuilder {
+	j.object.Status.Succeeded = int32(succeeded)
+	return j
+}
diff --git a/pkg/controller/backup_repository_controller.go b/pkg/controller/backup_repository_controller.go
index 27002c0df..b6b3cf43c 100644
--- a/pkg/controller/backup_repository_controller.go
+++ b/pkg/controller/backup_repository_controller.go
@@ -71,9 +71,19 @@ type BackupRepoReconciler struct {
 	logFormat *logging.FormatFlag
 }
 
-func NewBackupRepoReconciler(namespace string, logger logrus.FieldLogger, client client.Client, repositoryManager repomanager.Manager,
-	maintenanceFrequency time.Duration, backupRepoConfig string, keepLatestMaintenanceJobs int, repoMaintenanceConfig string, maintenanceJobResources kube.PodResources,
-	logLevel logrus.Level, logFormat *logging.FormatFlag) *BackupRepoReconciler {
+func NewBackupRepoReconciler(
+	namespace string,
+	logger logrus.FieldLogger,
+	client client.Client,
+	repositoryManager repomanager.Manager,
+	maintenanceFrequency time.Duration,
+	backupRepoConfig string,
+	keepLatestMaintenanceJobs int,
+	repoMaintenanceConfig string,
+	maintenanceJobResources kube.PodResources,
+	logLevel logrus.Level,
+	logFormat *logging.FormatFlag,
+) *BackupRepoReconciler {
 	c := &BackupRepoReconciler{
 		client,
 		namespace,
diff --git a/pkg/controller/backup_repository_controller_test.go b/pkg/controller/backup_repository_controller_test.go
index 0558ae072..6f1226f1d 100644
--- a/pkg/controller/backup_repository_controller_test.go
+++ b/pkg/controller/backup_repository_controller_test.go
@@ -32,7 +32,10 @@ import (
 
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/builder"
+	"github.com/vmware-tanzu/velero/pkg/repository"
 	"github.com/vmware-tanzu/velero/pkg/repository/maintenance"
+	repomaintenance "github.com/vmware-tanzu/velero/pkg/repository/maintenance"
+	repomanager "github.com/vmware-tanzu/velero/pkg/repository/manager"
 	repomokes "github.com/vmware-tanzu/velero/pkg/repository/mocks"
 	repotypes "github.com/vmware-tanzu/velero/pkg/repository/types"
 	velerotest "github.com/vmware-tanzu/velero/pkg/test"
@@ -42,7 +45,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"
 
-	batchv1 "k8s.io/api/batch/v1"
+	batchv1api "k8s.io/api/batch/v1"
 )
 
 const testMaintenanceFrequency = 10 * time.Minute
@@ -958,18 +961,18 @@ func TestRecallMaintenance(t *testing.T) {
 	velerov1api.AddToScheme(schemeFail)
 
 	scheme := runtime.NewScheme()
-	batchv1.AddToScheme(scheme)
+	batchv1api.AddToScheme(scheme)
 	corev1api.AddToScheme(scheme)
 	velerov1api.AddToScheme(scheme)
 
-	jobSucceeded := &batchv1.Job{
+	jobSucceeded := &batchv1api.Job{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:              "job1",
 			Namespace:         velerov1api.DefaultNamespace,
 			Labels:            map[string]string{maintenance.RepositoryNameLabel: "repo"},
 			CreationTimestamp: metav1.Time{Time: now.Add(time.Hour)},
 		},
-		Status: batchv1.JobStatus{
+		Status: batchv1api.JobStatus{
 			StartTime:      &metav1.Time{Time: now.Add(time.Hour)},
 			CompletionTime: &metav1.Time{Time: now.Add(time.Hour * 2)},
 			Succeeded:      1,
@@ -1414,3 +1417,88 @@ func TestGetLastMaintenanceTimeFromHistory(t *testing.T) {
 		})
 	}
 }
+
+// This test verifies that the BackupRepository controller keeps no more
+// jobs than the test case's keptJobNumber.
+func TestDeleteOldMaintenanceJob(t *testing.T) { + now := time.Now().Round(time.Second) + + tests := []struct { + name string + repo *velerov1api.BackupRepository + keptJobNumber int // The BackupRepository controller's keepLatestMaintenanceJobs parameter + expectNil bool + maintenanceJobs []batchv1api.Job + bsl *velerov1api.BackupStorageLocation + }{ + { + name: "test maintenance job cleaning when repo is ready", + repo: &velerov1api.BackupRepository{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: "repo", + }, + Spec: velerov1api.BackupRepositorySpec{ + MaintenanceFrequency: metav1.Duration{Duration: testMaintenanceFrequency}, + BackupStorageLocation: "default", + }, + Status: velerov1api.BackupRepositoryStatus{ + LastMaintenanceTime: &metav1.Time{Time: time.Now()}, + RecentMaintenance: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Minute)}, + CompleteTimestamp: &metav1.Time{Time: now}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, Phase: velerov1api.BackupRepositoryPhaseReady, + }, + }, + keptJobNumber: 1, + expectNil: true, + maintenanceJobs: []batchv1api.Job{ + *builder.ForJob("velero", "job-01").ObjectMeta(builder.WithLabels(repomaintenance.RepositoryNameLabel, "repo")).Succeeded(1).Result(), + *builder.ForJob("velero", "job-02").ObjectMeta(builder.WithLabels(repomaintenance.RepositoryNameLabel, "repo")).Succeeded(1).Result(), + }, + bsl: builder.ForBackupStorageLocation("velero", "default").Result(), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + crClient := velerotest.NewFakeControllerRuntimeClient(t, test.repo, test.bsl) + for _, job := range test.maintenanceJobs { + require.NoError(t, crClient.Create(context.TODO(), &job)) + } + + repoLocker := repository.NewRepoLocker() + mgr := repomanager.NewManager("", crClient, repoLocker, nil, nil, nil) + + reconciler := NewBackupRepoReconciler( + velerov1api.DefaultNamespace, + velerotest.NewLogger(), + crClient, + mgr, + time.Duration(0), + "", + test.keptJobNumber, + "", + kube.PodResources{}, + logrus.InfoLevel, + nil, + ) + + _, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.repo.Namespace, Name: "repo"}}) + if test.expectNil { + require.NoError(t, err) + } else { + require.Error(t, err) + } + + if len(test.maintenanceJobs) > 0 { + jobList := new(batchv1api.JobList) + require.NoError(t, reconciler.Client.List(context.TODO(), jobList, &client.ListOptions{Namespace: "velero"})) + assert.Len(t, jobList.Items, 1) + } + }) + } +} diff --git a/pkg/repository/maintenance/maintenance.go b/pkg/repository/maintenance/maintenance.go index aace83d59..35d7bd99e 100644 --- a/pkg/repository/maintenance/maintenance.go +++ b/pkg/repository/maintenance/maintenance.go @@ -27,7 +27,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - batchv1 "k8s.io/api/batch/v1" + batchv1api "k8s.io/api/batch/v1" corev1api "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -74,7 +74,7 @@ func GenerateJobName(repo string) string { // DeleteOldJobs deletes old maintenance jobs and keeps the latest N jobs func DeleteOldJobs(cli client.Client, repo string, keep int) error { // Get the maintenance job list by label - jobList := &batchv1.JobList{} + jobList := &batchv1api.JobList{} err := cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: 
repo})) if err != nil { return err @@ -104,8 +104,8 @@ var waitCompletionBackOff = wait.Backoff{ } // waitForJobComplete wait for completion of the specified job and update the latest job object -func waitForJobComplete(ctx context.Context, client client.Client, ns string, job string, logger logrus.FieldLogger) (*batchv1.Job, error) { - var ret *batchv1.Job +func waitForJobComplete(ctx context.Context, client client.Client, ns string, job string, logger logrus.FieldLogger) (*batchv1api.Job, error) { + var ret *batchv1api.Job backOff := waitCompletionBackOff @@ -113,7 +113,7 @@ func waitForJobComplete(ctx context.Context, client client.Client, ns string, jo nextCheckpoint := startTime.Add(backOff.Step()) err := wait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (bool, error) { - updated := &batchv1.Job{} + updated := &batchv1api.Job{} err := client.Get(ctx, types.NamespacedName{Namespace: ns, Name: job}, updated) if err != nil && !apierrors.IsNotFound(err) { return false, err @@ -141,7 +141,7 @@ func waitForJobComplete(ctx context.Context, client client.Client, ns string, jo return ret, err } -func getResultFromJob(cli client.Client, job *batchv1.Job) (string, error) { +func getResultFromJob(cli client.Client, job *batchv1api.Job) (string, error) { // Get the maintenance job related pod by label selector podList := &corev1api.PodList{} err := cli.List(context.TODO(), podList, client.InNamespace(job.Namespace), client.MatchingLabels(map[string]string{"job-name": job.Name})) @@ -303,7 +303,7 @@ func WaitJobComplete(cli client.Client, ctx context.Context, jobName, ns string, // WaitAllJobsComplete checks all the incomplete maintenance jobs of the specified repo and wait for them to complete, // and then return the maintenance jobs' status in the range of limit func WaitAllJobsComplete(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, limit int, log logrus.FieldLogger) ([]velerov1api.BackupRepositoryMaintenanceStatus, error) { - jobList := &batchv1.JobList{} + jobList := &batchv1api.JobList{} err := cli.List(context.TODO(), jobList, &client.ListOptions{ Namespace: repo.Namespace, }, @@ -408,7 +408,7 @@ func StartNewJob(cli client.Client, ctx context.Context, repo *velerov1api.Backu } func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRepository, bslName string, config *JobConfigs, - podResources kube.PodResources, logLevel logrus.Level, logFormat *logging.FormatFlag) (*batchv1.Job, error) { + podResources kube.PodResources, logLevel logrus.Level, logFormat *logging.FormatFlag) (*batchv1api.Job, error) { // Get the Velero server deployment deployment := &appsv1api.Deployment{} err := cli.Get(ctx, types.NamespacedName{Name: "velero", Namespace: repo.Namespace}, deployment) @@ -482,7 +482,7 @@ func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRe args = append(args, fmt.Sprintf("--log-format=%s", logFormat.String())) // build the maintenance job - job := &batchv1.Job{ + job := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: GenerateJobName(repo.Name), Namespace: repo.Namespace, @@ -490,7 +490,7 @@ func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRe RepositoryNameLabel: repo.Name, }, }, - Spec: batchv1.JobSpec{ + Spec: batchv1api.JobSpec{ BackoffLimit: new(int32), // Never retry Template: corev1api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -541,7 +541,7 @@ func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRe return job, 
nil } -func composeStatusFromJob(job *batchv1.Job, message string) velerov1api.BackupRepositoryMaintenanceStatus { +func composeStatusFromJob(job *batchv1api.Job, message string) velerov1api.BackupRepositoryMaintenanceStatus { result := velerov1api.BackupRepositoryMaintenanceSucceeded if job.Status.Failed > 0 { result = velerov1api.BackupRepositoryMaintenanceFailed diff --git a/pkg/repository/maintenance/maintenance_test.go b/pkg/repository/maintenance/maintenance_test.go index ea830391a..c0e0a8851 100644 --- a/pkg/repository/maintenance/maintenance_test.go +++ b/pkg/repository/maintenance/maintenance_test.go @@ -27,7 +27,7 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - batchv1 "k8s.io/api/batch/v1" + batchv1api "k8s.io/api/batch/v1" corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -87,18 +87,18 @@ func TestDeleteOldJobs(t *testing.T) { // Create some maintenance jobs for testing var objs []client.Object // Create a newer job - newerJob := &batchv1.Job{ + newerJob := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "job1", Namespace: "default", Labels: map[string]string{RepositoryNameLabel: repo}, }, - Spec: batchv1.JobSpec{}, + Spec: batchv1api.JobSpec{}, } objs = append(objs, newerJob) // Create older jobs for i := 2; i <= 3; i++ { - olderJob := &batchv1.Job{ + olderJob := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("job%d", i), Namespace: "default", @@ -107,13 +107,13 @@ func TestDeleteOldJobs(t *testing.T) { Time: metav1.Now().Add(time.Duration(-24*i) * time.Hour), }, }, - Spec: batchv1.JobSpec{}, + Spec: batchv1api.JobSpec{}, } objs = append(objs, olderJob) } // Create a fake Kubernetes client scheme := runtime.NewScheme() - _ = batchv1.AddToScheme(scheme) + _ = batchv1api.AddToScheme(scheme) cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build() // Call the function @@ -121,7 +121,7 @@ func TestDeleteOldJobs(t *testing.T) { require.NoError(t, err) // Get the remaining jobs - jobList := &batchv1.JobList{} + jobList := &batchv1api.JobList{} err = cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo})) require.NoError(t, err) @@ -138,18 +138,18 @@ func TestDeleteOldJobs(t *testing.T) { func TestWaitForJobComplete(t *testing.T) { // Set up test job - job := &batchv1.Job{ + job := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "test-job", Namespace: "default", }, - Status: batchv1.JobStatus{}, + Status: batchv1api.JobStatus{}, } schemeFail := runtime.NewScheme() scheme := runtime.NewScheme() - batchv1.AddToScheme(scheme) + batchv1api.AddToScheme(scheme) waitCompletionBackOff1 := wait.Backoff{ Duration: time.Second, @@ -170,7 +170,7 @@ func TestWaitForJobComplete(t *testing.T) { description string // Test case description kubeClientObj []runtime.Object runtimeScheme *runtime.Scheme - jobStatus batchv1.JobStatus // Job status to set for the test + jobStatus batchv1api.JobStatus // Job status to set for the test logBackOff wait.Backoff updateAfter time.Duration expectedLogs int @@ -187,7 +187,7 @@ func TestWaitForJobComplete(t *testing.T) { kubeClientObj: []runtime.Object{ job, }, - jobStatus: batchv1.JobStatus{ + jobStatus: batchv1api.JobStatus{ Succeeded: 1, }, expectError: false, @@ -198,7 +198,7 @@ func TestWaitForJobComplete(t *testing.T) { kubeClientObj: []runtime.Object{ job, }, - jobStatus: batchv1.JobStatus{ + jobStatus: 
batchv1api.JobStatus{ Failed: 1, }, expectError: false, @@ -268,7 +268,7 @@ func TestWaitForJobComplete(t *testing.T) { func TestGetResultFromJob(t *testing.T) { // Set up test job - job := &batchv1.Job{ + job := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "test-job", Namespace: "default", @@ -579,7 +579,7 @@ func TestWaitAllJobsComplete(t *testing.T) { now := time.Now().Round(time.Second) - jobOtherLabel := &batchv1.Job{ + jobOtherLabel := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "job1", Namespace: veleroNamespace, @@ -588,7 +588,7 @@ func TestWaitAllJobsComplete(t *testing.T) { }, } - jobIncomplete := &batchv1.Job{ + jobIncomplete := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "job1", Namespace: veleroNamespace, @@ -597,14 +597,14 @@ func TestWaitAllJobsComplete(t *testing.T) { }, } - jobSucceeded1 := &batchv1.Job{ + jobSucceeded1 := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "job1", Namespace: veleroNamespace, Labels: map[string]string{RepositoryNameLabel: "fake-repo"}, CreationTimestamp: metav1.Time{Time: now}, }, - Status: batchv1.JobStatus{ + Status: batchv1api.JobStatus{ StartTime: &metav1.Time{Time: now}, CompletionTime: &metav1.Time{Time: now.Add(time.Hour)}, Succeeded: 1, @@ -617,14 +617,14 @@ func TestWaitAllJobsComplete(t *testing.T) { }, }).Result() - jobFailed1 := &batchv1.Job{ + jobFailed1 := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "job2", Namespace: veleroNamespace, Labels: map[string]string{RepositoryNameLabel: "fake-repo"}, CreationTimestamp: metav1.Time{Time: now.Add(time.Hour)}, }, - Status: batchv1.JobStatus{ + Status: batchv1api.JobStatus{ StartTime: &metav1.Time{Time: now.Add(time.Hour)}, Failed: 1, }, @@ -638,14 +638,14 @@ func TestWaitAllJobsComplete(t *testing.T) { }, }).Result() - jobSucceeded2 := &batchv1.Job{ + jobSucceeded2 := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "job3", Namespace: veleroNamespace, Labels: map[string]string{RepositoryNameLabel: "fake-repo"}, CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 2)}, }, - Status: batchv1.JobStatus{ + Status: batchv1api.JobStatus{ StartTime: &metav1.Time{Time: now.Add(time.Hour * 2)}, CompletionTime: &metav1.Time{Time: now.Add(time.Hour * 3)}, Succeeded: 1, @@ -658,14 +658,14 @@ func TestWaitAllJobsComplete(t *testing.T) { }, }).Result() - jobSucceeded3 := &batchv1.Job{ + jobSucceeded3 := &batchv1api.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "job4", Namespace: veleroNamespace, Labels: map[string]string{RepositoryNameLabel: "fake-repo"}, CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 3)}, }, - Status: batchv1.JobStatus{ + Status: batchv1api.JobStatus{ StartTime: &metav1.Time{Time: now.Add(time.Hour * 3)}, CompletionTime: &metav1.Time{Time: now.Add(time.Hour * 4)}, Succeeded: 1, @@ -681,7 +681,7 @@ func TestWaitAllJobsComplete(t *testing.T) { schemeFail := runtime.NewScheme() scheme := runtime.NewScheme() - batchv1.AddToScheme(scheme) + batchv1api.AddToScheme(scheme) corev1api.AddToScheme(scheme) testCases := []struct { diff --git a/pkg/test/fake_controller_runtime_client.go b/pkg/test/fake_controller_runtime_client.go index 836cb7c3c..34b3908d1 100644 --- a/pkg/test/fake_controller_runtime_client.go +++ b/pkg/test/fake_controller_runtime_client.go @@ -22,6 +22,7 @@ import ( snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" "github.com/stretchr/testify/require" appsv1api "k8s.io/api/apps/v1" + batchv1api "k8s.io/api/batch/v1" corev1api "k8s.io/api/core/v1" storagev1api 
"k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime" @@ -56,6 +57,7 @@ func NewFakeControllerRuntimeClient(t *testing.T, initObjs ...runtime.Object) cl require.NoError(t, appsv1api.AddToScheme(scheme)) require.NoError(t, snapshotv1api.AddToScheme(scheme)) require.NoError(t, storagev1api.AddToScheme(scheme)) + require.NoError(t, batchv1api.AddToScheme(scheme)) return k8sfake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(initObjs...).Build() }