Fix the Job build error when the BackupRepository name is longer than 63 characters.
Fix the Job build error. Account for the name length limitation in the job list code as well.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
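Background for the change: Kubernetes rejects label values longer than 63 characters, so a BackupRepository name that exceeds the limit cannot be stored directly in the maintenance job's repository-name label. The diff below therefore routes the name through velerolabel.GetValidName (pkg/label) before using it as a label value or in a label selector. The Go sketch below only illustrates the general idea (truncate and append a short hash derived from the full name so the result stays stable and unique); the helper name validLabelValue and the hashing details are assumptions for illustration, not Velero's actual pkg/label implementation.

package main

import (
    "crypto/sha256"
    "fmt"
)

// maxLabelValueLength is the Kubernetes limit for a label value.
const maxLabelValueLength = 63

// validLabelValue is a hypothetical helper: it returns s unchanged when it
// already fits, otherwise a truncated prefix plus a 6-character hash suffix
// derived from the full string, so the shortened value is deterministic and
// unlikely to collide with other shortened names.
func validLabelValue(s string) string {
    if len(s) <= maxLabelValueLength {
        return s
    }
    sum := sha256.Sum256([]byte(s))
    suffix := fmt.Sprintf("%x", sum)[:6]
    return s[:maxLabelValueLength-len(suffix)] + suffix
}

func main() {
    name := "label with more than 63 characters should be modified plus extra padding"
    fmt.Println(len(name), "->", len(validLabelValue(name)), validLabelValue(name))
}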
@@ -275,7 +275,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		log.WithError(err).Warn("Failed to get keepLatestMaintenanceJobs from ConfigMap, using CLI parameter value")
 	}
 
-	if err := maintenance.DeleteOldJobs(r.Client, req.Name, keepJobs, log); err != nil {
+	if err := maintenance.DeleteOldJobs(r.Client, *backupRepo, keepJobs, log); err != nil {
 		log.WithError(err).Warn("Failed to delete old maintenance jobs")
 	}
 }
@@ -32,11 +32,13 @@ import (
 	corev1api "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+	velerolabel "github.com/vmware-tanzu/velero/pkg/label"
 	velerotypes "github.com/vmware-tanzu/velero/pkg/types"
 	"github.com/vmware-tanzu/velero/pkg/util"
 	"github.com/vmware-tanzu/velero/pkg/util/kube"
@@ -68,11 +70,22 @@ func GenerateJobName(repo string) string {
 }
 
 // DeleteOldJobs deletes old maintenance jobs and keeps the latest N jobs
-func DeleteOldJobs(cli client.Client, repo string, keep int, logger logrus.FieldLogger) error {
+func DeleteOldJobs(cli client.Client, repo velerov1api.BackupRepository, keep int, logger logrus.FieldLogger) error {
 	logger.Infof("Start to delete old maintenance jobs. %d jobs will be kept.", keep)
 	// Get the maintenance job list by label
 	jobList := &batchv1api.JobList{}
-	err := cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
+	err := cli.List(
+		context.TODO(),
+		jobList,
+		&client.ListOptions{
+			Namespace: repo.Namespace,
+			LabelSelector: labels.SelectorFromSet(
+				map[string]string{
+					RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
+				},
+			),
+		},
+	)
 	if err != nil {
 		return err
 	}
@@ -339,10 +352,17 @@ func WaitJobComplete(cli client.Client, ctx context.Context, jobName, ns string,
 // and then return the maintenance jobs' status in the range of limit
 func WaitAllJobsComplete(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, limit int, log logrus.FieldLogger) ([]velerov1api.BackupRepositoryMaintenanceStatus, error) {
 	jobList := &batchv1api.JobList{}
-	err := cli.List(context.TODO(), jobList, &client.ListOptions{
-		Namespace: repo.Namespace,
-	},
-		client.MatchingLabels(map[string]string{RepositoryNameLabel: repo.Name}),
+	err := cli.List(
+		context.TODO(),
+		jobList,
+		&client.ListOptions{
+			Namespace: repo.Namespace,
+			LabelSelector: labels.SelectorFromSet(
+				map[string]string{
+					RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
+				},
+			),
+		},
 	)
 
 	if err != nil {
@@ -558,7 +578,7 @@ func buildJob(
 	}
 
 	podLabels := map[string]string{
-		RepositoryNameLabel: repo.Name,
+		RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
 	}
 
 	for _, k := range util.ThirdPartyLabels {
@@ -588,7 +608,7 @@ func buildJob(
 			Name:      GenerateJobName(repo.Name),
 			Namespace: repo.Namespace,
 			Labels: map[string]string{
-				RepositoryNameLabel: repo.Name,
+				RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
 			},
 		},
 		Spec: batchv1api.JobSpec{
@@ -40,6 +40,7 @@ import (
 
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/builder"
+	velerolabel "github.com/vmware-tanzu/velero/pkg/label"
 	"github.com/vmware-tanzu/velero/pkg/repository/provider"
 	velerotest "github.com/vmware-tanzu/velero/pkg/test"
 	velerotypes "github.com/vmware-tanzu/velero/pkg/types"
@@ -48,7 +49,7 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/util/logging"
 )
 
-func TestGenerateJobName1(t *testing.T) {
+func TestGenerateJobName(t *testing.T) {
 	testCases := []struct {
 		repo          string
 		expectedStart string
@@ -82,59 +83,62 @@ func TestGenerateJobName1(t *testing.T) {
 }
 func TestDeleteOldJobs(t *testing.T) {
-	// Set up test repo and keep value
-	repo := "test-repo"
-	keep := 2
-
-	// Create some maintenance jobs for testing
-	var objs []client.Object
-	// Create a newer job
-	newerJob := &batchv1api.Job{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "job1",
-			Namespace: "default",
-			Labels:    map[string]string{RepositoryNameLabel: repo},
-		},
-		Spec: batchv1api.JobSpec{},
-	}
-	objs = append(objs, newerJob)
-	// Create older jobs
-	for i := 2; i <= 3; i++ {
-		olderJob := &batchv1api.Job{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      fmt.Sprintf("job%d", i),
-				Namespace: "default",
-				Labels:    map[string]string{RepositoryNameLabel: repo},
-				CreationTimestamp: metav1.Time{
-					Time: metav1.Now().Add(time.Duration(-24*i) * time.Hour),
-				},
-			},
-			Spec: batchv1api.JobSpec{},
-		}
-		objs = append(objs, olderJob)
-	}
-	// Create a fake Kubernetes client
-	scheme := runtime.NewScheme()
-	_ = batchv1api.AddToScheme(scheme)
-	cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
-
-	// Call the function
-	err := DeleteOldJobs(cli, repo, keep, velerotest.NewLogger())
-	require.NoError(t, err)
-
-	// Get the remaining jobs
-	jobList := &batchv1api.JobList{}
-	err = cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
-	require.NoError(t, err)
-
-	// We expect the number of jobs to be equal to 'keep'
-	assert.Len(t, jobList.Items, keep)
-
-	// We expect that the oldest jobs were deleted
-	// Job3 should not be present in the remaining list
-	assert.NotContains(t, jobList.Items, objs[2])
-
-	// Job2 should also not be present in the remaining list
-	assert.NotContains(t, jobList.Items, objs[1])
+	repo := &velerov1api.BackupRepository{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "label with more than 63 characters should be modified",
+			Namespace: velerov1api.DefaultNamespace,
+		},
+	}
+	keep := 1
+
+	jobArray := []client.Object{
+		&batchv1api.Job{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "job-0",
+				Namespace: velerov1api.DefaultNamespace,
+				Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
+			},
+			Spec: batchv1api.JobSpec{},
+		},
+		&batchv1api.Job{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "job-1",
+				Namespace: velerov1api.DefaultNamespace,
+				Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
+			},
+			Spec: batchv1api.JobSpec{},
+		},
+	}
+
+	newJob := &batchv1api.Job{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "job-new",
+			Namespace: velerov1api.DefaultNamespace,
+			Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
+		},
+		Spec: batchv1api.JobSpec{},
+	}
+
+	// Create a fake Kubernetes client with 2 jobs.
+	scheme := runtime.NewScheme()
+	_ = batchv1api.AddToScheme(scheme)
+	cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(jobArray...).Build()
+
+	// Create a new job
+	require.NoError(t, cli.Create(context.TODO(), newJob))
+
+	// Call the function
+	require.NoError(t, DeleteOldJobs(cli, *repo, keep, velerotest.NewLogger()))
+
+	// Get the remaining jobs
+	jobList := &batchv1api.JobList{}
+	require.NoError(t, cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo.Name})))
+
+	// We expect the number of jobs to be equal to 'keep'
+	assert.Len(t, jobList.Items, keep)
+
+	// Only the new created job should be left.
+	assert.Equal(t, jobList.Items[0].Name, newJob.Name)
 }
 
 func TestWaitForJobComplete(t *testing.T) {
@@ -571,7 +575,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
 	repo := &velerov1api.BackupRepository{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: veleroNamespace,
-			Name:      "fake-repo",
+			Name:      "label with more than 63 characters should be modified",
 		},
 		Spec: velerov1api.BackupRepositorySpec{
 			BackupStorageLocation: "default",
@@ -595,7 +599,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "job1",
 			Namespace: veleroNamespace,
-			Labels:    map[string]string{RepositoryNameLabel: "fake-repo"},
+			Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
 			CreationTimestamp: metav1.Time{Time: now},
 		},
 	}
@@ -604,7 +608,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "job1",
 			Namespace: veleroNamespace,
-			Labels:    map[string]string{RepositoryNameLabel: "fake-repo"},
+			Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
 			CreationTimestamp: metav1.Time{Time: now},
 		},
 		Status: batchv1api.JobStatus{
@@ -624,7 +628,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "job2",
 			Namespace: veleroNamespace,
-			Labels:    map[string]string{RepositoryNameLabel: "fake-repo"},
+			Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
 			CreationTimestamp: metav1.Time{Time: now.Add(time.Hour)},
 		},
 		Status: batchv1api.JobStatus{
@@ -645,7 +649,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "job3",
 			Namespace: veleroNamespace,
-			Labels:    map[string]string{RepositoryNameLabel: "fake-repo"},
+			Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
 			CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 2)},
 		},
 		Status: batchv1api.JobStatus{
@@ -665,7 +669,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "job4",
 			Namespace: veleroNamespace,
-			Labels:    map[string]string{RepositoryNameLabel: "fake-repo"},
+			Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
 			CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 3)},
 		},
 		Status: batchv1api.JobStatus{
@@ -698,7 +702,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
 		{
 			name:          "list job error",
 			runtimeScheme: schemeFail,
-			expectedError: "error listing maintenance job for repo fake-repo: no kind is registered for the type v1.JobList in scheme",
+			expectedError: "error listing maintenance job for repo label with more than 63 characters should be modified: no kind is registered for the type v1.JobList in scheme",
 		},
 		{
 			name: "job not exist",
@@ -943,6 +947,7 @@ func TestBuildJob(t *testing.T) {
 		expectedSecurityContext    *corev1api.SecurityContext
 		expectedPodSecurityContext *corev1api.PodSecurityContext
 		expectedImagePullSecrets   []corev1api.LocalObjectReference
+		backupRepository           *velerov1api.BackupRepository
 	}{
 		{
 			name: "Valid maintenance job without third party labels",
@@ -1060,6 +1065,64 @@ func TestBuildJob(t *testing.T) {
 			expectedJobName: "",
 			expectedError:   true,
 		},
+		{
+			name: "Valid maintenance job with third party labels and BackupRepository name longer than 63",
+			m: &velerotypes.JobConfigs{
+				PodResources: &kube.PodResources{
+					CPURequest:    "100m",
+					MemoryRequest: "128Mi",
+					CPULimit:      "200m",
+					MemoryLimit:   "256Mi",
+				},
+			},
+			deploy:        deploy2,
+			logLevel:      logrus.InfoLevel,
+			logFormat:     logging.NewFormatFlag(),
+			expectedError: false,
+			expectedEnv: []corev1api.EnvVar{
+				{
+					Name:  "test-name",
+					Value: "test-value",
+				},
+			},
+			expectedEnvFrom: []corev1api.EnvFromSource{
+				{
+					ConfigMapRef: &corev1api.ConfigMapEnvSource{
+						LocalObjectReference: corev1api.LocalObjectReference{
+							Name: "test-configmap",
+						},
+					},
+				},
+				{
+					SecretRef: &corev1api.SecretEnvSource{
+						LocalObjectReference: corev1api.LocalObjectReference{
+							Name: "test-secret",
+						},
+					},
+				},
+			},
+			expectedPodLabel: map[string]string{
+				RepositoryNameLabel:           velerolabel.GetValidName("label with more than 63 characters should be modified"),
+				"azure.workload.identity/use": "fake-label-value",
+			},
+			expectedSecurityContext:    nil,
+			expectedPodSecurityContext: nil,
+			expectedImagePullSecrets: []corev1api.LocalObjectReference{
+				{
+					Name: "imagePullSecret1",
+				},
+			},
+			backupRepository: &velerov1api.BackupRepository{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "velero",
+					Name:      "label with more than 63 characters should be modified",
+				},
+				Spec: velerov1api.BackupRepositorySpec{
+					VolumeNamespace: "test-123",
+					RepositoryType:  "kopia",
+				},
+			},
+		},
 	}
 
 	param := provider.RepoParam{
@@ -1083,6 +1146,10 @@ func TestBuildJob(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
+			if tc.backupRepository != nil {
+				param.BackupRepo = tc.backupRepository
+			}
+
 			// Create a fake clientset with resources
 			objs := []runtime.Object{param.BackupLocation, param.BackupRepo}
 