Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-26 06:32:06 +00:00)

Compare commits: 18 commits, `action_ver` ... `9328_fix`
| Author | SHA1 | Date |
|---|---|---|
| | 4121808f38 | |
| | 420a65a116 | |
| | 3bf4a7dced | |
| | 2a5804b595 | |
| | 9a3fabbc55 | |
| | 99a46ed818 | |
| | 93e8379530 | |
| | 72ddfd7d78 | |
| | 8b3ba78c8c | |
| | b34f2deff2 | |
| | e9666f9aea | |
| | e6aab8ca93 | |
| | 031df8d5e0 | |
| | 37df853a99 | |
| | 8ee3436f5c | |
| | b9159c22ca | |
| | 112bea520e | |
| | 7e15e9ba05 | |
.github/workflows/stale-issues.yml (vendored), 2 lines changed

```diff
@@ -7,7 +7,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v10.0.0
+      - uses: actions/stale@v10.1.0
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 14 days. If a Velero team member has requested log or more information, please provide the output of the shared commands."
```
New changelog files (one line each):

- changelogs/unreleased/9269-Lyndon-Li: Fix issue #7904, remove the code and doc for PVC node selection
- changelogs/unreleased/9296-Lyndon-Li: Fix issue #9267, add events to data mover prepare diagnostic
- changelogs/unreleased/9329-T4iFooN-IX: Fix typos in documentation
- changelogs/unreleased/9333-Lyndon-Li: Fix issue #9332, add bytesDone for cache files
```diff
@@ -275,7 +275,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		log.WithError(err).Warn("Failed to get keepLatestMaintenanceJobs from ConfigMap, using CLI parameter value")
 	}
 
-	if err := maintenance.DeleteOldJobs(r.Client, req.Name, keepJobs, log); err != nil {
+	if err := maintenance.DeleteOldJobs(r.Client, *backupRepo, keepJobs, log); err != nil {
 		log.WithError(err).Warn("Failed to delete old maintenance jobs")
 	}
 }
```
```diff
@@ -381,8 +381,13 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
 		diag += fmt.Sprintf("error getting backup vs %s, err: %v\n", backupVSName, err)
 	}
 
+	events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		diag += fmt.Sprintf("error listing events, err: %v\n", err)
+	}
+
 	if pod != nil {
-		diag += kube.DiagnosePod(pod)
+		diag += kube.DiagnosePod(pod, events)
 
 		if pod.Spec.NodeName != "" {
 			if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
```

```diff
@@ -392,7 +397,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
 	}
 
 	if pvc != nil {
-		diag += kube.DiagnosePVC(pvc)
+		diag += kube.DiagnosePVC(pvc, events)
 
 		if pvc.Spec.VolumeName != "" {
 			if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
```

```diff
@@ -404,7 +409,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
 	}
 
 	if vs != nil {
-		diag += csi.DiagnoseVS(vs)
+		diag += csi.DiagnoseVS(vs, events)
 
 		if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil && *vs.Status.BoundVolumeSnapshotContentName != "" {
 			if vsc, err := e.csiSnapshotClient.VolumeSnapshotContents().Get(ctx, *vs.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}); err != nil {
```
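For context on the change above: the exposer now lists all events in the owner's namespace once and hands the list to each Diagnose helper, which filters client-side by the involved object's UID. A server-side alternative (not what this change does) would be to let the API server filter per object with a field selector. A minimal, hedged sketch, assuming a client-go kubernetes.Interface; the function name is illustrative only:

```go
// Sketch only (not part of the diff): ask the API server for the warning
// events of a single object, instead of listing the whole namespace.
package diagnose

import (
	"context"

	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

func listWarningEventsFor(ctx context.Context, c kubernetes.Interface, ns string, uid types.UID) (*corev1api.EventList, error) {
	// Events support field selectors on involvedObject.uid and type.
	sel := fields.AndSelectors(
		fields.OneTermEqualSelector("involvedObject.uid", string(uid)),
		fields.OneTermEqualSelector("type", corev1api.EventTypeWarning),
	).String()
	return c.CoreV1().Events(ns).List(ctx, metav1.ListOptions{FieldSelector: sel})
}
```

Listing once and filtering in memory, as the diff does, keeps the number of API calls constant regardless of how many objects are diagnosed.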
@@ -1288,6 +1288,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1313,6 +1314,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1341,6 +1343,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1359,6 +1362,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1404,6 +1408,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-vs-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1419,6 +1424,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-vs-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1436,6 +1442,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-vs-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1633,6 +1640,74 @@ PVC velero/fake-backup, phase Pending, binding to fake-pv
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
|
||||
VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
|
||||
end diagnose CSI exposer`,
|
||||
},
|
||||
{
|
||||
name: "with events",
|
||||
ownerBackup: backup,
|
||||
kubeClientObj: []runtime.Object{
|
||||
&backupPodWithNodeName,
|
||||
&backupPVCWithVolumeName,
|
||||
&backupPV,
|
||||
&nodeAgentPod,
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-5"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-6"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-6",
|
||||
Message: "message-6",
|
||||
},
|
||||
},
|
||||
snapshotClientObj: []runtime.Object{
|
||||
&backupVSWithVSC,
|
||||
&backupVSC,
|
||||
},
|
||||
expected: `begin diagnose CSI exposer
|
||||
Pod velero/fake-backup, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
Pod event reason reason-2, message message-2
|
||||
Pod event reason reason-6, message message-6
|
||||
PVC velero/fake-backup, phase Pending, binding to fake-pv
|
||||
PVC event reason reason-3, message message-3
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
|
||||
VS event reason reason-4, message message-4
|
||||
VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
|
||||
end diagnose CSI exposer`,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -287,8 +287,13 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
|
||||
diag += fmt.Sprintf("error getting restore pvc %s, err: %v\n", restorePVCName, err)
|
||||
}
|
||||
|
||||
events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
diag += fmt.Sprintf("error listing events, err: %v\n", err)
|
||||
}
|
||||
|
||||
if pod != nil {
|
||||
diag += kube.DiagnosePod(pod)
|
||||
diag += kube.DiagnosePod(pod, events)
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
|
||||
@@ -298,7 +303,7 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
|
||||
}
|
||||
|
||||
if pvc != nil {
|
||||
diag += kube.DiagnosePVC(pvc)
|
||||
diag += kube.DiagnosePVC(pvc, events)
|
||||
|
||||
if pvc.Spec.VolumeName != "" {
|
||||
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
|
||||
|
||||
@@ -549,6 +549,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -574,6 +575,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -602,6 +604,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -620,6 +623,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -758,6 +762,60 @@ Pod velero/fake-restore, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
PVC velero/fake-restore, phase Pending, binding to fake-pv
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
end diagnose restore exposer`,
|
||||
},
|
||||
{
|
||||
name: "with events",
|
||||
ownerRestore: restore,
|
||||
kubeClientObj: []runtime.Object{
|
||||
&restorePodWithNodeName,
|
||||
&restorePVCWithVolumeName,
|
||||
&restorePV,
|
||||
&nodeAgentPod,
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-4"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-5"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
},
|
||||
expected: `begin diagnose restore exposer
|
||||
Pod velero/fake-restore, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
Pod event reason reason-2, message message-2
|
||||
Pod event reason reason-5, message message-5
|
||||
PVC velero/fake-restore, phase Pending, binding to fake-pv
|
||||
PVC event reason reason-3, message message-3
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
end diagnose restore exposer`,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -251,8 +251,13 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
|
||||
diag += fmt.Sprintf("error getting hosting pod %s, err: %v\n", hostingPodName, err)
|
||||
}
|
||||
|
||||
events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
diag += fmt.Sprintf("error listing events, err: %v\n", err)
|
||||
}
|
||||
|
||||
if pod != nil {
|
||||
diag += kube.DiagnosePod(pod)
|
||||
diag += kube.DiagnosePod(pod, events)
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
|
||||
|
||||
@@ -466,6 +466,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -491,6 +492,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -587,6 +589,48 @@ end diagnose pod volume exposer`,
|
||||
expected: `begin diagnose pod volume exposer
|
||||
Pod velero/fake-backup, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
end diagnose pod volume exposer`,
|
||||
},
|
||||
{
|
||||
name: "with events",
|
||||
ownerBackup: backup,
|
||||
kubeClientObj: []runtime.Object{
|
||||
&backupPodWithNodeName,
|
||||
&nodeAgentPod,
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-3"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
},
|
||||
expected: `begin diagnose pod volume exposer
|
||||
Pod velero/fake-backup, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
Pod event reason reason-2, message message-2
|
||||
Pod event reason reason-4, message message-4
|
||||
end diagnose pod volume exposer`,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -32,11 +32,13 @@ import (
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
velerolabel "github.com/vmware-tanzu/velero/pkg/label"
|
||||
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
|
||||
"github.com/vmware-tanzu/velero/pkg/util"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
@@ -68,11 +70,22 @@ func GenerateJobName(repo string) string {
|
||||
}
|
||||
|
||||
// DeleteOldJobs deletes old maintenance jobs and keeps the latest N jobs
|
||||
func DeleteOldJobs(cli client.Client, repo string, keep int, logger logrus.FieldLogger) error {
|
||||
func DeleteOldJobs(cli client.Client, repo velerov1api.BackupRepository, keep int, logger logrus.FieldLogger) error {
|
||||
logger.Infof("Start to delete old maintenance jobs. %d jobs will be kept.", keep)
|
||||
// Get the maintenance job list by label
|
||||
jobList := &batchv1api.JobList{}
|
||||
err := cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
|
||||
err := cli.List(
|
||||
context.TODO(),
|
||||
jobList,
|
||||
&client.ListOptions{
|
||||
Namespace: repo.Namespace,
|
||||
LabelSelector: labels.SelectorFromSet(
|
||||
map[string]string{
|
||||
RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
|
||||
},
|
||||
),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -339,10 +352,17 @@ func WaitJobComplete(cli client.Client, ctx context.Context, jobName, ns string,
|
||||
// and then return the maintenance jobs' status in the range of limit
|
||||
func WaitAllJobsComplete(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, limit int, log logrus.FieldLogger) ([]velerov1api.BackupRepositoryMaintenanceStatus, error) {
|
||||
jobList := &batchv1api.JobList{}
|
||||
err := cli.List(context.TODO(), jobList, &client.ListOptions{
|
||||
Namespace: repo.Namespace,
|
||||
},
|
||||
client.MatchingLabels(map[string]string{RepositoryNameLabel: repo.Name}),
|
||||
err := cli.List(
|
||||
context.TODO(),
|
||||
jobList,
|
||||
&client.ListOptions{
|
||||
Namespace: repo.Namespace,
|
||||
LabelSelector: labels.SelectorFromSet(
|
||||
map[string]string{
|
||||
RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
|
||||
},
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
@@ -558,7 +578,7 @@ func buildJob(
|
||||
}
|
||||
|
||||
podLabels := map[string]string{
|
||||
RepositoryNameLabel: repo.Name,
|
||||
RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
|
||||
}
|
||||
|
||||
for _, k := range util.ThirdPartyLabels {
|
||||
@@ -588,7 +608,7 @@ func buildJob(
|
||||
Name: GenerateJobName(repo.Name),
|
||||
Namespace: repo.Namespace,
|
||||
Labels: map[string]string{
|
||||
RepositoryNameLabel: repo.Name,
|
||||
RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
|
||||
},
|
||||
},
|
||||
Spec: batchv1api.JobSpec{
|
||||
|
||||
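A note on the label change running through these hunks: maintenance jobs used to carry the raw repository name as the value of RepositoryNameLabel, and the diff routes it through velerolabel.GetValidName instead; the new test fixtures deliberately use a BackupRepository named "label with more than 63 characters should be modified". Kubernetes caps label values at 63 characters and restricts their character set, so an over-long or space-containing repository name cannot be used verbatim. A hedged sketch of that constraint using apimachinery's validator (GetValidName's exact normalization is not shown in this diff):

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// Illustrative value only: 70 characters, above the 63-character cap that
	// Kubernetes enforces on label values; spaces would be rejected as well.
	tooLong := strings.Repeat("a", 70)
	for _, msg := range validation.IsValidLabelValue(tooLong) {
		fmt.Println(msg) // reports the length and character-set rules
	}
}
```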
@@ -40,6 +40,7 @@ import (
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
velerolabel "github.com/vmware-tanzu/velero/pkg/label"
|
||||
"github.com/vmware-tanzu/velero/pkg/repository/provider"
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
|
||||
@@ -48,7 +49,7 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/util/logging"
|
||||
)
|
||||
|
||||
func TestGenerateJobName1(t *testing.T) {
|
||||
func TestGenerateJobName(t *testing.T) {
|
||||
testCases := []struct {
|
||||
repo string
|
||||
expectedStart string
|
||||
@@ -82,59 +83,62 @@ func TestGenerateJobName1(t *testing.T) {
|
||||
}
|
||||
func TestDeleteOldJobs(t *testing.T) {
|
||||
// Set up test repo and keep value
|
||||
repo := "test-repo"
|
||||
keep := 2
|
||||
|
||||
// Create some maintenance jobs for testing
|
||||
var objs []client.Object
|
||||
// Create a newer job
|
||||
newerJob := &batchv1api.Job{
|
||||
repo := &velerov1api.BackupRepository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job1",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{RepositoryNameLabel: repo},
|
||||
Name: "label with more than 63 characters should be modified",
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
},
|
||||
}
|
||||
keep := 1
|
||||
|
||||
jobArray := []client.Object{
|
||||
&batchv1api.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job-0",
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
},
|
||||
Spec: batchv1api.JobSpec{},
|
||||
},
|
||||
&batchv1api.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job-1",
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
},
|
||||
Spec: batchv1api.JobSpec{},
|
||||
},
|
||||
}
|
||||
|
||||
newJob := &batchv1api.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job-new",
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
},
|
||||
Spec: batchv1api.JobSpec{},
|
||||
}
|
||||
objs = append(objs, newerJob)
|
||||
// Create older jobs
|
||||
for i := 2; i <= 3; i++ {
|
||||
olderJob := &batchv1api.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("job%d", i),
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{RepositoryNameLabel: repo},
|
||||
CreationTimestamp: metav1.Time{
|
||||
Time: metav1.Now().Add(time.Duration(-24*i) * time.Hour),
|
||||
},
|
||||
},
|
||||
Spec: batchv1api.JobSpec{},
|
||||
}
|
||||
objs = append(objs, olderJob)
|
||||
}
|
||||
// Create a fake Kubernetes client
|
||||
|
||||
// Create a fake Kubernetes client with 2 jobs.
|
||||
scheme := runtime.NewScheme()
|
||||
_ = batchv1api.AddToScheme(scheme)
|
||||
cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
|
||||
cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(jobArray...).Build()
|
||||
|
||||
// Create a new job
|
||||
require.NoError(t, cli.Create(context.TODO(), newJob))
|
||||
|
||||
// Call the function
|
||||
err := DeleteOldJobs(cli, repo, keep, velerotest.NewLogger())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, DeleteOldJobs(cli, *repo, keep, velerotest.NewLogger()))
|
||||
|
||||
// Get the remaining jobs
|
||||
jobList := &batchv1api.JobList{}
|
||||
err = cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo.Name})))
|
||||
|
||||
// We expect the number of jobs to be equal to 'keep'
|
||||
assert.Len(t, jobList.Items, keep)
|
||||
|
||||
// We expect that the oldest jobs were deleted
|
||||
// Job3 should not be present in the remaining list
|
||||
assert.NotContains(t, jobList.Items, objs[2])
|
||||
|
||||
// Job2 should also not be present in the remaining list
|
||||
assert.NotContains(t, jobList.Items, objs[1])
|
||||
// Only the new created job should be left.
|
||||
assert.Equal(t, jobList.Items[0].Name, newJob.Name)
|
||||
}
|
||||
|
||||
func TestWaitForJobComplete(t *testing.T) {
|
||||
@@ -571,7 +575,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
repo := &velerov1api.BackupRepository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: veleroNamespace,
|
||||
Name: "fake-repo",
|
||||
Name: "label with more than 63 characters should be modified",
|
||||
},
|
||||
Spec: velerov1api.BackupRepositorySpec{
|
||||
BackupStorageLocation: "default",
|
||||
@@ -595,7 +599,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job1",
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
CreationTimestamp: metav1.Time{Time: now},
|
||||
},
|
||||
}
|
||||
@@ -604,7 +608,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job1",
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
CreationTimestamp: metav1.Time{Time: now},
|
||||
},
|
||||
Status: batchv1api.JobStatus{
|
||||
@@ -624,7 +628,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job2",
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
CreationTimestamp: metav1.Time{Time: now.Add(time.Hour)},
|
||||
},
|
||||
Status: batchv1api.JobStatus{
|
||||
@@ -645,7 +649,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job3",
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 2)},
|
||||
},
|
||||
Status: batchv1api.JobStatus{
|
||||
@@ -665,7 +669,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job4",
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 3)},
|
||||
},
|
||||
Status: batchv1api.JobStatus{
|
||||
@@ -698,7 +702,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
{
|
||||
name: "list job error",
|
||||
runtimeScheme: schemeFail,
|
||||
expectedError: "error listing maintenance job for repo fake-repo: no kind is registered for the type v1.JobList in scheme",
|
||||
expectedError: "error listing maintenance job for repo label with more than 63 characters should be modified: no kind is registered for the type v1.JobList in scheme",
|
||||
},
|
||||
{
|
||||
name: "job not exist",
|
||||
@@ -943,6 +947,7 @@ func TestBuildJob(t *testing.T) {
|
||||
expectedSecurityContext *corev1api.SecurityContext
|
||||
expectedPodSecurityContext *corev1api.PodSecurityContext
|
||||
expectedImagePullSecrets []corev1api.LocalObjectReference
|
||||
backupRepository *velerov1api.BackupRepository
|
||||
}{
|
||||
{
|
||||
name: "Valid maintenance job without third party labels",
|
||||
@@ -1060,6 +1065,64 @@ func TestBuildJob(t *testing.T) {
|
||||
expectedJobName: "",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "Valid maintenance job with third party labels and BackupRepository name longer than 63",
|
||||
m: &velerotypes.JobConfigs{
|
||||
PodResources: &kube.PodResources{
|
||||
CPURequest: "100m",
|
||||
MemoryRequest: "128Mi",
|
||||
CPULimit: "200m",
|
||||
MemoryLimit: "256Mi",
|
||||
},
|
||||
},
|
||||
deploy: deploy2,
|
||||
logLevel: logrus.InfoLevel,
|
||||
logFormat: logging.NewFormatFlag(),
|
||||
expectedError: false,
|
||||
expectedEnv: []corev1api.EnvVar{
|
||||
{
|
||||
Name: "test-name",
|
||||
Value: "test-value",
|
||||
},
|
||||
},
|
||||
expectedEnvFrom: []corev1api.EnvFromSource{
|
||||
{
|
||||
ConfigMapRef: &corev1api.ConfigMapEnvSource{
|
||||
LocalObjectReference: corev1api.LocalObjectReference{
|
||||
Name: "test-configmap",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
SecretRef: &corev1api.SecretEnvSource{
|
||||
LocalObjectReference: corev1api.LocalObjectReference{
|
||||
Name: "test-secret",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedPodLabel: map[string]string{
|
||||
RepositoryNameLabel: velerolabel.GetValidName("label with more than 63 characters should be modified"),
|
||||
"azure.workload.identity/use": "fake-label-value",
|
||||
},
|
||||
expectedSecurityContext: nil,
|
||||
expectedPodSecurityContext: nil,
|
||||
expectedImagePullSecrets: []corev1api.LocalObjectReference{
|
||||
{
|
||||
Name: "imagePullSecret1",
|
||||
},
|
||||
},
|
||||
backupRepository: &velerov1api.BackupRepository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "velero",
|
||||
Name: "label with more than 63 characters should be modified",
|
||||
},
|
||||
Spec: velerov1api.BackupRepositorySpec{
|
||||
VolumeNamespace: "test-123",
|
||||
RepositoryType: "kopia",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
param := provider.RepoParam{
|
||||
@@ -1083,6 +1146,10 @@ func TestBuildJob(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
if tc.backupRepository != nil {
|
||||
param.BackupRepo = tc.backupRepository
|
||||
}
|
||||
|
||||
// Create a fake clientset with resources
|
||||
objs := []runtime.Object{param.BackupLocation, param.BackupRepo}
|
||||
|
||||
|
||||
@@ -17,20 +17,15 @@ limitations under the License.
|
||||
package actions
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/util"
|
||||
)
|
||||
@@ -91,46 +86,13 @@ func (p *PVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
if pvc.Annotations == nil {
|
||||
pvc.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
log := p.logger.WithFields(map[string]any{
|
||||
"kind": pvc.Kind,
|
||||
"namespace": pvc.Namespace,
|
||||
"name": pvc.Name,
|
||||
})
|
||||
|
||||
// Handle selected node annotation
|
||||
node, ok := pvc.Annotations[AnnSelectedNode]
|
||||
if ok {
|
||||
// fetch node mapping from configMap
|
||||
newNode, err := getNewNodeFromConfigMap(p.configMapClient, node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(newNode) != 0 {
|
||||
// Check whether the mapped node exists first.
|
||||
exists, err := isNodeExist(p.nodeClient, newNode)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking %s's mapped node %s existence", node, newNode)
|
||||
}
|
||||
if !exists {
|
||||
log.Warnf("Selected-node's mapped node doesn't exist: source: %s, dest: %s. Please check the ConfigMap with label velero.io/change-pvc-node-selector.", node, newNode)
|
||||
}
|
||||
|
||||
// set node selector
|
||||
// We assume that node exist for node-mapping
|
||||
pvc.Annotations[AnnSelectedNode] = newNode
|
||||
log.Infof("Updating selected-node to %s from %s", newNode, node)
|
||||
} else {
|
||||
log.Info("Clearing PVC selected-node annotation")
|
||||
delete(pvc.Annotations, AnnSelectedNode)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove other annotations
|
||||
// Remove PVC annotations
|
||||
removePVCAnnotations(
|
||||
&pvc,
|
||||
[]string{
|
||||
@@ -138,6 +100,7 @@ func (p *PVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
|
||||
AnnBoundByController,
|
||||
AnnStorageProvisioner,
|
||||
AnnBetaStorageProvisioner,
|
||||
AnnSelectedNode,
|
||||
velerov1api.VolumeSnapshotLabel,
|
||||
velerov1api.DataUploadNameAnnotation,
|
||||
},
|
||||
@@ -167,34 +130,6 @@ func (p *PVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func getNewNodeFromConfigMap(client corev1client.ConfigMapInterface, node string) (string, error) {
|
||||
// fetch node mapping from configMap
|
||||
config, err := common.GetPluginConfig(common.PluginKindRestoreItemAction, "velero.io/change-pvc-node-selector", client)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if config == nil {
|
||||
// there is no node mapping defined for change-pvc-node
|
||||
// so we will return empty new node
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return config.Data[node], nil
|
||||
}
|
||||
|
||||
// isNodeExist check if node resource exist or not
|
||||
func isNodeExist(nodeClient corev1client.NodeInterface, name string) (bool, error) {
|
||||
_, err := nodeClient.Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func removePVCAnnotations(pvc *corev1api.PersistentVolumeClaim, remove []string) {
|
||||
for k := range pvc.Annotations {
|
||||
if util.Contains(remove, k) {
|
||||
|
||||
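The hunk above cuts off inside removePVCAnnotations. For readability, a self-contained sketch of what the helper plausibly does, substituting the standard library's slices.Contains for the repo's util.Contains; the real implementation is not shown in this diff:

```go
// Sketch only: drop any PVC annotation whose key appears in the remove list.
// Assumes imports of "slices" and corev1api "k8s.io/api/core/v1".
func removePVCAnnotations(pvc *corev1api.PersistentVolumeClaim, remove []string) {
	for k := range pvc.Annotations {
		if slices.Contains(remove, k) {
			// Deleting from a map while ranging over it is safe in Go.
			delete(pvc.Annotations, k)
		}
	}
}
```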
@@ -17,11 +17,9 @@ limitations under the License.
|
||||
package actions
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
@@ -42,105 +40,57 @@ import (
|
||||
// desired result.
|
||||
func TestPVCActionExecute(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
configMap *corev1api.ConfigMap
|
||||
node *corev1api.Node
|
||||
newNode *corev1api.Node
|
||||
want *corev1api.PersistentVolumeClaim
|
||||
wantErr error
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
want *corev1api.PersistentVolumeClaim
|
||||
wantErr error
|
||||
}{
|
||||
{
|
||||
name: "a valid mapping for a persistent volume claim is applied correctly",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
|
||||
).Result(),
|
||||
configMap: builder.ForConfigMap("velero", "change-pvc-node").
|
||||
ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
|
||||
Data("source-node", "dest-node").
|
||||
Result(),
|
||||
newNode: builder.ForNode("dest-node").Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("volume.kubernetes.io/selected-node", "dest-node"),
|
||||
).Result(),
|
||||
},
|
||||
{
|
||||
name: "when no config map exists for the plugin, the item is returned without node selector",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
|
||||
).Result(),
|
||||
configMap: builder.ForConfigMap("velero", "change-pvc-node").
|
||||
ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/some-other-plugin", "RestoreItemAction")).
|
||||
Data("source-node", "dest-node").
|
||||
Result(),
|
||||
node: builder.ForNode("source-node").Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
|
||||
},
|
||||
{
|
||||
name: "when no node-mappings exist in the plugin config map, the item is returned without node selector",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
|
||||
).Result(),
|
||||
configMap: builder.ForConfigMap("velero", "change-pvc-node").
|
||||
ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
|
||||
Result(),
|
||||
node: builder.ForNode("source-node").Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
|
||||
},
|
||||
{
|
||||
name: "when persistent volume claim has no node selector, the item is returned as-is",
|
||||
name: "a persistent volume claim with no annotation",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
|
||||
configMap: builder.ForConfigMap("velero", "change-pvc-node").
|
||||
ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
|
||||
Data("source-node", "dest-node").
|
||||
Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
|
||||
},
|
||||
{
|
||||
name: "when persistent volume claim's node-selector has no mapping in the config map, the item is returned without node selector",
|
||||
name: "a persistent volume claim with selected-node annotation",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
|
||||
).Result(),
|
||||
configMap: builder.ForConfigMap("velero", "change-pvc-node").
|
||||
ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
|
||||
Data("source-node-1", "dest-node").
|
||||
Result(),
|
||||
node: builder.ForNode("source-node").Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").ObjectMeta(builder.WithAnnotationsMap(map[string]string{})).Result(),
|
||||
},
|
||||
{
|
||||
name: "a persistent volume claim with other annotation",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("other-anno-1", "other-value-1", "other-anno-2", "other-value-2"),
|
||||
).Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").ObjectMeta(
|
||||
builder.WithAnnotations("other-anno-1", "other-value-1", "other-anno-2", "other-value-2"),
|
||||
).Result(),
|
||||
},
|
||||
{
|
||||
name: "a persistent volume claim with other annotation and selected-node annotation",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("other-anno", "other-value", "volume.kubernetes.io/selected-node", "source-node"),
|
||||
).Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").ObjectMeta(
|
||||
builder.WithAnnotations("other-anno", "other-value"),
|
||||
).Result(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
clientset := fake.NewSimpleClientset()
|
||||
logger := logrus.StandardLogger()
|
||||
buf := bytes.Buffer{}
|
||||
logrus.SetOutput(&buf)
|
||||
|
||||
a := NewPVCAction(
|
||||
logger,
|
||||
velerotest.NewLogger(),
|
||||
clientset.CoreV1().ConfigMaps("velero"),
|
||||
clientset.CoreV1().Nodes(),
|
||||
)
|
||||
|
||||
// set up test data
|
||||
if tc.configMap != nil {
|
||||
_, err := clientset.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(t.Context(), tc.configMap, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
if tc.node != nil {
|
||||
_, err := clientset.CoreV1().Nodes().Create(t.Context(), tc.node, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if tc.newNode != nil {
|
||||
_, err := clientset.CoreV1().Nodes().Create(t.Context(), tc.newNode, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -156,10 +106,6 @@ func TestPVCActionExecute(t *testing.T) {
|
||||
// execute method under test
|
||||
res, err := a.Execute(input)
|
||||
|
||||
// Make sure mapped selected-node exists.
|
||||
logOutput := buf.String()
|
||||
assert.NotContains(t, logOutput, "Selected-node's mapped node doesn't exist")
|
||||
|
||||
// validate for both error and non-error cases
|
||||
switch {
|
||||
case tc.wantErr != nil:
|
||||
|
||||
@@ -121,6 +121,7 @@ func (p *Progress) UploadStarted() {}
|
||||
// CachedFile statistic the total bytes been cached currently
|
||||
func (p *Progress) CachedFile(fname string, numBytes int64) {
|
||||
atomic.AddInt64(&p.cachedBytes, numBytes)
|
||||
atomic.AddInt64(&p.processedBytes, numBytes)
|
||||
p.UpdateProgress()
|
||||
}
|
||||
|
||||
|
||||
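This one-line addition is the change behind the #9333 changelog entry ("add bytesDone for cache files"): bytes served from the cache now also advance the processed-bytes counter. A hedged sketch of the surrounding accounting, with the struct layout inferred from the fields the hunk touches rather than copied from the repo:

```go
// Sketch only: the Progress type and UpdateProgress are assumed from the hunk.
// Assumes import of "sync/atomic".
type Progress struct {
	cachedBytes    int64 // total bytes read from the local cache
	processedBytes int64 // total bytes reported upward as bytesDone
}

func (p *Progress) CachedFile(fname string, numBytes int64) {
	atomic.AddInt64(&p.cachedBytes, numBytes)
	// After the fix, cached reads count toward processedBytes too, so
	// bytesDone reflects files satisfied from the cache.
	atomic.AddInt64(&p.processedBytes, numBytes)
	p.UpdateProgress()
}
```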
@@ -689,7 +689,7 @@ func WaitUntilVSCHandleIsReady(
|
||||
return vsc, nil
|
||||
}
|
||||
|
||||
func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string {
|
||||
func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot, events *corev1api.EventList) string {
|
||||
vscName := ""
|
||||
readyToUse := false
|
||||
errMessage := ""
|
||||
@@ -710,6 +710,14 @@ func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string {
|
||||
|
||||
diag := fmt.Sprintf("VS %s/%s, bind to %s, readyToUse %v, errMessage %s\n", vs.Namespace, vs.Name, vscName, readyToUse, errMessage)
|
||||
|
||||
if events != nil {
|
||||
for _, e := range events.Items {
|
||||
if e.InvolvedObject.UID == vs.UID && e.Type == corev1api.EventTypeWarning {
|
||||
diag += fmt.Sprintf("VS event reason %s, message %s\n", e.Reason, e.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return diag
|
||||
}
|
||||
|
||||
|
||||
@@ -1699,6 +1699,7 @@ func TestDiagnoseVS(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
vs *snapshotv1api.VolumeSnapshot
|
||||
events *corev1api.EventList
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
@@ -1781,11 +1782,81 @@ func TestDiagnoseVS(t *testing.T) {
|
||||
},
|
||||
expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage fake-message\n",
|
||||
},
|
||||
{
|
||||
name: "VS with VSC and empty event",
|
||||
vs: &snapshotv1api.VolumeSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-vs",
|
||||
Namespace: "fake-ns",
|
||||
},
|
||||
Status: &snapshotv1api.VolumeSnapshotStatus{
|
||||
BoundVolumeSnapshotContentName: &vscName,
|
||||
ReadyToUse: &readyToUse,
|
||||
Error: &snapshotv1api.VolumeSnapshotError{},
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{},
|
||||
expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \n",
|
||||
},
|
||||
{
|
||||
name: "VS with VSC and events",
|
||||
vs: &snapshotv1api.VolumeSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-vs",
|
||||
Namespace: "fake-ns",
|
||||
UID: "fake-vs-uid",
|
||||
},
|
||||
Status: &snapshotv1api.VolumeSnapshotStatus{
|
||||
BoundVolumeSnapshotContentName: &vscName,
|
||||
ReadyToUse: &readyToUse,
|
||||
Error: &snapshotv1api.VolumeSnapshotError{},
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{Items: []corev1api.Event{
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-6",
|
||||
Message: "message-6",
|
||||
},
|
||||
}},
|
||||
expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \nVS event reason reason-3, message message-3\nVS event reason reason-6, message message-6\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
diag := DiagnoseVS(tc.vs)
|
||||
diag := DiagnoseVS(tc.vs, tc.events)
|
||||
assert.Equal(t, tc.expected, diag)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -268,13 +268,21 @@ func ToSystemAffinity(loadAffinities []*LoadAffinity) *corev1api.Affinity {
|
||||
return nil
|
||||
}
|
||||
|
||||
func DiagnosePod(pod *corev1api.Pod) string {
|
||||
func DiagnosePod(pod *corev1api.Pod, events *corev1api.EventList) string {
|
||||
diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s\n", pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName)
|
||||
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
diag += fmt.Sprintf("Pod condition %s, status %s, reason %s, message %s\n", condition.Type, condition.Status, condition.Reason, condition.Message)
|
||||
}
|
||||
|
||||
if events != nil {
|
||||
for _, e := range events.Items {
|
||||
if e.InvolvedObject.UID == pod.UID && e.Type == corev1api.EventTypeWarning {
|
||||
diag += fmt.Sprintf("Pod event reason %s, message %s\n", e.Reason, e.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return diag
|
||||
}
|
||||
|
||||
|
||||
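The same warning-event loop now appears inline in DiagnosePod above, and again in DiagnosePVC and DiagnoseVS elsewhere in this comparison. A hedged sketch of how the three could share one helper (a possible refactor, not something this diff introduces):

```go
// Sketch only: one line per warning event attached to the given object UID.
// Assumes imports of "fmt", corev1api "k8s.io/api/core/v1" and
// "k8s.io/apimachinery/pkg/types".
func diagnoseEvents(kind string, uid types.UID, events *corev1api.EventList) string {
	if events == nil {
		return ""
	}
	diag := ""
	for _, e := range events.Items {
		if e.InvolvedObject.UID == uid && e.Type == corev1api.EventTypeWarning {
			diag += fmt.Sprintf("%s event reason %s, message %s\n", kind, e.Reason, e.Message)
		}
	}
	return diag
}
```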
@@ -896,10 +896,11 @@ func TestDiagnosePod(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pod *corev1api.Pod
|
||||
events *corev1api.EventList
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "pod with all info",
|
||||
name: "pod with all info but event",
|
||||
pod: &corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pod",
|
||||
@@ -928,11 +929,111 @@ func TestDiagnosePod(t *testing.T) {
|
||||
},
|
||||
expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
|
||||
},
|
||||
{
|
||||
name: "pod with all info and empty event list",
|
||||
pod: &corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pod",
|
||||
Namespace: "fake-ns",
|
||||
},
|
||||
Spec: corev1api.PodSpec{
|
||||
NodeName: "fake-node",
|
||||
},
|
||||
Status: corev1api.PodStatus{
|
||||
Phase: corev1api.PodPending,
|
||||
Conditions: []corev1api.PodCondition{
|
||||
{
|
||||
Type: corev1api.PodInitialized,
|
||||
Status: corev1api.ConditionTrue,
|
||||
Reason: "fake-reason-1",
|
||||
Message: "fake-message-1",
|
||||
},
|
||||
{
|
||||
Type: corev1api.PodScheduled,
|
||||
Status: corev1api.ConditionFalse,
|
||||
Reason: "fake-reason-2",
|
||||
Message: "fake-message-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{},
|
||||
expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
|
||||
},
|
||||
{
|
||||
name: "pod with all info and events",
|
||||
pod: &corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pod",
|
||||
Namespace: "fake-ns",
|
||||
UID: "fake-pod-uid",
|
||||
},
|
||||
Spec: corev1api.PodSpec{
|
||||
NodeName: "fake-node",
|
||||
},
|
||||
Status: corev1api.PodStatus{
|
||||
Phase: corev1api.PodPending,
|
||||
Conditions: []corev1api.PodCondition{
|
||||
{
|
||||
Type: corev1api.PodInitialized,
|
||||
Status: corev1api.ConditionTrue,
|
||||
Reason: "fake-reason-1",
|
||||
Message: "fake-message-1",
|
||||
},
|
||||
{
|
||||
Type: corev1api.PodScheduled,
|
||||
Status: corev1api.ConditionFalse,
|
||||
Reason: "fake-reason-2",
|
||||
Message: "fake-message-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{Items: []corev1api.Event{
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-6",
|
||||
Message: "message-6",
|
||||
},
|
||||
}},
|
||||
expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\nPod event reason reason-3, message message-3\nPod event reason reason-6, message message-6\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
diag := DiagnosePod(tc.pod)
|
||||
diag := DiagnosePod(tc.pod, tc.events)
|
||||
assert.Equal(t, tc.expected, diag)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -463,8 +463,18 @@ func GetPVCForPodVolume(vol *corev1api.Volume, pod *corev1api.Pod, crClient crcl
|
||||
return pvc, nil
|
||||
}
|
||||
|
||||
func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim) string {
|
||||
return fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)
|
||||
func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim, events *corev1api.EventList) string {
|
||||
diag := fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)
|
||||
|
||||
if events != nil {
|
||||
for _, e := range events.Items {
|
||||
if e.InvolvedObject.UID == pvc.UID && e.Type == corev1api.EventTypeWarning {
|
||||
diag += fmt.Sprintf("PVC event reason %s, message %s\n", e.Reason, e.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return diag
|
||||
}
|
||||
|
||||
func DiagnosePV(pv *corev1api.PersistentVolume) string {
|
||||
|
||||
@@ -1593,10 +1593,11 @@ func TestDiagnosePVC(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
events *corev1api.EventList
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "pvc with all info",
|
||||
name: "pvc with all info but events",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pvc",
|
||||
@@ -1611,11 +1612,83 @@ func TestDiagnosePVC(t *testing.T) {
|
||||
},
|
||||
expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
|
||||
},
|
||||
{
|
||||
name: "pvc with all info and empty events",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pvc",
|
||||
Namespace: "fake-ns",
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "fake-pv",
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimPending,
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{},
|
||||
expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
|
||||
},
|
||||
{
|
||||
name: "pvc with all info and events",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pvc",
|
||||
Namespace: "fake-ns",
|
||||
UID: "fake-pvc-uid",
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "fake-pv",
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimPending,
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{Items: []corev1api.Event{
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-6",
|
||||
Message: "message-6",
|
||||
},
|
||||
}},
|
||||
expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\nPVC event reason reason-3, message message-3\nPVC event reason reason-6, message message-6\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
diag := DiagnosePVC(tc.pvc)
|
||||
diag := DiagnosePVC(tc.pvc, tc.events)
|
||||
assert.Equal(t, tc.expected, diag)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ Note: If less resources are assigned to data mover pods, the data movement activ
|
||||
Refer to [Performance Guidance][3] for a guidance of performance vs. resource usage, and it is highly recommended that you perform your own testing to find the best resource limits for your data.
|
||||
|
||||
Velero introduces a new section in the node-agent configMap, called ```podResources```, through which you can set customized resources configurations for data mover pods.
|
||||
If it is not there, a configMap should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. The name of the configMap should be specified in the node-agent server parameter ```--node-agent-config```.
|
||||
If it is not there, a configMap should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. The name of the configMap should be specified in the node-agent server parameter ```--node-agent-configmap```.
|
||||
Node-agent server checks these configurations at startup time. Therefore, you could edit this configMap any time, but in order to make the changes effective, node-agent server needs to be restarted.
|
||||
|
||||
### Sample
|
||||
@@ -39,19 +39,19 @@ To create the configMap, save something like the above sample to a json file and
|
||||
kubectl create cm node-agent-config -n velero --from-file=<json file name>
|
||||
```
|
||||
|
||||
To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-config``` argument to the spec:
|
||||
To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-configmap``` argument to the spec:
|
||||
1. Open the node-agent daemonset spec
|
||||
```
|
||||
kubectl edit ds node-agent -n velero
|
||||
```
|
||||
2. Add ```- --node-agent-config``` to ```spec.template.spec.containers```
|
||||
2. Add ```- --node-agent-configmap``` to ```spec.template.spec.containers```
|
||||
```
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --node-agent-config=<configMap name>
|
||||
- --node-agent-configmap=<configMap name>
|
||||
```
|
||||
|
||||
### Priority Class
|
||||
@@ -126,4 +126,4 @@ kubectl create cm node-agent-config -n velero --from-file=node-agent-config.json
|
||||
[1]: csi-snapshot-data-movement.md
|
||||
[2]: file-system-backup.md
|
||||
[3]: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/
|
||||
[4]: performance-guidance.md
|
||||
[4]: performance-guidance.md
|
||||
|
||||
@@ -27,22 +27,22 @@ To create the configMap, save something like the above sample to a json file and
|
||||
kubectl create cm node-agent-config -n velero --from-file=<json file name>
|
||||
```
|
||||
|
||||
To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-config``` argument to the spec:
|
||||
To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-configmap``` argument to the spec:
|
||||
1. Open the node-agent daemonset spec
|
||||
```
|
||||
kubectl edit ds node-agent -n velero
|
||||
```
|
||||
2. Add ```- --node-agent-config``` to ```spec.template.spec.containers```
|
||||
2. Add ```- --node-agent-configmap``` to ```spec.template.spec.containers```
|
||||
```
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --node-agent-config=<configMap name>
|
||||
- --node-agent-configmap=<configMap name>
|
||||
```
|
||||
|
||||
[1]: csi-snapshot-data-movement.md
|
||||
[2]: file-system-backup.md
|
||||
[3]: node-agent-concurrency.md
|
||||
[4]: data-movement-node-selection.md
|
||||
[4]: data-movement-node-selection.md
|
||||
|
||||
@@ -215,37 +215,9 @@ data:
|
||||
|
||||
### PVC selected-node
|
||||
|
||||
Velero by default removes PVC's `volume.kubernetes.io/selected-node` annotation during restore, so that the restored PVC could be provisioned appropriately according to ```WaitForFirstConsumer``` rules, storage topologies and the restored pod's schedule result, etc.
|
||||
Velero removes PVC's `volume.kubernetes.io/selected-node` annotation during restore, so that the restored PVC could be provisioned appropriately according to ```WaitForFirstConsumer``` rules, storage topologies and the restored pod's schedule result, etc.
|
||||
|
||||
For more information of how this selected-node annotation matters to PVC restore, see issue https://github.com/vmware-tanzu/velero/issues/9053.
|
||||
|
||||
As an expectation, when you provide the selected-node configuration, Velero sets the annotation to the node in the configuration, if the node doesn't exist in the cluster then the annotation will also be removed.
|
||||
Note: This feature is under deprecation as of Velero 1.15, following Velero deprecation policy. This feature is primarily used to remedy some problems in old Kubernetes versions as described [here](https://github.com/vmware-tanzu/velero/pull/2377). It may not work with the new features of Kubernetes and Velero. For more information, see issue https://github.com/vmware-tanzu/velero/issues/9053 for more information.
|
||||
To configure a selected-node, create a config map in the Velero namespace like the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
# any name can be used; Velero uses the labels (below)
|
||||
# to identify it rather than the name
|
||||
name: change-pvc-node-selector-config
|
||||
# must be in the velero namespace
|
||||
namespace: velero
|
||||
# the below labels should be used verbatim in your
|
||||
# ConfigMap.
|
||||
labels:
|
||||
# this value-less label identifies the ConfigMap as
|
||||
# config for a plugin (i.e. the built-in restore item action plugin)
|
||||
velero.io/plugin-config: ""
|
||||
# this label identifies the name and kind of plugin
|
||||
# that this ConfigMap is for.
|
||||
velero.io/change-pvc-node-selector: RestoreItemAction
|
||||
data:
|
||||
# add 1+ key-value pairs here, where the key is the old
|
||||
# node name and the value is the new node name.
|
||||
<old-node-name>: <new-node-name>
|
||||
```
|
||||
For more information of how this selected-node annotation matters to PVC restore, see issue https://github.com/vmware-tanzu/velero/issues/9053.
|
||||
|
||||
## Restoring into a different namespace
|
||||
|
||||
|
||||