Mirror of https://github.com/vmware-tanzu/velero.git, synced 2026-01-15 17:22:52 +00:00

Compare commits: action_ver...dependabot (6 commits)
Commits:

- d2555bfc28
- 99a46ed818
- 93e8379530
- 8b3ba78c8c
- b34f2deff2
- e6aab8ca93
.github/workflows/nightly-trivy-scan.yml (vendored, 2 lines changed)

@@ -31,6 +31,6 @@ jobs:
           output: 'trivy-results.sarif'
 
       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@v3
+        uses: github/codeql-action/upload-sarif@v4
         with:
          sarif_file: 'trivy-results.sarif'
changelogs/unreleased/9296-Lyndon-Li (new file, 1 line)

@@ -0,0 +1 @@
+Fix issue #9267, add events to data mover prepare diagnostic
changelogs/unreleased/9329-T4iFooN-IX (new file, 1 line)

@@ -0,0 +1 @@
+Fix typos in documentation
@@ -381,8 +381,13 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
         diag += fmt.Sprintf("error getting backup vs %s, err: %v\n", backupVSName, err)
     }
 
+    events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
+    if err != nil {
+        diag += fmt.Sprintf("error listing events, err: %v\n", err)
+    }
+
     if pod != nil {
-        diag += kube.DiagnosePod(pod)
+        diag += kube.DiagnosePod(pod, events)
 
         if pod.Spec.NodeName != "" {
             if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
@@ -392,7 +397,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
     }
 
     if pvc != nil {
-        diag += kube.DiagnosePVC(pvc)
+        diag += kube.DiagnosePVC(pvc, events)
 
         if pvc.Spec.VolumeName != "" {
             if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
@@ -404,7 +409,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
     }
 
     if vs != nil {
-        diag += csi.DiagnoseVS(vs)
+        diag += csi.DiagnoseVS(vs, events)
 
         if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil && *vs.Status.BoundVolumeSnapshotContentName != "" {
             if vsc, err := e.csiSnapshotClient.VolumeSnapshotContents().Get(ctx, *vs.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}); err != nil {
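All three exposers gain the same two-step pattern: one namespaced Events List call up front, then each per-object diagnostic scans that shared list for Warning events whose InvolvedObject UID matches the target. A minimal, self-contained sketch of the shared filtering step (the helper name `warningEventsFor` is illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// warningEventsFor reproduces the filter that the patched DiagnosePod,
// DiagnosePVC, and DiagnoseVS all apply: keep only Warning events that
// reference the target object's UID. A nil list is tolerated, mirroring
// the exposers' behavior when the Events List call fails.
func warningEventsFor(kind string, uid types.UID, events *corev1api.EventList) string {
	diag := ""
	if events == nil {
		return diag
	}
	for _, e := range events.Items {
		if e.InvolvedObject.UID == uid && e.Type == corev1api.EventTypeWarning {
			diag += fmt.Sprintf("%s event reason %s, message %s\n", kind, e.Reason, e.Message)
		}
	}
	return diag
}

func main() {
	events := &corev1api.EventList{Items: []corev1api.Event{
		{InvolvedObject: corev1api.ObjectReference{UID: "pod-uid"}, Type: corev1api.EventTypeWarning, Reason: "FailedScheduling", Message: "0/3 nodes are available"},
		{InvolvedObject: corev1api.ObjectReference{UID: "pod-uid"}, Type: corev1api.EventTypeNormal, Reason: "Scheduled", Message: "assigned to node-1"},
		{InvolvedObject: corev1api.ObjectReference{UID: "other-uid"}, Type: corev1api.EventTypeWarning, Reason: "FailedMount", Message: "volume not found"},
	}}
	// Prints only the FailedScheduling line: wrong-UID and Normal events are skipped.
	fmt.Print(warningEventsFor("Pod", "pod-uid", events))
}
```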
@@ -1288,6 +1288,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-backup",
+                UID:       "fake-pod-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: backup.APIVersion,
@@ -1313,6 +1314,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-backup",
+                UID:       "fake-pod-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: backup.APIVersion,
@@ -1341,6 +1343,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-backup",
+                UID:       "fake-pvc-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: backup.APIVersion,
@@ -1359,6 +1362,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-backup",
+                UID:       "fake-pvc-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: backup.APIVersion,
@@ -1404,6 +1408,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-backup",
+                UID:       "fake-vs-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: backup.APIVersion,
@@ -1419,6 +1424,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-backup",
+                UID:       "fake-vs-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: backup.APIVersion,
@@ -1436,6 +1442,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-backup",
+                UID:       "fake-vs-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: backup.APIVersion,
@@ -1633,6 +1640,74 @@ PVC velero/fake-backup, phase Pending, binding to fake-pv
 PV fake-pv, phase Pending, reason , message fake-pv-message
 VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
 VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
 end diagnose CSI exposer`,
         },
+        {
+            name:        "with events",
+            ownerBackup: backup,
+            kubeClientObj: []runtime.Object{
+                &backupPodWithNodeName,
+                &backupPVCWithVolumeName,
+                &backupPV,
+                &nodeAgentPod,
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+                    Reason:         "reason-1",
+                    Message:        "message-1",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Reason:         "reason-2",
+                    Message:        "message-2",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+                    Reason:         "reason-3",
+                    Message:        "message-3",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
+                    Reason:         "reason-4",
+                    Message:        "message-4",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-5"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Reason:         "reason-5",
+                    Message:        "message-5",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-6"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Reason:         "reason-6",
+                    Message:        "message-6",
+                },
+            },
+            snapshotClientObj: []runtime.Object{
+                &backupVSWithVSC,
+                &backupVSC,
+            },
+            expected: `begin diagnose CSI exposer
+Pod velero/fake-backup, phase Pending, node name fake-node
+Pod condition Initialized, status True, reason , message fake-pod-message
+Pod event reason reason-2, message message-2
+Pod event reason reason-6, message message-6
+PVC velero/fake-backup, phase Pending, binding to fake-pv
+PVC event reason reason-3, message message-3
+PV fake-pv, phase Pending, reason , message fake-pv-message
+VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
+VS event reason reason-4, message message-4
+VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
+end diagnose CSI exposer`,
+        },
     }
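The new "with events" case works because the fake clientset serves the seeded Event objects through the same namespaced List call the exposer makes, which is also why event-5 in other-namespace never appears in the expected output. A small sketch of that mechanic, assuming the standard client-go fake package:

```go
package main

import (
	"context"
	"fmt"

	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed two events, one per namespace, the way kubeClientObj does in the test.
	client := fake.NewSimpleClientset(
		&corev1api.Event{
			ObjectMeta:     metav1.ObjectMeta{Namespace: "velero", Name: "event-2"},
			Type:           corev1api.EventTypeWarning,
			InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
			Reason:         "reason-2",
			Message:        "message-2",
		},
		&corev1api.Event{
			ObjectMeta:     metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-5"},
			Type:           corev1api.EventTypeWarning,
			InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
			Reason:         "reason-5",
			Message:        "message-5",
		},
	)

	// DiagnoseExpose lists events only in the owner object's namespace,
	// so the other-namespace event is invisible to it.
	events, err := client.CoreV1().Events("velero").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(events.Items)) // 1
}
```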
@@ -287,8 +287,13 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
         diag += fmt.Sprintf("error getting restore pvc %s, err: %v\n", restorePVCName, err)
     }
 
+    events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
+    if err != nil {
+        diag += fmt.Sprintf("error listing events, err: %v\n", err)
+    }
+
     if pod != nil {
-        diag += kube.DiagnosePod(pod)
+        diag += kube.DiagnosePod(pod, events)
 
         if pod.Spec.NodeName != "" {
             if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
@@ -298,7 +303,7 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
     }
 
     if pvc != nil {
-        diag += kube.DiagnosePVC(pvc)
+        diag += kube.DiagnosePVC(pvc, events)
 
         if pvc.Spec.VolumeName != "" {
             if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
@@ -549,6 +549,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-restore",
+                UID:       "fake-pod-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: restore.APIVersion,
@@ -574,6 +575,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-restore",
+                UID:       "fake-pod-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: restore.APIVersion,
@@ -602,6 +604,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-restore",
+                UID:       "fake-pvc-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: restore.APIVersion,
@@ -620,6 +623,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-restore",
+                UID:       "fake-pvc-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: restore.APIVersion,
@@ -758,6 +762,60 @@ Pod velero/fake-restore, phase Pending, node name fake-node
 Pod condition Initialized, status True, reason , message fake-pod-message
 PVC velero/fake-restore, phase Pending, binding to fake-pv
 PV fake-pv, phase Pending, reason , message fake-pv-message
 end diagnose restore exposer`,
         },
+        {
+            name:         "with events",
+            ownerRestore: restore,
+            kubeClientObj: []runtime.Object{
+                &restorePodWithNodeName,
+                &restorePVCWithVolumeName,
+                &restorePV,
+                &nodeAgentPod,
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+                    Reason:         "reason-1",
+                    Message:        "message-1",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Reason:         "reason-2",
+                    Message:        "message-2",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+                    Reason:         "reason-3",
+                    Message:        "message-3",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-4"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Reason:         "reason-4",
+                    Message:        "message-4",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-5"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Reason:         "reason-5",
+                    Message:        "message-5",
+                },
+            },
+            expected: `begin diagnose restore exposer
+Pod velero/fake-restore, phase Pending, node name fake-node
+Pod condition Initialized, status True, reason , message fake-pod-message
+Pod event reason reason-2, message message-2
+Pod event reason reason-5, message message-5
+PVC velero/fake-restore, phase Pending, binding to fake-pv
+PVC event reason reason-3, message message-3
+PV fake-pv, phase Pending, reason , message fake-pv-message
+end diagnose restore exposer`,
+        },
     }
@@ -251,8 +251,13 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
         diag += fmt.Sprintf("error getting hosting pod %s, err: %v\n", hostingPodName, err)
     }
 
+    events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
+    if err != nil {
+        diag += fmt.Sprintf("error listing events, err: %v\n", err)
+    }
+
     if pod != nil {
-        diag += kube.DiagnosePod(pod)
+        diag += kube.DiagnosePod(pod, events)
 
         if pod.Spec.NodeName != "" {
             if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
@@ -466,6 +466,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-backup",
+                UID:       "fake-pod-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: backup.APIVersion,
@@ -491,6 +492,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: velerov1.DefaultNamespace,
                 Name:      "fake-backup",
+                UID:       "fake-pod-uid",
                 OwnerReferences: []metav1.OwnerReference{
                     {
                         APIVersion: backup.APIVersion,
@@ -587,6 +589,48 @@ end diagnose pod volume exposer`,
             expected: `begin diagnose pod volume exposer
 Pod velero/fake-backup, phase Pending, node name fake-node
 Pod condition Initialized, status True, reason , message fake-pod-message
 end diagnose pod volume exposer`,
         },
+        {
+            name:        "with events",
+            ownerBackup: backup,
+            kubeClientObj: []runtime.Object{
+                &backupPodWithNodeName,
+                &nodeAgentPod,
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+                    Reason:         "reason-1",
+                    Message:        "message-1",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Reason:         "reason-2",
+                    Message:        "message-2",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-3"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Reason:         "reason-3",
+                    Message:        "message-3",
+                },
+                &corev1api.Event{
+                    ObjectMeta:     metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
+                    Type:           corev1api.EventTypeWarning,
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Reason:         "reason-4",
+                    Message:        "message-4",
+                },
+            },
+            expected: `begin diagnose pod volume exposer
+Pod velero/fake-backup, phase Pending, node name fake-node
+Pod condition Initialized, status True, reason , message fake-pod-message
+Pod event reason reason-2, message message-2
+Pod event reason reason-4, message message-4
+end diagnose pod volume exposer`,
+        },
     }
@@ -689,7 +689,7 @@ func WaitUntilVSCHandleIsReady(
     return vsc, nil
 }
 
-func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string {
+func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot, events *corev1api.EventList) string {
     vscName := ""
     readyToUse := false
     errMessage := ""
@@ -710,6 +710,14 @@ func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string {
 
     diag := fmt.Sprintf("VS %s/%s, bind to %s, readyToUse %v, errMessage %s\n", vs.Namespace, vs.Name, vscName, readyToUse, errMessage)
 
+    if events != nil {
+        for _, e := range events.Items {
+            if e.InvolvedObject.UID == vs.UID && e.Type == corev1api.EventTypeWarning {
+                diag += fmt.Sprintf("VS event reason %s, message %s\n", e.Reason, e.Message)
+            }
+        }
+    }
+
     return diag
 }
 
@@ -1699,6 +1699,7 @@ func TestDiagnoseVS(t *testing.T) {
     testCases := []struct {
         name     string
         vs       *snapshotv1api.VolumeSnapshot
+        events   *corev1api.EventList
         expected string
     }{
         {
@@ -1781,11 +1782,81 @@ func TestDiagnoseVS(t *testing.T) {
             },
             expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage fake-message\n",
         },
+        {
+            name: "VS with VSC and empty event",
+            vs: &snapshotv1api.VolumeSnapshot{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "fake-vs",
+                    Namespace: "fake-ns",
+                },
+                Status: &snapshotv1api.VolumeSnapshotStatus{
+                    BoundVolumeSnapshotContentName: &vscName,
+                    ReadyToUse:                     &readyToUse,
+                    Error:                          &snapshotv1api.VolumeSnapshotError{},
+                },
+            },
+            events:   &corev1api.EventList{},
+            expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \n",
+        },
+        {
+            name: "VS with VSC and events",
+            vs: &snapshotv1api.VolumeSnapshot{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "fake-vs",
+                    Namespace: "fake-ns",
+                    UID:       "fake-vs-uid",
+                },
+                Status: &snapshotv1api.VolumeSnapshotStatus{
+                    BoundVolumeSnapshotContentName: &vscName,
+                    ReadyToUse:                     &readyToUse,
+                    Error:                          &snapshotv1api.VolumeSnapshotError{},
+                },
+            },
+            events: &corev1api.EventList{Items: []corev1api.Event{
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-1",
+                    Message:        "message-1",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-2",
+                    Message:        "message-2",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-3",
+                    Message:        "message-3",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
+                    Type:           corev1api.EventTypeNormal,
+                    Reason:         "reason-4",
+                    Message:        "message-4",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
+                    Type:           corev1api.EventTypeNormal,
+                    Reason:         "reason-5",
+                    Message:        "message-5",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-6",
+                    Message:        "message-6",
+                },
+            }},
+            expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \nVS event reason reason-3, message message-3\nVS event reason reason-6, message message-6\n",
+        },
     }
 
     for _, tc := range testCases {
         t.Run(tc.name, func(t *testing.T) {
-            diag := DiagnoseVS(tc.vs)
+            diag := DiagnoseVS(tc.vs, tc.events)
             assert.Equal(t, tc.expected, diag)
         })
     }
@@ -268,13 +268,21 @@ func ToSystemAffinity(loadAffinities []*LoadAffinity) *corev1api.Affinity {
     return nil
 }
 
-func DiagnosePod(pod *corev1api.Pod) string {
+func DiagnosePod(pod *corev1api.Pod, events *corev1api.EventList) string {
     diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s\n", pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName)
 
     for _, condition := range pod.Status.Conditions {
         diag += fmt.Sprintf("Pod condition %s, status %s, reason %s, message %s\n", condition.Type, condition.Status, condition.Reason, condition.Message)
     }
 
+    if events != nil {
+        for _, e := range events.Items {
+            if e.InvolvedObject.UID == pod.UID && e.Type == corev1api.EventTypeWarning {
+                diag += fmt.Sprintf("Pod event reason %s, message %s\n", e.Reason, e.Message)
+            }
+        }
+    }
+
     return diag
 }
 
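Fed a pod and an event list, the patched function appends one line per matching Warning after the condition lines. A runnable demo of the composed output (`diagnosePod` is a local copy of the function above so the sketch doesn't need the Velero module):

```go
package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// diagnosePod mirrors the patched kube.DiagnosePod shown in the diff above.
func diagnosePod(pod *corev1api.Pod, events *corev1api.EventList) string {
	diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s\n", pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName)

	for _, condition := range pod.Status.Conditions {
		diag += fmt.Sprintf("Pod condition %s, status %s, reason %s, message %s\n", condition.Type, condition.Status, condition.Reason, condition.Message)
	}

	if events != nil {
		for _, e := range events.Items {
			if e.InvolvedObject.UID == pod.UID && e.Type == corev1api.EventTypeWarning {
				diag += fmt.Sprintf("Pod event reason %s, message %s\n", e.Reason, e.Message)
			}
		}
	}

	return diag
}

func main() {
	pod := &corev1api.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "velero", Name: "demo-pod", UID: "demo-uid"},
		Spec:       corev1api.PodSpec{NodeName: "node-1"},
		Status:     corev1api.PodStatus{Phase: corev1api.PodPending},
	}
	events := &corev1api.EventList{Items: []corev1api.Event{{
		InvolvedObject: corev1api.ObjectReference{UID: "demo-uid"},
		Type:           corev1api.EventTypeWarning,
		Reason:         "FailedMount",
		Message:        "volume not attached",
	}}}
	fmt.Print(diagnosePod(pod, events))
	// Output:
	// Pod velero/demo-pod, phase Pending, node name node-1
	// Pod event reason FailedMount, message volume not attached
}
```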
@@ -896,10 +896,11 @@ func TestDiagnosePod(t *testing.T) {
     testCases := []struct {
         name     string
         pod      *corev1api.Pod
+        events   *corev1api.EventList
         expected string
     }{
         {
-            name: "pod with all info",
+            name: "pod with all info but event",
             pod: &corev1api.Pod{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: "fake-pod",
@@ -928,11 +929,111 @@ func TestDiagnosePod(t *testing.T) {
             },
             expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
         },
+        {
+            name: "pod with all info and empty event list",
+            pod: &corev1api.Pod{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "fake-pod",
+                    Namespace: "fake-ns",
+                },
+                Spec: corev1api.PodSpec{
+                    NodeName: "fake-node",
+                },
+                Status: corev1api.PodStatus{
+                    Phase: corev1api.PodPending,
+                    Conditions: []corev1api.PodCondition{
+                        {
+                            Type:    corev1api.PodInitialized,
+                            Status:  corev1api.ConditionTrue,
+                            Reason:  "fake-reason-1",
+                            Message: "fake-message-1",
+                        },
+                        {
+                            Type:    corev1api.PodScheduled,
+                            Status:  corev1api.ConditionFalse,
+                            Reason:  "fake-reason-2",
+                            Message: "fake-message-2",
+                        },
+                    },
+                },
+            },
+            events:   &corev1api.EventList{},
+            expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
+        },
+        {
+            name: "pod with all info and events",
+            pod: &corev1api.Pod{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "fake-pod",
+                    Namespace: "fake-ns",
+                    UID:       "fake-pod-uid",
+                },
+                Spec: corev1api.PodSpec{
+                    NodeName: "fake-node",
+                },
+                Status: corev1api.PodStatus{
+                    Phase: corev1api.PodPending,
+                    Conditions: []corev1api.PodCondition{
+                        {
+                            Type:    corev1api.PodInitialized,
+                            Status:  corev1api.ConditionTrue,
+                            Reason:  "fake-reason-1",
+                            Message: "fake-message-1",
+                        },
+                        {
+                            Type:    corev1api.PodScheduled,
+                            Status:  corev1api.ConditionFalse,
+                            Reason:  "fake-reason-2",
+                            Message: "fake-message-2",
+                        },
+                    },
+                },
+            },
+            events: &corev1api.EventList{Items: []corev1api.Event{
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-1",
+                    Message:        "message-1",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-2",
+                    Message:        "message-2",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-3",
+                    Message:        "message-3",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Type:           corev1api.EventTypeNormal,
+                    Reason:         "reason-4",
+                    Message:        "message-4",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Type:           corev1api.EventTypeNormal,
+                    Reason:         "reason-5",
+                    Message:        "message-5",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-6",
+                    Message:        "message-6",
+                },
+            }},
+            expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\nPod event reason reason-3, message message-3\nPod event reason reason-6, message message-6\n",
+        },
     }
 
     for _, tc := range testCases {
         t.Run(tc.name, func(t *testing.T) {
-            diag := DiagnosePod(tc.pod)
+            diag := DiagnosePod(tc.pod, tc.events)
             assert.Equal(t, tc.expected, diag)
         })
     }
@@ -463,8 +463,18 @@ func GetPVCForPodVolume(vol *corev1api.Volume, pod *corev1api.Pod, crClient crcl
     return pvc, nil
 }
 
-func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim) string {
-    return fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)
+func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim, events *corev1api.EventList) string {
+    diag := fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)
+
+    if events != nil {
+        for _, e := range events.Items {
+            if e.InvolvedObject.UID == pvc.UID && e.Type == corev1api.EventTypeWarning {
+                diag += fmt.Sprintf("PVC event reason %s, message %s\n", e.Reason, e.Message)
+            }
+        }
+    }
+
+    return diag
 }
 
 func DiagnosePV(pv *corev1api.PersistentVolume) string {
@@ -1593,10 +1593,11 @@ func TestDiagnosePVC(t *testing.T) {
     testCases := []struct {
         name     string
         pvc      *corev1api.PersistentVolumeClaim
+        events   *corev1api.EventList
         expected string
     }{
         {
-            name: "pvc with all info",
+            name: "pvc with all info but events",
             pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: "fake-pvc",
@@ -1611,11 +1612,83 @@ func TestDiagnosePVC(t *testing.T) {
             },
             expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
         },
+        {
+            name: "pvc with all info and empty events",
+            pvc: &corev1api.PersistentVolumeClaim{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "fake-pvc",
+                    Namespace: "fake-ns",
+                },
+                Spec: corev1api.PersistentVolumeClaimSpec{
+                    VolumeName: "fake-pv",
+                },
+                Status: corev1api.PersistentVolumeClaimStatus{
+                    Phase: corev1api.ClaimPending,
+                },
+            },
+            events:   &corev1api.EventList{},
+            expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
+        },
+        {
+            name: "pvc with all info and events",
+            pvc: &corev1api.PersistentVolumeClaim{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "fake-pvc",
+                    Namespace: "fake-ns",
+                    UID:       "fake-pvc-uid",
+                },
+                Spec: corev1api.PersistentVolumeClaimSpec{
+                    VolumeName: "fake-pv",
+                },
+                Status: corev1api.PersistentVolumeClaimStatus{
+                    Phase: corev1api.ClaimPending,
+                },
+            },
+            events: &corev1api.EventList{Items: []corev1api.Event{
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-1",
+                    Message:        "message-1",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-2",
+                    Message:        "message-2",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-3",
+                    Message:        "message-3",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+                    Type:           corev1api.EventTypeNormal,
+                    Reason:         "reason-4",
+                    Message:        "message-4",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+                    Type:           corev1api.EventTypeNormal,
+                    Reason:         "reason-5",
+                    Message:        "message-5",
+                },
+                {
+                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+                    Type:           corev1api.EventTypeWarning,
+                    Reason:         "reason-6",
+                    Message:        "message-6",
+                },
+            }},
+            expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\nPVC event reason reason-3, message message-3\nPVC event reason reason-6, message message-6\n",
+        },
     }
 
     for _, tc := range testCases {
         t.Run(tc.name, func(t *testing.T) {
-            diag := DiagnosePVC(tc.pvc)
+            diag := DiagnosePVC(tc.pvc, tc.events)
             assert.Equal(t, tc.expected, diag)
         })
     }
@@ -15,7 +15,7 @@ Note: If less resources are assigned to data mover pods, the data movement activ
 Refer to [Performance Guidance][3] for a guidance of performance vs. resource usage, and it is highly recommended that you perform your own testing to find the best resource limits for your data.
 
 Velero introduces a new section in the node-agent configMap, called ```podResources```, through which you can set customized resources configurations for data mover pods.
-If it is not there, a configMap should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. The name of the configMap should be specified in the node-agent server parameter ```--node-agent-config```.
+If it is not there, a configMap should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. The name of the configMap should be specified in the node-agent server parameter ```--node-agent-configmap```.
 Node-agent server checks these configurations at startup time. Therefore, you could edit this configMap any time, but in order to make the changes effective, node-agent server needs to be restarted.
 
 ### Sample
@@ -39,19 +39,19 @@ To create the configMap, save something like the above sample to a json file and
 kubectl create cm node-agent-config -n velero --from-file=<json file name>
 ```
 
-To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-config``` argument to the spec:
+To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-configmap``` argument to the spec:
 1. Open the node-agent daemonset spec
 ```
 kubectl edit ds node-agent -n velero
 ```
-2. Add ```- --node-agent-config``` to ```spec.template.spec.containers```
+2. Add ```- --node-agent-configmap``` to ```spec.template.spec.containers```
 ```
 spec:
   template:
     spec:
       containers:
       - args:
-        - --node-agent-config=<configMap name>
+        - --node-agent-configmap=<configMap name>
 ```
 
 ### Priority Class
@@ -126,4 +126,4 @@ kubectl create cm node-agent-config -n velero --from-file=node-agent-config.json
 [1]: csi-snapshot-data-movement.md
 [2]: file-system-backup.md
 [3]: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/
-[4]: performance-guidance.md
\ No newline at end of file
+[4]: performance-guidance.md
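For orientation, a node-agent configMap carrying the ```podResources``` section referenced above looks roughly like this (field names follow the Velero documentation for this configMap; the values are illustrative only):

```json
{
    "podResources": {
        "cpuRequest": "1000m",
        "cpuLimit": "2000m",
        "memoryRequest": "1Gi",
        "memoryLimit": "1Gi"
    }
}
```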
@@ -27,22 +27,22 @@ To create the configMap, save something like the above sample to a json file and
 kubectl create cm node-agent-config -n velero --from-file=<json file name>
 ```
 
-To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-config``` argument to the spec:
+To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-configmap``` argument to the spec:
 1. Open the node-agent daemonset spec
 ```
 kubectl edit ds node-agent -n velero
 ```
-2. Add ```- --node-agent-config``` to ```spec.template.spec.containers```
+2. Add ```- --node-agent-configmap``` to ```spec.template.spec.containers```
 ```
 spec:
   template:
     spec:
       containers:
       - args:
-        - --node-agent-config=<configMap name>
+        - --node-agent-configmap=<configMap name>
 ```
 
 [1]: csi-snapshot-data-movement.md
 [2]: file-system-backup.md
 [3]: node-agent-concurrency.md
-[4]: data-movement-node-selection.md
\ No newline at end of file
+[4]: data-movement-node-selection.md
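After editing either daemonset spec, one way to confirm the renamed flag landed (illustrative command; adjust the namespace to wherever Velero is installed):

```
kubectl -n velero get ds node-agent -o jsonpath='{.spec.template.spec.containers[0].args}'
```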