Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-04-22 00:30:34 +00:00)
Merge branch 'main' into dont-connect-repo-in-repo-controller
.github/workflows/push.yml (vendored), 2 changes

@@ -12,7 +12,7 @@ jobs:
   get-go-version:
     uses: ./.github/workflows/get-go-version.yaml
     with:
-      ref: ${ github.ref }
+      ref: ${{ github.ref }}
 
   build:
     name: Build
changelogs/unreleased/9233-Lyndon-Li (new file, 1 line)

@@ -0,0 +1 @@
+Fix issue #9229, don't attach backupPVC to the source node
changelogs/unreleased/9295-sseago (new file, 1 line)

@@ -0,0 +1 @@
+Add option for privileged fs-backup pod
changelogs/unreleased/9296-Lyndon-Li (new file, 1 line)

@@ -0,0 +1 @@
+Fix issue #9267, add events to data mover prepare diagnostic
changelogs/unreleased/9302-blackpiglet (new file, 1 line)

@@ -0,0 +1 @@
+VerifyJSONConfigs verifies every element in Data.
@@ -545,24 +545,22 @@ func (o *Options) Validate(c *cobra.Command, args []string, f client.Factory) er
 		return fmt.Errorf("fail to create go-client %w", err)
 	}
 
-	// If either Linux or Windows node-agent is installed, and the node-agent-configmap
-	// is specified, need to validate the ConfigMap.
-	if (o.UseNodeAgent || o.UseNodeAgentWindows) && len(o.NodeAgentConfigMap) > 0 {
+	if len(o.NodeAgentConfigMap) > 0 {
 		if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.NodeAgentConfigMap, &velerotypes.NodeAgentConfigs{}); err != nil {
-			return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid", o.NodeAgentConfigMap)
+			return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid: %w", o.NodeAgentConfigMap, err)
 		}
 	}
 
 	if len(o.RepoMaintenanceJobConfigMap) > 0 {
 		if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.RepoMaintenanceJobConfigMap, &velerotypes.JobConfigs{}); err != nil {
-			return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid", o.RepoMaintenanceJobConfigMap)
+			return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid: %w", o.RepoMaintenanceJobConfigMap, err)
 		}
 	}
 
 	if len(o.BackupRepoConfigMap) > 0 {
 		config := make(map[string]any)
 		if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.BackupRepoConfigMap, &config); err != nil {
-			return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid", o.BackupRepoConfigMap)
+			return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid: %w", o.BackupRepoConfigMap, err)
 		}
 	}
 
@@ -308,6 +308,8 @@ func (s *nodeAgentServer) run() {
 		s.logger.Infof("Using customized backupPVC config %v", backupPVCConfig)
 	}
 
+	privilegedFsBackup := s.dataPathConfigs != nil && s.dataPathConfigs.PrivilegedFsBackup
+
 	podResources := corev1api.ResourceRequirements{}
 	if s.dataPathConfigs != nil && s.dataPathConfigs.PodResources != nil {
 		if res, err := kube.ParseResourceRequirements(s.dataPathConfigs.PodResources.CPURequest, s.dataPathConfigs.PodResources.MemoryRequest, s.dataPathConfigs.PodResources.CPULimit, s.dataPathConfigs.PodResources.MemoryLimit); err != nil {

@@ -327,12 +329,12 @@ func (s *nodeAgentServer) run() {
 		}
 	}
 
-	pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass)
+	pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass, privilegedFsBackup)
 	if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
 		s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
 	}
 
-	pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass)
+	pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup)
 	if err := pvrReconciler.SetupWithManager(s.mgr); err != nil {
 		s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
 	}
@@ -916,6 +916,13 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
 		return nil, errors.Wrapf(err, "failed to get PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
 	}
 
+	pv := &corev1api.PersistentVolume{}
+	if err := r.client.Get(context.Background(), types.NamespacedName{
+		Name: pvc.Spec.VolumeName,
+	}, pv); err != nil {
+		return nil, errors.Wrapf(err, "failed to get source PV %s", pvc.Spec.VolumeName)
+	}
+
 	nodeOS := kube.GetPVCAttachingNodeOS(pvc, r.kubeClient.CoreV1(), r.kubeClient.StorageV1(), log)
 
 	if err := kube.HasNodeWithOS(context.Background(), nodeOS, r.kubeClient.CoreV1()); err != nil {

@@ -963,6 +970,8 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
 	return &exposer.CSISnapshotExposeParam{
 		SnapshotName: du.Spec.CSISnapshot.VolumeSnapshot,
 		SourceNamespace: du.Spec.SourceNamespace,
+		SourcePVCName: pvc.Name,
+		SourcePVName: pv.Name,
 		StorageClass: du.Spec.CSISnapshot.StorageClass,
 		HostingPodLabels: hostingPodLabels,
 		HostingPodAnnotations: hostingPodAnnotation,
@@ -60,7 +60,7 @@ const (
 // NewPodVolumeBackupReconciler creates the PodVolumeBackupReconciler instance
 func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
 	counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements,
-	metrics *metrics.ServerMetrics, logger logrus.FieldLogger, dataMovePriorityClass string) *PodVolumeBackupReconciler {
+	metrics *metrics.ServerMetrics, logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeBackupReconciler {
 	return &PodVolumeBackupReconciler{
 		client: client,
 		mgr: mgr,

@@ -77,6 +77,7 @@ func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kub
 		exposer: exposer.NewPodVolumeExposer(kubeClient, logger),
 		cancelledPVB: make(map[string]time.Time),
 		dataMovePriorityClass: dataMovePriorityClass,
+		privileged: privileged,
 	}
 }

@@ -97,6 +98,7 @@ type PodVolumeBackupReconciler struct {
 	resourceTimeout time.Duration
 	cancelledPVB map[string]time.Time
 	dataMovePriorityClass string
+	privileged bool
 }
 
 // +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete

@@ -837,6 +839,7 @@ func (r *PodVolumeBackupReconciler) setupExposeParam(pvb *velerov1api.PodVolumeB
 		Resources: r.podResources,
 		// Priority class name for the data mover pod, retrieved from node-agent-configmap
 		PriorityClassName: r.dataMovePriorityClass,
+		Privileged: r.privileged,
 	}
 }
@@ -151,7 +151,8 @@ func initPVBReconcilerWithError(needError ...error) (*PodVolumeBackupReconciler,
 		corev1api.ResourceRequirements{},
 		metrics.NewServerMetrics(),
 		velerotest.NewLogger(),
-		"", // dataMovePriorityClass
+		"",    // dataMovePriorityClass
+		false, // privileged
 	), nil
 }
@@ -56,7 +56,7 @@ import (
 
 func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
 	counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements,
-	logger logrus.FieldLogger, dataMovePriorityClass string) *PodVolumeRestoreReconciler {
+	logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeRestoreReconciler {
 	return &PodVolumeRestoreReconciler{
 		client: client,
 		mgr: mgr,

@@ -72,6 +72,7 @@ func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, ku
 		exposer: exposer.NewPodVolumeExposer(kubeClient, logger),
 		cancelledPVR: make(map[string]time.Time),
 		dataMovePriorityClass: dataMovePriorityClass,
+		privileged: privileged,
 	}
 }

@@ -90,6 +91,7 @@ type PodVolumeRestoreReconciler struct {
 	resourceTimeout time.Duration
 	cancelledPVR map[string]time.Time
 	dataMovePriorityClass string
+	privileged bool
 }
 
 // +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores,verbs=get;list;watch;create;update;patch;delete

@@ -896,6 +898,7 @@ func (r *PodVolumeRestoreReconciler) setupExposeParam(pvr *velerov1api.PodVolume
 		Resources: r.podResources,
 		// Priority class name for the data mover pod, retrieved from node-agent-configmap
 		PriorityClassName: r.dataMovePriorityClass,
+		Privileged: r.privileged,
 	}
 }
@@ -617,7 +617,7 @@ func initPodVolumeRestoreReconcilerWithError(objects []runtime.Object, cliObj []
 
 	dataPathMgr := datapath.NewManager(1)
 
-	return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, corev1api.ResourceRequirements{}, velerotest.NewLogger(), ""), nil
+	return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, corev1api.ResourceRequirements{}, velerotest.NewLogger(), "", false), nil
 }
 
 func TestPodVolumeRestoreReconcile(t *testing.T) {
@@ -35,6 +35,7 @@ import (
 
 	"github.com/vmware-tanzu/velero/pkg/nodeagent"
 	velerotypes "github.com/vmware-tanzu/velero/pkg/types"
+	"github.com/vmware-tanzu/velero/pkg/util"
 	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
 	"github.com/vmware-tanzu/velero/pkg/util/csi"
 	"github.com/vmware-tanzu/velero/pkg/util/kube"

@@ -48,6 +49,12 @@ type CSISnapshotExposeParam struct {
 	// SourceNamespace is the original namespace of the volume that the snapshot is taken for
 	SourceNamespace string
 
+	// SourcePVCName is the original name of the PVC that the snapshot is taken for
+	SourcePVCName string
+
+	// SourcePVName is the name of PV for SourcePVC
+	SourcePVName string
+
 	// AccessMode defines the mode to access the snapshot
 	AccessMode string
@@ -189,6 +196,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
 	backupPVCReadOnly := false
 	spcNoRelabeling := false
 	backupPVCAnnotations := map[string]string{}
+	intoleratableNodes := []string{}
 	if value, exists := csiExposeParam.BackupPVCConfig[csiExposeParam.StorageClass]; exists {
 		if value.StorageClass != "" {
 			backupPVCStorageClass = value.StorageClass

@@ -206,6 +214,15 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
 		if len(value.Annotations) > 0 {
 			backupPVCAnnotations = value.Annotations
 		}
+
+		if _, found := backupPVCAnnotations[util.VSphereCNSFastCloneAnno]; found {
+			if n, err := kube.GetPVAttachedNodes(ctx, csiExposeParam.SourcePVName, e.kubeClient.StorageV1()); err != nil {
+				curLog.WithField("source PV", csiExposeParam.SourcePVName).WithError(err).Warnf("Failed to get attached node for source PV, ignore %s annotation", util.VSphereCNSFastCloneAnno)
+				delete(backupPVCAnnotations, util.VSphereCNSFastCloneAnno)
+			} else {
+				intoleratableNodes = n
+			}
+		}
 	}
 
 	backupPVC, err := e.createBackupPVC(ctx, ownerObject, backupVS.Name, backupPVCStorageClass, csiExposeParam.AccessMode, volumeSize, backupPVCReadOnly, backupPVCAnnotations)

@@ -236,6 +253,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
 		spcNoRelabeling,
 		csiExposeParam.NodeOS,
 		csiExposeParam.PriorityClassName,
+		intoleratableNodes,
 	)
 	if err != nil {
 		return errors.Wrap(err, "error to create backup pod")
@@ -363,8 +381,13 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
 		diag += fmt.Sprintf("error getting backup vs %s, err: %v\n", backupVSName, err)
 	}
 
+	events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		diag += fmt.Sprintf("error listing events, err: %v\n", err)
+	}
+
 	if pod != nil {
-		diag += kube.DiagnosePod(pod)
+		diag += kube.DiagnosePod(pod, events)
 
 		if pod.Spec.NodeName != "" {
 			if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {

@@ -374,7 +397,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
 	}
 
 	if pvc != nil {
-		diag += kube.DiagnosePVC(pvc)
+		diag += kube.DiagnosePVC(pvc, events)
 
 		if pvc.Spec.VolumeName != "" {
 			if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {

@@ -386,7 +409,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
 	}
 
 	if vs != nil {
-		diag += csi.DiagnoseVS(vs)
+		diag += csi.DiagnoseVS(vs, events)
 
 		if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil && *vs.Status.BoundVolumeSnapshotContentName != "" {
 			if vsc, err := e.csiSnapshotClient.VolumeSnapshotContents().Get(ctx, *vs.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}); err != nil {
@@ -564,6 +587,7 @@ func (e *csiSnapshotExposer) createBackupPod(
 	spcNoRelabeling bool,
 	nodeOS string,
 	priorityClassName string,
+	intoleratableNodes []string,
 ) (*corev1api.Pod, error) {
 	podName := ownerObject.Name
 

@@ -664,6 +688,18 @@ func (e *csiSnapshotExposer) createBackupPod(
 	}
 
 	var podAffinity *corev1api.Affinity
+	if len(intoleratableNodes) > 0 {
+		if affinity == nil {
+			affinity = &kube.LoadAffinity{}
+		}
+
+		affinity.NodeSelector.MatchExpressions = append(affinity.NodeSelector.MatchExpressions, metav1.LabelSelectorRequirement{
+			Key: "kubernetes.io/hostname",
+			Values: intoleratableNodes,
+			Operator: metav1.LabelSelectorOpNotIn,
+		})
+	}
+
 	if affinity != nil {
 		podAffinity = kube.ToSystemAffinity([]*kube.LoadAffinity{affinity})
 	}
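For illustration: when the backupPVC configuration for the source storage class carries the csi.vsphere.volume/fast-provisioning annotation, the nodes returned by kube.GetPVAttachedNodes are excluded from scheduling through a NotIn requirement on kubernetes.io/hostname. Rendered as pod YAML, the resulting affinity is roughly the following sketch (node names match the test fixtures further down; everything else is illustrative):

    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: kubernetes.io/hostname
              operator: NotIn
              values:
              - node-1
              - node-2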
@@ -153,6 +153,7 @@ func TestCreateBackupPodWithPriorityClass(t *testing.T) {
 			false, // spcNoRelabeling
 			kube.NodeOSLinux,
 			tc.expectedPriorityClass,
+			nil,
 		)
 
 		require.NoError(t, err, tc.description)

@@ -237,6 +238,7 @@ func TestCreateBackupPodWithMissingConfigMap(t *testing.T) {
 		false, // spcNoRelabeling
 		kube.NodeOSLinux,
 		"", // empty priority class since config map is missing
+		nil,
 	)
 
 	// Should succeed even when config map is missing
@@ -39,8 +39,11 @@ import (
 	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	velerotest "github.com/vmware-tanzu/velero/pkg/test"
 	velerotypes "github.com/vmware-tanzu/velero/pkg/types"
+	"github.com/vmware-tanzu/velero/pkg/util"
 	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
 	"github.com/vmware-tanzu/velero/pkg/util/kube"
+
+	storagev1api "k8s.io/api/storage/v1"
 )
 
 type reactor struct {

@@ -156,6 +159,31 @@ func TestExpose(t *testing.T) {
 		},
 	}
 
+	pvName := "pv-1"
+	volumeAttachement1 := &storagev1api.VolumeAttachment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "va1",
+		},
+		Spec: storagev1api.VolumeAttachmentSpec{
+			Source: storagev1api.VolumeAttachmentSource{
+				PersistentVolumeName: &pvName,
+			},
+			NodeName: "node-1",
+		},
+	}
+
+	volumeAttachement2 := &storagev1api.VolumeAttachment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "va2",
+		},
+		Spec: storagev1api.VolumeAttachmentSpec{
+			Source: storagev1api.VolumeAttachmentSource{
+				PersistentVolumeName: &pvName,
+			},
+			NodeName: "node-2",
+		},
+	}
+
 	tests := []struct {
 		name string
 		snapshotClientObj []runtime.Object

@@ -169,6 +197,7 @@ func TestExpose(t *testing.T) {
 		expectedReadOnlyPVC bool
 		expectedBackupPVCStorageClass string
 		expectedAffinity *corev1api.Affinity
+		expectedPVCAnnotation map[string]string
 	}{
 		{
 			name: "wait vs ready fail",
@@ -624,6 +653,117 @@ func TestExpose(t *testing.T) {
 			expectedBackupPVCStorageClass: "fake-sc-read-only",
 			expectedAffinity: nil,
 		},
+		{
+			name: "IntolerateSourceNode, get source node fail",
+			ownerBackup: backup,
+			exposeParam: CSISnapshotExposeParam{
+				SnapshotName: "fake-vs",
+				SourceNamespace: "fake-ns",
+				SourcePVName: pvName,
+				StorageClass: "fake-sc",
+				AccessMode: AccessModeFileSystem,
+				OperationTimeout: time.Millisecond,
+				ExposeTimeout: time.Millisecond,
+				BackupPVCConfig: map[string]velerotypes.BackupPVC{
+					"fake-sc": {
+						Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
+					},
+				},
+				Affinity: nil,
+			},
+			snapshotClientObj: []runtime.Object{
+				vsObject,
+				vscObj,
+			},
+			kubeClientObj: []runtime.Object{
+				daemonSet,
+			},
+			kubeReactors: []reactor{
+				{
+					verb: "list",
+					resource: "volumeattachments",
+					reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
+						return true, nil, errors.New("fake-create-error")
+					},
+				},
+			},
+			expectedAffinity: nil,
+			expectedPVCAnnotation: nil,
+		},
+		{
+			name: "IntolerateSourceNode, get empty source node",
+			ownerBackup: backup,
+			exposeParam: CSISnapshotExposeParam{
+				SnapshotName: "fake-vs",
+				SourceNamespace: "fake-ns",
+				SourcePVName: pvName,
+				StorageClass: "fake-sc",
+				AccessMode: AccessModeFileSystem,
+				OperationTimeout: time.Millisecond,
+				ExposeTimeout: time.Millisecond,
+				BackupPVCConfig: map[string]velerotypes.BackupPVC{
+					"fake-sc": {
+						Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
+					},
+				},
+				Affinity: nil,
+			},
+			snapshotClientObj: []runtime.Object{
+				vsObject,
+				vscObj,
+			},
+			kubeClientObj: []runtime.Object{
+				daemonSet,
+			},
+			expectedAffinity: nil,
+			expectedPVCAnnotation: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
+		},
+		{
+			name: "IntolerateSourceNode, get source nodes",
+			ownerBackup: backup,
+			exposeParam: CSISnapshotExposeParam{
+				SnapshotName: "fake-vs",
+				SourceNamespace: "fake-ns",
+				SourcePVName: pvName,
+				StorageClass: "fake-sc",
+				AccessMode: AccessModeFileSystem,
+				OperationTimeout: time.Millisecond,
+				ExposeTimeout: time.Millisecond,
+				BackupPVCConfig: map[string]velerotypes.BackupPVC{
+					"fake-sc": {
+						Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
+					},
+				},
+				Affinity: nil,
+			},
+			snapshotClientObj: []runtime.Object{
+				vsObject,
+				vscObj,
+			},
+			kubeClientObj: []runtime.Object{
+				daemonSet,
+				volumeAttachement1,
+				volumeAttachement2,
+			},
+			expectedAffinity: &corev1api.Affinity{
+				NodeAffinity: &corev1api.NodeAffinity{
+					RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
+						NodeSelectorTerms: []corev1api.NodeSelectorTerm{
+							{
+								MatchExpressions: []corev1api.NodeSelectorRequirement{
+									{
+										Key: "kubernetes.io/hostname",
+										Operator: corev1api.NodeSelectorOpNotIn,
+										Values: []string{"node-1", "node-2"},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			expectedPVCAnnotation: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
+		},
 	}
 
 	for _, test := range tests {
@@ -705,6 +845,12 @@ func TestExpose(t *testing.T) {
 				if test.expectedAffinity != nil {
 					assert.Equal(t, test.expectedAffinity, backupPod.Spec.Affinity)
 				}
+
+				if test.expectedPVCAnnotation != nil {
+					assert.Equal(t, test.expectedPVCAnnotation, backupPVC.Annotations)
+				} else {
+					assert.Empty(t, backupPVC.Annotations)
+				}
 			} else {
 				assert.EqualError(t, err, test.err)
 			}
@@ -1142,6 +1288,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-backup",
+			UID: "fake-pod-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: backup.APIVersion,

@@ -1167,6 +1314,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-backup",
+			UID: "fake-pod-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: backup.APIVersion,

@@ -1195,6 +1343,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-backup",
+			UID: "fake-pvc-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: backup.APIVersion,

@@ -1213,6 +1362,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-backup",
+			UID: "fake-pvc-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: backup.APIVersion,

@@ -1258,6 +1408,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-backup",
+			UID: "fake-vs-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: backup.APIVersion,

@@ -1273,6 +1424,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-backup",
+			UID: "fake-vs-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: backup.APIVersion,

@@ -1290,6 +1442,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-backup",
+			UID: "fake-vs-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: backup.APIVersion,
@@ -1487,6 +1640,74 @@ PVC velero/fake-backup, phase Pending, binding to fake-pv
 PV fake-pv, phase Pending, reason , message fake-pv-message
 VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
 VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
 end diagnose CSI exposer`,
 		},
+		{
+			name: "with events",
+			ownerBackup: backup,
+			kubeClientObj: []runtime.Object{
+				&backupPodWithNodeName,
+				&backupPVCWithVolumeName,
+				&backupPV,
+				&nodeAgentPod,
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+					Reason: "reason-1",
+					Message: "message-1",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Reason: "reason-2",
+					Message: "message-2",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+					Reason: "reason-3",
+					Message: "message-3",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
+					Reason: "reason-4",
+					Message: "message-4",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-5"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Reason: "reason-5",
+					Message: "message-5",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-6"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Reason: "reason-6",
+					Message: "message-6",
+				},
+			},
+			snapshotClientObj: []runtime.Object{
+				&backupVSWithVSC,
+				&backupVSC,
+			},
+			expected: `begin diagnose CSI exposer
+Pod velero/fake-backup, phase Pending, node name fake-node
+Pod condition Initialized, status True, reason , message fake-pod-message
+Pod event reason reason-2, message message-2
+Pod event reason reason-6, message message-6
+PVC velero/fake-backup, phase Pending, binding to fake-pv
+PVC event reason reason-3, message message-3
+PV fake-pv, phase Pending, reason , message fake-pv-message
+VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
+VS event reason reason-4, message message-4
+VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
+end diagnose CSI exposer`,
+		},
 	}
 
@@ -287,8 +287,13 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
 		diag += fmt.Sprintf("error getting restore pvc %s, err: %v\n", restorePVCName, err)
 	}
 
+	events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		diag += fmt.Sprintf("error listing events, err: %v\n", err)
+	}
+
 	if pod != nil {
-		diag += kube.DiagnosePod(pod)
+		diag += kube.DiagnosePod(pod, events)
 
 		if pod.Spec.NodeName != "" {
 			if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {

@@ -298,7 +303,7 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
 	}
 
 	if pvc != nil {
-		diag += kube.DiagnosePVC(pvc)
+		diag += kube.DiagnosePVC(pvc, events)
 
 		if pvc.Spec.VolumeName != "" {
 			if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
@@ -549,6 +549,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-restore",
+			UID: "fake-pod-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: restore.APIVersion,

@@ -574,6 +575,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-restore",
+			UID: "fake-pod-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: restore.APIVersion,

@@ -602,6 +604,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-restore",
+			UID: "fake-pvc-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: restore.APIVersion,

@@ -620,6 +623,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-restore",
+			UID: "fake-pvc-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: restore.APIVersion,
@@ -758,6 +762,60 @@ Pod velero/fake-restore, phase Pending, node name fake-node
 Pod condition Initialized, status True, reason , message fake-pod-message
 PVC velero/fake-restore, phase Pending, binding to fake-pv
 PV fake-pv, phase Pending, reason , message fake-pv-message
 end diagnose restore exposer`,
 		},
+		{
+			name: "with events",
+			ownerRestore: restore,
+			kubeClientObj: []runtime.Object{
+				&restorePodWithNodeName,
+				&restorePVCWithVolumeName,
+				&restorePV,
+				&nodeAgentPod,
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+					Reason: "reason-1",
+					Message: "message-1",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Reason: "reason-2",
+					Message: "message-2",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+					Reason: "reason-3",
+					Message: "message-3",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-4"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Reason: "reason-4",
+					Message: "message-4",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-5"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Reason: "reason-5",
+					Message: "message-5",
+				},
+			},
+			expected: `begin diagnose restore exposer
+Pod velero/fake-restore, phase Pending, node name fake-node
+Pod condition Initialized, status True, reason , message fake-pod-message
+Pod event reason reason-2, message message-2
+Pod event reason reason-5, message message-5
+PVC velero/fake-restore, phase Pending, binding to fake-pv
+PVC event reason reason-3, message message-3
+PV fake-pv, phase Pending, reason , message fake-pv-message
+end diagnose restore exposer`,
+		},
 	}
 
@@ -73,6 +73,9 @@ type PodVolumeExposeParam struct {
 
 	// PriorityClassName is the priority class name for the data mover pod
 	PriorityClassName string
+
+	// Privileged indicates whether to create the pod with a privileged container
+	Privileged bool
 }
 
 // PodVolumeExposer is the interfaces for a pod volume exposer

@@ -153,7 +156,7 @@ func (e *podVolumeExposer) Expose(ctx context.Context, ownerObject corev1api.Obj
 
 	curLog.WithField("path", path).Infof("Host path is retrieved for pod %s, volume %s", param.ClientPodName, param.ClientPodVolume)
 
-	hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName)
+	hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName, param.Privileged)
 	if err != nil {
 		return errors.Wrapf(err, "error to create hosting pod")
 	}
@@ -248,8 +251,13 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
 		diag += fmt.Sprintf("error getting hosting pod %s, err: %v\n", hostingPodName, err)
 	}
 
+	events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		diag += fmt.Sprintf("error listing events, err: %v\n", err)
+	}
+
 	if pod != nil {
-		diag += kube.DiagnosePod(pod)
+		diag += kube.DiagnosePod(pod, events)
 
 		if pod.Spec.NodeName != "" {
 			if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
@@ -269,7 +277,7 @@ func (e *podVolumeExposer) CleanUp(ctx context.Context, ownerObject corev1api.Ob
 }
 
 func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject corev1api.ObjectReference, exposeType string, hostPath string,
-	operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string) (*corev1api.Pod, error) {
+	operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string, privileged bool) (*corev1api.Pod, error) {
 	hostingPodName := ownerObject.Name
 
 	containerName := string(ownerObject.UID)

@@ -327,6 +335,7 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
 	args = append(args, podInfo.logLevelArgs...)
 
 	var securityCtx *corev1api.PodSecurityContext
+	var containerSecurityCtx *corev1api.SecurityContext
 	nodeSelector := map[string]string{}
 	podOS := corev1api.PodOS{}
 	if nodeOS == kube.NodeOSWindows {

@@ -359,6 +368,9 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
 		securityCtx = &corev1api.PodSecurityContext{
 			RunAsUser: &userID,
 		}
+		containerSecurityCtx = &corev1api.SecurityContext{
+			Privileged: &privileged,
+		}
 
 		nodeSelector[kube.NodeOSLabel] = kube.NodeOSLinux
 		podOS.Name = kube.NodeOSLinux

@@ -394,6 +406,7 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
 				Env: podInfo.env,
 				EnvFrom: podInfo.envFrom,
 				Resources: resources,
+				SecurityContext: containerSecurityCtx,
 			},
 		},
 		PriorityClassName: priorityClassName,
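For illustration: when Privileged is set on PodVolumeExposeParam, the Linux hosting (fs-backup) pod's container is created with a privileged security context. In pod YAML terms, the relevant fragment is roughly the following sketch (the container name is the owner object's UID per createHostingPod; the image and other fields are omitted):

    containers:
    - name: <owner-uid>
      securityContext:
        privileged: true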
@@ -190,6 +190,29 @@ func TestPodVolumeExpose(t *testing.T) {
 				return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
 			},
 		},
+		{
+			name: "succeed with privileged pod",
+			ownerBackup: backup,
+			exposeParam: PodVolumeExposeParam{
+				ClientNamespace: "fake-ns",
+				ClientPodName: "fake-client-pod",
+				ClientPodVolume: "fake-client-volume",
+				Privileged: true,
+			},
+			kubeClientObj: []runtime.Object{
+				podWithNode,
+				node,
+				daemonSet,
+			},
+			funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
+				return datapath.AccessPoint{
+					ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
+				}, nil
+			},
+			funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
+				return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
+			},
+		},
 	}
 
 	for _, test := range tests {
@@ -443,6 +466,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-backup",
+			UID: "fake-pod-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: backup.APIVersion,

@@ -468,6 +492,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name: "fake-backup",
+			UID: "fake-pod-uid",
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: backup.APIVersion,
@@ -564,6 +589,48 @@ end diagnose pod volume exposer`,
 		expected: `begin diagnose pod volume exposer
 Pod velero/fake-backup, phase Pending, node name fake-node
 Pod condition Initialized, status True, reason , message fake-pod-message
 end diagnose pod volume exposer`,
 		},
+		{
+			name: "with events",
+			ownerBackup: backup,
+			kubeClientObj: []runtime.Object{
+				&backupPodWithNodeName,
+				&nodeAgentPod,
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+					Reason: "reason-1",
+					Message: "message-1",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Reason: "reason-2",
+					Message: "message-2",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-3"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Reason: "reason-3",
+					Message: "message-3",
+				},
+				&corev1api.Event{
+					ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
+					Type: corev1api.EventTypeWarning,
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Reason: "reason-4",
+					Message: "message-4",
+				},
+			},
+			expected: `begin diagnose pod volume exposer
+Pod velero/fake-backup, phase Pending, node name fake-node
+Pod condition Initialized, status True, reason , message fake-pod-message
+Pod event reason reason-2, message message-2
+Pod event reason reason-4, message message-4
+end diagnose pod volume exposer`,
+		},
 	}
 
@@ -143,6 +143,10 @@ func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Int
 		return nil, errors.Errorf("data is not available in config map %s", configName)
 	}
 
+	if len(cm.Data) > 1 {
+		return nil, errors.Errorf("more than one keys are found in ConfigMap %s's data. only expect one", configName)
+	}
+
 	jsonString := ""
 	for _, v := range cm.Data {
 		jsonString = v
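With this check, GetConfigs rejects a node-agent ConfigMap whose Data carries more than one key, so the whole JSON configuration has to live under a single key. A sketch of the two shapes (names are illustrative; the valid JSON mirrors the test fixtures below):

    # accepted: one data key holding the full JSON config
    data:
      node-agent-config: |
        {"loadConcurrency": {"globalConfig": 5}}

    # rejected: "more than one keys are found in ConfigMap ..."
    data:
      config-a: "{}"
      config-b: "{}"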
@@ -249,6 +249,7 @@ func TestGetConfigs(t *testing.T) {
 	cmWithValidData := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"loadConcurrency\":{\"globalConfig\": 5}}").Result()
 	cmWithPriorityClass := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"priorityClassName\": \"high-priority\"}").Result()
 	cmWithPriorityClassAndOther := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"priorityClassName\": \"low-priority\", \"loadConcurrency\":{\"globalConfig\": 3}}").Result()
+	cmWithMultipleKeysInData := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key-1", "{}", "fake-key-2", "{}").Result()
 
 	tests := []struct {
 		name string

@@ -331,6 +332,14 @@ func TestGetConfigs(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "ConfigMap's Data has more than one key",
+			namespace: "fake-ns",
+			kubeClientObj: []runtime.Object{
+				cmWithMultipleKeysInData,
+			},
+			expectErr: "more than one keys are found in ConfigMap node-agent-config's data. only expect one",
+		},
 	}
 
 	for _, test := range tests {
@@ -84,4 +84,7 @@ type NodeAgentConfigs struct {
 
 	// PriorityClassName is the priority class name for data mover pods created by the node agent
 	PriorityClassName string `json:"priorityClassName,omitempty"`
+
+	// PrivilegedFsBackup determines whether to create fs-backup pods as privileged pods
+	PrivilegedFsBackup bool `json:"privilegedFsBackup,omitempty"`
 }
@@ -689,7 +689,7 @@ func WaitUntilVSCHandleIsReady(
 	return vsc, nil
 }
 
-func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string {
+func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot, events *corev1api.EventList) string {
 	vscName := ""
 	readyToUse := false
 	errMessage := ""

@@ -710,6 +710,14 @@ func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string {
 
 	diag := fmt.Sprintf("VS %s/%s, bind to %s, readyToUse %v, errMessage %s\n", vs.Namespace, vs.Name, vscName, readyToUse, errMessage)
 
+	if events != nil {
+		for _, e := range events.Items {
+			if e.InvolvedObject.UID == vs.UID && e.Type == corev1api.EventTypeWarning {
+				diag += fmt.Sprintf("VS event reason %s, message %s\n", e.Reason, e.Message)
+			}
+		}
+	}
+
 	return diag
 }
@@ -1699,6 +1699,7 @@ func TestDiagnoseVS(t *testing.T) {
 	testCases := []struct {
 		name string
 		vs *snapshotv1api.VolumeSnapshot
+		events *corev1api.EventList
 		expected string
 	}{
 		{

@@ -1781,11 +1782,81 @@ func TestDiagnoseVS(t *testing.T) {
 			},
 			expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage fake-message\n",
 		},
+		{
+			name: "VS with VSC and empty event",
+			vs: &snapshotv1api.VolumeSnapshot{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "fake-vs",
+					Namespace: "fake-ns",
+				},
+				Status: &snapshotv1api.VolumeSnapshotStatus{
+					BoundVolumeSnapshotContentName: &vscName,
+					ReadyToUse: &readyToUse,
+					Error: &snapshotv1api.VolumeSnapshotError{},
+				},
+			},
+			events: &corev1api.EventList{},
+			expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \n",
+		},
+		{
+			name: "VS with VSC and events",
+			vs: &snapshotv1api.VolumeSnapshot{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "fake-vs",
+					Namespace: "fake-ns",
+					UID: "fake-vs-uid",
+				},
+				Status: &snapshotv1api.VolumeSnapshotStatus{
+					BoundVolumeSnapshotContentName: &vscName,
+					ReadyToUse: &readyToUse,
+					Error: &snapshotv1api.VolumeSnapshotError{},
+				},
+			},
+			events: &corev1api.EventList{Items: []corev1api.Event{
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-1",
+					Message: "message-1",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-2",
+					Message: "message-2",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-3",
+					Message: "message-3",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
+					Type: corev1api.EventTypeNormal,
+					Reason: "reason-4",
+					Message: "message-4",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
+					Type: corev1api.EventTypeNormal,
+					Reason: "reason-5",
+					Message: "message-5",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-6",
+					Message: "message-6",
+				},
+			}},
+			expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \nVS event reason reason-3, message message-3\nVS event reason reason-6, message message-6\n",
+		},
 	}
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			diag := DiagnoseVS(tc.vs)
+			diag := DiagnoseVS(tc.vs, tc.events)
 			assert.Equal(t, tc.expected, diag)
 		})
 	}
@@ -268,13 +268,21 @@ func ToSystemAffinity(loadAffinities []*LoadAffinity) *corev1api.Affinity {
 	return nil
 }
 
-func DiagnosePod(pod *corev1api.Pod) string {
+func DiagnosePod(pod *corev1api.Pod, events *corev1api.EventList) string {
 	diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s\n", pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName)
 
 	for _, condition := range pod.Status.Conditions {
 		diag += fmt.Sprintf("Pod condition %s, status %s, reason %s, message %s\n", condition.Type, condition.Status, condition.Reason, condition.Message)
 	}
 
+	if events != nil {
+		for _, e := range events.Items {
+			if e.InvolvedObject.UID == pod.UID && e.Type == corev1api.EventTypeWarning {
+				diag += fmt.Sprintf("Pod event reason %s, message %s\n", e.Reason, e.Message)
+			}
+		}
+	}
+
 	return diag
 }
@@ -896,10 +896,11 @@ func TestDiagnosePod(t *testing.T) {
 	testCases := []struct {
 		name string
 		pod *corev1api.Pod
+		events *corev1api.EventList
 		expected string
 	}{
 		{
-			name: "pod with all info",
+			name: "pod with all info but event",
 			pod: &corev1api.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fake-pod",

@@ -928,11 +929,111 @@ func TestDiagnosePod(t *testing.T) {
 			},
 			expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
 		},
+		{
+			name: "pod with all info and empty event list",
+			pod: &corev1api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "fake-pod",
+					Namespace: "fake-ns",
+				},
+				Spec: corev1api.PodSpec{
+					NodeName: "fake-node",
+				},
+				Status: corev1api.PodStatus{
+					Phase: corev1api.PodPending,
+					Conditions: []corev1api.PodCondition{
+						{
+							Type: corev1api.PodInitialized,
+							Status: corev1api.ConditionTrue,
+							Reason: "fake-reason-1",
+							Message: "fake-message-1",
+						},
+						{
+							Type: corev1api.PodScheduled,
+							Status: corev1api.ConditionFalse,
+							Reason: "fake-reason-2",
+							Message: "fake-message-2",
+						},
+					},
+				},
+			},
+			events: &corev1api.EventList{},
+			expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
+		},
+		{
+			name: "pod with all info and events",
+			pod: &corev1api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "fake-pod",
+					Namespace: "fake-ns",
+					UID: "fake-pod-uid",
+				},
+				Spec: corev1api.PodSpec{
+					NodeName: "fake-node",
+				},
+				Status: corev1api.PodStatus{
+					Phase: corev1api.PodPending,
+					Conditions: []corev1api.PodCondition{
+						{
+							Type: corev1api.PodInitialized,
+							Status: corev1api.ConditionTrue,
+							Reason: "fake-reason-1",
+							Message: "fake-message-1",
+						},
+						{
+							Type: corev1api.PodScheduled,
+							Status: corev1api.ConditionFalse,
+							Reason: "fake-reason-2",
+							Message: "fake-message-2",
+						},
+					},
+				},
+			},
+			events: &corev1api.EventList{Items: []corev1api.Event{
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-1",
+					Message: "message-1",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-2",
+					Message: "message-2",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-3",
+					Message: "message-3",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Type: corev1api.EventTypeNormal,
+					Reason: "reason-4",
+					Message: "message-4",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Type: corev1api.EventTypeNormal,
+					Reason: "reason-5",
+					Message: "message-5",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-6",
+					Message: "message-6",
+				},
+			}},
+			expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\nPod event reason reason-3, message message-3\nPod event reason reason-6, message message-6\n",
+		},
 	}
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			diag := DiagnosePod(tc.pod)
+			diag := DiagnosePod(tc.pod, tc.events)
 			assert.Equal(t, tc.expected, diag)
 		})
 	}
@@ -463,8 +463,18 @@ func GetPVCForPodVolume(vol *corev1api.Volume, pod *corev1api.Pod, crClient crcl
 	return pvc, nil
 }
 
-func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim) string {
-	return fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)
+func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim, events *corev1api.EventList) string {
+	diag := fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)
+
+	if events != nil {
+		for _, e := range events.Items {
+			if e.InvolvedObject.UID == pvc.UID && e.Type == corev1api.EventTypeWarning {
+				diag += fmt.Sprintf("PVC event reason %s, message %s\n", e.Reason, e.Message)
+			}
+		}
+	}
+
+	return diag
 }
 
 func DiagnosePV(pv *corev1api.PersistentVolume) string {
@@ -554,3 +564,19 @@ func GetPVAttachedNode(ctx context.Context, pv string, storageClient storagev1.S
 
 	return "", nil
 }
+
+func GetPVAttachedNodes(ctx context.Context, pv string, storageClient storagev1.StorageV1Interface) ([]string, error) {
+	vaList, err := storageClient.VolumeAttachments().List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return nil, errors.Wrapf(err, "error listing volumeattachment")
+	}
+
+	nodes := []string{}
+	for _, va := range vaList.Items {
+		if va.Spec.Source.PersistentVolumeName != nil && *va.Spec.Source.PersistentVolumeName == pv {
+			nodes = append(nodes, va.Spec.NodeName)
+		}
+	}
+
+	return nodes, nil
+}
@@ -1593,10 +1593,11 @@ func TestDiagnosePVC(t *testing.T) {
 	testCases := []struct {
 		name string
 		pvc *corev1api.PersistentVolumeClaim
+		events *corev1api.EventList
 		expected string
 	}{
 		{
-			name: "pvc with all info",
+			name: "pvc with all info but events",
 			pvc: &corev1api.PersistentVolumeClaim{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fake-pvc",

@@ -1611,11 +1612,83 @@ func TestDiagnosePVC(t *testing.T) {
 			},
 			expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
 		},
+		{
+			name: "pvc with all info and empty events",
+			pvc: &corev1api.PersistentVolumeClaim{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "fake-pvc",
+					Namespace: "fake-ns",
+				},
+				Spec: corev1api.PersistentVolumeClaimSpec{
+					VolumeName: "fake-pv",
+				},
+				Status: corev1api.PersistentVolumeClaimStatus{
+					Phase: corev1api.ClaimPending,
+				},
+			},
+			events: &corev1api.EventList{},
+			expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
+		},
+		{
+			name: "pvc with all info and events",
+			pvc: &corev1api.PersistentVolumeClaim{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "fake-pvc",
+					Namespace: "fake-ns",
+					UID: "fake-pvc-uid",
+				},
+				Spec: corev1api.PersistentVolumeClaimSpec{
+					VolumeName: "fake-pv",
+				},
+				Status: corev1api.PersistentVolumeClaimStatus{
+					Phase: corev1api.ClaimPending,
+				},
+			},
+			events: &corev1api.EventList{Items: []corev1api.Event{
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-1",
+					Message: "message-1",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-2",
+					Message: "message-2",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-3",
+					Message: "message-3",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+					Type: corev1api.EventTypeNormal,
+					Reason: "reason-4",
+					Message: "message-4",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+					Type: corev1api.EventTypeNormal,
+					Reason: "reason-5",
+					Message: "message-5",
+				},
+				{
+					InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
+					Type: corev1api.EventTypeWarning,
+					Reason: "reason-6",
+					Message: "message-6",
+				},
+			}},
+			expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\nPVC event reason reason-3, message message-3\nPVC event reason reason-6, message message-6\n",
+		},
 	}
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			diag := DiagnosePVC(tc.pvc)
+			diag := DiagnosePVC(tc.pvc, tc.events)
 			assert.Equal(t, tc.expected, diag)
 		})
 	}
@@ -371,15 +371,16 @@ func VerifyJSONConfigs(ctx context.Context, namespace string, crClient client.Cl
 		return errors.Errorf("data is not available in ConfigMap %s", configName)
 	}
 
+	// Verify all the keys in ConfigMap's data.
 	jsonString := ""
 	for _, v := range cm.Data {
 		jsonString = v
-	}
 
-	configs := configType
-	err = json.Unmarshal([]byte(jsonString), configs)
-	if err != nil {
-		return errors.Wrapf(err, "error to unmarshall data from ConfigMap %s", configName)
+		configs := configType
+		err = json.Unmarshal([]byte(jsonString), configs)
+		if err != nil {
+			return errors.Wrapf(err, "error to unmarshall data from ConfigMap %s", configName)
+		}
 	}
 
 	return nil
@@ -28,3 +28,7 @@ var ThirdPartyTolerations = []string{
 	"kubernetes.azure.com/scalesetpriority",
 	"CriticalAddonsOnly",
 }
+
+const (
+	VSphereCNSFastCloneAnno = "csi.vsphere.volume/fast-provisioning"
+)
@@ -23,6 +23,8 @@ By default, `velero install` does not install Velero's [File System Backup][3].
 
 If you've already run `velero install` without the `--use-node-agent` flag, you can run the same command again, including the `--use-node-agent` flag, to add the file system backup to your existing install.
 
+Note that for some use cases (including installation on OpenShift clusters) the fs-backup pods must run in a Privileged security context. This is configured through the node-agent configmap (see below) by setting `privilegedFsBackup` to `true` in the configmap.
+
 ## CSI Snapshot Data Movement
 
 Velero node-agent is required by [CSI Snapshot Data Movement][12] when Velero built-in data mover is used. By default, `velero install` does not install Velero's node-agent. To enable it, specify the `--use-node-agent` flag.
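For reference, a minimal node-agent ConfigMap that turns this option on could look like the following sketch. The privilegedFsBackup key comes from the NodeAgentConfigs type added above; the ConfigMap name, namespace, and data key are illustrative, and per the GetConfigs change in this PR the ConfigMap must carry exactly one data key holding the whole JSON configuration:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: node-agent-config
      namespace: velero
    data:
      node-agent-config: |
        {
          "privilegedFsBackup": true
        }

The ConfigMap is then referenced through the --node-agent-configmap flag, which is validated by the Options.Validate change above.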