Add support for setting labels and annotations on maintenance jobs and data mover pods.

Add waits to file_system_test's async test cases so goroutines finish before mock expectations are checked.
Add related documentation.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
This commit is contained in:
Xun Jiang
2025-12-11 15:51:21 +08:00
parent a1026cb531
commit e39374f335
18 changed files with 1064 additions and 81 deletions

View File

@@ -0,0 +1 @@
Add support for setting labels and annotations on maintenance jobs and data mover pods.

View File

@@ -354,16 +354,62 @@ func (s *nodeAgentServer) run() {
s.logger.Infof("Using customized cachePVC config %v", cachePVCConfig)
}
var podLabels map[string]string
if s.dataPathConfigs != nil && len(s.dataPathConfigs.PodLabels) > 0 {
podLabels = s.dataPathConfigs.PodLabels
s.logger.Infof("Using customized pod labels %+v", podLabels)
}
var podAnnotations map[string]string
if s.dataPathConfigs != nil && len(s.dataPathConfigs.PodAnnotations) > 0 {
podAnnotations = s.dataPathConfigs.PodAnnotations
s.logger.Infof("Using customized pod annotations %+v", podAnnotations)
}
if s.backupRepoConfigs != nil {
s.logger.Infof("Using backup repo config %v", s.backupRepoConfigs)
}
pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass, privilegedFsBackup)
pvbReconciler := controller.NewPodVolumeBackupReconciler(
s.mgr.GetClient(),
s.mgr,
s.kubeClient,
s.dataPathMgr,
s.vgdpCounter,
s.nodeName,
s.config.dataMoverPrepareTimeout,
s.config.resourceTimeout,
podResources,
s.metrics,
s.logger,
dataMovePriorityClass,
privilegedFsBackup,
podLabels,
podAnnotations,
)
if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
}
pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, s.backupRepoConfigs, cachePVCConfig, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup, s.repoConfigMgr)
pvrReconciler := controller.NewPodVolumeRestoreReconciler(
s.mgr.GetClient(),
s.mgr,
s.kubeClient,
s.dataPathMgr,
s.vgdpCounter,
s.nodeName,
s.config.dataMoverPrepareTimeout,
s.config.resourceTimeout,
s.backupRepoConfigs,
cachePVCConfig,
podResources,
s.logger,
dataMovePriorityClass,
privilegedFsBackup,
s.repoConfigMgr,
podLabels,
podAnnotations,
)
if err := pvrReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
}
@@ -388,6 +434,8 @@ func (s *nodeAgentServer) run() {
s.logger,
s.metrics,
dataMovePriorityClass,
podLabels,
podAnnotations,
)
if err := dataUploadReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the data upload controller")
@@ -416,6 +464,8 @@ func (s *nodeAgentServer) run() {
s.metrics,
dataMovePriorityClass,
s.repoConfigMgr,
podLabels,
podAnnotations,
)
if err := dataDownloadReconciler.SetupWithManager(s.mgr); err != nil {

View File

@@ -77,6 +77,8 @@ type DataDownloadReconciler struct {
cancelledDataDownload map[string]time.Time
dataMovePriorityClass string
repoConfigMgr repository.ConfigManager
podLabels map[string]string
podAnnotations map[string]string
}
func NewDataDownloadReconciler(
@@ -96,6 +98,8 @@ func NewDataDownloadReconciler(
metrics *metrics.ServerMetrics,
dataMovePriorityClass string,
repoConfigMgr repository.ConfigManager,
podLabels map[string]string,
podAnnotations map[string]string,
) *DataDownloadReconciler {
return &DataDownloadReconciler{
client: client,
@@ -117,6 +121,8 @@ func NewDataDownloadReconciler(
cancelledDataDownload: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
repoConfigMgr: repoConfigMgr,
podLabels: podLabels,
podAnnotations: podAnnotations,
}
}
@@ -860,25 +866,37 @@ func (r *DataDownloadReconciler) setupExposeParam(dd *velerov2alpha1api.DataDown
}
hostingPodLabels := map[string]string{velerov1api.DataDownloadLabel: dd.Name}
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, dd.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
}
} else {
if len(r.podLabels) > 0 {
for k, v := range r.podLabels {
hostingPodLabels[k] = v
}
} else {
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, dd.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
}
} else {
hostingPodLabels[k] = v
}
}
}
hostingPodAnnotation := map[string]string{}
for _, k := range util.ThirdPartyAnnotations {
if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, dd.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentAnnotationNotFound {
log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
}
} else {
if len(r.podAnnotations) > 0 {
for k, v := range r.podAnnotations {
hostingPodAnnotation[k] = v
}
} else {
for _, k := range util.ThirdPartyAnnotations {
if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, dd.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentAnnotationNotFound {
log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
}
} else {
hostingPodAnnotation[k] = v
}
}
}
hostingPodTolerations := []corev1api.Toleration{}

View File

@@ -129,7 +129,26 @@ func initDataDownloadReconcilerWithError(t *testing.T, objects []any, needError
dataPathMgr := datapath.NewManager(1)
return NewDataDownloadReconciler(&fakeClient, nil, fakeKubeClient, dataPathMgr, nil, nil, velerotypes.RestorePVC{}, nil, nil, corev1api.ResourceRequirements{}, "test-node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics(), "", nil), nil
return NewDataDownloadReconciler(
&fakeClient,
nil,
fakeKubeClient,
dataPathMgr,
nil,
nil,
velerotypes.RestorePVC{},
nil,
nil,
corev1api.ResourceRequirements{},
"test-node",
time.Minute*5,
velerotest.NewLogger(),
metrics.NewServerMetrics(),
"",
nil,
nil, // podLabels
nil, // podAnnotations
), nil
}
func TestDataDownloadReconcile(t *testing.T) {
@@ -1292,3 +1311,127 @@ func TestResumeCancellableRestore(t *testing.T) {
})
}
}
// TestDataDownloadSetupExposeParam verifies that setupExposeParam propagates
// the reconciler's configured pod labels/annotations into the hosting-pod
// expose parameters: configured values win; otherwise only the built-in
// DataDownload label is present and annotations stay empty.
func TestDataDownloadSetupExposeParam(t *testing.T) {
	// Shared fixtures for every case: a Linux node and a DataDownload with
	// an operation timeout and a 5Gi snapshot size.
	node := builder.ForNode("worker-1").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()
	baseDataDownload := dataDownloadBuilder().Result()
	baseDataDownload.Namespace = velerov1api.DefaultNamespace
	baseDataDownload.Spec.OperationTimeout = metav1.Duration{Duration: time.Minute * 10}
	baseDataDownload.Spec.SnapshotSize = 5368709120 // 5Gi

	testCases := []struct {
		name              string
		customLabels      map[string]string
		customAnnotations map[string]string
		wantLabels        map[string]string
		wantAnnotations   map[string]string
	}{
		{
			name:              "label has customize values",
			customLabels:      map[string]string{"custom-label": "label-value"},
			customAnnotations: nil,
			wantLabels: map[string]string{
				velerov1api.DataDownloadLabel: baseDataDownload.Name,
				"custom-label":                "label-value",
			},
			wantAnnotations: map[string]string{},
		},
		{
			name:              "label has no customize values",
			customLabels:      nil,
			customAnnotations: nil,
			wantLabels:        map[string]string{velerov1api.DataDownloadLabel: baseDataDownload.Name},
			wantAnnotations:   map[string]string{},
		},
		{
			name:              "annotation has customize values",
			customLabels:      nil,
			customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
			wantLabels:        map[string]string{velerov1api.DataDownloadLabel: baseDataDownload.Name},
			wantAnnotations:   map[string]string{"custom-annotation": "annotation-value"},
		},
		{
			name:              "both label and annotation have customize values",
			customLabels:      map[string]string{"custom-label": "label-value"},
			customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
			wantLabels: map[string]string{
				velerov1api.DataDownloadLabel: baseDataDownload.Name,
				"custom-label":                "label-value",
			},
			wantAnnotations: map[string]string{"custom-annotation": "annotation-value"},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Fresh fake clients per case so state cannot leak between runs.
			fakeClient := FakeClient{
				Client: velerotest.NewFakeControllerRuntimeClient(t, node, baseDataDownload.DeepCopy()),
			}
			fakeKubeClient := clientgofake.NewSimpleClientset(node)

			r := NewDataDownloadReconciler(
				&fakeClient,
				nil,
				fakeKubeClient,
				datapath.NewManager(1),
				nil,
				nil,
				velerotypes.RestorePVC{},
				nil,
				nil,
				corev1api.ResourceRequirements{},
				"test-node",
				time.Minute*3, // preparingTimeout
				velerotest.NewLogger(),
				metrics.NewServerMetrics(),
				"download-priority",
				nil, // repoConfigMgr (unused when cacheVolumeConfigs is nil)
				tc.customLabels,
				tc.customAnnotations,
			)

			got, err := r.setupExposeParam(baseDataDownload)
			require.NoError(t, err)

			// Core fields must mirror the DataDownload spec.
			assert.Equal(t, baseDataDownload.Spec.TargetVolume.PVC, got.TargetPVCName)
			assert.Equal(t, baseDataDownload.Spec.TargetVolume.Namespace, got.TargetNamespace)
			// Labels and annotations must match the expectation for this case.
			assert.Equal(t, tc.wantLabels, got.HostingPodLabels)
			assert.Equal(t, tc.wantAnnotations, got.HostingPodAnnotations)
		})
	}
}

View File

@@ -83,6 +83,8 @@ type DataUploadReconciler struct {
metrics *metrics.ServerMetrics
cancelledDataUpload map[string]time.Time
dataMovePriorityClass string
podLabels map[string]string
podAnnotations map[string]string
}
func NewDataUploadReconciler(
@@ -101,6 +103,8 @@ func NewDataUploadReconciler(
log logrus.FieldLogger,
metrics *metrics.ServerMetrics,
dataMovePriorityClass string,
podLabels map[string]string,
podAnnotations map[string]string,
) *DataUploadReconciler {
return &DataUploadReconciler{
client: client,
@@ -126,6 +130,8 @@ func NewDataUploadReconciler(
metrics: metrics,
cancelledDataUpload: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
podLabels: podLabels,
podAnnotations: podAnnotations,
}
}
@@ -936,25 +942,37 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
}
hostingPodLabels := map[string]string{velerov1api.DataUploadLabel: du.Name}
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
}
} else {
if len(r.podLabels) > 0 {
for k, v := range r.podLabels {
hostingPodLabels[k] = v
}
} else {
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
}
} else {
hostingPodLabels[k] = v
}
}
}
hostingPodAnnotation := map[string]string{}
for _, k := range util.ThirdPartyAnnotations {
if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentAnnotationNotFound {
log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
}
} else {
if len(r.podAnnotations) > 0 {
for k, v := range r.podAnnotations {
hostingPodAnnotation[k] = v
}
} else {
for _, k := range util.ThirdPartyAnnotations {
if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentAnnotationNotFound {
log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
}
} else {
hostingPodAnnotation[k] = v
}
}
}
hostingPodTolerations := []corev1api.Toleration{}

View File

@@ -248,7 +248,9 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
time.Minute*5,
velerotest.NewLogger(),
metrics.NewServerMetrics(),
"", // dataMovePriorityClass
"", // dataMovePriorityClass
nil, // podLabels
nil, // podAnnotations
), nil
}
@@ -1384,3 +1386,149 @@ func TestResumeCancellableBackup(t *testing.T) {
})
}
}
// TestDataUploadSetupExposeParam verifies that setupExposeParam builds the
// CSI snapshot expose parameters with the reconciler's configured pod
// labels/annotations; with nothing configured, only the built-in DataUpload
// label is expected and annotations stay empty.
func TestDataUploadSetupExposeParam(t *testing.T) {
	// Common objects for all cases
	fileMode := corev1api.PersistentVolumeFilesystem
	node := builder.ForNode("worker-1").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()
	// Source PVC/PV pair referenced by the DataUpload: filesystem mode, 10Gi.
	pvc := &corev1api.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "app-ns",
			Name:      "test-pvc",
		},
		Spec: corev1api.PersistentVolumeClaimSpec{
			VolumeName: "test-pv",
			VolumeMode: &fileMode,
			Resources: corev1api.VolumeResourceRequirements{
				Requests: corev1api.ResourceList{
					corev1api.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
		},
	}
	pv := &corev1api.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-pv",
		},
	}
	baseDataUpload := dataUploadBuilder().Result()
	baseDataUpload.Spec.SourceNamespace = "app-ns"
	baseDataUpload.Spec.SourcePVC = "test-pvc"
	baseDataUpload.Namespace = velerov1api.DefaultNamespace
	baseDataUpload.Spec.OperationTimeout = metav1.Duration{Duration: time.Minute * 10}
	// args holds the reconciler-level label/annotation settings under test.
	type args struct {
		customLabels      map[string]string
		customAnnotations map[string]string
	}
	// want holds the expected hosting-pod labels/annotations.
	type want struct {
		labels      map[string]string
		annotations map[string]string
	}
	tests := []struct {
		name string
		args args
		want want
	}{
		{
			name: "label has customize values",
			args: args{
				customLabels:      map[string]string{"custom-label": "label-value"},
				customAnnotations: nil,
			},
			want: want{
				labels: map[string]string{
					velerov1api.DataUploadLabel: baseDataUpload.Name,
					"custom-label":              "label-value",
				},
				annotations: map[string]string{},
			},
		},
		{
			name: "label has no customize values",
			args: args{
				customLabels:      nil,
				customAnnotations: nil,
			},
			want: want{
				labels:      map[string]string{velerov1api.DataUploadLabel: baseDataUpload.Name},
				annotations: map[string]string{},
			},
		},
		{
			name: "annotation has customize values",
			args: args{
				customLabels:      nil,
				customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
			},
			want: want{
				labels:      map[string]string{velerov1api.DataUploadLabel: baseDataUpload.Name},
				annotations: map[string]string{"custom-annotation": "annotation-value"},
			},
		},
		{
			name: "both label and annotation have customize values",
			args: args{
				customLabels:      map[string]string{"custom-label": "label-value"},
				customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
			},
			want: want{
				labels: map[string]string{
					velerov1api.DataUploadLabel: baseDataUpload.Name,
					"custom-label":              "label-value",
				},
				annotations: map[string]string{"custom-annotation": "annotation-value"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Fake clients per case
			fakeCRClient := velerotest.NewFakeControllerRuntimeClient(t, pvc, pv, node, baseDataUpload.DeepCopy())
			fakeKubeClient := clientgofake.NewSimpleClientset(node)
			// Reconciler config per case
			preparingTimeout := time.Minute * 3
			podRes := corev1api.ResourceRequirements{}
			r := NewDataUploadReconciler(
				fakeCRClient,
				nil,
				fakeKubeClient,
				nil, // snapshotClient (unused in setupExposeParam)
				datapath.NewManager(1),
				nil, // NOTE(review): labeled dataPathMgr, but datapath.NewManager(1) is already passed on the previous line — confirm which parameter this nil fills against NewDataUploadReconciler's signature
				nil, // exposer (unused in setupExposeParam)
				map[string]velerotypes.BackupPVC{},
				podRes,
				testclocks.NewFakeClock(time.Now()),
				"test-node",
				preparingTimeout,
				velerotest.NewLogger(),
				metrics.NewServerMetrics(),
				"upload-priority",
				tt.args.customLabels,
				tt.args.customAnnotations,
			)
			// Act
			got, err := r.setupExposeParam(baseDataUpload)
			// Assert no error
			require.NoError(t, err)
			require.NotNil(t, got)
			// Type assertion to CSISnapshotExposeParam
			csiParam, ok := got.(*exposer.CSISnapshotExposeParam)
			require.True(t, ok, "expected CSISnapshotExposeParam type")
			// Labels and Annotations
			assert.Equal(t, tt.want.labels, csiParam.HostingPodLabels)
			assert.Equal(t, tt.want.annotations, csiParam.HostingPodAnnotations)
		})
	}
}

View File

@@ -58,9 +58,23 @@ const (
)
// NewPodVolumeBackupReconciler creates the PodVolumeBackupReconciler instance
func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements,
metrics *metrics.ServerMetrics, logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeBackupReconciler {
func NewPodVolumeBackupReconciler(
client client.Client,
mgr manager.Manager,
kubeClient kubernetes.Interface,
dataPathMgr *datapath.Manager,
counter *exposer.VgdpCounter,
nodeName string,
preparingTimeout time.Duration,
resourceTimeout time.Duration,
podResources corev1api.ResourceRequirements,
metrics *metrics.ServerMetrics,
logger logrus.FieldLogger,
dataMovePriorityClass string,
privileged bool,
podLabels map[string]string,
podAnnotations map[string]string,
) *PodVolumeBackupReconciler {
return &PodVolumeBackupReconciler{
client: client,
mgr: mgr,
@@ -78,6 +92,8 @@ func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kub
cancelledPVB: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
privileged: privileged,
podLabels: podLabels,
podAnnotations: podAnnotations,
}
}
@@ -99,6 +115,8 @@ type PodVolumeBackupReconciler struct {
cancelledPVB map[string]time.Time
dataMovePriorityClass string
privileged bool
podLabels map[string]string
podAnnotations map[string]string
}
// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete
@@ -796,25 +814,37 @@ func (r *PodVolumeBackupReconciler) setupExposeParam(pvb *velerov1api.PodVolumeB
}
hostingPodLabels := map[string]string{velerov1api.PVBLabel: pvb.Name}
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, pvb.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
}
} else {
if len(r.podLabels) > 0 {
for k, v := range r.podLabels {
hostingPodLabels[k] = v
}
} else {
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, pvb.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
}
} else {
hostingPodLabels[k] = v
}
}
}
hostingPodAnnotation := map[string]string{}
for _, k := range util.ThirdPartyAnnotations {
if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, pvb.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentAnnotationNotFound {
log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
}
} else {
if len(r.podAnnotations) > 0 {
for k, v := range r.podAnnotations {
hostingPodAnnotation[k] = v
}
} else {
for _, k := range util.ThirdPartyAnnotations {
if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, pvb.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentAnnotationNotFound {
log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
}
} else {
hostingPodAnnotation[k] = v
}
}
}
hostingPodTolerations := []corev1api.Toleration{}

View File

@@ -47,13 +47,12 @@ import (
velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
"github.com/vmware-tanzu/velero/pkg/builder"
"github.com/vmware-tanzu/velero/pkg/datapath"
datapathmocks "github.com/vmware-tanzu/velero/pkg/datapath/mocks"
"github.com/vmware-tanzu/velero/pkg/exposer"
"github.com/vmware-tanzu/velero/pkg/metrics"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util/kube"
datapathmocks "github.com/vmware-tanzu/velero/pkg/datapath/mocks"
)
const pvbName = "pvb-1"
@@ -153,6 +152,8 @@ func initPVBReconcilerWithError(needError ...error) (*PodVolumeBackupReconciler,
velerotest.NewLogger(),
"", // dataMovePriorityClass
false, // privileged
nil, // podLabels
nil, // podAnnotations
), nil
}
@@ -1187,3 +1188,123 @@ func TestResumeCancellablePodVolumeBackup(t *testing.T) {
})
}
}
// TestPodVolumeBackupSetupExposeParam verifies that setupExposeParam applies
// the reconciler's configured pod labels/annotations to the hosting-pod
// expose parameters; with nothing configured, only the built-in PVB label is
// expected and annotations stay empty.
func TestPodVolumeBackupSetupExposeParam(t *testing.T) {
	// common objects for all cases
	node := builder.ForNode("worker-1").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()
	basePVB := pvbBuilder().Result()
	basePVB.Spec.Node = "worker-1"
	basePVB.Spec.Pod.Namespace = "app-ns"
	basePVB.Spec.Pod.Name = "app-pod"
	basePVB.Spec.Volume = "data-vol"
	// args holds the reconciler-level label/annotation settings under test.
	type args struct {
		customLabels      map[string]string
		customAnnotations map[string]string
	}
	// want holds the expected hosting-pod labels/annotations.
	type want struct {
		labels      map[string]string
		annotations map[string]string
	}
	tests := []struct {
		name string
		args args
		want want
	}{
		{
			name: "label has customize values",
			args: args{
				customLabels:      map[string]string{"custom-label": "label-value"},
				customAnnotations: nil,
			},
			want: want{
				labels: map[string]string{
					velerov1api.PVBLabel: basePVB.Name,
					"custom-label":       "label-value",
				},
				annotations: map[string]string{},
			},
		},
		{
			name: "label has no customize values",
			args: args{
				customLabels:      nil,
				customAnnotations: nil,
			},
			want: want{
				labels:      map[string]string{velerov1api.PVBLabel: basePVB.Name},
				annotations: map[string]string{},
			},
		},
		{
			name: "annotation has customize values",
			args: args{
				customLabels:      nil,
				customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
			},
			want: want{
				labels:      map[string]string{velerov1api.PVBLabel: basePVB.Name},
				annotations: map[string]string{"custom-annotation": "annotation-value"},
			},
		},
		// Fixed case: previously a redundant labels-only duplicate of the
		// first case (misleadingly named "annotation has no customize
		// values"); exercise the both-set path instead, consistent with the
		// DataUpload/DataDownload setupExposeParam tests.
		{
			name: "both label and annotation have customize values",
			args: args{
				customLabels:      map[string]string{"custom-label": "label-value"},
				customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
			},
			want: want{
				labels: map[string]string{
					velerov1api.PVBLabel: basePVB.Name,
					"custom-label":       "label-value",
				},
				annotations: map[string]string{"custom-annotation": "annotation-value"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Fake clients per case
			fakeCRClient := velerotest.NewFakeControllerRuntimeClient(t, node, basePVB.DeepCopy())
			fakeKubeClient := clientgofake.NewSimpleClientset(node)
			// Reconciler config per case
			preparingTimeout := time.Minute * 3
			resourceTimeout := time.Minute * 10
			podRes := corev1api.ResourceRequirements{}
			r := NewPodVolumeBackupReconciler(
				fakeCRClient,
				nil,
				fakeKubeClient,
				datapath.NewManager(1),
				nil,
				"test-node",
				preparingTimeout,
				resourceTimeout,
				podRes,
				metrics.NewServerMetrics(),
				velerotest.NewLogger(),
				"backup-priority",
				true, // privileged
				tt.args.customLabels,
				tt.args.customAnnotations,
			)
			// Act
			got := r.setupExposeParam(basePVB)
			// Core fields
			assert.Equal(t, exposer.PodVolumeExposeTypeBackup, got.Type)
			assert.Equal(t, basePVB.Spec.Pod.Namespace, got.ClientNamespace)
			assert.Equal(t, basePVB.Spec.Pod.Name, got.ClientPodName)
			assert.Equal(t, basePVB.Spec.Volume, got.ClientPodVolume)
			// Labels/Annotations
			assert.Equal(t, tt.want.labels, got.HostingPodLabels)
			assert.Equal(t, tt.want.annotations, got.HostingPodAnnotations)
		})
	}
}

View File

@@ -56,10 +56,25 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, backupRepoConfigs map[string]string,
cacheVolumeConfigs *velerotypes.CachePVC, podResources corev1api.ResourceRequirements, logger logrus.FieldLogger, dataMovePriorityClass string,
privileged bool, repoConfigMgr repository.ConfigManager) *PodVolumeRestoreReconciler {
func NewPodVolumeRestoreReconciler(
client client.Client,
mgr manager.Manager,
kubeClient kubernetes.Interface,
dataPathMgr *datapath.Manager,
counter *exposer.VgdpCounter,
nodeName string,
preparingTimeout time.Duration,
resourceTimeout time.Duration,
backupRepoConfigs map[string]string,
cacheVolumeConfigs *velerotypes.CachePVC,
podResources corev1api.ResourceRequirements,
logger logrus.FieldLogger,
dataMovePriorityClass string,
privileged bool,
repoConfigMgr repository.ConfigManager,
podLabels map[string]string,
podAnnotations map[string]string,
) *PodVolumeRestoreReconciler {
return &PodVolumeRestoreReconciler{
client: client,
mgr: mgr,
@@ -79,6 +94,8 @@ func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, ku
dataMovePriorityClass: dataMovePriorityClass,
privileged: privileged,
repoConfigMgr: repoConfigMgr,
podLabels: podLabels,
podAnnotations: podAnnotations,
}
}
@@ -101,6 +118,8 @@ type PodVolumeRestoreReconciler struct {
dataMovePriorityClass string
privileged bool
repoConfigMgr repository.ConfigManager
podLabels map[string]string
podAnnotations map[string]string
}
// +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores,verbs=get;list;watch;create;update;patch;delete
@@ -863,25 +882,37 @@ func (r *PodVolumeRestoreReconciler) setupExposeParam(pvr *velerov1api.PodVolume
}
hostingPodLabels := map[string]string{velerov1api.PVRLabel: pvr.Name}
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, pvr.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
}
} else {
if len(r.podLabels) > 0 {
for k, v := range r.podLabels {
hostingPodLabels[k] = v
}
} else {
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, pvr.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
}
} else {
hostingPodLabels[k] = v
}
}
}
hostingPodAnnotation := map[string]string{}
for _, k := range util.ThirdPartyAnnotations {
if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, pvr.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentAnnotationNotFound {
log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
}
} else {
if len(r.podAnnotations) > 0 {
for k, v := range r.podAnnotations {
hostingPodAnnotation[k] = v
}
} else {
for _, k := range util.ThirdPartyAnnotations {
if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, pvr.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentAnnotationNotFound {
log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
}
} else {
hostingPodAnnotation[k] = v
}
}
}
hostingPodTolerations := []corev1api.Toleration{}

View File

@@ -617,7 +617,25 @@ func initPodVolumeRestoreReconcilerWithError(objects []runtime.Object, cliObj []
dataPathMgr := datapath.NewManager(1)
return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, nil, nil, corev1api.ResourceRequirements{}, velerotest.NewLogger(), "", false, nil), nil
return NewPodVolumeRestoreReconciler(
fakeClient,
nil,
fakeKubeClient,
dataPathMgr,
nil,
"test-node",
time.Minute*5,
time.Minute,
nil,
nil,
corev1api.ResourceRequirements{},
velerotest.NewLogger(),
"",
false,
nil,
nil, // podLabels
nil, // podAnnotations
), nil
}
func TestPodVolumeRestoreReconcile(t *testing.T) {
@@ -1082,6 +1100,128 @@ func TestPodVolumeRestoreReconcile(t *testing.T) {
}
}
// TestPodVolumeRestoreSetupExposeParam verifies that setupExposeParam applies
// the reconciler's configured pod labels/annotations to the hosting-pod
// expose parameters; with nothing configured, only the built-in PVR label is
// expected and annotations stay empty.
func TestPodVolumeRestoreSetupExposeParam(t *testing.T) {
	// common objects for all cases
	node := builder.ForNode("worker-1").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()
	basePVR := pvrBuilder().Result()
	basePVR.Status.Node = "worker-1"
	basePVR.Spec.Pod.Namespace = "app-ns"
	basePVR.Spec.Pod.Name = "app-pod"
	basePVR.Spec.Volume = "data-vol"
	// args holds the reconciler-level label/annotation settings under test.
	type args struct {
		customLabels      map[string]string
		customAnnotations map[string]string
	}
	// want holds the expected hosting-pod labels/annotations.
	type want struct {
		labels      map[string]string
		annotations map[string]string
	}
	tests := []struct {
		name string
		args args
		want want
	}{
		{
			name: "label has customize values",
			args: args{
				customLabels:      map[string]string{"custom-label": "label-value"},
				customAnnotations: nil,
			},
			want: want{
				labels: map[string]string{
					velerov1api.PVRLabel: basePVR.Name,
					"custom-label":       "label-value",
				},
				annotations: map[string]string{},
			},
		},
		{
			name: "label has no customize values",
			args: args{
				customLabels:      nil,
				customAnnotations: nil,
			},
			want: want{
				labels:      map[string]string{velerov1api.PVRLabel: basePVR.Name},
				annotations: map[string]string{},
			},
		},
		{
			name: "annotation has customize values",
			args: args{
				customLabels:      nil,
				customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
			},
			want: want{
				labels:      map[string]string{velerov1api.PVRLabel: basePVR.Name},
				annotations: map[string]string{"custom-annotation": "annotation-value"},
			},
		},
		// Fixed case: previously a redundant labels-only duplicate of the
		// first case (misleadingly named "annotation has no customize
		// values"); exercise the both-set path instead, consistent with the
		// DataUpload/DataDownload setupExposeParam tests.
		{
			name: "both label and annotation have customize values",
			args: args{
				customLabels:      map[string]string{"custom-label": "label-value"},
				customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
			},
			want: want{
				labels: map[string]string{
					velerov1api.PVRLabel: basePVR.Name,
					"custom-label":       "label-value",
				},
				annotations: map[string]string{"custom-annotation": "annotation-value"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Fake clients per case
			fakeCRClient := velerotest.NewFakeControllerRuntimeClient(t, node, basePVR.DeepCopy())
			fakeKubeClient := clientgofake.NewSimpleClientset(node)
			// Reconciler config per case
			preparingTimeout := time.Minute * 3
			resourceTimeout := time.Minute * 10
			podRes := corev1api.ResourceRequirements{}
			r := NewPodVolumeRestoreReconciler(
				fakeCRClient,
				nil,
				fakeKubeClient,
				datapath.NewManager(1),
				nil,
				"test-node",
				preparingTimeout,
				resourceTimeout,
				nil, // backupRepoConfigs
				nil, // cacheVolumeConfigs -> keep nil so CacheVolume is nil
				podRes,
				velerotest.NewLogger(),
				"restore-priority",
				true, // privileged
				nil, // repoConfigMgr (unused when cacheVolumeConfigs is nil)
				tt.args.customLabels,
				tt.args.customAnnotations,
			)
			// Act
			got := r.setupExposeParam(basePVR)
			// Core fields
			assert.Equal(t, exposer.PodVolumeExposeTypeRestore, got.Type)
			assert.Equal(t, basePVR.Spec.Pod.Namespace, got.ClientNamespace)
			assert.Equal(t, basePVR.Spec.Pod.Name, got.ClientPodName)
			assert.Equal(t, basePVR.Spec.Volume, got.ClientPodVolume)
			// Labels/Annotations
			assert.Equal(t, tt.want.labels, got.HostingPodLabels)
			assert.Equal(t, tt.want.annotations, got.HostingPodAnnotations)
		})
	}
}
func TestOnPodVolumeRestoreFailed(t *testing.T) {
for _, getErr := range []bool{true, false} {
ctx := t.Context()

View File

@@ -107,6 +107,9 @@ func TestAsyncBackup(t *testing.T) {
<-finish
// Ensure the goroutine finishes so deferred fs.close executes, satisfying mock expectations.
fs.wgDataPath.Wait()
assert.Equal(t, test.err, asyncErr)
assert.Equal(t, test.result, asyncResult)
})
@@ -192,6 +195,9 @@ func TestAsyncRestore(t *testing.T) {
<-finish
// Ensure the goroutine finishes so deferred fs.close executes, satisfying mock expectations.
fs.wgDataPath.Wait()
assert.Equal(t, asyncErr, test.err)
assert.Equal(t, asyncResult, test.result)
})

View File

@@ -184,7 +184,22 @@ func (e *podVolumeExposer) Expose(ctx context.Context, ownerObject corev1api.Obj
}
}
hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName, param.Privileged, cachePVC)
hostingPod, err := e.createHostingPod(
ctx,
ownerObject,
param.Type,
path.ByPath,
param.OperationTimeout,
param.HostingPodLabels,
param.HostingPodAnnotations,
param.HostingPodTolerations,
pod.Spec.NodeName,
param.Resources,
nodeOS,
param.PriorityClassName,
param.Privileged,
cachePVC,
)
if err != nil {
return errors.Wrapf(err, "error to create hosting pod")
}
@@ -328,8 +343,22 @@ func (e *podVolumeExposer) CleanUp(ctx context.Context, ownerObject corev1api.Ob
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), cachePVCName, ownerObject.Namespace, 0, e.log)
}
func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject corev1api.ObjectReference, exposeType string, hostPath string,
operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string, privileged bool, cachePVC *corev1api.PersistentVolumeClaim) (*corev1api.Pod, error) {
func (e *podVolumeExposer) createHostingPod(
ctx context.Context,
ownerObject corev1api.ObjectReference,
exposeType string,
hostPath string,
operationTimeout time.Duration,
label map[string]string,
annotation map[string]string,
toleration []corev1api.Toleration,
selectedNode string,
resources corev1api.ResourceRequirements,
nodeOS string,
priorityClassName string,
privileged bool,
cachePVC *corev1api.PersistentVolumeClaim,
) (*corev1api.Pod, error) {
hostingPodName := ownerObject.Name
containerName := string(ownerObject.UID)

View File

@@ -290,9 +290,19 @@ func getJobConfig(
if globalResult.PriorityClassName != "" {
result.PriorityClassName = globalResult.PriorityClassName
}
// Pod's labels are only read from global config, not per-repository
if len(globalResult.PodLabels) > 0 {
result.PodLabels = globalResult.PodLabels
}
// Pod's annotations are only read from global config, not per-repository
if len(globalResult.PodAnnotations) > 0 {
result.PodAnnotations = globalResult.PodAnnotations
}
}
logger.Debugf("Didn't find content for repository %s in cm %s", repo.Name, repoMaintenanceJobConfig)
logger.Debugf("Configuration content for repository %s is %+v", repo.Name, result)
return result, nil
}
@@ -580,18 +590,29 @@ func buildJob(
podLabels := map[string]string{
RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name),
}
for _, k := range util.ThirdPartyLabels {
if v := veleroutil.GetVeleroServerLabelValue(deployment, k); v != "" {
if config != nil && len(config.PodLabels) > 0 {
for k, v := range config.PodLabels {
podLabels[k] = v
}
} else {
for _, k := range util.ThirdPartyLabels {
if v := veleroutil.GetVeleroServerLabelValue(deployment, k); v != "" {
podLabels[k] = v
}
}
}
podAnnotations := map[string]string{}
for _, k := range util.ThirdPartyAnnotations {
if v := veleroutil.GetVeleroServerAnnotationValue(deployment, k); v != "" {
if config != nil && len(config.PodAnnotations) > 0 {
for k, v := range config.PodAnnotations {
podAnnotations[k] = v
}
} else {
for _, k := range util.ThirdPartyAnnotations {
if v := veleroutil.GetVeleroServerAnnotationValue(deployment, k); v != "" {
podAnnotations[k] = v
}
}
}
// Set arguments

View File

@@ -538,6 +538,45 @@ func TestGetJobConfig(t *testing.T) {
},
expectedError: nil,
},
{
name: "Configs only exist in global section should supersede specific config",
repoJobConfig: &corev1api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: veleroNamespace,
Name: repoMaintenanceJobConfig,
},
Data: map[string]string{
GlobalKeyForRepoMaintenanceJobCM: "{\"keepLatestMaintenanceJobs\":1,\"podResources\":{\"cpuRequest\":\"50m\",\"cpuLimit\":\"100m\",\"memoryRequest\":\"50Mi\",\"memoryLimit\":\"100Mi\"},\"loadAffinity\":[{\"nodeSelector\":{\"matchExpressions\":[{\"key\":\"cloud.google.com/machine-family\",\"operator\":\"In\",\"values\":[\"n2\"]}]}}],\"priorityClassName\":\"global-priority\",\"podAnnotations\":{\"global-key\":\"global-value\"},\"podLabels\":{\"global-key\":\"global-value\"}}",
"test-default-kopia": "{\"podResources\":{\"cpuRequest\":\"100m\",\"cpuLimit\":\"200m\",\"memoryRequest\":\"100Mi\",\"memoryLimit\":\"200Mi\"},\"loadAffinity\":[{\"nodeSelector\":{\"matchExpressions\":[{\"key\":\"cloud.google.com/machine-family\",\"operator\":\"In\",\"values\":[\"e2\"]}]}}],\"priorityClassName\":\"specific-priority\",\"podAnnotations\":{\"specific-key\":\"specific-value\"},\"podLabels\":{\"specific-key\":\"specific-value\"}}",
},
},
expectedConfig: &velerotypes.JobConfigs{
KeepLatestMaintenanceJobs: &keepLatestMaintenanceJobs,
PodResources: &kube.PodResources{
CPURequest: "100m",
CPULimit: "200m",
MemoryRequest: "100Mi",
MemoryLimit: "200Mi",
},
LoadAffinities: []*kube.LoadAffinity{
{
NodeSelector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "cloud.google.com/machine-family",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"e2"},
},
},
},
},
},
PriorityClassName: "global-priority",
PodAnnotations: map[string]string{"global-key": "global-value"},
PodLabels: map[string]string{"global-key": "global-value"},
},
expectedError: nil,
},
}
for _, tc := range testCases {
@@ -938,12 +977,12 @@ func TestBuildJob(t *testing.T) {
deploy *appsv1api.Deployment
logLevel logrus.Level
logFormat *logging.FormatFlag
thirdPartyLabel map[string]string
expectedJobName string
expectedError bool
expectedEnv []corev1api.EnvVar
expectedEnvFrom []corev1api.EnvFromSource
expectedPodLabel map[string]string
expectedPodAnnotation map[string]string
expectedSecurityContext *corev1api.SecurityContext
expectedPodSecurityContext *corev1api.PodSecurityContext
expectedImagePullSecrets []corev1api.LocalObjectReference
@@ -1065,6 +1104,68 @@ func TestBuildJob(t *testing.T) {
expectedJobName: "",
expectedError: true,
},
{
name: "Valid maintenance job customized labels and annotations",
m: &velerotypes.JobConfigs{
PodResources: &kube.PodResources{
CPURequest: "100m",
MemoryRequest: "128Mi",
CPULimit: "200m",
MemoryLimit: "256Mi",
},
PodLabels: map[string]string{
"global-label-1": "global-label-value-1",
"global-label-2": "global-label-value-2",
},
PodAnnotations: map[string]string{
"global-annotation-1": "global-annotation-value-1",
"global-annotation-2": "global-annotation-value-2",
},
},
deploy: deploy2,
logLevel: logrus.InfoLevel,
logFormat: logging.NewFormatFlag(),
expectedError: false,
expectedJobName: "test-123-maintain-job",
expectedEnv: []corev1api.EnvVar{
{
Name: "test-name",
Value: "test-value",
},
},
expectedEnvFrom: []corev1api.EnvFromSource{
{
ConfigMapRef: &corev1api.ConfigMapEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-configmap",
},
},
},
{
SecretRef: &corev1api.SecretEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-secret",
},
},
},
},
expectedPodLabel: map[string]string{
"global-label-1": "global-label-value-1",
"global-label-2": "global-label-value-2",
RepositoryNameLabel: "test-123",
},
expectedPodAnnotation: map[string]string{
"global-annotation-1": "global-annotation-value-1",
"global-annotation-2": "global-annotation-value-2",
},
expectedSecurityContext: nil,
expectedPodSecurityContext: nil,
expectedImagePullSecrets: []corev1api.LocalObjectReference{
{
Name: "imagePullSecret1",
},
},
},
{
name: "Valid maintenance job with third party labels and BackupRepository name longer than 63",
m: &velerotypes.JobConfigs{

View File

@@ -98,4 +98,10 @@ type NodeAgentConfigs struct {
// CachePVCConfig is the config for cachePVC
CachePVCConfig *CachePVC `json:"cachePVC,omitempty"`
// PodAnnotations are annotations to be added to pods created by node-agent, i.e., data mover pods.
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
// PodLabels are labels to be added to pods created by node-agent, i.e., data mover pods.
PodLabels map[string]string `json:"podLabels,omitempty"`
}

View File

@@ -31,4 +31,12 @@ type JobConfigs struct {
// PriorityClassName is the priority class name for the maintenance job pod
// Note: This is only read from the global configuration, not per-repository
PriorityClassName string `json:"priorityClassName,omitempty"`
// PodAnnotations are annotations to be added to maintenance job pods.
// Note: This is only read from the global configuration, not per-repository
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
// PodLabels are labels to be added to maintenance job pods.
// Note: This is only read from the global configuration, not per-repository
PodLabels map[string]string `json:"podLabels,omitempty"`
}

View File

@@ -18,7 +18,7 @@ Velero introduces a new section in the node-agent configMap, called ```podResour
If it is not there, a configMap should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. The name of the configMap should be specified in the node-agent server parameter ```--node-agent-configmap```.
Node-agent server checks these configurations at startup time. Therefore, you could edit this configMap any time, but in order to make the changes effective, node-agent server needs to be restarted.
### Sample
### Pod Resources
Here is a sample of the configMap with ```podResources```:
```json
{
@@ -27,8 +27,7 @@ Here is a sample of the configMap with ```podResources```:
"cpuLimit": "1000m",
"memoryRequest": "512Mi",
"memoryLimit": "1Gi"
},
"priorityClassName": "high-priority"
}
}
```
@@ -93,12 +92,6 @@ To configure priority class for data mover pods, include it in your node-agent c
```json
{
"podResources": {
"cpuRequest": "1000m",
"cpuLimit": "2000m",
"memoryRequest": "1Gi",
"memoryLimit": "4Gi"
},
"priorityClassName": "backup-priority"
}
```
@@ -123,6 +116,47 @@ kubectl create cm node-agent-config -n velero --from-file=node-agent-config.json
**Note**: If the specified priority class doesn't exist in the cluster when data mover pods are created, the pods will fail to schedule. Velero validates the priority class at startup and logs a warning if it doesn't exist, but the pods will still attempt to use it.
### Pod Labels
Add customized labels for data mover pods to support third-party integrations and environment-specific requirements.
If `podLabels` is configured, it supersedes Velero's [in-tree third-party labels](https://github.com/vmware-tanzu/velero/blob/94f64639cee09c5caaa65b65ab5f42175f41c101/pkg/util/third_party.go#L19-L21).
If `podLabels` is not configured, Velero uses the in-tree third-party labels for compatibility with common cloud providers and networking solutions.
The configurations work for DataUpload, DataDownload, PodVolumeBackup, and PodVolumeRestore pods.
#### Configuration Example
```json
{
"podLabels": {
"spectrocloud.com/connection": "proxy",
"gnp/k8s-api-access": "",
"gnp/monitoring-client": "",
"np/s3-backup-backend": "",
"cp/inject-truststore": "extended"
}
}
```
### Pod Annotations
Add customized annotations for data mover pods to support third-party integrations and pod-level configuration.
If `podAnnotations` is configured, it supersedes Velero's [in-tree third-party annotations](https://github.com/vmware-tanzu/velero/blob/94f64639cee09c5caaa65b65ab5f42175f41c101/pkg/util/third_party.go#L23-L25).
If `podAnnotations` is not configured, Velero uses the in-tree third-party annotations for compatibility with common cloud providers and networking solutions.
The configurations work for DataUpload, DataDownload, PodVolumeBackup, and PodVolumeRestore pods.
#### Configuration Example
```json
{
"podAnnotations": {
"iam.amazonaws.com/role": "velero-backup-role",
"vault.hashicorp.com/agent-inject": "true",
"prometheus.io/scrape": "true",
"custom.company.com/environment": "production"
}
}
```
## Related Documentation
- [Node-agent Configuration](supported-configmaps/node-agent-configmap.md) - Complete reference for all configuration options

View File

@@ -426,6 +426,70 @@ For detailed information, see [Cache PVC Configuration for Data Movement Restore
}
```
### Pod Labels Configuration (`podLabels`)
Add customized labels for data mover pods to support third-party integrations and environment-specific requirements.
If `podLabels` is configured, it supersedes Velero's [in-tree third-party labels](https://github.com/vmware-tanzu/velero/blob/94f64639cee09c5caaa65b65ab5f42175f41c101/pkg/util/third_party.go#L19-L21).
If `podLabels` is not configured, Velero uses the in-tree third-party labels for compatibility with common cloud providers and networking solutions.
The configurations work for DataUpload, DataDownload, PodVolumeBackup, and PodVolumeRestore pods.
#### Configuration Example
```json
{
"podLabels": {
"spectrocloud.com/connection": "proxy",
"gnp/k8s-api-access": "",
"gnp/monitoring-client": "",
"np/s3-backup-backend": "",
"cp/inject-truststore": "extended"
}
}
```
#### Use Cases
- **Proxy Configuration**: Kubernetes environments that require proxy settings for external connections, configured via labels
- **Firewall Rules**: Network policies configured based on pod labels for traffic control
- **Cloud Provider Integration**: Labels required by managed Kubernetes services (AKS, EKS, GKE)
- **Security Policy Injection**: Labels that trigger security agent or certificate injection
#### Important Notes
- **Third-party Label Replacement**: When `podLabels` is configured, Velero's built-in in-tree labels are NOT automatically added
- **Explicit Configuration Required**: If you need both custom labels and in-tree third-party labels, explicitly include the in-tree labels in the `podLabels` configuration
- **In-tree Labels**: The default in-tree labels include support for Azure workload identity
### Pod Annotations Configuration (`podAnnotations`)
Add customized annotations for data mover pods to support third-party integrations and pod-level configuration.
If `podAnnotations` is configured, it supersedes Velero's [in-tree third-party annotations](https://github.com/vmware-tanzu/velero/blob/94f64639cee09c5caaa65b65ab5f42175f41c101/pkg/util/third_party.go#L23-L25).
If `podAnnotations` is not configured, Velero uses the in-tree third-party annotations for compatibility with common cloud providers and networking solutions.
The configurations work for DataUpload, DataDownload, PodVolumeBackup, and PodVolumeRestore pods.
#### Configuration Example
```json
{
"podAnnotations": {
"iam.amazonaws.com/role": "velero-backup-role",
"vault.hashicorp.com/agent-inject": "true",
"prometheus.io/scrape": "true",
"custom.company.com/environment": "production"
}
}
```
#### Use Cases
- **Secret Management Integration**: HashiCorp Vault or other secret managers using annotations for automatic secret injection
- **Monitoring and Observability**: Prometheus scrape configurations and other monitoring tool annotations
- **Custom Application Integration**: Company-specific annotations for operational tooling
#### Important Notes
- **Third-party Annotation Replacement**: When `podAnnotations` is configured, Velero's built-in in-tree annotations are NOT automatically added
- **Explicit Configuration Required**: If you need both custom annotations and in-tree third-party annotations, explicitly include the in-tree annotations in the `podAnnotations` configuration
- **In-tree Annotations**: The default in-tree annotations include support for AWS IAM roles
## Complete Configuration Example
Here's a comprehensive example showing how all configuration sections work together:
@@ -492,6 +556,19 @@ Here's a comprehensive example showing how all configuration sections work toget
"cachePVC": {
"thresholdInGB": 1,
"storageClass": "cache-optimized-storage"
},
"podLabels": {
"spectrocloud.com/connection": "proxy",
"gnp/k8s-api-access": "",
"gnp/monitoring-client": "",
"np/s3-backup-backend": "",
"cp/inject-truststore": "extended"
},
"podAnnotations": {
"iam.amazonaws.com/role": "velero-backup-role",
"vault.hashicorp.com/agent-inject": "true",
"prometheus.io/scrape": "true",
"custom.company.com/environment": "production"
}
}
```
@@ -508,6 +585,7 @@ This configuration:
- Enable privileged permission for PodVolume pods
- Enable cache PVC for file system restore
- The cache threshold is 1GB and use dedicated StorageClass
- Use customized labels and annotations for data mover pods
## Troubleshooting