Merge pull request #6926 from Lyndon-Li/backup-pod-spread-evenly

Issue 6734: spread backup pod evenly
Authored by Wenkai Yin(尹文开) on 2023-10-10 10:05:41 +08:00, committed via GitHub
3 changed files with 21 additions and 0 deletions


@@ -0,0 +1 @@
Partially fix #6734: guide the Kubernetes scheduler to spread backup pods across nodes as evenly as possible, so that data mover backups can achieve better parallelism
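For context, here is a minimal standalone sketch (not additional Velero code) of the topology spread constraint this change attaches to every backup pod, with comments summarizing the scheduler semantics of the fields used in the diff below:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// snapshotSpreadConstraint mirrors the constraint added by this PR, annotated
// with what each field means to the scheduler.
func snapshotSpreadConstraint() corev1.TopologySpreadConstraint {
	return corev1.TopologySpreadConstraint{
		// Allow the per-node counts of matching pods to differ by at most 1.
		MaxSkew: 1,
		// Treat each node (distinct hostname) as one topology domain.
		TopologyKey: "kubernetes.io/hostname",
		// Soft constraint: if even spreading is not possible, schedule the pod
		// anyway rather than leaving it Pending.
		WhenUnsatisfiable: corev1.ScheduleAnyway,
		// Only pods carrying the exposer group label count toward the skew.
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{
				"velero.io/exposer-pod-group": "snapshot-exposer",
			},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", snapshotSpreadConstraint())
}

Because WhenUnsatisfiable is ScheduleAnyway rather than DoNotSchedule, the spread is a preference, not a requirement: backup pods still get scheduled on unevenly loaded clusters, so the constraint never blocks a backup.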


@@ -361,6 +361,12 @@ func (e *csiSnapshotExposer) createBackupPod(ctx context.Context, ownerObject co
	var gracePeriod int64 = 0
	volumeMounts, volumeDevices := kube.MakePodPVCAttachment(volumeName, backupPVC.Spec.VolumeMode)
	if label == nil {
		label = make(map[string]string)
	}
	label[podGroupLabel] = podGroupSnapshot
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
@@ -377,6 +383,18 @@ func (e *csiSnapshotExposer) createBackupPod(ctx context.Context, ownerObject co
			Labels: label,
		},
		Spec: corev1.PodSpec{
			TopologySpreadConstraints: []corev1.TopologySpreadConstraint{
				{
					MaxSkew:           1,
					TopologyKey:       "kubernetes.io/hostname",
					WhenUnsatisfiable: corev1.ScheduleAnyway,
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{
							podGroupLabel: podGroupSnapshot,
						},
					},
				},
			},
			Containers: []corev1.Container{
				{
					Name: containerName,


@@ -23,6 +23,8 @@ import (
const (
	AccessModeFileSystem = "by-file-system"
	AccessModeBlock      = "by-block-device"
	podGroupLabel        = "velero.io/exposer-pod-group"
	podGroupSnapshot     = "snapshot-exposer"
)
// ExposeResult defines the result of expose.
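
As a usage illustration outside this PR, the new velero.io/exposer-pod-group=snapshot-exposer label also makes it easy to check how backup pods are actually distributed. A minimal client-go sketch, assuming a reachable kubeconfig and that the exposer pods run in the velero namespace:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the local kubeconfig (assumption: ~/.kube/config points at the cluster).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// List the exposer pods by the group label introduced in this PR.
	pods, err := client.CoreV1().Pods("velero").List(context.TODO(), metav1.ListOptions{
		LabelSelector: "velero.io/exposer-pod-group=snapshot-exposer",
	})
	if err != nil {
		panic(err)
	}

	// Count pods per node to see how evenly the scheduler spread them.
	perNode := map[string]int{}
	for _, p := range pods.Items {
		perNode[p.Spec.NodeName]++
	}
	fmt.Println(perNode)
}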