Mirror of https://github.com/vmware-tanzu/velero.git, synced 2026-01-14 00:32:54 +00:00

Compare commits: `dependabot` ... `9328_fix` (13 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 4121808f38 |  |
|  | 420a65a116 |  |
|  | 3bf4a7dced |  |
|  | 2a5804b595 |  |
|  | 9a3fabbc55 |  |
|  | 72ddfd7d78 |  |
|  | e9666f9aea |  |
|  | 031df8d5e0 |  |
|  | 37df853a99 |  |
|  | 8ee3436f5c |  |
|  | b9159c22ca |  |
|  | 112bea520e |  |
|  | 7e15e9ba05 |  |
`.github/workflows/stale-issues.yml` (vendored, 2 changed lines)

```diff
@@ -7,7 +7,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v10.0.0
+      - uses: actions/stale@v10.1.0
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 14 days. If a Velero team member has requested log or more information, please provide the output of the shared commands."
```
`changelogs/unreleased/9269-Lyndon-Li` (new file, 1 line)

```diff
@@ -0,0 +1 @@
+Fix issue #7904, remove the code and doc for PVC node selection
```
`changelogs/unreleased/9333-Lyndon-Li` (new file, 1 line)

```diff
@@ -0,0 +1 @@
+Fix issue #9332, add bytesDone for cache files
```
```diff
@@ -275,7 +275,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
         log.WithError(err).Warn("Failed to get keepLatestMaintenanceJobs from ConfigMap, using CLI parameter value")
     }
 
-    if err := maintenance.DeleteOldJobs(r.Client, req.Name, keepJobs, log); err != nil {
+    if err := maintenance.DeleteOldJobs(r.Client, *backupRepo, keepJobs, log); err != nil {
         log.WithError(err).Warn("Failed to delete old maintenance jobs")
     }
 }
```
```diff
@@ -32,11 +32,13 @@ import (
     corev1api "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/wait"
     "sigs.k8s.io/controller-runtime/pkg/client"
 
     velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+    velerolabel "github.com/vmware-tanzu/velero/pkg/label"
     velerotypes "github.com/vmware-tanzu/velero/pkg/types"
     "github.com/vmware-tanzu/velero/pkg/util"
     "github.com/vmware-tanzu/velero/pkg/util/kube"
@@ -68,11 +70,22 @@ func GenerateJobName(repo string) string {
 }
 
 // DeleteOldJobs deletes old maintenance jobs and keeps the latest N jobs
-func DeleteOldJobs(cli client.Client, repo string, keep int, logger logrus.FieldLogger) error {
+func DeleteOldJobs(cli client.Client, repo velerov1api.BackupRepository, keep int, logger logrus.FieldLogger) error {
     logger.Infof("Start to delete old maintenance jobs. %d jobs will be kept.", keep)
     // Get the maintenance job list by label
     jobList := &batchv1api.JobList{}
-    err := cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
+    err := cli.List(
+        context.TODO(),
+        jobList,
+        &client.ListOptions{
+            Namespace: repo.Namespace,
+            LabelSelector: labels.SelectorFromSet(
+                map[string]string{
+                    RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
+                },
+            ),
+        },
+    )
     if err != nil {
         return err
     }
@@ -339,10 +352,17 @@ func WaitJobComplete(cli client.Client, ctx context.Context, jobName, ns string,
 // and then return the maintenance jobs' status in the range of limit
 func WaitAllJobsComplete(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, limit int, log logrus.FieldLogger) ([]velerov1api.BackupRepositoryMaintenanceStatus, error) {
     jobList := &batchv1api.JobList{}
-    err := cli.List(context.TODO(), jobList, &client.ListOptions{
-        Namespace: repo.Namespace,
-    },
-        client.MatchingLabels(map[string]string{RepositoryNameLabel: repo.Name}),
+    err := cli.List(
+        context.TODO(),
+        jobList,
+        &client.ListOptions{
+            Namespace: repo.Namespace,
+            LabelSelector: labels.SelectorFromSet(
+                map[string]string{
+                    RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
+                },
+            ),
+        },
     )
 
     if err != nil {
@@ -558,7 +578,7 @@ func buildJob(
     }
 
     podLabels := map[string]string{
-        RepositoryNameLabel: repo.Name,
+        RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
     }
 
     for _, k := range util.ThirdPartyLabels {
@@ -588,7 +608,7 @@ func buildJob(
             Name:      GenerateJobName(repo.Name),
             Namespace: repo.Namespace,
             Labels: map[string]string{
-                RepositoryNameLabel: repo.Name,
+                RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
             },
         },
         Spec: batchv1api.JobSpec{
```
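The hunks above scope the Job list to the repository's namespace and run the repository name through `velerolabel.GetValidName` both when labeling the maintenance pod and when building the list selector. Kubernetes label values are limited to 63 characters, so a long `BackupRepository` name cannot be used as a label value verbatim. The following is a minimal sketch of that constraint, assuming only the 63-character limit; `validLabelValue` is a hypothetical stand-in, not Velero's `GetValidName` implementation.

```go
// Minimal sketch: why a long BackupRepository name must be sanitized before
// being used as a label value. validLabelValue is hypothetical, not
// velerolabel.GetValidName.
package main

import (
	"crypto/sha256"
	"fmt"
)

// validLabelValue returns the name unchanged when it already fits in a label
// value, and otherwise truncates it and appends a short hash so distinct long
// names still yield distinct label values.
func validLabelValue(name string) string {
	const maxLen = 63 // Kubernetes label values may be at most 63 characters
	if len(name) <= maxLen {
		return name
	}
	sum := sha256.Sum256([]byte(name))
	suffix := fmt.Sprintf("%x", sum)[:6]
	return name[:maxLen-len(suffix)] + suffix
}

func main() {
	long := "label with more than 63 characters should be modified, padded until it clearly exceeds the limit"
	fmt.Println(len(validLabelValue(long))) // 63
	fmt.Println(validLabelValue("short"))   // short
}
```

Because the label written by `buildJob` and the selector used by `DeleteOldJobs` and `WaitAllJobsComplete` go through the same helper, jobs created for a long repository name can still be found and pruned later.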
```diff
@@ -40,6 +40,7 @@ import (
 
     velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
     "github.com/vmware-tanzu/velero/pkg/builder"
+    velerolabel "github.com/vmware-tanzu/velero/pkg/label"
     "github.com/vmware-tanzu/velero/pkg/repository/provider"
     velerotest "github.com/vmware-tanzu/velero/pkg/test"
     velerotypes "github.com/vmware-tanzu/velero/pkg/types"
@@ -48,7 +49,7 @@ import (
     "github.com/vmware-tanzu/velero/pkg/util/logging"
 )
 
-func TestGenerateJobName1(t *testing.T) {
+func TestGenerateJobName(t *testing.T) {
     testCases := []struct {
         repo          string
         expectedStart string
@@ -82,59 +83,62 @@ func TestGenerateJobName1(t *testing.T) {
 }
 func TestDeleteOldJobs(t *testing.T) {
-    // Set up test repo and keep value
-    repo := "test-repo"
-    keep := 2
-
-    // Create some maintenance jobs for testing
-    var objs []client.Object
-    // Create a newer job
-    newerJob := &batchv1api.Job{
+    repo := &velerov1api.BackupRepository{
         ObjectMeta: metav1.ObjectMeta{
-            Name:      "job1",
-            Namespace: "default",
-            Labels:    map[string]string{RepositoryNameLabel: repo},
+            Name:      "label with more than 63 characters should be modified",
+            Namespace: velerov1api.DefaultNamespace,
         },
     }
+    keep := 1
+
+    jobArray := []client.Object{
+        &batchv1api.Job{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      "job-0",
+                Namespace: velerov1api.DefaultNamespace,
+                Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
+            },
+            Spec: batchv1api.JobSpec{},
+        },
+        &batchv1api.Job{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      "job-1",
+                Namespace: velerov1api.DefaultNamespace,
+                Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
+            },
+            Spec: batchv1api.JobSpec{},
+        },
+    }
+
+    newJob := &batchv1api.Job{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "job-new",
+            Namespace: velerov1api.DefaultNamespace,
+            Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
+        },
+        Spec: batchv1api.JobSpec{},
+    }
-    objs = append(objs, newerJob)
-    // Create older jobs
-    for i := 2; i <= 3; i++ {
-        olderJob := &batchv1api.Job{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:      fmt.Sprintf("job%d", i),
-                Namespace: "default",
-                Labels:    map[string]string{RepositoryNameLabel: repo},
-                CreationTimestamp: metav1.Time{
-                    Time: metav1.Now().Add(time.Duration(-24*i) * time.Hour),
-                },
-            },
-            Spec: batchv1api.JobSpec{},
-        }
-        objs = append(objs, olderJob)
-    }
-    // Create a fake Kubernetes client
+
+    // Create a fake Kubernetes client with 2 jobs.
     scheme := runtime.NewScheme()
     _ = batchv1api.AddToScheme(scheme)
-    cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
+    cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(jobArray...).Build()
+
+    // Create a new job
+    require.NoError(t, cli.Create(context.TODO(), newJob))
 
     // Call the function
-    err := DeleteOldJobs(cli, repo, keep, velerotest.NewLogger())
-    require.NoError(t, err)
+    require.NoError(t, DeleteOldJobs(cli, *repo, keep, velerotest.NewLogger()))
 
     // Get the remaining jobs
     jobList := &batchv1api.JobList{}
-    err = cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
-    require.NoError(t, err)
+    require.NoError(t, cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo.Name})))
 
     // We expect the number of jobs to be equal to 'keep'
     assert.Len(t, jobList.Items, keep)
 
-    // We expect that the oldest jobs were deleted
-    // Job3 should not be present in the remaining list
-    assert.NotContains(t, jobList.Items, objs[2])
-
-    // Job2 should also not be present in the remaining list
-    assert.NotContains(t, jobList.Items, objs[1])
+    // Only the new created job should be left.
+    assert.Equal(t, jobList.Items[0].Name, newJob.Name)
 }
 
 func TestWaitForJobComplete(t *testing.T) {
```
```diff
@@ -571,7 +575,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
     repo := &velerov1api.BackupRepository{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: veleroNamespace,
-            Name:      "fake-repo",
+            Name:      "label with more than 63 characters should be modified",
         },
         Spec: velerov1api.BackupRepositorySpec{
             BackupStorageLocation: "default",
@@ -595,7 +599,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
         ObjectMeta: metav1.ObjectMeta{
             Name:      "job1",
             Namespace: veleroNamespace,
-            Labels:    map[string]string{RepositoryNameLabel: "fake-repo"},
+            Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
             CreationTimestamp: metav1.Time{Time: now},
         },
     }
@@ -604,7 +608,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
         ObjectMeta: metav1.ObjectMeta{
             Name:      "job1",
             Namespace: veleroNamespace,
-            Labels:    map[string]string{RepositoryNameLabel: "fake-repo"},
+            Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
             CreationTimestamp: metav1.Time{Time: now},
         },
         Status: batchv1api.JobStatus{
@@ -624,7 +628,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
         ObjectMeta: metav1.ObjectMeta{
             Name:      "job2",
             Namespace: veleroNamespace,
-            Labels:    map[string]string{RepositoryNameLabel: "fake-repo"},
+            Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
             CreationTimestamp: metav1.Time{Time: now.Add(time.Hour)},
         },
         Status: batchv1api.JobStatus{
@@ -645,7 +649,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
         ObjectMeta: metav1.ObjectMeta{
             Name:      "job3",
             Namespace: veleroNamespace,
-            Labels:    map[string]string{RepositoryNameLabel: "fake-repo"},
+            Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
             CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 2)},
         },
         Status: batchv1api.JobStatus{
@@ -665,7 +669,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
         ObjectMeta: metav1.ObjectMeta{
             Name:      "job4",
             Namespace: veleroNamespace,
-            Labels:    map[string]string{RepositoryNameLabel: "fake-repo"},
+            Labels:    map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
             CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 3)},
         },
         Status: batchv1api.JobStatus{
@@ -698,7 +702,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
     {
         name:          "list job error",
         runtimeScheme: schemeFail,
-        expectedError: "error listing maintenance job for repo fake-repo: no kind is registered for the type v1.JobList in scheme",
+        expectedError: "error listing maintenance job for repo label with more than 63 characters should be modified: no kind is registered for the type v1.JobList in scheme",
     },
     {
         name: "job not exist",
```
```diff
@@ -943,6 +947,7 @@ func TestBuildJob(t *testing.T) {
         expectedSecurityContext    *corev1api.SecurityContext
         expectedPodSecurityContext *corev1api.PodSecurityContext
         expectedImagePullSecrets   []corev1api.LocalObjectReference
+        backupRepository           *velerov1api.BackupRepository
     }{
         {
             name: "Valid maintenance job without third party labels",
@@ -1060,6 +1065,64 @@ func TestBuildJob(t *testing.T) {
             expectedJobName: "",
             expectedError:   true,
         },
+        {
+            name: "Valid maintenance job with third party labels and BackupRepository name longer than 63",
+            m: &velerotypes.JobConfigs{
+                PodResources: &kube.PodResources{
+                    CPURequest:    "100m",
+                    MemoryRequest: "128Mi",
+                    CPULimit:      "200m",
+                    MemoryLimit:   "256Mi",
+                },
+            },
+            deploy:        deploy2,
+            logLevel:      logrus.InfoLevel,
+            logFormat:     logging.NewFormatFlag(),
+            expectedError: false,
+            expectedEnv: []corev1api.EnvVar{
+                {
+                    Name:  "test-name",
+                    Value: "test-value",
+                },
+            },
+            expectedEnvFrom: []corev1api.EnvFromSource{
+                {
+                    ConfigMapRef: &corev1api.ConfigMapEnvSource{
+                        LocalObjectReference: corev1api.LocalObjectReference{
+                            Name: "test-configmap",
+                        },
+                    },
+                },
+                {
+                    SecretRef: &corev1api.SecretEnvSource{
+                        LocalObjectReference: corev1api.LocalObjectReference{
+                            Name: "test-secret",
+                        },
+                    },
+                },
+            },
+            expectedPodLabel: map[string]string{
+                RepositoryNameLabel:           velerolabel.GetValidName("label with more than 63 characters should be modified"),
+                "azure.workload.identity/use": "fake-label-value",
+            },
+            expectedSecurityContext:    nil,
+            expectedPodSecurityContext: nil,
+            expectedImagePullSecrets: []corev1api.LocalObjectReference{
+                {
+                    Name: "imagePullSecret1",
+                },
+            },
+            backupRepository: &velerov1api.BackupRepository{
+                ObjectMeta: metav1.ObjectMeta{
+                    Namespace: "velero",
+                    Name:      "label with more than 63 characters should be modified",
+                },
+                Spec: velerov1api.BackupRepositorySpec{
+                    VolumeNamespace: "test-123",
+                    RepositoryType:  "kopia",
+                },
+            },
+        },
     }
 
     param := provider.RepoParam{
@@ -1083,6 +1146,10 @@ func TestBuildJob(t *testing.T) {
 
     for _, tc := range testCases {
         t.Run(tc.name, func(t *testing.T) {
+            if tc.backupRepository != nil {
+                param.BackupRepo = tc.backupRepository
+            }
+
             // Create a fake clientset with resources
             objs := []runtime.Object{param.BackupLocation, param.BackupRepo}
```
```diff
@@ -17,20 +17,15 @@ limitations under the License.
 package actions
 
 import (
-    "context"
-
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
     corev1api "k8s.io/api/core/v1"
-    apierrors "k8s.io/apimachinery/pkg/api/errors"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime"
     corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 
     velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
     "github.com/vmware-tanzu/velero/pkg/kuberesource"
-    "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
     "github.com/vmware-tanzu/velero/pkg/plugin/velero"
     "github.com/vmware-tanzu/velero/pkg/util"
 )
@@ -91,46 +86,13 @@ func (p *PVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
         return nil, errors.WithStack(err)
     }
 
-    if pvc.Annotations == nil {
-        pvc.Annotations = make(map[string]string)
-    }
-
     log := p.logger.WithFields(map[string]any{
         "kind":      pvc.Kind,
         "namespace": pvc.Namespace,
        "name":      pvc.Name,
     })
 
-    // Handle selected node annotation
-    node, ok := pvc.Annotations[AnnSelectedNode]
-    if ok {
-        // fetch node mapping from configMap
-        newNode, err := getNewNodeFromConfigMap(p.configMapClient, node)
-        if err != nil {
-            return nil, err
-        }
-
-        if len(newNode) != 0 {
-            // Check whether the mapped node exists first.
-            exists, err := isNodeExist(p.nodeClient, newNode)
-            if err != nil {
-                return nil, errors.Wrapf(err, "error checking %s's mapped node %s existence", node, newNode)
-            }
-            if !exists {
-                log.Warnf("Selected-node's mapped node doesn't exist: source: %s, dest: %s. Please check the ConfigMap with label velero.io/change-pvc-node-selector.", node, newNode)
-            }
-
-            // set node selector
-            // We assume that node exist for node-mapping
-            pvc.Annotations[AnnSelectedNode] = newNode
-            log.Infof("Updating selected-node to %s from %s", newNode, node)
-        } else {
-            log.Info("Clearing PVC selected-node annotation")
-            delete(pvc.Annotations, AnnSelectedNode)
-        }
-    }
-
-    // Remove other annotations
+    // Remove PVC annotations
     removePVCAnnotations(
         &pvc,
         []string{
@@ -138,6 +100,7 @@ func (p *PVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
             AnnBoundByController,
             AnnStorageProvisioner,
             AnnBetaStorageProvisioner,
+            AnnSelectedNode,
             velerov1api.VolumeSnapshotLabel,
             velerov1api.DataUploadNameAnnotation,
         },
@@ -167,34 +130,6 @@ func (p *PVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
     return output, nil
 }
 
-func getNewNodeFromConfigMap(client corev1client.ConfigMapInterface, node string) (string, error) {
-    // fetch node mapping from configMap
-    config, err := common.GetPluginConfig(common.PluginKindRestoreItemAction, "velero.io/change-pvc-node-selector", client)
-    if err != nil {
-        return "", err
-    }
-
-    if config == nil {
-        // there is no node mapping defined for change-pvc-node
-        // so we will return empty new node
-        return "", nil
-    }
-
-    return config.Data[node], nil
-}
-
-// isNodeExist check if node resource exist or not
-func isNodeExist(nodeClient corev1client.NodeInterface, name string) (bool, error) {
-    _, err := nodeClient.Get(context.TODO(), name, metav1.GetOptions{})
-    if err != nil {
-        if apierrors.IsNotFound(err) {
-            return false, nil
-        }
-        return false, err
-    }
-    return true, nil
-}
-
 func removePVCAnnotations(pvc *corev1api.PersistentVolumeClaim, remove []string) {
     for k := range pvc.Annotations {
         if util.Contains(remove, k) {
```
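With the node-mapping code removed, the restore item action's remaining job is to strip a fixed set of annotations, now including `volume.kubernetes.io/selected-node`, from the restored PVC. The following is a minimal sketch of that idea using plain maps; `stripAnnotations` is a hypothetical helper written for illustration, not the `removePVCAnnotations` function from the diff.

```go
// Minimal sketch: stripping a known set of annotations from a PVC-like
// object. stripAnnotations is hypothetical, not Velero's removePVCAnnotations.
package main

import "fmt"

func stripAnnotations(annotations map[string]string, remove []string) {
	for _, k := range remove {
		delete(annotations, k) // deleting an absent key is a no-op in Go
	}
}

func main() {
	pvcAnnotations := map[string]string{
		"volume.kubernetes.io/selected-node": "node-1",
		"other-anno":                         "other-value",
	}
	stripAnnotations(pvcAnnotations, []string{
		"volume.kubernetes.io/selected-node",
	})
	fmt.Println(pvcAnnotations) // map[other-anno:other-value]
}
```

Because deleting a missing key is a no-op, the helper needs no existence checks, which keeps the restore path simple once the selected-node handling is gone.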
```diff
@@ -17,11 +17,9 @@ limitations under the License.
 package actions
 
 import (
-    "bytes"
-    "fmt"
     "testing"
 
     "github.com/sirupsen/logrus"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     corev1api "k8s.io/api/core/v1"
@@ -42,105 +40,57 @@ import (
 // desired result.
 func TestPVCActionExecute(t *testing.T) {
     tests := []struct {
-        name      string
-        pvc       *corev1api.PersistentVolumeClaim
-        configMap *corev1api.ConfigMap
-        node      *corev1api.Node
-        newNode   *corev1api.Node
-        want      *corev1api.PersistentVolumeClaim
-        wantErr   error
+        name    string
+        pvc     *corev1api.PersistentVolumeClaim
+        want    *corev1api.PersistentVolumeClaim
+        wantErr error
     }{
         {
-            name: "a valid mapping for a persistent volume claim is applied correctly",
-            pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
-                ObjectMeta(
-                    builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
-                ).Result(),
-            configMap: builder.ForConfigMap("velero", "change-pvc-node").
-                ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
-                Data("source-node", "dest-node").
-                Result(),
-            newNode: builder.ForNode("dest-node").Result(),
-            want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
-                ObjectMeta(
-                    builder.WithAnnotations("volume.kubernetes.io/selected-node", "dest-node"),
-                ).Result(),
-        },
-        {
-            name: "when no config map exists for the plugin, the item is returned without node selector",
-            pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
-                ObjectMeta(
-                    builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
-                ).Result(),
-            configMap: builder.ForConfigMap("velero", "change-pvc-node").
-                ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/some-other-plugin", "RestoreItemAction")).
-                Data("source-node", "dest-node").
-                Result(),
-            node: builder.ForNode("source-node").Result(),
-            want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
-        },
-        {
-            name: "when no node-mappings exist in the plugin config map, the item is returned without node selector",
-            pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
-                ObjectMeta(
-                    builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
-                ).Result(),
-            configMap: builder.ForConfigMap("velero", "change-pvc-node").
-                ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
-                Result(),
-            node: builder.ForNode("source-node").Result(),
-            want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
-        },
-        {
-            name: "when persistent volume claim has no node selector, the item is returned as-is",
+            name: "a persistent volume claim with no annotation",
             pvc:  builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
-            configMap: builder.ForConfigMap("velero", "change-pvc-node").
-                ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
-                Data("source-node", "dest-node").
-                Result(),
             want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
         },
         {
-            name: "when persistent volume claim's node-selector has no mapping in the config map, the item is returned without node selector",
+            name: "a persistent volume claim with selected-node annotation",
             pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
                 ObjectMeta(
                     builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
                 ).Result(),
-            configMap: builder.ForConfigMap("velero", "change-pvc-node").
-                ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
-                Data("source-node-1", "dest-node").
-                Result(),
-            node: builder.ForNode("source-node").Result(),
-            want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
+            want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").ObjectMeta(builder.WithAnnotationsMap(map[string]string{})).Result(),
         },
+        {
+            name: "a persistent volume claim with other annotation",
+            pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
+                ObjectMeta(
+                    builder.WithAnnotations("other-anno-1", "other-value-1", "other-anno-2", "other-value-2"),
+                ).Result(),
+            want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").ObjectMeta(
+                builder.WithAnnotations("other-anno-1", "other-value-1", "other-anno-2", "other-value-2"),
+            ).Result(),
+        },
+        {
+            name: "a persistent volume claim with other annotation and selected-node annotation",
+            pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
+                ObjectMeta(
+                    builder.WithAnnotations("other-anno", "other-value", "volume.kubernetes.io/selected-node", "source-node"),
+                ).Result(),
+            want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").ObjectMeta(
+                builder.WithAnnotations("other-anno", "other-value"),
+            ).Result(),
+        },
     }
 
     for _, tc := range tests {
         t.Run(tc.name, func(t *testing.T) {
             clientset := fake.NewSimpleClientset()
-            logger := logrus.StandardLogger()
-            buf := bytes.Buffer{}
-            logrus.SetOutput(&buf)
-
             a := NewPVCAction(
-                logger,
+                velerotest.NewLogger(),
                 clientset.CoreV1().ConfigMaps("velero"),
                 clientset.CoreV1().Nodes(),
             )
 
-            // set up test data
-            if tc.configMap != nil {
-                _, err := clientset.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(t.Context(), tc.configMap, metav1.CreateOptions{})
-                require.NoError(t, err)
-            }
-
-            if tc.node != nil {
-                _, err := clientset.CoreV1().Nodes().Create(t.Context(), tc.node, metav1.CreateOptions{})
-                require.NoError(t, err)
-            }
-            if tc.newNode != nil {
-                _, err := clientset.CoreV1().Nodes().Create(t.Context(), tc.newNode, metav1.CreateOptions{})
-                require.NoError(t, err)
-            }
             unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
             require.NoError(t, err)
@@ -156,10 +106,6 @@ func TestPVCActionExecute(t *testing.T) {
             // execute method under test
             res, err := a.Execute(input)
 
-            // Make sure mapped selected-node exists.
-            logOutput := buf.String()
-            assert.NotContains(t, logOutput, "Selected-node's mapped node doesn't exist")
-
             // validate for both error and non-error cases
             switch {
             case tc.wantErr != nil:
```
```diff
@@ -121,6 +121,7 @@ func (p *Progress) UploadStarted() {}
 // CachedFile statistic the total bytes been cached currently
 func (p *Progress) CachedFile(fname string, numBytes int64) {
     atomic.AddInt64(&p.cachedBytes, numBytes)
+    atomic.AddInt64(&p.processedBytes, numBytes)
     p.UpdateProgress()
 }
 
```
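This one-line addition is the fix described in the `9333-Lyndon-Li` changelog: bytes satisfied from the local cache now also count toward the processed (bytesDone) total instead of only toward the cache statistic. The following is a minimal sketch of that pattern with a hypothetical `progress` type and method names; it is not Velero's kopia uploader code.

```go
// Minimal sketch: a progress tracker where cached bytes also advance the
// reported "done" total. The type and method names are hypothetical.
package main

import (
	"fmt"
	"sync/atomic"
)

type progress struct {
	totalBytes     int64
	processedBytes int64 // bytes counted as done, including cached ones
	cachedBytes    int64 // bytes that were satisfied from the local cache
}

func (p *progress) uploadedFile(numBytes int64) {
	atomic.AddInt64(&p.processedBytes, numBytes)
}

func (p *progress) cachedFile(numBytes int64) {
	atomic.AddInt64(&p.cachedBytes, numBytes)
	// Without this second add, cached files would never advance the reported
	// progress, leaving bytesDone short of totalBytes.
	atomic.AddInt64(&p.processedBytes, numBytes)
}

func main() {
	p := &progress{totalBytes: 100}
	p.uploadedFile(60)
	p.cachedFile(40)
	fmt.Printf("done %d of %d bytes (cached %d)\n",
		atomic.LoadInt64(&p.processedBytes), p.totalBytes, atomic.LoadInt64(&p.cachedBytes))
}
```

Using `atomic.AddInt64` keeps the counters consistent when multiple workers report progress concurrently.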
````diff
@@ -215,37 +215,9 @@ data:
 
 ### PVC selected-node
 
-Velero by default removes PVC's `volume.kubernetes.io/selected-node` annotation during restore, so that the restored PVC could be provisioned appropriately according to ```WaitForFirstConsumer``` rules, storage topologies and the restored pod's schedule result, etc.
+Velero removes PVC's `volume.kubernetes.io/selected-node` annotation during restore, so that the restored PVC could be provisioned appropriately according to ```WaitForFirstConsumer``` rules, storage topologies and the restored pod's schedule result, etc.
 
+For more information of how this selected-node annotation matters to PVC restore, see issue https://github.com/vmware-tanzu/velero/issues/9053.
-
-As an expectation, when you provide the selected-node configuration, Velero sets the annotation to the node in the configuration, if the node doesn't exist in the cluster then the annotation will also be removed.
-Note: This feature is under deprecation as of Velero 1.15, following Velero deprecation policy. This feature is primarily used to remedy some problems in old Kubernetes versions as described [here](https://github.com/vmware-tanzu/velero/pull/2377). It may not work with the new features of Kubernetes and Velero. For more information, see issue https://github.com/vmware-tanzu/velero/issues/9053 for more information.
-To configure a selected-node, create a config map in the Velero namespace like the following:
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  # any name can be used; Velero uses the labels (below)
-  # to identify it rather than the name
-  name: change-pvc-node-selector-config
-  # must be in the velero namespace
-  namespace: velero
-  # the below labels should be used verbatim in your
-  # ConfigMap.
-  labels:
-    # this value-less label identifies the ConfigMap as
-    # config for a plugin (i.e. the built-in restore item action plugin)
-    velero.io/plugin-config: ""
-    # this label identifies the name and kind of plugin
-    # that this ConfigMap is for.
-    velero.io/change-pvc-node-selector: RestoreItemAction
-data:
-  # add 1+ key-value pairs here, where the key is the old
-  # node name and the value is the new node name.
-  <old-node-name>: <new-node-name>
-```
-For more information of how this selected-node annotation matters to PVC restore, see issue https://github.com/vmware-tanzu/velero/issues/9053.
 
 ## Restoring into a different namespace
````