Mirror of https://github.com/vmware-tanzu/velero.git, synced 2026-03-26 11:35:04 +00:00.

Compare commits: main ... release-1. (62 commits)

Commit SHA1s in this comparison: 33b1fde8e1, 525036bc69, 974c465d0a, 7da042a053, ca628ccc44, 6055bd5478, f7890d3c59, a83ab21a9a, 79f0e72fde, c5bca75f17, fcdbc7cfa8, 2b87a2306e, c239b27bf2, 6ba0f86586, 6dfd8c96d0, 336e8c4b56, 883befcdde, 7cfd4af733, 4cc1779fec, ce2b4c191f, 1e6f02dc24, e2bbace03b, 341597f542, ea97ef8279, 384a492aa2, c3237addfe, e4774b32f3, ac73e8f29d, ea2c4f4e5c, 2c0fddc498, eac69375c9, 733b2eb6f5, 01bd153968, 57892169a9, 072dc4c610, 77c60589d6, d0cea53676, 9a39cbfbf5, 62a24ece50, b85a8f6784, d39285be32, c30164c355, ce0888ee44, 8682cdd36e, c87e8acbf4, 6adcf06b5b, ffa65605a6, bd8dfe9ee2, 54783fbe28, cb5f56265a, 0c7b89a44e, aa89713559, 5db4c65a92, 87db850f66, c7631fc4a4, 9a37478cc2, 5b54ccd2e0, 43b926a58b, 9bfc78e769, c9e26256fa, 6e315c32e2, 91cbc40956
```diff
@@ -13,7 +13,7 @@
 # limitations under the License.

 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.25.7-bookworm AS velero-builder

 ARG GOPROXY
 ARG BIN
@@ -49,7 +49,7 @@ RUN mkdir -p /output/usr/bin && \
     go clean -modcache -cache

 # Restic binary build section
-FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS restic-builder
+FROM --platform=$BUILDPLATFORM golang:1.25.7-bookworm AS restic-builder

 ARG GOPROXY
 ARG BIN
@@ -73,7 +73,7 @@ RUN mkdir -p /output/usr/bin && \
    go clean -modcache -cache

 # Velero image packing section
-FROM paketobuildpacks/run-jammy-tiny:latest
+FROM paketobuildpacks/run-jammy-tiny:0.2.104

 LABEL maintainer="Xun Jiang <jxun@vmware.com>"

@@ -15,7 +15,7 @@
 ARG OS_VERSION=1809

 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.25.7-bookworm AS velero-builder

 ARG GOPROXY
 ARG BIN
```
Tiltfile (2 changed lines):

```diff
@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(

 tilt_helper_dockerfile_header = """
 # Tilt image
-FROM golang:1.25 as tilt-helper
+FROM golang:1.25.7 as tilt-helper

 # Support live reloading with Tilt
 RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
```
Changelog file changes under changelogs/unreleased:

- Removed: Support all glob wildcard characters in namespace validation
- Removed: Fix VolumePolicy PVC phase condition filter for unbound PVCs (#9507)
- Removed: Issue #9544: Add test coverage for S3 bucket name in MRAP ARN notation and fix bucket validation to accept ARN format
- Removed: Add schedule_expected_interval_seconds metric for dynamic backup alerting thresholds (#9559)
- Removed: Implement original VolumeSnapshotContent deletion for legacy backups
- Added (changelogs/unreleased/9629-sseago, new file, 1 line): Optimize VSC handle readiness polling for VSS backups
- Removed: Fix issue #9636, fix configmap lookup in non-default namespaces
- Removed: Fix issue #9641, Remove redundant ReadyToUse polling in CSI VolumeSnapshotContent delete plugin
go.mod (2 changed lines):

```diff
@@ -1,6 +1,6 @@
 module github.com/vmware-tanzu/velero

-go 1.25.0
+go 1.25.7

 require (
 	cloud.google.com/go/storage v1.57.2
```

```diff
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-FROM --platform=$TARGETPLATFORM golang:1.25-bookworm
+FROM --platform=$TARGETPLATFORM golang:1.25.7-bookworm

 ARG GOPROXY
```
```diff
@@ -18,6 +18,7 @@ package csi

 import (
 	"context"
+	"time"

 	"github.com/google/uuid"
 	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
@@ -26,11 +27,14 @@ import (
 	corev1api "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
 	crclient "sigs.k8s.io/controller-runtime/pkg/client"

+	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/client"
 	plugincommon "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
 	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
+	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
 	kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
 )

@@ -67,7 +71,7 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
 	// So skip deleting VolumeSnapshotContent not have the backup name
 	// in its labels.
 	if !kubeutil.HasBackupLabel(&snapCont.ObjectMeta, input.Backup.Name) {
-		p.log.Infof(
+		p.log.Info(
 			"VolumeSnapshotContent %s was not taken by backup %s, skipping deletion",
 			snapCont.Name,
 			input.Backup.Name,
@@ -77,17 +81,6 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(

 	p.log.Infof("Deleting VolumeSnapshotContent %s", snapCont.Name)

-	// Try to delete the original VSC from the cluster first.
-	// This handles legacy (pre-1.15) backups where the original VSC
-	// with DeletionPolicy=Retain still exists in the cluster.
-	originalVSCName := snapCont.Name
-	if cleaned := p.tryDeleteOriginalVSC(context.TODO(), originalVSCName); cleaned {
-		p.log.Infof("Successfully deleted original VolumeSnapshotContent %s from cluster, skipping temp VSC creation", originalVSCName)
-		return nil
-	}
-
 	// create a temp VSC to trigger cloud snapshot deletion
-	// (for backups where the original VSC no longer exists in cluster)
 	uuid, err := uuid.NewRandom()
 	if err != nil {
 		p.log.WithError(err).Errorf("Fail to generate the UUID to create VSC %s", snapCont.Name)
@@ -121,62 +114,71 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
 	if err := p.crClient.Create(context.TODO(), &snapCont); err != nil {
 		return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", snapCont.Name)
 	}
-	p.log.Infof("Created temp VolumeSnapshotContent %s with DeletionPolicy=Delete to trigger cloud snapshot cleanup", snapCont.Name)

-	// Delete the temp VSC immediately to trigger cloud snapshot removal.
-	// The CSI driver will handle the actual cloud snapshot deletion.
+	// Read resource timeout from backup annotation, if not set, use default value.
+	timeout, err := time.ParseDuration(
+		input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation])
+	if err != nil {
+		p.log.Warnf("fail to parse resource timeout annotation %s: %s",
+			input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation], err.Error())
+		timeout = 10 * time.Minute
+	}
+	p.log.Debugf("resource timeout is set to %s", timeout.String())
+
+	interval := 5 * time.Second
+
+	// Wait until VSC created and ReadyToUse is true.
+	if err := wait.PollUntilContextTimeout(
+		context.Background(),
+		interval,
+		timeout,
+		true,
+		func(ctx context.Context) (bool, error) {
+			return checkVSCReadiness(ctx, &snapCont, p.crClient)
+		},
+	); err != nil {
+		// Clean up the VSC we created since it can't become ready
+		if deleteErr := p.crClient.Delete(context.TODO(), &snapCont); deleteErr != nil && !apierrors.IsNotFound(deleteErr) {
+			p.log.WithError(deleteErr).Errorf("Failed to clean up VolumeSnapshotContent %s", snapCont.Name)
+		}
+		return errors.Wrapf(err, "fail to wait VolumeSnapshotContent %s becomes ready.", snapCont.Name)
+	}
+
 	if err := p.crClient.Delete(
 		context.TODO(),
 		&snapCont,
 	); err != nil && !apierrors.IsNotFound(err) {
-		p.log.WithError(err).Errorf("Failed to delete temp VolumeSnapshotContent %s", snapCont.Name)
+		p.log.Infof("VolumeSnapshotContent %s not found", snapCont.Name)
 		return err
 	}

-	p.log.Infof("Successfully triggered deletion of VolumeSnapshotContent %s and its cloud snapshot", snapCont.Name)
 	return nil
 }

-// tryDeleteOriginalVSC attempts to find and delete the original VSC from
-// the cluster (legacy pre-1.15 backups). It patches the DeletionPolicy to
-// Delete so the CSI driver also removes the cloud snapshot, then deletes
-// the VSC object itself.
-// Returns true if the original VSC was found and deletion was initiated.
-func (p *volumeSnapshotContentDeleteItemAction) tryDeleteOriginalVSC(
+var checkVSCReadiness = func(
 	ctx context.Context,
-	vscName string,
-) bool {
-	existing := new(snapshotv1api.VolumeSnapshotContent)
-	if err := p.crClient.Get(ctx, crclient.ObjectKey{Name: vscName}, existing); err != nil {
-		if apierrors.IsNotFound(err) {
-			p.log.Debugf("Original VolumeSnapshotContent %s not found in cluster, will use temp VSC flow", vscName)
-		} else {
-			p.log.WithError(err).Warnf("Error looking up original VolumeSnapshotContent %s, will use temp VSC flow", vscName)
-		}
-		return false
+	vsc *snapshotv1api.VolumeSnapshotContent,
+	client crclient.Client,
+) (bool, error) {
+	tmpVSC := new(snapshotv1api.VolumeSnapshotContent)
+	if err := client.Get(ctx, crclient.ObjectKeyFromObject(vsc), tmpVSC); err != nil {
+		return false, errors.Wrapf(
+			err, "failed to get VolumeSnapshotContent %s", vsc.Name,
+		)
 	}

-	p.log.Debugf("Found original VolumeSnapshotContent %s in cluster (legacy backup), cleaning up directly", vscName)
-
-	// Patch DeletionPolicy to Delete so the CSI driver removes the cloud snapshot
-	if existing.Spec.DeletionPolicy != snapshotv1api.VolumeSnapshotContentDelete {
-		original := existing.DeepCopy()
-		existing.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentDelete
-		if err := p.crClient.Patch(ctx, existing, crclient.MergeFrom(original)); err != nil {
-			p.log.WithError(err).Warnf("Failed to patch DeletionPolicy on original VSC %s, will use temp VSC flow", vscName)
-			return false
-		}
-		p.log.Debugf("Patched DeletionPolicy to Delete on original VolumeSnapshotContent %s", vscName)
+	if tmpVSC.Status != nil && boolptr.IsSetToTrue(tmpVSC.Status.ReadyToUse) {
+		return true, nil
 	}

-	// Delete the original VSC — the CSI driver will clean up the cloud snapshot
-	if err := p.crClient.Delete(ctx, existing); err != nil && !apierrors.IsNotFound(err) {
-		p.log.WithError(err).Warnf("Failed to delete original VolumeSnapshotContent %s, will use temp VSC flow", vscName)
-		return false
+	// Fail fast on permanent CSI driver errors (e.g., InvalidSnapshot.NotFound)
+	if tmpVSC.Status != nil && tmpVSC.Status.Error != nil && tmpVSC.Status.Error.Message != nil {
+		return false, errors.Errorf(
+			"VolumeSnapshotContent %s has error: %s", vsc.Name, *tmpVSC.Status.Error.Message,
+		)
 	}

-	p.log.Infof("Deleted original VolumeSnapshotContent %s with DeletionPolicy=Delete, CSI driver will remove cloud snapshot", vscName)
-	return true
+	return false, nil
 }

 func NewVolumeSnapshotContentDeleteItemAction(
```
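The temp-VSC mechanism referenced by the comments in this hunk works by creating a VolumeSnapshotContent that is statically bound to the existing snapshot handle and carries DeletionPolicy=Delete, so deleting the object asks the CSI driver to remove the underlying cloud snapshot as well. Below is a minimal sketch of such an object; the driver, handle, and reference names are hypothetical placeholders, not values taken from this diff:

```go
package sketch

import (
	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// tempVSCForHandle builds a VolumeSnapshotContent statically bound to an
// existing cloud snapshot. Because DeletionPolicy is Delete, deleting this
// object afterwards makes the CSI driver delete the cloud snapshot too.
// All names here are illustrative placeholders.
func tempVSCForHandle(name, driver, handle string) *snapshotv1api.VolumeSnapshotContent {
	return &snapshotv1api.VolumeSnapshotContent{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: snapshotv1api.VolumeSnapshotContentSpec{
			DeletionPolicy: snapshotv1api.VolumeSnapshotContentDelete,
			Driver:         driver,
			Source: snapshotv1api.VolumeSnapshotContentSource{
				SnapshotHandle: &handle,
			},
			VolumeSnapshotRef: corev1api.ObjectReference{
				Name:      "velero-temp-vs", // placeholder reference
				Namespace: "velero",
			},
		},
	}
}
```

Creating and then deleting an object shaped like this is what the Execute flow above does with its randomly named temp VSC.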
```diff
@@ -22,10 +22,9 @@ import (
 	"testing"

 	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/require"
-	corev1api "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -38,44 +37,19 @@ import (
 	velerotest "github.com/vmware-tanzu/velero/pkg/test"
 )

-// fakeClientWithErrors wraps a real client and injects errors for specific operations.
-type fakeClientWithErrors struct {
-	crclient.Client
-	getError    error
-	patchError  error
-	deleteError error
-}
-
-func (c *fakeClientWithErrors) Get(ctx context.Context, key crclient.ObjectKey, obj crclient.Object, opts ...crclient.GetOption) error {
-	if c.getError != nil {
-		return c.getError
-	}
-	return c.Client.Get(ctx, key, obj, opts...)
-}
-
-func (c *fakeClientWithErrors) Patch(ctx context.Context, obj crclient.Object, patch crclient.Patch, opts ...crclient.PatchOption) error {
-	if c.patchError != nil {
-		return c.patchError
-	}
-	return c.Client.Patch(ctx, obj, patch, opts...)
-}
-
-func (c *fakeClientWithErrors) Delete(ctx context.Context, obj crclient.Object, opts ...crclient.DeleteOption) error {
-	if c.deleteError != nil {
-		return c.deleteError
-	}
-	return c.Client.Delete(ctx, obj, opts...)
-}
-
 func TestVSCExecute(t *testing.T) {
 	snapshotHandleStr := "test"
 	tests := []struct {
-		name           string
-		item           runtime.Unstructured
-		vsc            *snapshotv1api.VolumeSnapshotContent
-		backup         *velerov1api.Backup
-		preExistingVSC *snapshotv1api.VolumeSnapshotContent
-		expectErr      bool
+		name     string
+		item     runtime.Unstructured
+		vsc      *snapshotv1api.VolumeSnapshotContent
+		backup   *velerov1api.Backup
+		function func(
+			ctx context.Context,
+			vsc *snapshotv1api.VolumeSnapshotContent,
+			client crclient.Client,
+		) (bool, error)
+		expectErr bool
 	}{
 		{
 			name: "VolumeSnapshotContent doesn't have backup label",
@@ -97,22 +71,40 @@ func TestVSCExecute(t *testing.T) {
 		{
 			name: "Normal case, VolumeSnapshot should be deleted",
 			vsc:  builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).VolumeSnapshotClassName("volumesnapshotclass").Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
-			backup:    builder.ForBackup("velero", "backup").Result(),
+			backup:    builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
 			expectErr: false,
+			function: func(
+				ctx context.Context,
+				vsc *snapshotv1api.VolumeSnapshotContent,
+				client crclient.Client,
+			) (bool, error) {
+				return true, nil
+			},
 		},
 		{
-			name: "Original VSC exists in cluster, cleaned up directly",
+			name: "Error case, deletion fails",
 			vsc:  builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
-			backup:    builder.ForBackup("velero", "backup").Result(),
-			expectErr: false,
-			preExistingVSC: &snapshotv1api.VolumeSnapshotContent{
-				ObjectMeta: metav1.ObjectMeta{Name: "bar"},
-				Spec: snapshotv1api.VolumeSnapshotContentSpec{
-					DeletionPolicy:    snapshotv1api.VolumeSnapshotContentRetain,
-					Driver:            "disk.csi.azure.com",
-					Source:            snapshotv1api.VolumeSnapshotContentSource{SnapshotHandle: stringPtr("snap-123")},
-					VolumeSnapshotRef: corev1api.ObjectReference{Name: "vs-1", Namespace: "default"},
-				},
+			backup:    builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
+			expectErr: true,
+			function: func(
+				ctx context.Context,
+				vsc *snapshotv1api.VolumeSnapshotContent,
+				client crclient.Client,
+			) (bool, error) {
+				return false, errors.Errorf("test error case")
+			},
+		},
 		{
+			name: "Error case with CSI error, dangling VSC should be cleaned up",
+			vsc:  builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
+			backup:    builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
+			expectErr: true,
+			function: func(
+				ctx context.Context,
+				vsc *snapshotv1api.VolumeSnapshotContent,
+				client crclient.Client,
+			) (bool, error) {
+				return false, errors.Errorf("VolumeSnapshotContent %s has error: InvalidSnapshot.NotFound", vsc.Name)
+			},
+		},
 	}
@@ -121,10 +113,7 @@ func TestVSCExecute(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			crClient := velerotest.NewFakeControllerRuntimeClient(t)
 			logger := logrus.StandardLogger()

-			if test.preExistingVSC != nil {
-				require.NoError(t, crClient.Create(t.Context(), test.preExistingVSC))
-			}
+			checkVSCReadiness = test.function

 			p := volumeSnapshotContentDeleteItemAction{log: logger, crClient: crClient}

@@ -182,147 +171,72 @@ func TestNewVolumeSnapshotContentDeleteItemAction(t *testing.T) {
 	require.NoError(t, err1)
 }

-func TestTryDeleteOriginalVSC(t *testing.T) {
+func TestCheckVSCReadiness(t *testing.T) {
 	tests := []struct {
-		name      string
-		vscName   string
-		existing  *snapshotv1api.VolumeSnapshotContent
-		createIt  bool
-		expectRet bool
+		name      string
+		vsc       *snapshotv1api.VolumeSnapshotContent
+		createVSC bool
+		expectErr bool
+		ready     bool
 	}{
 		{
-			name:      "VSC not found in cluster, returns false",
-			vscName:   "not-found",
-			expectRet: false,
+			name: "VSC not exist",
+			vsc: &snapshotv1api.VolumeSnapshotContent{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "vsc-1",
+					Namespace: "velero",
+				},
+			},
+			createVSC: false,
+			expectErr: true,
+			ready:     false,
 		},
 		{
-			name:    "VSC found with Retain policy, patches and deletes",
-			vscName: "legacy-vsc",
-			existing: &snapshotv1api.VolumeSnapshotContent{
-				ObjectMeta: metav1.ObjectMeta{Name: "legacy-vsc"},
-				Spec: snapshotv1api.VolumeSnapshotContentSpec{
-					DeletionPolicy: snapshotv1api.VolumeSnapshotContentRetain,
-					Driver:         "disk.csi.azure.com",
-					Source: snapshotv1api.VolumeSnapshotContentSource{
-						SnapshotHandle: stringPtr("snap-123"),
-					},
-					VolumeSnapshotRef: corev1api.ObjectReference{
-						Name:      "vs-1",
-						Namespace: "default",
+			name: "VSC not ready",
+			vsc: &snapshotv1api.VolumeSnapshotContent{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "vsc-1",
+					Namespace: "velero",
 				},
 			},
+			createVSC: true,
+			expectErr: false,
+			ready:     false,
 		},
 		{
+			name: "VSC has error from CSI driver",
+			vsc: &snapshotv1api.VolumeSnapshotContent{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "vsc-1",
+					Namespace: "velero",
+				},
+				Status: &snapshotv1api.VolumeSnapshotContentStatus{
+					ReadyToUse: boolPtr(false),
+					Error: &snapshotv1api.VolumeSnapshotError{
+						Message: stringPtr("InvalidSnapshot.NotFound: The snapshot 'snap-0abc123' does not exist."),
+					},
+				},
+			},
-			createIt:  true,
-			expectRet: true,
-		},
-		{
-			name:    "VSC found with Delete policy already, just deletes",
-			vscName: "already-delete-vsc",
-			existing: &snapshotv1api.VolumeSnapshotContent{
-				ObjectMeta: metav1.ObjectMeta{Name: "already-delete-vsc"},
-				Spec: snapshotv1api.VolumeSnapshotContentSpec{
-					DeletionPolicy: snapshotv1api.VolumeSnapshotContentDelete,
-					Driver:         "disk.csi.azure.com",
-					Source: snapshotv1api.VolumeSnapshotContentSource{
-						SnapshotHandle: stringPtr("snap-456"),
-					},
-					VolumeSnapshotRef: corev1api.ObjectReference{
-						Name:      "vs-2",
-						Namespace: "default",
-					},
-				},
-			},
-			createIt:  true,
-			expectRet: true,
+			createVSC: true,
+			expectErr: true,
+			ready:     false,
 		},
 	}

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			crClient := velerotest.NewFakeControllerRuntimeClient(t)
-			logger := logrus.StandardLogger()
-			p := &volumeSnapshotContentDeleteItemAction{
-				log:      logger,
-				crClient: crClient,
+			if test.createVSC {
+				require.NoError(t, crClient.Create(t.Context(), test.vsc))
 			}

-			if test.createIt && test.existing != nil {
-				require.NoError(t, crClient.Create(t.Context(), test.existing))
-			}
-
-			result := p.tryDeleteOriginalVSC(t.Context(), test.vscName)
-			require.Equal(t, test.expectRet, result)
-
-			// If cleanup succeeded, verify the VSC is gone
-			if test.expectRet {
-				err := crClient.Get(t.Context(), crclient.ObjectKey{Name: test.vscName},
-					&snapshotv1api.VolumeSnapshotContent{})
-				require.True(t, apierrors.IsNotFound(err),
-					"VSC should have been deleted from cluster")
+			ready, err := checkVSCReadiness(t.Context(), test.vsc, crClient)
+			require.Equal(t, test.ready, ready)
+			if test.expectErr {
+				require.Error(t, err)
 			}
 		})
 	}
-
-	// Error injection tests for tryDeleteOriginalVSC
-	t.Run("Get returns non-NotFound error, returns false", func(t *testing.T) {
-		errClient := &fakeClientWithErrors{
-			Client:   velerotest.NewFakeControllerRuntimeClient(t),
-			getError: fmt.Errorf("connection refused"),
-		}
-		p := &volumeSnapshotContentDeleteItemAction{
-			log:      logrus.StandardLogger(),
-			crClient: errClient,
-		}
-		require.False(t, p.tryDeleteOriginalVSC(t.Context(), "some-vsc"))
-	})
-
-	t.Run("Patch fails, returns false", func(t *testing.T) {
-		realClient := velerotest.NewFakeControllerRuntimeClient(t)
-		vsc := &snapshotv1api.VolumeSnapshotContent{
-			ObjectMeta: metav1.ObjectMeta{Name: "patch-fail-vsc"},
-			Spec: snapshotv1api.VolumeSnapshotContentSpec{
-				DeletionPolicy:    snapshotv1api.VolumeSnapshotContentRetain,
-				Driver:            "disk.csi.azure.com",
-				Source:            snapshotv1api.VolumeSnapshotContentSource{SnapshotHandle: stringPtr("snap-789")},
-				VolumeSnapshotRef: corev1api.ObjectReference{Name: "vs-3", Namespace: "default"},
-			},
-		}
-		require.NoError(t, realClient.Create(t.Context(), vsc))
-
-		errClient := &fakeClientWithErrors{
-			Client:     realClient,
-			patchError: fmt.Errorf("patch forbidden"),
-		}
-		p := &volumeSnapshotContentDeleteItemAction{
-			log:      logrus.StandardLogger(),
-			crClient: errClient,
-		}
-		require.False(t, p.tryDeleteOriginalVSC(t.Context(), "patch-fail-vsc"))
-	})
-
-	t.Run("Delete fails, returns false", func(t *testing.T) {
-		realClient := velerotest.NewFakeControllerRuntimeClient(t)
-		vsc := &snapshotv1api.VolumeSnapshotContent{
-			ObjectMeta: metav1.ObjectMeta{Name: "delete-fail-vsc"},
-			Spec: snapshotv1api.VolumeSnapshotContentSpec{
-				DeletionPolicy:    snapshotv1api.VolumeSnapshotContentDelete,
-				Driver:            "disk.csi.azure.com",
-				Source:            snapshotv1api.VolumeSnapshotContentSource{SnapshotHandle: stringPtr("snap-999")},
-				VolumeSnapshotRef: corev1api.ObjectReference{Name: "vs-4", Namespace: "default"},
-			},
-		}
-		require.NoError(t, realClient.Create(t.Context(), vsc))
-
-		errClient := &fakeClientWithErrors{
-			Client:      realClient,
-			deleteError: fmt.Errorf("delete forbidden"),
-		}
-		p := &volumeSnapshotContentDeleteItemAction{
-			log:      logrus.StandardLogger(),
-			crClient: errClient,
-		}
-		require.False(t, p.tryDeleteOriginalVSC(t.Context(), "delete-fail-vsc"))
-	})
 }

 func boolPtr(b bool) *bool {
```
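The rewritten test injects behavior by reassigning the package-level variable (`checkVSCReadiness = test.function`) rather than stubbing a client. A generic sketch of that seam, with names invented for illustration rather than taken from Velero:

```go
package readiness

// checkReady is a package-level function variable: production code calls it
// through the variable, and a test can reassign it to a stub (as the test
// above does with checkVSCReadiness) instead of wiring up a fake client.
var checkReady = func(name string) (bool, error) {
	// The real implementation would query the cluster here.
	return false, nil
}

// WaitReady is the code under test; it only ever sees the variable.
func WaitReady(name string) (bool, error) {
	return checkReady(name)
}
```

A test would then do something like `checkReady = func(string) (bool, error) { return true, nil }` before calling WaitReady, restoring the original afterwards if other tests depend on it.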
```diff
@@ -81,6 +81,7 @@ type Options struct {
 	DefaultVolumesToFsBackup        bool
 	UploaderType                    string
 	DefaultSnapshotMoveData         bool
+	CSISnapshotEarlyFrequentPolling bool
 	DisableInformerCache            bool
 	ScheduleSkipImmediately         bool
 	PodResources                    kubeutil.PodResources
@@ -141,6 +142,7 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {
 	flags.BoolVar(&o.DefaultVolumesToFsBackup, "default-volumes-to-fs-backup", o.DefaultVolumesToFsBackup, "Bool flag to configure Velero server to use pod volume file system backup by default for all volumes on all backups. Optional.")
 	flags.StringVar(&o.UploaderType, "uploader-type", o.UploaderType, fmt.Sprintf("The type of uploader to transfer the data of pod volumes, supported value: '%s'", uploader.KopiaType))
 	flags.BoolVar(&o.DefaultSnapshotMoveData, "default-snapshot-move-data", o.DefaultSnapshotMoveData, "Bool flag to configure Velero server to move data by default for all snapshots supporting data movement. Optional.")
+	flags.BoolVar(&o.CSISnapshotEarlyFrequentPolling, "csi-snapshot-early-frequent-polling", o.CSISnapshotEarlyFrequentPolling, "Bool flag to configure Velero server to use early frequent polling by default for all CSI snapshots. Optional.")
 	flags.BoolVar(&o.DisableInformerCache, "disable-informer-cache", o.DisableInformerCache, "Disable informer cache for Get calls on restore. With this enabled, it will speed up restore in cases where there are backup resources which already exist in the cluster, but for very large clusters this will increase velero memory usage. Default is false (don't disable). Optional.")
 	flags.BoolVar(&o.ScheduleSkipImmediately, "schedule-skip-immediately", o.ScheduleSkipImmediately, "Skip the first scheduled backup immediately after creating a schedule. Default is false (don't skip).")
 	flags.BoolVar(&o.NodeAgentDisableHostPath, "node-agent-disable-host-path", o.NodeAgentDisableHostPath, "Don't mount the pod volume host path to node-agent. Optional. Pod volume host path mount is required by fs-backup but could be disabled for other backup methods.")
@@ -238,16 +240,17 @@ func NewInstallOptions() *Options {
 		NodeAgentPodCPULimit: install.DefaultNodeAgentPodCPULimit,
 		NodeAgentPodMemLimit: install.DefaultNodeAgentPodMemLimit,
 		// Default to creating a VSL unless we're told otherwise
-		UseVolumeSnapshots:       true,
-		NoDefaultBackupLocation:  false,
-		CRDsOnly:                 false,
-		DefaultVolumesToFsBackup: false,
-		UploaderType:             uploader.KopiaType,
-		DefaultSnapshotMoveData:  false,
-		DisableInformerCache:     false,
-		ScheduleSkipImmediately:  false,
-		kubeletRootDir:           install.DefaultKubeletRootDir,
-		NodeAgentDisableHostPath: false,
+		UseVolumeSnapshots:              true,
+		NoDefaultBackupLocation:         false,
+		CRDsOnly:                        false,
+		DefaultVolumesToFsBackup:        false,
+		UploaderType:                    uploader.KopiaType,
+		DefaultSnapshotMoveData:         false,
+		CSISnapshotEarlyFrequentPolling: false,
+		DisableInformerCache:            false,
+		ScheduleSkipImmediately:         false,
+		kubeletRootDir:                  install.DefaultKubeletRootDir,
+		NodeAgentDisableHostPath:        false,
 	}
 }

@@ -324,6 +327,7 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) {
 		DefaultVolumesToFsBackup:        o.DefaultVolumesToFsBackup,
 		UploaderType:                    o.UploaderType,
 		DefaultSnapshotMoveData:         o.DefaultSnapshotMoveData,
+		CSISnapshotEarlyFrequentPolling: o.CSISnapshotEarlyFrequentPolling,
 		DisableInformerCache:            o.DisableInformerCache,
 		ScheduleSkipImmediately:         o.ScheduleSkipImmediately,
 		PodResources:                    o.PodResources,
@@ -381,8 +385,8 @@ This is useful as a starting point for more customized installations.

 # velero install --provider azure --plugins velero/velero-plugin-for-microsoft-azure:v1.0.0 --bucket $BLOB_CONTAINER --secret-file ./credentials-velero --backup-location-config resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID[,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] --snapshot-location-config apiTimeout=<YOUR_TIMEOUT>[,resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID]`,
 		Run: func(c *cobra.Command, args []string) {
-			cmd.CheckError(o.Complete(args, f))
 			cmd.CheckError(o.Validate(c, args, f))
+			cmd.CheckError(o.Complete(args, f))
 			cmd.CheckError(o.Run(c, f))
 		},
 	}
```
```diff
@@ -17,18 +17,11 @@ limitations under the License.
 package install

 import (
-	"context"
 	"testing"

-	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	corev1api "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	factorymocks "github.com/vmware-tanzu/velero/pkg/client/mocks"
 	velerotest "github.com/vmware-tanzu/velero/pkg/test"
 )

 func TestPriorityClassNameFlag(t *testing.T) {
@@ -98,168 +91,3 @@ func TestPriorityClassNameFlag(t *testing.T) {
 		})
 	}
 }
-
-// makeValidateCmd returns a minimal *cobra.Command that satisfies output.ValidateFlags.
-func makeValidateCmd() *cobra.Command {
-	c := &cobra.Command{}
-	// output.ValidateFlags only inspects the "output" flag; add it so validation passes.
-	c.Flags().StringP("output", "o", "", "output format")
-	return c
-}
-
-// configMapInNamespace builds a ConfigMap with a single JSON data entry in the given namespace.
-func configMapInNamespace(namespace, name, jsonValue string) *corev1api.ConfigMap {
-	return &corev1api.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: namespace,
-			Name:      name,
-		},
-		Data: map[string]string{
-			"config": jsonValue,
-		},
-	}
-}
-
-// TestValidateConfigMapsUseFactoryNamespace verifies that Validate resolves the target
-// namespace correctly for all three ConfigMap flags.
-//
-// The fix (Option B) calls Complete before Validate in NewCommand so that o.Namespace is
-// populated from f.Namespace() before VerifyJSONConfigs runs. Tests mirror that order by
-// calling Complete before Validate.
-func TestValidateConfigMapsUseFactoryNamespace(t *testing.T) {
-	const targetNS = "tenant-b"
-	const defaultNS = "default"
-
-	// Shared options that satisfy every other validation gate:
-	//   - NoDefaultBackupLocation=true + UseVolumeSnapshots=false skips provider/bucket/plugins checks
-	//   - NoSecret=true satisfies the secret-file check
-	baseOptions := func() *Options {
-		o := NewInstallOptions()
-		o.NoDefaultBackupLocation = true
-		o.UseVolumeSnapshots = false
-		o.NoSecret = true
-		return o
-	}
-
-	tests := []struct {
-		name       string
-		setupOpts  func(o *Options, cmName string)
-		cmJSON     string
-		wantErrMsg string // substring expected in error; empty means success
-	}{
-		{
-			name: "NodeAgentConfigMap found in factory namespace",
-			setupOpts: func(o *Options, cmName string) {
-				o.NodeAgentConfigMap = cmName
-			},
-			cmJSON: `{}`,
-		},
-		{
-			name: "NodeAgentConfigMap not found when only in default namespace",
-			setupOpts: func(o *Options, cmName string) {
-				o.NodeAgentConfigMap = cmName
-			},
-			cmJSON:     `{}`,
-			wantErrMsg: "--node-agent-configmap specified ConfigMap",
-		},
-		{
-			name: "RepoMaintenanceJobConfigMap found in factory namespace",
-			setupOpts: func(o *Options, cmName string) {
-				o.RepoMaintenanceJobConfigMap = cmName
-			},
-			cmJSON: `{}`,
-		},
-		{
-			name: "RepoMaintenanceJobConfigMap not found when only in default namespace",
-			setupOpts: func(o *Options, cmName string) {
-				o.RepoMaintenanceJobConfigMap = cmName
-			},
-			cmJSON:     `{}`,
-			wantErrMsg: "--repo-maintenance-job-configmap specified ConfigMap",
-		},
-		{
-			name: "BackupRepoConfigMap found in factory namespace",
-			setupOpts: func(o *Options, cmName string) {
-				o.BackupRepoConfigMap = cmName
-			},
-			cmJSON: `{}`,
-		},
-		{
-			name: "BackupRepoConfigMap not found when only in default namespace",
-			setupOpts: func(o *Options, cmName string) {
-				o.BackupRepoConfigMap = cmName
-			},
-			cmJSON:     `{}`,
-			wantErrMsg: "--backup-repository-configmap specified ConfigMap",
-		},
-	}
-
-	for _, tc := range tests {
-		t.Run(tc.name, func(t *testing.T) {
-			const cmName = "my-config"
-
-			// Decide where to place the ConfigMap:
-			// "not found" cases put it in "default", so the factory namespace lookup misses it.
-			cmNamespace := targetNS
-			if tc.wantErrMsg != "" {
-				cmNamespace = defaultNS
-			}
-
-			cm := configMapInNamespace(cmNamespace, cmName, tc.cmJSON)
-			kbClient := velerotest.NewFakeControllerRuntimeClient(t, cm)
-
-			f := &factorymocks.Factory{}
-			f.On("Namespace").Return(targetNS)
-			f.On("KubebuilderClient").Return(kbClient, nil)
-
-			o := baseOptions()
-			tc.setupOpts(o, cmName)
-
-			// Mirror the NewCommand call order: Complete populates o.Namespace before Validate runs.
-			require.NoError(t, o.Complete([]string{}, f))
-
-			c := makeValidateCmd()
-			c.SetContext(context.Background())
-
-			err := o.Validate(c, []string{}, f)
-
-			if tc.wantErrMsg == "" {
-				require.NoError(t, err)
-			} else {
-				require.Error(t, err)
-				assert.Contains(t, err.Error(), tc.wantErrMsg)
-			}
-		})
-	}
-}
-
-// TestNewCommandRunClosureOrder covers the Run closure in NewCommand (the lines that were
-// reordered by the fix: Complete → Validate → Run).
-//
-// The closure uses CheckError which calls os.Exit on any error, so the only safe path is one
-// where all three steps return nil. DryRun=true causes o.Run to return after PrintWithFormat
-// (which is a no-op when no --output flag is set) without touching any cluster clients.
-func TestNewCommandRunClosureOrder(t *testing.T) {
-	const targetNS = "tenant-b"
-	const cmName = "my-config"
-
-	cm := configMapInNamespace(targetNS, cmName, `{}`)
-	kbClient := velerotest.NewFakeControllerRuntimeClient(t, cm)
-
-	f := &factorymocks.Factory{}
-	f.On("Namespace").Return(targetNS)
-	f.On("KubebuilderClient").Return(kbClient, nil)
-
-	c := NewCommand(f)
-	c.SetArgs([]string{
-		"--no-default-backup-location",
-		"--use-volume-snapshots=false",
-		"--no-secret",
-		"--dry-run",
-		"--node-agent-configmap", cmName,
-	})
-
-	// Execute drives the full Run closure: Complete populates o.Namespace, Validate
-	// looks up the ConfigMap in targetNS (succeeds), Run returns early via DryRun.
-	require.NoError(t, c.Execute())
-}
```
```diff
@@ -129,13 +129,6 @@ func (c *scheduleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 	} else {
 		schedule.Status.Phase = velerov1.SchedulePhaseEnabled
 		schedule.Status.ValidationErrors = nil
-
-		// Compute expected interval between consecutive scheduled backup runs.
-		// Only meaningful when the cron expression is valid.
-		now := c.clock.Now()
-		nextRun := cronSchedule.Next(now)
-		nextNextRun := cronSchedule.Next(nextRun)
-		c.metrics.SetScheduleExpectedIntervalSeconds(schedule.Name, nextNextRun.Sub(nextRun).Seconds())
 	}

 	scheduleNeedsPatch := false
```
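The removed lines compute the gap between the next two runs of the schedule's cron expression. For reference, a standalone sketch of the same computation using the robfig/cron parser that Velero's schedule handling is based on; the parser function and example expression here are assumptions, not taken from this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

// expectedIntervalSeconds returns the gap, in seconds, between the next two
// runs of a cron expression relative to now. This mirrors the removed
// reconciler logic: Next(now) gives the first upcoming run, Next(firstRun)
// gives the one after it.
func expectedIntervalSeconds(expr string, now time.Time) (float64, error) {
	sched, err := cron.ParseStandard(expr)
	if err != nil {
		return 0, err
	}
	nextRun := sched.Next(now)
	nextNextRun := sched.Next(nextRun)
	return nextNextRun.Sub(nextRun).Seconds(), nil
}

func main() {
	secs, err := expectedIntervalSeconds("*/5 * * * *", time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Printf("expected interval: %.0fs\n", secs) // 300 for an every-5-minutes schedule
}
```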
```diff
@@ -50,6 +50,7 @@ type podTemplateConfig struct {
 	serviceAccountName              string
 	uploaderType                    string
 	defaultSnapshotMoveData         bool
+	csiSnapshotEarlyFrequentPolling bool
 	privilegedNodeAgent             bool
 	disableInformerCache            bool
 	scheduleSkipImmediately         bool
@@ -166,6 +167,12 @@ func WithDefaultSnapshotMoveData(b bool) podTemplateOption {
 	}
 }

+func WithCSISnapshotEarlyFrequentPolling(b bool) podTemplateOption {
+	return func(c *podTemplateConfig) {
+		c.csiSnapshotEarlyFrequentPolling = b
+	}
+}
+
 func WithDisableInformerCache(b bool) podTemplateOption {
 	return func(c *podTemplateConfig) {
 		c.disableInformerCache = b
@@ -488,6 +495,15 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1api.Deployme
 		}...)
 	}

+	if c.csiSnapshotEarlyFrequentPolling {
+		deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, []corev1api.EnvVar{
+			{
+				Name:  "CSI_SNAPSHOT_EARLY_FREQUENT_POLLING",
+				Value: "true",
+			},
+		}...)
+	}
+
 	deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, c.envVars...)

 	if len(c.plugins) > 0 {
```
```diff
@@ -263,6 +263,7 @@ type VeleroOptions struct {
 	DefaultVolumesToFsBackup        bool
 	UploaderType                    string
 	DefaultSnapshotMoveData         bool
+	CSISnapshotEarlyFrequentPolling bool
 	DisableInformerCache            bool
 	ScheduleSkipImmediately         bool
 	PodResources                    kube.PodResources
@@ -390,6 +391,10 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
 		deployOpts = append(deployOpts, WithDefaultSnapshotMoveData(true))
 	}

+	if o.CSISnapshotEarlyFrequentPolling {
+		deployOpts = append(deployOpts, WithCSISnapshotEarlyFrequentPolling(true))
+	}
+
 	if o.DisableInformerCache {
 		deployOpts = append(deployOpts, WithDisableInformerCache(true))
 	}
```
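Taken together, the install-side changes above surface the new option as a `CSI_SNAPSHOT_EARLY_FREQUENT_POLLING` environment variable on the Velero deployment; the server later reads it with `strconv.ParseBool`, as the util/csi hunk further down shows. A small sketch of that read, assuming only the standard library:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// earlyFrequentPollingEnabled mirrors how the server-side code later in this
// comparison reads the env var injected by the Deployment change above: any
// value strconv.ParseBool accepts as true ("1", "t", "true", ...) turns the
// behavior on; an unset or unparsable value leaves it off.
func earlyFrequentPollingEnabled() bool {
	enabled, err := strconv.ParseBool(os.Getenv("CSI_SNAPSHOT_EARLY_FREQUENT_POLLING"))
	return err == nil && enabled
}

func main() {
	fmt.Println("early frequent polling:", earlyFrequentPollingEnabled())
}
```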
```diff
@@ -80,9 +80,6 @@ const (
 	DataDownloadFailureTotal = "data_download_failure_total"
 	DataDownloadCancelTotal  = "data_download_cancel_total"

-	// schedule metrics
-	scheduleExpectedIntervalSeconds = "schedule_expected_interval_seconds"
-
 	// repo maintenance metrics
 	repoMaintenanceSuccessTotal = "repo_maintenance_success_total"
 	repoMaintenanceFailureTotal = "repo_maintenance_failure_total"
@@ -350,14 +347,6 @@ func NewServerMetrics() *ServerMetrics {
 			},
 			[]string{scheduleLabel, backupNameLabel},
 		),
-		scheduleExpectedIntervalSeconds: prometheus.NewGaugeVec(
-			prometheus.GaugeOpts{
-				Namespace: metricNamespace,
-				Name:      scheduleExpectedIntervalSeconds,
-				Help:      "Expected interval between consecutive scheduled backups, in seconds",
-			},
-			[]string{scheduleLabel},
-		),
 		repoMaintenanceSuccessTotal: prometheus.NewCounterVec(
 			prometheus.CounterOpts{
 				Namespace: metricNamespace,
@@ -655,9 +644,6 @@ func (m *ServerMetrics) RemoveSchedule(scheduleName string) {
 	if c, ok := m.metrics[csiSnapshotFailureTotal].(*prometheus.CounterVec); ok {
 		c.DeleteLabelValues(scheduleName, "")
 	}
-	if g, ok := m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec); ok {
-		g.DeleteLabelValues(scheduleName)
-	}
 }

 // InitMetricsForNode initializes counter metrics for a node.
@@ -772,14 +758,6 @@ func (m *ServerMetrics) SetBackupLastSuccessfulTimestamp(backupSchedule string,
 	}
 }

-// SetScheduleExpectedIntervalSeconds records the expected interval in seconds,
-// between consecutive backups for a schedule.
-func (m *ServerMetrics) SetScheduleExpectedIntervalSeconds(scheduleName string, seconds float64) {
-	if g, ok := m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec); ok {
-		g.WithLabelValues(scheduleName).Set(seconds)
-	}
-}
-
 // SetBackupTotal records the current number of existent backups.
 func (m *ServerMetrics) SetBackupTotal(numberOfBackups int64) {
 	if g, ok := m.metrics[backupTotal].(prometheus.Gauge); ok {
```
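For context on the metric being removed here: it is a per-schedule Prometheus gauge that is set during reconcile and must be deleted again when the schedule goes away, otherwise stale series linger on the /metrics endpoint. A self-contained sketch of that lifecycle against a local registry; the metric and label names follow the diff, everything else is illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	g := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: "velero",
			Name:      "schedule_expected_interval_seconds",
			Help:      "Expected interval between consecutive scheduled backups, in seconds",
		},
		[]string{"schedule"},
	)
	reg.MustRegister(g)

	g.WithLabelValues("daily-backup").Set(86400) // set when the schedule is reconciled
	g.DeleteLabelValues("daily-backup")          // clean up when the schedule is removed

	fmt.Println("gauge registered, set, and cleaned up")
}
```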
```diff
@@ -259,90 +259,6 @@ func TestMultipleAdhocBackupsShareMetrics(t *testing.T) {
 	assert.Equal(t, float64(1), validationFailureMetric, "All adhoc validation failures should be counted together")
 }

-// TestSetScheduleExpectedIntervalSeconds verifies that the expected interval metric
-// is properly recorded for schedules.
-func TestSetScheduleExpectedIntervalSeconds(t *testing.T) {
-	tests := []struct {
-		name            string
-		scheduleName    string
-		intervalSeconds float64
-		description     string
-	}{
-		{
-			name:            "every 5 minutes schedule",
-			scheduleName:    "frequent-backup",
-			intervalSeconds: 300,
-			description:     "Expected interval should be 5m in seconds",
-		},
-		{
-			name:            "daily schedule",
-			scheduleName:    "daily-backup",
-			intervalSeconds: 86400,
-			description:     "Expected interval should be 24h in seconds",
-		},
-		{
-			name:            "monthly schedule",
-			scheduleName:    "monthly-backup",
-			intervalSeconds: 2678400, // 31 days in seconds
-			description:     "Expected interval should be 31 days in seconds",
-		},
-	}
-
-	for _, tc := range tests {
-		t.Run(tc.name, func(t *testing.T) {
-			m := NewServerMetrics()
-			m.SetScheduleExpectedIntervalSeconds(tc.scheduleName, tc.intervalSeconds)
-
-			metric := getMetricValue(t, m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec), tc.scheduleName)
-			assert.Equal(t, tc.intervalSeconds, metric, tc.description)
-		})
-	}
-}
-
-// TestScheduleExpectedIntervalNotInitializedByDefault verifies that the expected
-// interval metric is not initialized by InitSchedule, so it only appears for
-// schedules with a valid cron expression.
-func TestScheduleExpectedIntervalNotInitializedByDefault(t *testing.T) {
-	m := NewServerMetrics()
-	m.InitSchedule("test-schedule")
-
-	// The metric should not have any values after InitSchedule
-	ch := make(chan prometheus.Metric, 1)
-	m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec).Collect(ch)
-	close(ch)
-
-	count := 0
-	for range ch {
-		count++
-	}
-	assert.Equal(t, 0, count, "scheduleExpectedIntervalSeconds should not be initialized by InitSchedule")
-}
-
-// TestRemoveScheduleCleansUpExpectedInterval verifies that RemoveSchedule
-// cleans up the expected interval metric.
-func TestRemoveScheduleCleansUpExpectedInterval(t *testing.T) {
-	m := NewServerMetrics()
-	m.InitSchedule("test-schedule")
-	m.SetScheduleExpectedIntervalSeconds("test-schedule", 3600)
-
-	// Verify metric exists
-	metric := getMetricValue(t, m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec), "test-schedule")
-	assert.Equal(t, float64(3600), metric)
-
-	// Remove schedule and verify metric is cleaned up
-	m.RemoveSchedule("test-schedule")
-
-	ch := make(chan prometheus.Metric, 1)
-	m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec).Collect(ch)
-	close(ch)
-
-	count := 0
-	for range ch {
-		count++
-	}
-	assert.Equal(t, 0, count, "scheduleExpectedIntervalSeconds should be removed after RemoveSchedule")
-}
-
 // TestInitScheduleWithEmptyName verifies that InitSchedule works correctly
 // with an empty schedule name (for adhoc backups).
 func TestInitScheduleWithEmptyName(t *testing.T) {
```
```diff
@@ -149,8 +149,7 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio
 	// if there are any slashes in the middle of 'bucket', the user
 	// probably put <bucket>/<prefix> in the bucket field, which we
 	// don't support.
-	// Exception: MRAP ARNs (arn:aws:s3::...) legitimately contain slashes.
-	if strings.Contains(bucket, "/") && !strings.HasPrefix(bucket, "arn:aws:s3:") {
+	if strings.Contains(bucket, "/") {
 		return nil, errors.Errorf("backup storage location's bucket name %q must not contain a '/' (if using a prefix, put it in the 'Prefix' field instead)", location.Spec.ObjectStorage.Bucket)
 	}

@@ -943,24 +943,6 @@ func TestNewObjectBackupStoreGetter(t *testing.T) {
 			wantBucket: "bucket",
 			wantPrefix: "prefix/",
 		},
-		{
-			name:     "when the Bucket field is an MRAP ARN, it should be valid",
-			location: builder.ForBackupStorageLocation("", "").Provider("provider-1").Bucket("arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap").Result(),
-			objectStoreGetter: objectStoreGetter{
-				"provider-1": newInMemoryObjectStore("arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap"),
-			},
-			credFileStore: velerotest.NewFakeCredentialsFileStore("", nil),
-			wantBucket:    "arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap",
-		},
-		{
-			name:     "when the Bucket field is an MRAP ARN with trailing slash, it should be valid and trimmed",
-			location: builder.ForBackupStorageLocation("", "").Provider("provider-1").Bucket("arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap/").Result(),
-			objectStoreGetter: objectStoreGetter{
-				"provider-1": newInMemoryObjectStore("arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap"),
-			},
-			credFileStore: velerotest.NewFakeCredentialsFileStore("", nil),
-			wantBucket:    "arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap",
-		},
 	}

 	for _, tc := range tests {
```
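The main-branch side of the first hunk lets S3 ARN-style bucket values, such as Multi-Region Access Point (MRAP) ARNs, contain slashes while still rejecting accidental `<bucket>/<prefix>` values. A small sketch of that check in isolation with example inputs; only the condition itself is taken from the diff:

```go
package main

import (
	"fmt"
	"strings"
)

// validBucketName reproduces the check from the main-branch side above:
// slashes are rejected (they usually mean the prefix was put in the bucket
// field) unless the value is an S3 ARN, which legitimately contains "/".
func validBucketName(bucket string) bool {
	if strings.Contains(bucket, "/") && !strings.HasPrefix(bucket, "arn:aws:s3:") {
		return false
	}
	return true
}

func main() {
	fmt.Println(validBucketName("my-bucket"))                                               // true
	fmt.Println(validBucketName("my-bucket/prefix"))                                        // false
	fmt.Println(validBucketName("arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap")) // true
}
```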
```diff
@@ -20,6 +20,8 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"os"
+	"strconv"
 	"strings"
 	"time"

@@ -598,72 +600,102 @@ func WaitUntilVSCHandleIsReady(
 	log logrus.FieldLogger,
 	csiSnapshotTimeout time.Duration,
 ) (*snapshotv1api.VolumeSnapshotContent, error) {
-	// We'll wait 10m for the VSC to be reconciled polling
-	// every 5s unless backup's csiSnapshotTimeout is set
-	interval := 5 * time.Second
+	// We'll wait for the VSC to be reconciled, trying a fast poll interval first
+	// before falling back to a slower poll interval for the full csiSnapshotTimeout.
 	vsc := new(snapshotv1api.VolumeSnapshotContent)
+	var interval time.Duration

-	err := wait.PollUntilContextTimeout(
+	pollFunc := func(ctx context.Context) (bool, error) {
+		vs := new(snapshotv1api.VolumeSnapshot)
+		if err := crClient.Get(
+			ctx,
+			crclient.ObjectKeyFromObject(volSnap),
+			vs,
+		); err != nil {
+			return false,
+				errors.Wrapf(
+					err,
+					"failed to get volumesnapshot %s/%s",
+					volSnap.Namespace, volSnap.Name,
+				)
+		}
+
+		if vs.Status == nil || vs.Status.BoundVolumeSnapshotContentName == nil {
+			log.Infof("Waiting for CSI driver to reconcile volumesnapshot %s/%s. Retrying in %ds",
+				volSnap.Namespace, volSnap.Name, interval/time.Second)
+			return false, nil
+		}
+
+		if err := crClient.Get(
+			ctx,
+			crclient.ObjectKey{
+				Name: *vs.Status.BoundVolumeSnapshotContentName,
+			},
+			vsc,
+		); err != nil {
+			return false,
+				errors.Wrapf(
+					err,
+					"failed to get VolumeSnapshotContent %s for VolumeSnapshot %s/%s",
+					*vs.Status.BoundVolumeSnapshotContentName, vs.Namespace, vs.Name,
+				)
+		}
+
+		// we need to wait for the VolumeSnapshotContent
+		// to have a snapshot handle because during restore,
+		// we'll use that snapshot handle as the source for
+		// the VolumeSnapshotContent so it's statically
+		// bound to the existing snapshot.
+		if vsc.Status == nil ||
+			vsc.Status.SnapshotHandle == nil {
+			log.Infof(
+				"Waiting for VolumeSnapshotContents %s to have snapshot handle. Retrying in %ds",
+				vsc.Name, interval/time.Second)
+			if vsc.Status != nil &&
+				vsc.Status.Error != nil {
+				log.Warnf("VolumeSnapshotContent %s has error: %v",
+					vsc.Name, *vsc.Status.Error.Message)
+			}
+			return false, nil
+		}
+
+		return true, nil
+	}
+
+	var err error
+	frequentPolling, err := strconv.ParseBool(os.Getenv("CSI_SNAPSHOT_EARLY_FREQUENT_POLLING"))
+
+	if err == nil && frequentPolling {
+		// The short interval for the first ten seconds is due to the fact that
+		// Microsoft VSS backups have a hard-coded unfreeze call after 10 seconds,
+		// so we need to minimize waiting time during the first 10 seconds.
+		// First poll with a short interval and timeout.
+		interval = 1 * time.Second
+		timeout := 10 * time.Second
+		err = wait.PollUntilContextTimeout(
+			context.Background(),
+			interval,
+			timeout,
+			true,
+			pollFunc,
+		)
+
+		if err == nil {
+			return vsc, nil
+		}
+		if !wait.Interrupted(err) {
+			return nil, err
+		}
+	}
+
+	// If the first poll timed out, poll with a longer interval and the full timeout.
+	interval = 5 * time.Second
+	err = wait.PollUntilContextTimeout(
 		context.Background(),
 		interval,
 		csiSnapshotTimeout,
 		true,
-		func(ctx context.Context) (bool, error) {
-			vs := new(snapshotv1api.VolumeSnapshot)
-			if err := crClient.Get(
-				ctx,
-				crclient.ObjectKeyFromObject(volSnap),
-				vs,
-			); err != nil {
-				return false,
-					errors.Wrapf(
-						err,
-						"failed to get volumesnapshot %s/%s",
-						volSnap.Namespace, volSnap.Name,
-					)
-			}
-
-			if vs.Status == nil || vs.Status.BoundVolumeSnapshotContentName == nil {
-				log.Infof("Waiting for CSI driver to reconcile volumesnapshot %s/%s. Retrying in %ds",
-					volSnap.Namespace, volSnap.Name, interval/time.Second)
-				return false, nil
-			}
-
-			if err := crClient.Get(
-				ctx,
-				crclient.ObjectKey{
-					Name: *vs.Status.BoundVolumeSnapshotContentName,
-				},
-				vsc,
-			); err != nil {
-				return false,
-					errors.Wrapf(
-						err,
-						"failed to get VolumeSnapshotContent %s for VolumeSnapshot %s/%s",
-						*vs.Status.BoundVolumeSnapshotContentName, vs.Namespace, vs.Name,
-					)
-			}
-
-			// we need to wait for the VolumeSnapshotContent
-			// to have a snapshot handle because during restore,
-			// we'll use that snapshot handle as the source for
-			// the VolumeSnapshotContent so it's statically
-			// bound to the existing snapshot.
-			if vsc.Status == nil ||
-				vsc.Status.SnapshotHandle == nil {
-				log.Infof(
-					"Waiting for VolumeSnapshotContents %s to have snapshot handle. Retrying in %ds",
-					vsc.Name, interval/time.Second)
-				if vsc.Status != nil &&
-					vsc.Status.Error != nil {
-					log.Warnf("VolumeSnapshotContent %s has error: %v",
-						vsc.Name, *vsc.Status.Error.Message)
-				}
-				return false, nil
-			}
-
-			return true, nil
-		},
+		pollFunc,
 	)

 	if err != nil {
```
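The rewritten WaitUntilVSCHandleIsReady polls in two phases: a 1-second interval for at most 10 seconds (to fit inside the hard-coded 10-second unfreeze window of Microsoft VSS backups mentioned in the comment), then a 5-second interval for the full csiSnapshotTimeout. A stripped-down sketch of that pattern with a dummy condition; the intervals mirror the diff, while the condition and the 30-second timeout are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// pollTwoPhase tries a tight 1s/10s poll first, then falls back to a 5s
// interval for the full timeout. A real error from the condition aborts
// immediately; only a timeout of the first phase triggers the fallback.
func pollTwoPhase(ctx context.Context, fullTimeout time.Duration, cond wait.ConditionWithContextFunc) error {
	err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, cond)
	if err == nil {
		return nil
	}
	if !wait.Interrupted(err) {
		// The condition itself failed: fail fast.
		return err
	}
	// First phase timed out: retry with a slower interval and the full timeout.
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, fullTimeout, true, cond)
}

func main() {
	start := time.Now()
	err := pollTwoPhase(context.Background(), 30*time.Second, func(ctx context.Context) (bool, error) {
		// Dummy condition: becomes true after 12 seconds, i.e. during the second phase.
		return time.Since(start) > 12*time.Second, nil
	})
	fmt.Println("done:", err)
}
```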
site/algolia-crawler.json (new file, 90 lines):

```js
new Crawler({
  rateLimit: 8,
  maxDepth: 10,
  startUrls: ["https://velero.io/docs", "https://velero.io/"],
  renderJavaScript: false,
  sitemaps: ["https://velero.io/sitemap.xml"],
  ignoreCanonicalTo: false,
  discoveryPatterns: ["https://velero.io/**"],
  schedule: "at 6:39 PM on Friday",
  actions: [
    {
      indexName: "velero_new",
      pathsToMatch: ["https://velero.io/docs**/**"],
      recordExtractor: ({ helpers }) => {
        return helpers.docsearch({
          recordProps: {
            lvl1: ["header h1", "article h1", "main h1", "h1", "head > title"],
            content: ["article p, article li", "main p, main li", "p, li"],
            lvl0: {
              defaultValue: "Documentation",
            },
            lvl2: ["article h2", "main h2", "h2"],
            lvl3: ["article h3", "main h3", "h3"],
            lvl4: ["article h4", "main h4", "h4"],
            lvl5: ["article h5", "main h5", "h5"],
            lvl6: ["article h6", "main h6", "h6"],
            version: "#dropdownMenuButton",
          },
          aggregateContent: true,
          recordVersion: "v3",
        });
      },
    },
  ],
  initialIndexSettings: {
    velero_new: {
      attributesForFaceting: ["type", "lang", "version"],
      attributesToRetrieve: [
        "hierarchy",
        "content",
        "anchor",
        "url",
        "url_without_anchor",
        "type",
        "version",
      ],
      attributesToHighlight: ["hierarchy", "content"],
      attributesToSnippet: ["content:10"],
      camelCaseAttributes: ["hierarchy", "content"],
      searchableAttributes: [
        "unordered(hierarchy.lvl0)",
        "unordered(hierarchy.lvl1)",
        "unordered(hierarchy.lvl2)",
        "unordered(hierarchy.lvl3)",
        "unordered(hierarchy.lvl4)",
        "unordered(hierarchy.lvl5)",
        "unordered(hierarchy.lvl6)",
        "content",
      ],
      distinct: true,
      attributeForDistinct: "url",
      customRanking: [
        "desc(weight.pageRank)",
        "desc(weight.level)",
        "asc(weight.position)",
      ],
      ranking: [
        "words",
        "filters",
        "typo",
        "attribute",
        "proximity",
        "exact",
        "custom",
      ],
      highlightPreTag: '<span class="algolia-docsearch-suggestion--highlight">',
      highlightPostTag: "</span>",
      minWordSizefor1Typo: 3,
      minWordSizefor2Typos: 7,
      allowTyposOnNumericTokens: false,
      minProximity: 1,
      ignorePlurals: true,
      advancedSyntax: true,
      attributeCriteriaComputedByMinProximity: true,
      removeWordsIfNoResults: "allOptional",
    },
  },
  appId: "9ASKQJ1HR3",
  apiKey: "6392a5916af73b73df2406d3aef5ca45",
});
```
```diff
@@ -12,7 +12,7 @@ params:
   hero:
     backgroundColor: med-blue
   versioning: true
-  latest: v1.18
+  latest: v1.17
   versions:
     - main
     - v1.18
```
```diff
@@ -63,10 +63,6 @@ spec:
   # CSI VolumeSnapshot status turns to ReadyToUse during creation, before
   # returning error as timeout. The default value is 10 minute.
   csiSnapshotTimeout: 10m
-  # ItemOperationTimeout specifies the time used to wait for
-  # asynchronous BackupItemAction operations
-  # The default value is 4 hour.
-  itemOperationTimeout: 4h
   # resourcePolicy specifies the referenced resource policies that backup should follow
   # optional
   resourcePolicy:
```
@@ -1,13 +1,13 @@
---
title: "Upgrading to Velero 1.18"
title: "Upgrading to Velero 1.17"
layout: docs
---

## Prerequisites

- Velero [v1.17.x][9] installed.
- Velero [v1.16.x][9] installed.

If you're not yet running at least Velero v1.17, see the following:
If you're not yet running at least Velero v1.16, see the following:

- [Upgrading to v1.8][1]
- [Upgrading to v1.9][2]
@@ -18,14 +18,13 @@ If you're not yet running at least Velero v1.17, see the following:
- [Upgrading to v1.14][7]
- [Upgrading to v1.15][8]
- [Upgrading to v1.16][9]
- [Upgrading to v1.17][10]

Before upgrading, check the [Velero compatibility matrix](https://github.com/vmware-tanzu/velero#velero-compatibility-matrix) to make sure your version of Kubernetes is supported by the new version of Velero.

## Instructions

### Upgrade from v1.17
1. Install the Velero v1.18 command-line interface (CLI) by following the [instructions here][0].
### Upgrade from v1.16
1. Install the Velero v1.17 command-line interface (CLI) by following the [instructions here][0].

Verify that you've properly installed it by running:

@@ -37,7 +36,7 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw

```bash
Client:
    Version: v1.18.0
    Version: v1.17.0
    Git commit: <git SHA>
```

@@ -47,21 +46,28 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw
velero install --crds-only --dry-run -o yaml | kubectl apply -f -
```

3. Update the container image used by the Velero deployment, plugin and (optionally) the node agent daemon set:
3. (optional) Update the `uploader-type` to `kopia` if you are using `restic`:
```bash
kubectl get deploy -n velero -ojson \
  | sed "s/\"--uploader-type=restic\"/\"--uploader-type=kopia\"/g" \
  | kubectl apply -f -
```

4. Update the container image used by the Velero deployment, plugin and (optionally) the node agent daemon set:
```bash
# set the container and image of the init container for plugin accordingly,
# if you are using other plugin
kubectl set image deployment/velero \
  velero=velero/velero:v1.18.0 \
  velero-plugin-for-aws=velero/velero-plugin-for-aws:v1.14.0 \
  velero=velero/velero:v1.17.0 \
  velero-plugin-for-aws=velero/velero-plugin-for-aws:v1.13.0 \
  --namespace velero

# optional, if using the node agent daemonset
kubectl set image daemonset/node-agent \
  node-agent=velero/velero:v1.18.0 \
  node-agent=velero/velero:v1.17.0 \
  --namespace velero
```
4. Confirm that the deployment is up and running with the correct version by running:
5. Confirm that the deployment is up and running with the correct version by running:

```bash
velero version
@@ -71,11 +77,11 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw

```bash
Client:
    Version: v1.18.0
    Version: v1.17.0
    Git commit: <git SHA>

Server:
    Version: v1.18.0
    Version: v1.17.0
```

[0]: basic-install.md#install-the-cli
@@ -87,5 +93,4 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw
[6]: https://velero.io/docs/v1.13/upgrade-to-1.13
[7]: https://velero.io/docs/v1.14/upgrade-to-1.14
[8]: https://velero.io/docs/v1.15/upgrade-to-1.15
[9]: https://velero.io/docs/v1.16/upgrade-to-1.16
[10]: https://velero.io/docs/v1.17/upgrade-to-1.17
[9]: https://velero.io/docs/v1.16/upgrade-to-1.16

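The hunk above adds an optional step for switching the `uploader-type` server argument from `restic` to `kopia` before bumping the images. As a quick way to confirm the result of those steps, assuming the default `velero` namespace and the node agent daemon set name used in the doc:

```bash
# Check which uploader type the Velero deployment is currently running with:
kubectl get deployment velero -n velero \
  -o jsonpath='{.spec.template.spec.containers[0].args}'

# Check the images actually deployed for the server and the node agent:
kubectl get deployment velero -n velero \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
kubectl get daemonset node-agent -n velero \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
```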
@@ -1,13 +1,13 @@
---
title: "Upgrading to Velero 1.18"
title: "Upgrading to Velero 1.17"
layout: docs
---

## Prerequisites

- Velero [v1.17.x][9] installed.
- Velero [v1.16.x][9] installed.

If you're not yet running at least Velero v1.17, see the following:
If you're not yet running at least Velero v1.16, see the following:

- [Upgrading to v1.8][1]
- [Upgrading to v1.9][2]
@@ -18,14 +18,13 @@ If you're not yet running at least Velero v1.17, see the following:
- [Upgrading to v1.14][7]
- [Upgrading to v1.15][8]
- [Upgrading to v1.16][9]
- [Upgrading to v1.17][10]

Before upgrading, check the [Velero compatibility matrix](https://github.com/vmware-tanzu/velero#velero-compatibility-matrix) to make sure your version of Kubernetes is supported by the new version of Velero.

## Instructions

### Upgrade from v1.17
1. Install the Velero v1.18 command-line interface (CLI) by following the [instructions here][0].
### Upgrade from v1.16
1. Install the Velero v1.17 command-line interface (CLI) by following the [instructions here][0].

Verify that you've properly installed it by running:

@@ -37,7 +36,7 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw

```bash
Client:
    Version: v1.18.0
    Version: v1.17.0
    Git commit: <git SHA>
```

@@ -47,21 +46,28 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw
velero install --crds-only --dry-run -o yaml | kubectl apply -f -
```

3. Update the container image used by the Velero deployment, plugin and (optionally) the node agent daemon set:
3. (optional) Update the `uploader-type` to `kopia` if you are using `restic`:
```bash
kubectl get deploy -n velero -ojson \
  | sed "s/\"--uploader-type=restic\"/\"--uploader-type=kopia\"/g" \
  | kubectl apply -f -
```

4. Update the container image used by the Velero deployment, plugin and (optionally) the node agent daemon set:
```bash
# set the container and image of the init container for plugin accordingly,
# if you are using other plugin
kubectl set image deployment/velero \
  velero=velero/velero:v1.18.0 \
  velero-plugin-for-aws=velero/velero-plugin-for-aws:v1.14.0 \
  velero=velero/velero:v1.17.0 \
  velero-plugin-for-aws=velero/velero-plugin-for-aws:v1.13.0 \
  --namespace velero

# optional, if using the node agent daemonset
kubectl set image daemonset/node-agent \
  node-agent=velero/velero:v1.18.0 \
  node-agent=velero/velero:v1.17.0 \
  --namespace velero
```
4. Confirm that the deployment is up and running with the correct version by running:
5. Confirm that the deployment is up and running with the correct version by running:

```bash
velero version
@@ -71,11 +77,11 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw

```bash
Client:
    Version: v1.18.0
    Version: v1.17.0
    Git commit: <git SHA>

Server:
    Version: v1.18.0
    Version: v1.17.0
```

[0]: basic-install.md#install-the-cli
@@ -87,5 +93,4 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw
[6]: https://velero.io/docs/v1.13/upgrade-to-1.13
[7]: https://velero.io/docs/v1.14/upgrade-to-1.14
[8]: https://velero.io/docs/v1.15/upgrade-to-1.15
[9]: https://velero.io/docs/v1.16/upgrade-to-1.16
[10]: https://velero.io/docs/v1.17/upgrade-to-1.17
[9]: https://velero.io/docs/v1.16/upgrade-to-1.16

@@ -13,8 +13,8 @@ toc:
    url: /basic-install
  - page: Customize Installation
    url: /customize-installation
  - page: Upgrade to 1.18
    url: /upgrade-to-1.18
  - page: Upgrade to 1.17
    url: /upgrade-to-1.17
  - page: Supported providers
    url: /supported-providers
  - page: Evaluation install

@@ -13,8 +13,8 @@ toc:
    url: /basic-install
  - page: Customize Installation
    url: /customize-installation
  - page: Upgrade to 1.18
    url: /upgrade-to-1.18
  - page: Upgrade to 1.17
    url: /upgrade-to-1.17
  - page: Supported providers
    url: /supported-providers
  - page: Evaluation install

@@ -27,6 +27,16 @@
  <div class="col-md-3 toc">
    {{ .Render "versions" }}
    <br/>
    <div id="docsearch">
      <!-- <form class="d-flex align-items-center">
        <span class="algolia-autocomplete" style="position: relative; display: inline-block; direction: ltr;">
          <input type="search" class="form-control docsearch" id="search-input" placeholder="Search..."
            aria-label="Search for..." autocomplete="off" spellcheck="false" role="combobox"
            aria-autocomplete="list" aria-expanded="false" aria-owns="algolia-autocomplete-listbox-0"
            dir="auto" style="position: relative; vertical-align: top;">
        </span>
      </form> -->
    </div>
    {{ .Render "nav" }}
  </div>
  <div class="col-md-8">
@@ -48,6 +58,16 @@
      {{ .Render "footer" }}
    </div>
  </div>
  <script src="https://cdn.jsdelivr.net/npm/@docsearch/js@3"></script>
  <script type="text/javascript"> docsearch({
    appId: '9ASKQJ1HR3',
    apiKey: '170ba79bfa16cebfdf10726ae4771d7e',
    indexName: 'velero_new',
    container: '#docsearch',
    searchParameters: {
      facetFilters: ["version:{{ .CurrentSection.Params.version }}"]},
    });
  </script>
</body>

</html>

@@ -8,4 +8,6 @@
  {{ $styles := resources.Get "styles.scss" | toCSS $options | resources.Fingerprint }}
  <link rel="stylesheet" href="{{ $styles.RelPermalink }}" integrity="{{ $styles.Data.Integrity }}">
  {{/* TODO {% seo %}*/}}
  <link rel="preconnect" href="https://9ASKQJ1HR3-dsn.algolia.net" crossorigin />
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@docsearch/css@3" />
</head>

@@ -1,150 +0,0 @@
/*
Copyright the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package basic

import (
    "fmt"
    "path"
    "strings"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    "github.com/vmware-tanzu/velero/test/e2e/test"
    . "github.com/vmware-tanzu/velero/test/e2e/test"
    "github.com/vmware-tanzu/velero/test/util/common"
    . "github.com/vmware-tanzu/velero/test/util/k8s"
)

// RestoreExecHooks tests that a pod with multiple restore exec hooks does not hang
// at the Finalizing phase during restore (Issue #9359 / PR #9366).
type RestoreExecHooks struct {
    TestCase
    podName string
}

var RestoreExecHooksTest func() = test.TestFunc(&RestoreExecHooks{})

func (r *RestoreExecHooks) Init() error {
    Expect(r.TestCase.Init()).To(Succeed())
    r.CaseBaseName = "restore-exec-hooks-" + r.UUIDgen
    r.BackupName = "backup-" + r.CaseBaseName
    r.RestoreName = "restore-" + r.CaseBaseName
    r.podName = "pod-multiple-hooks"
    r.NamespacesTotal = 1
    r.NSIncluded = &[]string{}

    for nsNum := 0; nsNum < r.NamespacesTotal; nsNum++ {
        createNSName := fmt.Sprintf("%s-%00000d", r.CaseBaseName, nsNum)
        *r.NSIncluded = append(*r.NSIncluded, createNSName)
    }

    r.TestMsg = &test.TestMSG{
        Desc:      "Restore pod with multiple restore exec hooks",
        Text:      "Should successfully backup and restore without hanging at Finalizing phase",
        FailedMSG: "Failed to successfully backup and restore pod with multiple hooks",
    }

    r.BackupArgs = []string{
        "create", "--namespace", r.VeleroCfg.VeleroNamespace, "backup", r.BackupName,
        "--include-namespaces", strings.Join(*r.NSIncluded, ","),
        "--default-volumes-to-fs-backup", "--wait",
    }

    r.RestoreArgs = []string{
        "create", "--namespace", r.VeleroCfg.VeleroNamespace, "restore", r.RestoreName,
        "--from-backup", r.BackupName, "--wait",
    }

    return nil
}

func (r *RestoreExecHooks) CreateResources() error {
    for nsNum := 0; nsNum < r.NamespacesTotal; nsNum++ {
        createNSName := fmt.Sprintf("%s-%00000d", r.CaseBaseName, nsNum)

        By(fmt.Sprintf("Creating namespace %s", createNSName), func() {
            Expect(CreateNamespace(r.Ctx, r.Client, createNSName)).
                To(Succeed(), fmt.Sprintf("Failed to create namespace %s", createNSName))
        })

        // Prepare images and commands adaptively for the target OS
        imageAddress := LinuxTestImage
        initCommand := `["/bin/sh", "-c", "echo init-hook-done"]`
        execCommand1 := `["/bin/sh", "-c", "echo hook1"]`
        execCommand2 := `["/bin/sh", "-c", "echo hook2"]`

        if r.VeleroCfg.WorkerOS == common.WorkerOSLinux && r.VeleroCfg.ImageRegistryProxy != "" {
            imageAddress = path.Join(r.VeleroCfg.ImageRegistryProxy, LinuxTestImage)
        } else if r.VeleroCfg.WorkerOS == common.WorkerOSWindows {
            imageAddress = WindowTestImage
            initCommand = `["cmd", "/c", "echo init-hook-done"]`
            execCommand1 = `["cmd", "/c", "echo hook1"]`
            execCommand2 = `["cmd", "/c", "echo hook2"]`
        }

        // Inject a mix of an InitContainer hook and multiple Exec post-restore hooks.
        // This guarantees that the loop index 'i' does not match 'hook.hookIndex' (Issue #9359),
        // ensuring the bug is properly reproduced and the fix is verified.
        ann := map[string]string{
            // Inject InitContainer Restore Hook
            "init.hook.restore.velero.io/container-image": imageAddress,
            "init.hook.restore.velero.io/container-name":  "test-init-hook",
            "init.hook.restore.velero.io/command":         initCommand,

            // Inject multiple Exec Restore Hooks
            "post.hook.restore.velero.io/test1.command":   execCommand1,
            "post.hook.restore.velero.io/test1.container": r.podName,
            "post.hook.restore.velero.io/test2.command":   execCommand2,
            "post.hook.restore.velero.io/test2.container": r.podName,
        }

        By(fmt.Sprintf("Creating pod %s with multiple restore hooks in namespace %s", r.podName, createNSName), func() {
            _, err := CreatePod(
                r.Client,
                createNSName,
                r.podName,
                "",         // No storage class needed
                "",         // No PVC needed
                []string{}, // No volumes
                nil,
                ann,
                r.VeleroCfg.ImageRegistryProxy,
                r.VeleroCfg.WorkerOS,
            )
            Expect(err).To(Succeed(), fmt.Sprintf("Failed to create pod with hooks in namespace %s", createNSName))
        })

        By(fmt.Sprintf("Waiting for pod %s to be ready", r.podName), func() {
            err := WaitForPods(r.Ctx, r.Client, createNSName, []string{r.podName})
            Expect(err).To(Succeed(), fmt.Sprintf("Failed to wait for pod %s in namespace %s", r.podName, createNSName))
        })
    }
    return nil
}

func (r *RestoreExecHooks) Verify() error {
    for nsNum := 0; nsNum < r.NamespacesTotal; nsNum++ {
        createNSName := fmt.Sprintf("%s-%00000d", r.CaseBaseName, nsNum)

        By(fmt.Sprintf("Verifying pod %s in namespace %s after restore", r.podName, createNSName), func() {
            err := WaitForPods(r.Ctx, r.Client, createNSName, []string{r.podName})
            Expect(err).To(Succeed(), fmt.Sprintf("Failed to verify pod %s in namespace %s after restore", r.podName, createNSName))
        })
    }
    return nil
}
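The removed test above exercises restore hooks entirely through pod annotations. For reference, this is roughly what those annotations look like when applied by hand to a pod before it is backed up; the pod and namespace names and the busybox image are placeholders, while the annotation keys and command format mirror the test:

```bash
# Sketch: apply the same restore-hook annotations the deleted test injects
# to an ordinary pod. Velero reads them from the backed-up pod spec when
# the restore runs.
kubectl annotate pod my-app -n my-namespace \
  init.hook.restore.velero.io/container-image=busybox \
  init.hook.restore.velero.io/container-name=test-init-hook \
  init.hook.restore.velero.io/command='["/bin/sh", "-c", "echo init-hook-done"]' \
  post.hook.restore.velero.io/test1.container=my-app \
  post.hook.restore.velero.io/test1.command='["/bin/sh", "-c", "echo hook1"]' \
  post.hook.restore.velero.io/test2.container=my-app \
  post.hook.restore.velero.io/test2.command='["/bin/sh", "-c", "echo hook2"]'
```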
@@ -440,12 +440,6 @@ var _ = Describe(
    StorageClasssChangingTest,
)

var _ = Describe(
    "Restore phase does not block at Finalizing when a container has multiple exec hooks",
    Label("Basic", "Hooks"),
    RestoreExecHooksTest,
)

var _ = Describe(
    "Backup/restore of 2500 namespaces",
    Label("Scale", "LongTime"),
@@ -500,11 +494,6 @@ var _ = Describe(
    Label("ResourceFiltering", "IncludeNamespaces", "Restore"),
    RestoreWithIncludeNamespaces,
)
var _ = Describe(
    "Velero test on backup/restore with wildcard namespaces",
    Label("ResourceFiltering", "WildcardNamespaces"),
    WildcardNamespacesTest,
)
var _ = Describe(
    "Velero test on include resources from the cluster backup",
    Label("ResourceFiltering", "IncludeResources", "Backup"),

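The removed `Describe` blocks above register the two deleted cases under Ginkgo labels such as `Basic`/`Hooks` and `ResourceFiltering`/`WildcardNamespaces`. As a sketch, assuming the suite is invoked directly with the Ginkgo v2 CLI rather than through the repo's make targets, those labels can be selected like this:

```bash
# Run only the label-filtered specs with the Ginkgo v2 CLI.
ginkgo run -v --label-filter='ResourceFiltering && WildcardNamespaces' ./test/e2e/
ginkgo run -v --label-filter='Basic && Hooks' ./test/e2e/
```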
@@ -55,10 +55,12 @@ var GlobalRepoMaintenanceTest func() = TestFunc(&RepoMaintenanceTestCase{
    jobConfigs: velerotypes.JobConfigs{
        KeepLatestMaintenanceJobs: &keepJobNum,
        PodResources: &velerokubeutil.PodResources{
            CPURequest:    "100m",
            MemoryRequest: "100Mi",
            CPULimit:      "200m",
            MemoryLimit:   "200Mi",
            CPURequest:              "100m",
            MemoryRequest:           "100Mi",
            EphemeralStorageRequest: "5Gi",
            CPULimit:                "200m",
            MemoryLimit:             "200Mi",
            EphemeralStorageLimit:   "10Gi",
        },
        PriorityClassName: test.PriorityClassNameForRepoMaintenance,
    },

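The hunk above adds ephemeral-storage requests and limits to the maintenance job's `PodResources` in the e2e fixture. For orientation, a maintenance job config of this shape is normally delivered to the server as a JSON ConfigMap. The sketch below mirrors the struct fields above, but the exact key names and the `--repo-maintenance-job-configmap` flag are assumptions to verify against the repository-maintenance docs for your release:

```bash
# Hedged sketch only: key names mirror the JobConfigs/PodResources fields above;
# confirm the exact schema and flag name against your Velero release docs.
cat > repo-maintenance.json <<'EOF'
{
  "global": {
    "keepLatestMaintenanceJobs": 3,
    "podResources": {
      "cpuRequest": "100m",
      "memoryRequest": "100Mi",
      "ephemeralStorageRequest": "5Gi",
      "cpuLimit": "200m",
      "memoryLimit": "200Mi",
      "ephemeralStorageLimit": "10Gi"
    }
  }
}
EOF
kubectl create configmap repo-maintenance-job-config -n velero \
  --from-file=repo-maintenance.json
```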
@@ -1,143 +0,0 @@
/*
Copyright the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filtering

import (
    "fmt"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    apierrors "k8s.io/apimachinery/pkg/api/errors"

    . "github.com/vmware-tanzu/velero/test/e2e/test"
    . "github.com/vmware-tanzu/velero/test/util/k8s"
)

// WildcardNamespaces tests the inclusion and exclusion of namespaces using wildcards
// introduced in PR #9255 (Issue #1874). It verifies filtering at both Backup and Restore stages.
type WildcardNamespaces struct {
    TestCase // Inherit from basic TestCase instead of FilteringCase to customize a single flow
    restoredNS          []string
    excludedByBackupNS  []string
    excludedByRestoreNS []string
}

// Register as a single E2E test
var WildcardNamespacesTest func() = TestFunc(&WildcardNamespaces{})

func (w *WildcardNamespaces) Init() error {
    Expect(w.TestCase.Init()).To(Succeed())

    w.CaseBaseName = "wildcard-ns-" + w.UUIDgen
    w.BackupName = "backup-" + w.CaseBaseName
    w.RestoreName = "restore-" + w.CaseBaseName

    // 1. Define namespaces for different filtering lifecycle scenarios
    nsIncBoth := w.CaseBaseName + "-inc-both" // Included in both backup and restore
    nsExact := w.CaseBaseName + "-exact"      // Included exactly without wildcards
    nsIncExc := w.CaseBaseName + "-inc-exc"   // Included in backup, but excluded during restore
    nsBakExc := w.CaseBaseName + "-test-bak"  // Excluded during backup

    // Group namespaces for validation
    w.restoredNS = []string{nsIncBoth, nsExact}
    w.excludedByRestoreNS = []string{nsIncExc}
    w.excludedByBackupNS = []string{nsBakExc}

    w.TestMsg = &TestMSG{
        Desc:      "Backup and restore with wildcard namespaces",
        Text:      "Should correctly filter namespaces using wildcards during both backup and restore stages",
        FailedMSG: "Failed to properly filter namespaces using wildcards",
    }

    // 2. Setup Backup Args
    backupIncWildcard1 := fmt.Sprintf("%s-inc-*", w.CaseBaseName)   // Matches nsIncBoth, nsIncExc
    backupIncWildcard2 := fmt.Sprintf("%s-test-*", w.CaseBaseName)  // Matches nsBakExc
    backupExcWildcard := fmt.Sprintf("%s-test-bak", w.CaseBaseName) // Excludes nsBakExc
    nonExistentWildcard := "non-existent-ns-*"                      // Tests zero-match boundary condition

    w.BackupArgs = []string{
        "create", "--namespace", w.VeleroCfg.VeleroNamespace, "backup", w.BackupName,
        // Use broad wildcards for inclusion to bypass Velero CLI's literal string collision validation
        "--include-namespaces", fmt.Sprintf("%s,%s,%s,%s", backupIncWildcard1, backupIncWildcard2, nsExact, nonExistentWildcard),
        "--exclude-namespaces", backupExcWildcard,
        "--default-volumes-to-fs-backup", "--wait",
    }

    // 3. Setup Restore Args
    restoreExcWildcard := fmt.Sprintf("%s-*-exc", w.CaseBaseName) // Excludes nsIncExc

    w.RestoreArgs = []string{
        "create", "--namespace", w.VeleroCfg.VeleroNamespace, "restore", w.RestoreName,
        "--from-backup", w.BackupName,
        "--include-namespaces", fmt.Sprintf("%s,%s,%s", backupIncWildcard1, nsExact, nonExistentWildcard),
        "--exclude-namespaces", restoreExcWildcard,
        "--wait",
    }

    return nil
}

func (w *WildcardNamespaces) CreateResources() error {
    allNamespaces := append(w.restoredNS, w.excludedByRestoreNS...)
    allNamespaces = append(allNamespaces, w.excludedByBackupNS...)

    for _, ns := range allNamespaces {
        By(fmt.Sprintf("Creating namespace %s", ns), func() {
            Expect(CreateNamespace(w.Ctx, w.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns))
        })

        // Create a ConfigMap in each namespace to verify resource restoration
        cmName := "configmap-" + ns
        By(fmt.Sprintf("Creating ConfigMap %s in namespace %s", cmName, ns), func() {
            _, err := CreateConfigMap(w.Client.ClientGo, ns, cmName, map[string]string{"wildcard-test": "true"}, nil)
            Expect(err).To(Succeed(), fmt.Sprintf("Failed to create configmap in namespace %s", ns))
        })
    }
    return nil
}

func (w *WildcardNamespaces) Verify() error {
    // 1. Verify namespaces that should be successfully restored
    for _, ns := range w.restoredNS {
        By(fmt.Sprintf("Checking included namespace %s exists", ns), func() {
            _, err := GetNamespace(w.Ctx, w.Client, ns)
            Expect(err).To(Succeed(), fmt.Sprintf("Included namespace %s should exist after restore", ns))

            _, err = GetConfigMap(w.Client.ClientGo, ns, "configmap-"+ns)
            Expect(err).To(Succeed(), fmt.Sprintf("ConfigMap in included namespace %s should exist", ns))
        })
    }

    // 2. Verify namespaces excluded during Backup
    for _, ns := range w.excludedByBackupNS {
        By(fmt.Sprintf("Checking namespace %s excluded by backup does NOT exist", ns), func() {
            _, err := GetNamespace(w.Ctx, w.Client, ns)
            Expect(err).To(HaveOccurred(), fmt.Sprintf("Namespace %s excluded by backup should NOT exist after restore", ns))
            Expect(apierrors.IsNotFound(err)).To(BeTrue(), "Error should be NotFound")
        })
    }

    // 3. Verify namespaces excluded during Restore
    for _, ns := range w.excludedByRestoreNS {
        By(fmt.Sprintf("Checking namespace %s excluded by restore does NOT exist", ns), func() {
            _, err := GetNamespace(w.Ctx, w.Client, ns)
            Expect(err).To(HaveOccurred(), fmt.Sprintf("Namespace %s excluded by restore should NOT exist after restore", ns))
            Expect(apierrors.IsNotFound(err)).To(BeTrue(), "Error should be NotFound")
        })
    }
    return nil
}
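Stripped of the test harness, the wildcard filtering that this deleted test exercised boils down to CLI invocations of the following shape; the backup, restore, and namespace names are placeholders, while the flags and wildcard patterns mirror the test's `BackupArgs` and `RestoreArgs`:

```bash
# Backup: include two wildcard groups plus an exact namespace, and exclude
# one literal namespace that a wildcard would otherwise match.
velero backup create wildcard-backup \
  --include-namespaces 'team-inc-*,team-test-*,team-exact,non-existent-ns-*' \
  --exclude-namespaces 'team-test-bak' \
  --default-volumes-to-fs-backup --wait

# Restore: re-include one wildcard group, and exclude a narrower wildcard.
velero restore create wildcard-restore \
  --from-backup wildcard-backup \
  --include-namespaces 'team-inc-*,team-exact' \
  --exclude-namespaces 'team-*-exc' \
  --wait
```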