Compare commits
141 Commits
| SHA1 |
|---|
| aad794af97 |
| a75775ef49 |
| ac1f6e7f3e |
| 8e9e6b4d36 |
| 71b230f82e |
| 455f3ba305 |
| fc6361ba06 |
| 39db9f9c1e |
| 4d9bd91200 |
| c5fa50bedc |
| df2686c146 |
| 8a6ac7af1c |
| a990bd81f1 |
| 15db9d2552 |
| 1b4c7fe4be |
| 5b9bcc99f1 |
| e921c177cc |
| cf605c948e |
| cd89c0ffa7 |
| eb0a1814c6 |
| eaef4ead42 |
| 7562011b79 |
| 4a6756d57b |
| e1cc07cec3 |
| 1730b7f414 |
| 37abfb4bfa |
| 0cf8f94268 |
| 1b5503e20b |
| e439977117 |
| dd82645909 |
| 22f93ad457 |
| 9598c50295 |
| 54761092c1 |
| dca3d3001f |
| e8fa708933 |
| fca4d405b1 |
| d3f4b2c67e |
| 235e579581 |
| dd1def9d33 |
| baf2491344 |
| e79ad64a10 |
| e9226527de |
| 78fba2146c |
| 4dbdd2df3a |
| 6869b7bf54 |
| 30ddf3f35f |
| 38d9e96130 |
| 531fc4810f |
| 94259e8a5c |
| 5433eb3081 |
| 238b1e1f13 |
| ef7b468fb9 |
| f0aa64172e |
| 3f8e358849 |
| bbd5ae079d |
| 91922103b4 |
| e6bdff61bd |
| c74d5e7aba |
| 905a561c84 |
| e9d312c27e |
| a5391e13e7 |
| e368fc8803 |
| b5734a6ba2 |
| 65c88f3425 |
| bb9a94bebe |
| 74401b20b0 |
| 417d3d2562 |
| 68cee893f1 |
| fce276bca9 |
| ade433ecbd |
| 48e66b1790 |
| 29a9f80f10 |
| 70043af85b |
| 66ac235e1f |
| afe7df17d4 |
| a31f4abcb3 |
| 2145c57642 |
| a9b3cfa062 |
| bca6afada7 |
| d1cc303553 |
| befa61cee1 |
| 245525c26b |
| 55737b9cf1 |
| ffea850522 |
| d315bca32b |
| b3aff97684 |
| 23a3c242fa |
| b7bc16f190 |
| bbec46f6ee |
| 475050108b |
| b5f7cd92c7 |
| ab31b811ee |
| 19360622e7 |
| 932d27541c |
| b0642b3078 |
| 9cada8fc11 |
| 25d5fa1b88 |
| 1c08af8461 |
| 6c3d81a146 |
| 81029d64ff |
| 8f32696449 |
| 3f15e9219f |
| 62aa70219b |
| 544b184d6c |
| 250c4db158 |
| f0d81c56e2 |
| 8b5559274d |
| a230929111 |
| 7235180de4 |
| ba5e7681ff |
| fc0a16d734 |
| bcdee1b116 |
| 2a696a4431 |
| 991bf1b000 |
| 4d47471932 |
| 0bf968d24d |
| 158681e927 |
| 05c9a8d8f8 |
| bc957a22b7 |
| 7e3d66adc7 |
| 710ebb9d92 |
| 1315399f35 |
| eadacf43e1 |
| ddd83a66c5 |
| 7af688fbf5 |
| 41fa774844 |
| 5121417457 |
| ece04e6e39 |
| 71ddeefcd6 |
| e159992f48 |
| 48b14194df |
| 4ada356bf1 |
| 7f51017842 |
| 18c32ed29c |
| 598c8c528b |
| 8f9beb04f0 |
| bb518e6d89 |
| 89c5182c3c |
| d17435542e |
| e3b501d0d9 |
| 060b3364f2 |
2
.github/workflows/nightly-trivy-scan.yml
vendored
@@ -22,7 +22,7 @@ jobs:
         uses: actions/checkout@v6
 
       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@master
+        uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1
         with:
           image-ref: 'docker.io/velero/${{ matrix.images }}:${{ matrix.versions }}'
           severity: 'CRITICAL,HIGH,MEDIUM'
93
.github/workflows/pr-filepath-check.yml
vendored
Normal file
@@ -0,0 +1,93 @@
name: Pull Request File Path Check
on: [pull_request]
jobs:

  filepath-check:
    name: Check for invalid characters in file paths
    runs-on: ubuntu-latest
    steps:

      - name: Check out the code
        uses: actions/checkout@v6

      - name: Validate file paths for Go module compatibility
        run: |
          # Go's module zip rejects filenames containing certain characters.
          # See golang.org/x/mod/module fileNameOK() for the full specification.
          #
          # Allowed ASCII: letters, digits, and: !#$%&()+,-.=@[]^_{}~ and space
          # Allowed non-ASCII: unicode letters only
          # Rejected: " ' * < > ? ` | / \ : and any non-letter unicode (control
          # chars, format chars like U+200E LEFT-TO-RIGHT MARK, etc.)
          #
          # This check catches issues like the U+200E incident in PR #9552.

          EXIT_STATUS=0

          git ls-files -z | python3 -c "
          import sys, unicodedata

          data = sys.stdin.buffer.read()
          files = data.split(b'\x00')

          # Characters explicitly rejected by Go's fileNameOK
          # (path separators / and \ are inherent to paths so we check per-element)
          bad_ascii = set('\"' + \"'\" + '*<>?\`|:')

          allowed_ascii = set('!#$%&()+,-.=@[]^_{}~ ')

          def is_ok(ch):
              if ch.isascii():
                  return ch.isalnum() or ch in allowed_ascii
              return ch.isalpha()

          bad_files = [] # list of (original_path, clean_path, char_desc)
          for f in files:
              if not f:
                  continue
              try:
                  name = f.decode('utf-8')
              except UnicodeDecodeError:
                  print(f'::error::Non-UTF-8 bytes in filename: {f!r}')
                  bad_files.append((repr(f), None, 'non-UTF-8 bytes'))
                  continue

              # Check each path element (split on /)
              for element in name.split('/'):
                  for ch in element:
                      if not is_ok(ch):
                          cp = ord(ch)
                          char_name = unicodedata.name(ch, f'U+{cp:04X}')
                          char_desc = f'U+{cp:04X} ({char_name})'
                          # Build cleaned path by stripping invalid chars
                          clean = '/'.join(
                              ''.join(c for c in elem if is_ok(c))
                              for elem in name.split('/')
                          )
                          print(f'::error file={name}::File \"{name}\" contains invalid char {char_desc}')
                          bad_files.append((name, clean, char_desc))
                          break

          if bad_files:
              print()
              print('The following files have characters that are invalid in Go module zip archives:')
              print()
              for original, clean, desc in bad_files:
                  print(f' {original} — {desc}')
              print()
              print('To fix, rename the files to remove the problematic characters:')
              print()
              for original, clean, desc in bad_files:
                  if clean:
                      print(f' mv \"{original}\" \"{clean}\" && git add \"{clean}\"')
                      print(f' # or: git mv \"{original}\" \"{clean}\"')
                  else:
                      print(f' # {original} — cannot auto-suggest rename (non-UTF-8)')
              print()
              print('See https://github.com/vmware-tanzu/velero/pull/9552 for context.')
              sys.exit(1)
          else:
              print('All file paths are valid for Go module zip.')
          " || EXIT_STATUS=1

          exit $EXIT_STATUS
29
Dockerfile
@@ -13,7 +13,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
# Velero binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.25-trixie AS velero-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
@@ -48,30 +48,6 @@ RUN mkdir -p /output/usr/bin && \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/velero-helper && \
|
||||
go clean -modcache -cache
|
||||
|
||||
# Restic binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS restic-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
ARG RESTIC_VERSION
|
||||
|
||||
ENV CGO_ENABLED=0 \
|
||||
GO111MODULE=on \
|
||||
GOPROXY=${GOPROXY} \
|
||||
GOOS=${TARGETOS} \
|
||||
GOARCH=${TARGETARCH} \
|
||||
GOARM=${TARGETVARIANT}
|
||||
|
||||
COPY . /go/src/github.com/vmware-tanzu/velero
|
||||
|
||||
RUN mkdir -p /output/usr/bin && \
|
||||
export GOARM=$(echo "${GOARM}" | cut -c2-) && \
|
||||
/go/src/github.com/vmware-tanzu/velero/hack/build-restic.sh && \
|
||||
go clean -modcache -cache
|
||||
|
||||
# Velero image packing section
|
||||
FROM paketobuildpacks/run-jammy-tiny:latest
|
||||
|
||||
@@ -79,7 +55,4 @@ LABEL maintainer="Xun Jiang <jxun@vmware.com>"
|
||||
|
||||
COPY --from=velero-builder /output /
|
||||
|
||||
COPY --from=restic-builder /output /
|
||||
|
||||
USER cnb:cnb
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
ARG OS_VERSION=1809
|
||||
|
||||
# Velero binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.25-trixie AS velero-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
|
||||
3
Makefile
@@ -105,8 +105,6 @@ see: https://velero.io/docs/main/build-from-source/#making-images-and-updating-v
|
||||
endef
|
||||
# comma cannot be escaped and can only be used in Make function arguments by putting into variable
|
||||
comma=,
|
||||
# The version of restic binary to be downloaded
|
||||
RESTIC_VERSION ?= 0.15.0
|
||||
|
||||
CLI_PLATFORMS ?= linux-amd64 linux-arm linux-arm64 darwin-amd64 darwin-arm64 windows-amd64 linux-ppc64le linux-s390x
|
||||
BUILD_OUTPUT_TYPE ?= docker
|
||||
@@ -260,7 +258,6 @@ container-linux:
|
||||
--build-arg=GIT_SHA=$(GIT_SHA) \
|
||||
--build-arg=GIT_TREE_STATE=$(GIT_TREE_STATE) \
|
||||
--build-arg=REGISTRY=$(REGISTRY) \
|
||||
--build-arg=RESTIC_VERSION=$(RESTIC_VERSION) \
|
||||
--provenance=false \
|
||||
--sbom=false \
|
||||
-f $(VELERO_DOCKERFILE) .
|
||||
|
||||
6
Tiltfile
@@ -103,11 +103,6 @@ local_resource(
|
||||
deps = ["internal", "pkg/cmd"],
|
||||
)
|
||||
|
||||
local_resource(
|
||||
"restic_binary",
|
||||
cmd = 'cd ' + '.' + ';mkdir -p _tiltbuild/restic; BIN=velero GOOS=linux GOARCH=amd64 GOARM="" RESTIC_VERSION=0.13.1 OUTPUT_DIR=_tiltbuild/restic ./hack/build-restic.sh',
|
||||
)
|
||||
|
||||
# Note: we need a distro with a bash shell to exec into the Velero container
|
||||
tilt_dockerfile_header = """
|
||||
FROM ubuntu:22.04 as tilt
|
||||
@@ -118,7 +113,6 @@ WORKDIR /
|
||||
COPY --from=tilt-helper /start.sh .
|
||||
COPY --from=tilt-helper /restart.sh .
|
||||
COPY velero .
|
||||
COPY restic/restic /usr/bin/restic
|
||||
"""
|
||||
|
||||
dockerfile_contents = "\n".join([
|
||||
|
||||
@@ -16,7 +16,7 @@ https://velero.io/docs/v1.18/upgrade-to-1.18/
|
||||
#### Concurrent backup
|
||||
In v1.18, Velero is capable of processing multiple backups concurrently. This is a significant usability improvement, especially for multi-tenant or multi-user cases: backups submitted by different users can run simultaneously without interfering with each other.
|
||||
|
||||
Check design https://github.com/vmware-tanzu/velero/blob/main/design/concurrent-backup-processing.md for more details.
|
||||
Check design https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/concurrent-backup-processing.md for more details.
|
||||
|
||||
#### Cache volume for data movers
|
||||
In v1.18, Velero allows users to configure cache volumes for data mover pods during restore for CSI snapshot data movement and fs-backup. This brings the following benefits:
|
||||
@@ -24,7 +24,7 @@ In v1.18, Velero allows users to configure cache volumes for data mover pods dur
|
||||
- Solve the problem that multiple data mover pods fail to run concurrently in one node when the node's ephemeral disk is limited
|
||||
- Working together with backup repository's cache limit configuration, cache volume with appropriate size helps to improve the restore throughput
|
||||
|
||||
Check design https://github.com/vmware-tanzu/velero/blob/main/design/backup-repo-cache-volume.md for more details.
|
||||
Check design https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/backup-repo-cache-volume.md for more details.
|
||||
|
||||
#### Incremental size for data movers
|
||||
In v1.18, Velero allows users to observe the incremental size of data movers backups for CSI snapshot data movement and fs-backup, so that users could visually see the data reduction due to incremental backup.
|
||||
|
||||
1
changelogs/unreleased/9403-GabriFedi97
Normal file
@@ -0,0 +1 @@
|
||||
Include InitContainer configured as Sidecars when validating the existence of the target containers configured for the Backup Hooks
|
||||
1
changelogs/unreleased/9502-Joeavaikath
Normal file
@@ -0,0 +1 @@
|
||||
Support all glob wildcard characters in namespace validation
|
||||
1
changelogs/unreleased/9508-kaovilai
Normal file
@@ -0,0 +1 @@
|
||||
Fix VolumePolicy PVC phase condition filter for unbound PVCs (#9507)
|
||||
1
changelogs/unreleased/9516-shubham-pampattiwar
Normal file
@@ -0,0 +1 @@
|
||||
Fix VolumeGroupSnapshot restore failure with Ceph RBD CSI driver by creating stub VolumeGroupSnapshotContent during restore and looking up VolumeSnapshotClass by driver for credential support
|
||||
1
changelogs/unreleased/9528-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Add block data mover design for block level incremental backup by integrating with Kubernetes CBT
|
||||
1
changelogs/unreleased/9532-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9343, include PV topology to data mover pod affinities
|
||||
1
changelogs/unreleased/9533-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9496, support customized host os
|
||||
1
changelogs/unreleased/9540-sseago
Normal file
@@ -0,0 +1 @@
|
||||
Add custom action type to volume policies
|
||||
1
changelogs/unreleased/9547-blackpiglet
Normal file
@@ -0,0 +1 @@
|
||||
If BIA returns updateObj with SkipFromBackupAnnotation, treat it as skipping the resource from the backup.
|
||||
1
changelogs/unreleased/9554-testsabirweb
Normal file
@@ -0,0 +1 @@
|
||||
Issue #9544: Add test coverage for S3 bucket name in MRAP ARN notation and fix bucket validation to accept ARN format
|
||||
1
changelogs/unreleased/9560-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9475, use node-selector instead of nodeName for generic restore
|
||||
1
changelogs/unreleased/9561-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9460, flush buffer before data mover completes
|
||||
1
changelogs/unreleased/9570-H-M-Quang-Ngo
Normal file
@@ -0,0 +1 @@
|
||||
Add schedule_expected_interval_seconds metric for dynamic backup alerting thresholds (#9559)
|
||||
1
changelogs/unreleased/9574-blackpiglet
Normal file
@@ -0,0 +1 @@
|
||||
Add ephemeral storage limit and request support for data mover and maintenance job
|
||||
1
changelogs/unreleased/9581-shubham-pampattiwar
Normal file
@@ -0,0 +1 @@
|
||||
Fix DBR stuck when CSI snapshot no longer exists in cloud provider
|
||||
1
changelogs/unreleased/9614-blackpiglet
Normal file
@@ -0,0 +1 @@
|
||||
Add check for file extraction from tarball.
|
||||
1
changelogs/unreleased/9628-priyansh17
Normal file
@@ -0,0 +1 @@
|
||||
Implement original VolumeSnapshotContent deletion for legacy backups
|
||||
1
changelogs/unreleased/9634-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9626, let go for uninitialized repo under readonly mode
|
||||
1
changelogs/unreleased/9638-adam-jian-zhang
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9636, fix configmap lookup in non-default namespaces
|
||||
1
changelogs/unreleased/9643-priyansh17
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9641, Remove redundant ReadyToUse polling in CSI VolumeSnapshotContent delete plugin
|
||||
1
changelogs/unreleased/9653-BassinD
Normal file
@@ -0,0 +1 @@
|
||||
Fix service restore with null healthCheckNodePort in last-applied-configuration label
|
||||
1
changelogs/unreleased/9663-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9659, in the case that PVB/PVR/DU/DD is cancelled before the data path is really started, call EndEvent to prevent data mover pod from crashing because of delay event distribution
|
||||
1
changelogs/unreleased/9668-adam-jian-zhang
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9666, fix node-agent node detection in multiple instances scenario
|
||||
1
changelogs/unreleased/9676-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9470, remove restic from repository
|
||||
1
changelogs/unreleased/9677-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9469, remove restic for uploader
|
||||
1
changelogs/unreleased/9682-adam-jian-zhang
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9681, fix restores and podvolumerestores list options to only list in installed namespace
|
||||
1
changelogs/unreleased/9683-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9428, increase repo maintenance history queue length from 3 to 25
|
||||
1
changelogs/unreleased/9693-priyansh17
Normal file
@@ -0,0 +1 @@
|
||||
Enhance backup deletion logic to handle tarball download failures
|
||||
1
changelogs/unreleased/9695-shubham-pampattiwar
Normal file
@@ -0,0 +1 @@
|
||||
Bump external-snapshotter to v8.4.0 and migrate VolumeGroupSnapshot API from v1beta1 to v1beta2 for Kubernetes 1.34+ compatibility
|
||||
1
changelogs/unreleased/9700-priyansh17
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9699, add a 2-second gap between temporary CSI VolumeSnapshotContent create and delete operations
|
||||
1
changelogs/unreleased/9701-emirot
Normal file
@@ -0,0 +1 @@
|
||||
Update Debian base image from bookworm to trixie
|
||||
1
changelogs/unreleased/9704-adam-jian-zhang
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9703, fix CSI PVC Backup Plugin list options to only list in installed namespace
|
||||
1
changelogs/unreleased/9705-emirot
Normal file
@@ -0,0 +1 @@
|
||||
perf: better string concatenation
|
||||
1
changelogs/unreleased/9724-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9723, extend Unified Repo Interface to support block uploader
|
||||
1
changelogs/unreleased/9728-blackpiglet
Normal file
@@ -0,0 +1 @@
|
||||
Remove Restic build from Dockerfile, Makefile and Tiltfile.
|
||||
@@ -69,9 +69,7 @@ spec:
|
||||
- ""
|
||||
type: string
|
||||
resticIdentifier:
|
||||
description: |-
|
||||
ResticIdentifier is the full restic-compatible string for identifying
|
||||
this repository. This field is only used when RepositoryType is "restic".
|
||||
description: Deprecated
|
||||
type: string
|
||||
volumeNamespace:
|
||||
description: |-
|
||||
|
||||
BIN
design/block-data-mover/backup-architecture.png
Normal file
|
After Width: | Height: | Size: 498 KiB |
551
design/block-data-mover/block-data-mover.md
Normal file
@@ -0,0 +1,551 @@
|
||||
# Block Data Mover Design
|
||||
|
||||
## Glossary & Abbreviation
|
||||
|
||||
**Backup Storage**: The storage to store the backup data. Check [Unified Repository design][1] for details.
|
||||
**Backup Repository**: Backup repository is layered between BR data movers and Backup Storage to provide BR related features that is introduced in [Unified Repository design][1].
|
||||
**Velero Generic Data Path (VGDP)**: VGDP is the collection of modules introduced in [Unified Repository design][1]. Velero uses these modules to finish data transfer for various purposes (i.e., PodVolume backup/restore, Volume Snapshot Data Movement). VGDP modules include uploaders and the backup repository.
|
||||
**Velero Built-in Data Mover (VBDM)**: VBDM, which is introduced in [Volume Snapshot Data Movement design][2] and [Unified Repository design][1], is the built-in data mover shipped along with Velero, it includes Velero data mover controllers and VGDP.
|
||||
**Data Mover Pods**: Intermediate pods which hold VGDP and complete the data transfer. See [VGDP Micro Service for Volume Snapshot Data Movement][3] for details.
|
||||
**Change Block Tracking (CBT)**: CBT is the mechanism to track changed blocks, so that backups could back up the changed data only. CBT is usually provided by the computing/storage platform.
**TCO**: Total Cost of Ownership. This is a general criterion for products/solutions, but it also means a lot for BR solutions. For example, it covers what kind of backup storage (and at what cost) a solution requires, the retention policy of backup copies, the ways to remove backup data redundancy, etc.
|
||||
**PodVolume Backup**: This is the Velero backup method which accesses the data from live file system, see [Kopia Integration design][1] for how it works.
|
||||
**CAOS and CABS**: Content-Addressable Object Storage and Content-Addressable Block Storage, they are the parts from Kopia repository, see [Kopia Architecture][5].
|
||||
|
||||
## Background
|
||||
Kubernetes supports two kinds of volume mode, `FileSystem` and `Block`, for persistent volumes. Under the hood, the storage could use a block storage to provision either `FileSystem` mode or `Block` mode volumes; and the storage could use a file storage to provision `FileSystem` mode volumes.
Volumes provisioned by block storage could be backed up/restored from the block level, regardless of the volume mode of the persistent volume.
|
||||
On the other hand, as long as the data could be accessed from the file system, a backup/restore could be conducted from the file system level. That is to say `FileSystem` mode volumes could be backed up/restored from the file system level, regardless of the backend storage type.
|
||||
Then if a `FileSystem` mode volume is provisioned by a block storage, the volume could be backed up/restored either from the file system level or block level.
|
||||
|
||||
For Velero, [CSI Snapshot Data Movement][2] which is implemented by VBDM, ships a file system uploader, so the backup/restore is done from file system only.
|
||||
|
||||
When possible, block level backup/restore is better than file system level backup/restore:
- Block level backup could leverage CBT to process a minimal amount of data, so it significantly reduces the overhead on the network, backup repository and backup storage. As a result, TCO is significantly reduced.
- Block level backup/restore is performant in throughput and resource consumption, because it doesn't need to handle the complexity of the file system, especially in the case of a huge number of small files in the file system.
|
||||
- Block level backup/restore is less OS dependent because the uploader doesn't need the OS to be aware of the file system in the volume.
|
||||
|
||||
At present, the [Kubernetes CBT API][4] is mature and close to the Beta stage. Many platforms/storages already support it or are going to support it.
|
||||
|
||||
Therefore, it is very important for Velero to deliver the block level backup/restore and recommend users to use it over the file system data mover as long as:
|
||||
- The volume is backed by block storage so block level access is possible
|
||||
- The platform supports CBT
|
||||
|
||||
Meanwhile, file system level backup/restore is still valuable for below scenarios:
|
||||
- The volume is backed by file storage, e.g., AWS EFS, Azure File, CephFS, VKS File Volume, etc.
|
||||
- The volume is backed by block storage but CBT is not available
|
||||
- The volume doesn't support CSI snapshot, so Velero PodVolume Backup method is used
|
||||
|
||||
There are rich features delivered with VGDP, VBDM and the [VGDP micro service][3]; to reuse these features, the block data mover should be built on top of these modules.
|
||||
|
||||
Velero VBDM supports Linux and Windows nodes; however, Windows containers don't support block mode volumes, so backing up/restoring from Windows nodes is not supported until Windows containers remove this limitation. As a result, if there are both Linux and Windows nodes in the cluster, the block data mover can only run on Linux nodes.

Both the Kubernetes CBT service and Velero work within the boundary of the cluster; even though the backend storage may be shared by multiple clusters, Velero can only protect workloads in the same cluster where it is running.
|
||||
|
||||
## Goals
|
||||
|
||||
Add a block data mover to VBDM and support block level backup/restore for [CSI Snapshot Data Movement][2], which includes:
|
||||
- Support block level full backup for both `FileSystem` and `Block` mode volumes
|
||||
- Support block level incremental backup for both `FileSystem` and `Block` mode volumes
|
||||
- Support block level restore from full/incremental backup for both `FileSystem` and `Block` mode volumes
|
||||
- Support block level backup/restore for both linux and Windows workloads from linux cluster nodes
|
||||
- Support all existing features, i.e., load concurrency, node selection, cache volume, deduplication, compression, encryption, etc. for the block data mover
|
||||
- Support volumes processed from file system level and block level in the same backup/restore
|
||||
|
||||
## Non-Goals
|
||||
|
||||
- PodVolume Backup does the backup/restore from file system level only, so block level backup/restore is not supported
|
||||
- Volumes that are backed by file system storages, can only be backed up/restored from file system level, so block level backup/restore is not supported
|
||||
- Backing up/restoring from Windows nodes is not supported
|
||||
- Block level incremental backup requires special capabilities of the backup repository, and the Velero [Unified Repository][1] supports multiple kinds of backup repositories. The current design focuses on the Kopia repository only; block level incremental backup support for other repositories will be considered when the specific backup repository is integrated into the [Velero Unified Repository][1]
|
||||
|
||||
## Architecture
|
||||
|
||||
### Data Path
|
||||
|
||||
Below is the architecture of VGDP when integrating with the Unified Repository (implemented by the Kopia repository).
A new block data mover will be added beside the existing file system data mover; both data movers read/write data from/to the same backup repository through the Unified Repo interface.
The Unified Repo interface and the backup repository need to be enhanced to support incremental backups.
|
||||
|
||||

|
||||
|
||||
For more details of VGDP architecture, see [Unified Repository design][1], [Volume Snapshot Data Movement design][2] and [VGDP Micro Service for Volume Snapshot Data Movement][3].
|
||||
|
||||
### Backup
|
||||
|
||||
Below is the architecture for block data mover backup which is developed based on the existing VBDM:
|
||||
|
||||

|
||||
|
||||
The existing VBDM is reused; below are the major changes based on it:
**Exposer**: The exposer needs to create a block mode backupPVC all the time, regardless of the sourcePVC mode.
**CBT**: This is a new layer to retrieve, transform and store the changed blocks; it interacts with the CSI SnapshotMetadataService through gRPC.
**Uploader**: A new block uploader is added. It interacts with the CBT layer, holds special logic to make performant data reads from block devices, and holds special logic to write incremental data to the Unified Repository.
|
||||
**Extended Kopia repo**: A new Incremental Aware Object Extension is added to Kopia's CAOS, so as to support incremental data write. Other parts of Kopia repository, including the existing CAOS and CABS, are not changed.
|
||||
|
||||
### Restore
|
||||
|
||||
Below is architecture for block data mover restore which is developed based on the existing VBDM:
|
||||
|
||||

|
||||
|
||||
The existing VBDM is reused; below are the major changes based on it:
**Exposer**: Since the restorePV is always in block mode, the exposer needs to rebind the restorePV to a targetPVC in either file system mode or block mode.
**Uploader**: The same block uploader holds special logic to make performant data writes to block devices and special logic to read data from the backup chain in the Unified Repository.
|
||||
|
||||
For more details of VBDM, see [Volume Snapshot Data Movement design][2].
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### Selectable Data Mover Type
|
||||
|
||||
#### Per Backup Selection
|
||||
At present, the backup accepts a `DataMover` parameter and when its value is empty or `velero`, VBDM is used.
|
||||
After block data mover is introduced, VBDM will have two types of data movers, Velero file system data mover and Velero block data mover.
|
||||
A new type string `velero-block` is introduced for Velero block data mover, that is, when `DataMover` is set as `velero-block`, Velero block data mover is used.
|
||||
Another new value `velero-fs` is introduced for Velero file system data mover, that is, when `DataMover` is set as `velero-fs`, Velero file system data mover is used.
|
||||
For backwards compatibility, `velero` is preserved as a valid value; it refers to the default data mover, and the default data mover may change between releases. At present, the Velero file system data mover is the default; the default may be changed to the Velero block data mover in future releases.
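As an illustration, a backup that explicitly selects the block data mover might look like the following sketch (the `datamover` field follows the existing Backup spec; names and namespaces are placeholders):

```yaml
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: nightly-block-backup      # placeholder name
  namespace: velero
spec:
  includedNamespaces:
    - my-app                      # placeholder namespace
  snapshotMoveData: true
  # "velero-block" selects the new block data mover; "velero-fs" selects the
  # file system data mover; "velero" (or empty) keeps the default.
  datamover: velero-block
```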
|
||||
|
||||
#### Volume Policy
|
||||
It is a valid case that users have multiple volumes in a single backup where they want to use the Velero file system data mover for some of the volumes and the Velero block data mover for the others.
|
||||
To meet this requirement, a combined solution of Per Backup Selection and Volume Policy is used.
|
||||
|
||||
Here are the data structs for VolumePolicy:
|
||||
```go
|
||||
type volPolicy struct {
|
||||
action Action
|
||||
conditions []volumeCondition
|
||||
}
|
||||
|
||||
type volumeCondition interface {
|
||||
match(v *structuredVolume) bool
|
||||
validate() error
|
||||
}
|
||||
|
||||
type structuredVolume struct {
|
||||
capacity resource.Quantity
|
||||
storageClass string
|
||||
nfs *nFSVolumeSource
|
||||
csi *csiVolumeSource
|
||||
volumeType SupportedVolume
|
||||
pvcLabels map[string]string
|
||||
pvcPhase string
|
||||
}
|
||||
|
||||
type Action struct {
|
||||
Type VolumeActionType `yaml:"type"`
|
||||
Parameters map[string]any `yaml:"parameters,omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
ConfigmapRefType string = "configmap"
|
||||
Skip VolumeActionType = "skip"
|
||||
FSBackup VolumeActionType = "fs-backup"
|
||||
Snapshot VolumeActionType = "snapshot"
|
||||
)
|
||||
```
|
||||
|
||||
`action.parameters` is used to provide extra information of the action. This is an ideal place to differentiate Velero file system data mover and Velero block data mover.
|
||||
Therefore, the Velero built-in data mover will support a `dataMover` key in `parameters`, with the value either `velero-fs` or `velero-block`; `velero-fs` and `velero-block` carry the same meaning as in Per Backup Selection.
|
||||
|
||||
As an example, here is how a user might use both `velero-block` and `velero-fs` in a single backup:
|
||||
- Users set `DataMover` parameter for the backup as `velero-block`
|
||||
- Users add a record into the Volume Policy, make `conditions` filter the volumes they want to back up through the Velero file system data mover, set `action.type` to `snapshot`, and insert a record into `action.parameter` as `dataMover:velero-fs`
|
||||
|
||||
In this way, all volumes matched by `conditions` will be backed up with the Velero file system data mover, while the others will fall back to the per-backup selection, the Velero block data mover.
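For reference, a minimal sketch of such a volume policy (using the existing resource policies ConfigMap format; the storage class name is a placeholder and `dataMover` is the new parameter key proposed above):

```yaml
version: v1
volumePolicies:
  # Volumes on this storage class are handled by the file system data mover...
  - conditions:
      storageClasses:
        - nfs-sc                  # placeholder: file-storage backed class
    action:
      type: snapshot
      parameters:
        dataMover: velero-fs
# ...all other volumes follow the per-backup selection (velero-block in this example).
```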
|
||||
|
||||
Vice versa, users could set the per backup method as file system data mover and select volumes for Velero block data mover.
|
||||
|
||||
The selected data mover for each volume should be recorded to `volumeInfo.json`.
|
||||
|
||||
### Controllers
|
||||
Backup controller and Restore controller are kept as is; async operations are still used to interact with VBDM for the block data mover.
|
||||
DataUpload controller and DataDownload controller are almost kept as is, with some minor changes to handle the data mover type and backup type appropriately and convey them to the exposers. With [VGDP Micro Service][3], the controllers are almost isolated from VGDP, so no major changes are required.
|
||||
|
||||
### Exposer
|
||||
|
||||
#### CSI Snapshot Exposer
|
||||
The existing CSI Snapshot Exposer is reused with some changes to decide the backupPVC volume mode from the data access mode. Specifically, for the Velero block data mover the access mode is always `Block`, so the backupPVC volume mode is always `Block`.
Once the backupPVC is created with the correct volume mode, the existing code could create the backupPod and mount the backupPVC appropriately.
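For reference, the backupPVC created by the exposer for the block data mover would roughly look like the sketch below; names are placeholders and the access mode simply follows the existing exposer logic:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-backup-pvc        # placeholder; real names are generated per DataUpload
  namespace: velero
spec:
  volumeMode: Block               # always Block for the block data mover
  accessModes:
    - ReadOnlyMany                # illustrative only
  dataSource:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: example-backup-snapshot # placeholder snapshot created from the source volume
  resources:
    requests:
      storage: 10Gi
```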
|
||||
|
||||
#### Generic Restore Exposer
|
||||
The existing Generic Restore Exposer is reused, but the workflow needs some changes.
|
||||
For the block data mover, the restorePV is in Block mode all the time, whereas the targetPVC may be in either file system mode or block mode.
However, Kubernetes doesn't allow binding a PV to a PVC with a mismatched volume mode.
|
||||
|
||||
Therefore, the workflow of ***Finish Volume Readiness*** as introduced in [Volume Snapshot Data Movement design][2] is changed as below:
|
||||
- When restore completes and restorePV is created, set restorePV's `deletionPolicy` to `Retain`
|
||||
- Create another rebindPV, copying restorePV's `volumeHandle` but with the `volumeMode` matching the targetPVC
|
||||
- Delete restorePV
|
||||
- Set the rebindPV's claim reference (the ```claimRef``` field) to targetPVC
|
||||
- Add the ```velero.io/dynamic-pv-restore``` label to the rebindPV
|
||||
|
||||
In this way, the targetPVC will be bound immediately by Kubernetes to rebindPV.
|
||||
|
||||
These changes work for the file system data mover as well, so the old workflow will be replaced; only the new workflow is kept.
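A simplified sketch of this rebind flow, using client-go; the helper name, label value and error handling are illustrative only, not the final implementation:

```go
package exposer

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func rebindRestoredPV(ctx context.Context, c kubernetes.Interface, restorePV *corev1.PersistentVolume, targetPVC *corev1.PersistentVolumeClaim) error {
	// 1. Retain the underlying volume so deleting restorePV doesn't delete the data.
	restorePV.Spec.PersistentVolumeReclaimPolicy = corev1.PersistentVolumeReclaimRetain
	if _, err := c.CoreV1().PersistentVolumes().Update(ctx, restorePV, metav1.UpdateOptions{}); err != nil {
		return err
	}

	// 2. Build rebindPV: same underlying volume (volumeHandle is copied via DeepCopy),
	//    but with the volumeMode matching the targetPVC.
	rebindPV := restorePV.DeepCopy()
	rebindPV.ResourceVersion = ""
	rebindPV.UID = ""
	rebindPV.Name = restorePV.Name + "-rebind" // illustrative naming
	rebindPV.Spec.VolumeMode = targetPVC.Spec.VolumeMode

	// 3. Pre-bind it to the target PVC and label it as a dynamically restored PV.
	rebindPV.Spec.ClaimRef = &corev1.ObjectReference{
		Namespace: targetPVC.Namespace,
		Name:      targetPVC.Name,
	}
	if rebindPV.Labels == nil {
		rebindPV.Labels = map[string]string{}
	}
	rebindPV.Labels["velero.io/dynamic-pv-restore"] = ""

	// 4. Delete restorePV, then create rebindPV so Kubernetes binds it to targetPVC.
	if err := c.CoreV1().PersistentVolumes().Delete(ctx, restorePV.Name, metav1.DeleteOptions{}); err != nil {
		return err
	}
	_, err := c.CoreV1().PersistentVolumes().Create(ctx, rebindPV, metav1.CreateOptions{})
	return err
}
```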
|
||||
|
||||
### VGDP
|
||||
|
||||
Below is the VGDP workflow during backup:
|
||||
|
||||

|
||||
|
||||
Below is the VGDP workflow during restore:
|
||||
|
||||

|
||||
|
||||
#### Unified Repo
|
||||
For block data mover, one Unified Repo Object is created for each volume, and some metadata is also saved into Unified Repo to describe the volume.
|
||||
During the backup, writes are conducted in a skippable-write manner:
|
||||
- For the data range that the write does not skip, object is written with the real data
|
||||
- For the data range that is skipped, the data is either filled as ZERO or cloned from the parent object. Specifically, for a full backup, data is filled as ZERO; for an incremental backup, data is cloned from the parent object
|
||||
|
||||
To support incremental backup, the `ObjectWriter` interface needs to be extended with `io.WriterAt`, so that the uploader can perform skippable writes:
|
||||
```go
|
||||
type ObjectWriter interface {
|
||||
io.WriteCloser
|
||||
io.WriterAt
|
||||
|
||||
// Seeker is used in the cases that the object is not written sequentially
|
||||
io.Seeker
|
||||
|
||||
// Checkpoint is periodically called to preserve the state of data written to the repo so far.
|
||||
// Checkpoint returns a unified identifier that represent the current state.
|
||||
// An empty ID could be returned on success if the backup repository doesn't support this.
|
||||
Checkpoint() (ID, error)
|
||||
|
||||
// Result waits for the completion of the object write.
|
||||
// Result returns the object's unified identifier after the write completes.
|
||||
Result() (ID, error)
|
||||
}
|
||||
```
|
||||
|
||||
To clone data from parent object, the caller needs to specify the parent object. To support this, `ObjectWriteOptions` is extended with `ParentObject`.
|
||||
The existing `AccessMode` could be used to indicate the data access type, either file system or block:
|
||||
|
||||
```go
|
||||
// ObjectWriteOptions defines the options when creating an object for write
|
||||
type ObjectWriteOptions struct {
|
||||
FullPath string // Full logical path of the object
|
||||
DataType int // OBJECT_DATA_TYPE_*
|
||||
Description string // A description of the object, could be empty
|
||||
Prefix ID // A prefix of the name used to save the object
|
||||
AccessMode int // OBJECT_DATA_ACCESS_*
|
||||
BackupMode int // OBJECT_DATA_BACKUP_*
|
||||
AsyncWrites int // Num of async writes for the object, 0 means no async write
|
||||
ParentObject ID // the parent object based on which incremental write will be done
|
||||
}
|
||||
```
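To illustrate how an uploader might drive this extended interface for an incremental backup, here is a rough sketch; the `ObjectWriter` shape mirrors the interface above, while the changed-range type and the device handling are hypothetical placeholders:

```go
package uploader

import (
	"fmt"
	"io"
	"os"
)

// ID and ObjectWriter mirror the Unified Repo types shown above, re-declared
// here only so the sketch is self-contained.
type ID string

type ObjectWriter interface {
	io.WriteCloser
	io.WriterAt
	io.Seeker

	Checkpoint() (ID, error)
	Result() (ID, error)
}

// changedRange is a hypothetical flattened entry from the CBT bitmap.
type changedRange struct {
	Offset, Length int64
}

// writeIncremental copies only the changed ranges from the block device into
// the repo object; everything else is implicitly cloned from the parent object
// that was named in ObjectWriteOptions.ParentObject when the writer was opened.
func writeIncremental(dev *os.File, w ObjectWriter, ranges []changedRange) (ID, error) {
	buf := make([]byte, 1<<20) // 1MB, matching the deduplication chunk size
	for _, r := range ranges {
		for done := int64(0); done < r.Length; {
			chunk := int64(len(buf))
			if rem := r.Length - done; rem < chunk {
				chunk = rem
			}
			n, err := dev.ReadAt(buf[:chunk], r.Offset+done)
			if err != nil && err != io.EOF {
				return "", fmt.Errorf("read device: %w", err)
			}
			if n == 0 {
				break
			}
			// Write at the same volume offset: the repo object stays positionally
			// identical to the volume, which is what makes the parent clone valid.
			if _, err := w.WriteAt(buf[:n], r.Offset+done); err != nil {
				return "", fmt.Errorf("write repo object: %w", err)
			}
			done += int64(n)
		}
	}
	if err := w.Close(); err != nil {
		return "", err
	}
	return w.Result()
}
```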
|
||||
|
||||
To support non-Kopia uploaders saving snapshots to the Unified Repo, snapshot-related methods will be added to the `BackupRepo` interface:
|
||||
```go
|
||||
// SaveSnapshot saves a repo snapshot
|
||||
SaveSnapshot(ctx context.Context, snapshot Snapshot) (ID, error)
|
||||
|
||||
// GetSnapshot returns a repo snapshot from snapshot ID
|
||||
GetSnapshot(ctx context.Context, id ID) (Snapshot, error)
|
||||
|
||||
// DeleteSnapshot deletes a repo snapshot
|
||||
DeleteSnapshot(ctx context.Context, id ID) error
|
||||
|
||||
// ListSnapshot lists all snapshots in repo for the given source (if specified)
|
||||
ListSnapshot(ctx context.Context, source string) ([]Snapshot, error)
|
||||
```
|
||||
|
||||
To support non-Kopia uploaders saving metadata, which is used to describe the backed up objects, some metadata-related methods will be added to the `BackupRepo` interface:
|
||||
```go
|
||||
// WriteMetadata writes metadata to the repo, metadata is used to describe data, e.g., file system
|
||||
// dirs are saved as metadata
|
||||
WriteMetadata(ctx context.Context, meta *Metadata, opt ObjectWriteOptions) (ID, error)
|
||||
|
||||
// ReadMetadata reads a metadata from repo by the metadata's object ID
|
||||
ReadMetadata(ctx context.Context, id ID) (*Metadata, error)
|
||||
```
|
||||
|
||||
kopia-lib for Unified Repo will implement these interfaces by calling the corresponding Kopia repository functions.
|
||||
|
||||
### Kopia Repository
|
||||
CAOS of the Kopia repository implements the Unified Repo's Objects. However, CAOS supports only full, sequential writes.
|
||||
To make it support skippable write, a new Incremental Aware Object Extension is created based on the existing CAOS.
|
||||
|
||||
#### Block Address Table
|
||||
Kopia CAOS uses Block Address Table (BAT) to track objects. It will be reused for both full backups and incremental backups.
|
||||
|
||||

|
||||
|
||||
For Incremental Aware Object Extension, one object represents one volume.
|
||||
For a full backup, the skipped areas will be written as all ZERO by the Incremental Aware Object Extension, since the Kopia repository's interface doesn't support skippable writes. This is fine: the ZERO data will be deduplicated by the Kopia repository, so nothing is actually written to the backup storage.
For an incremental backup, the Incremental Aware Object Extension clones the table entries from the parent object for the skipped areas; for the written areas, it writes the data to the Kopia repository and generates new entries. Finally, it generates a new block address table for the incremental object which covers its entire logical space.
|
||||
|
||||
Incremental Aware Object Extension is automatically activated for block mode data access as set by `AccessMode` of `ObjectWriteOptions`.
|
||||
|
||||
#### Deduplication
|
||||
The Incremental Aware Object Extension uses a fixed-size splitter for deduplication, which is good enough for block level backup, for the following reasons:
- Unlike a file, a disk write never inserts data into the middle of the disk; it only does in-place updates or appends. So the data never shifts between two disks or between two different backups of the same disk
- File system IO to disk is generally aligned to a specific size, e.g., 4KB for NTFS and ext4; as long as the chunk size is a multiple of this size, it effectively reduces the case that one IO invalidates two deduplication chunks
- For use cases where the disk is used as a raw block device without a file system, IO is still conducted aligned to a specific boundary
|
||||
|
||||
The chunk size is intentionally chosen as 1MB, for the following reasons:
- 1MB is a multiple of 4KB for file systems and of common block sizes for raw block device usage
- 1MB is the start boundary of partitions for modern operating systems, for both MBR and GPT, so partition metadata could be isolated to a separate chunk
- The more chunks there are, the more indexes in the repository; 1MB is a moderate value with regard to the index overhead of the Kopia repository
|
||||
|
||||
#### Benefits
|
||||
Since the existing block address table (BAT) of CAOS is reused and kept as is, this brings the following benefits:
- All the entries are still managed by Kopia CAOS, so Velero doesn't need to keep any extra data
- The objects written by the Velero block uploader are still recognizable by Kopia, for both full backup and incremental backup
|
||||
- The existing data management in Kopia repository still works for objects generated by Velero block uploader, e.g., snapshot GC, repository maintenance, etc.
|
||||
|
||||
Most importantly, this solution is super performant:
|
||||
- During incremental write, it doesn't copy any data from the parent object, instead, it only clones object block address entries
|
||||
- During backup deletion, it doesn't need to move any data, it only deletes the BAT for the object
|
||||
|
||||
#### Uploader behavior
|
||||
The block uploader's skippable write must also be aligned to this 1MB boundary, because Incremental Aware Object Extension needs to clone the entries that have been skipped from the parent object.
|
||||
The file system uploader still uses variable-sized deduplication; it is fine to keep data from the two uploaders in the same Kopia repository, though normally they won't be mutually deduplicated.
Volumes could be resized, and the volume size may not be aligned to a 1MB boundary. The uploader needs to handle resizes appropriately since the Incremental Aware Object Extension cannot copy a BAT entry partially.
|
||||
|
||||
#### CBT Layer
|
||||
CBT provides below functionalities:
|
||||
1. For a full backup, it provides the allocated data ranges. E.g., for a 1TB volume, there may be only 1MB of files, with this functionality, the uploader could skip the ranges without real data
|
||||
2. For an incremental backup, it provides the changed data ranges based on the provided parent snapshot. In this way, the uploader could skip the unchanged data and achieves an incremental backup
|
||||
|
||||
For case 1, the uploader calls the Unified Repo Object's `WriteAt` method with the offset for the allocated data; ranges ahead of the offset will be filled as ZERO by the unified repository.
For case 2, the uploader calls the Unified Repo Object's `WriteAt` method with the offset for the changed data; ranges ahead of the offset will be cloned from the parent object by the unified repository.
|
||||
|
||||
A changeId is stored with each backup, the next backup will retrieve the parent snapshot's changeId and use it to retrieve the CBT.
|
||||
|
||||
The CBT retrieved from the Kubernetes API is a list of `BlockMetadata`; each range could be of fixed or variable size.
|
||||
Block uploader needs to maintain its own granularity that is friendly to its backup repository and uploader, as mentioned above.
|
||||
|
||||
From the Kubernetes API, `GetMetadataAllocated` or `GetMetadataDelta` are called in a loop until all `BlockMetadata` are retrieved.
On the other hand, considering the complexity in the uploader, e.g., multiple streams between read and write, the workflow should be driven by the uploader instead of the CBT iterator; therefore, in practice, all the allocated/changed blocks should be retrieved and preserved before passing them to the uploader.
|
||||
|
||||
Moreover, directly saving the `BlockMetadata` list would be memory consuming.
|
||||
|
||||
With all the above considerations, a `Bitmap` data structure is used to save the allocated/changed blocks, called the CBT Bitmap.
The CBT Bitmap chunk size could be set as 1MB or a multiple of it, but a larger chunk size would amplify the backup size, so a 1MB size will be used.
|
||||
|
||||
Finally, interactions among CSI Snapshot Metadata Service, CBT Layer and Uploader is like below:
|
||||
|
||||

|
||||
|
||||
In this way, the CBT layer and the uploader are decoupled, and the CBT bitmap acts as a north-bound parameter of the uploader.
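A minimal sketch of such a bitmap, assuming a 1MB block size and a locally defined stand-in for the `BlockMetadata` ranges returned by the CSI SnapshotMetadata service:

```go
package cbt

// BlockMetadata mirrors the shape of the ranges returned by the CSI
// SnapshotMetadata service (byte offset + size); it is a local placeholder here.
type BlockMetadata struct {
	ByteOffset int64
	SizeBytes  int64
}

const blockSize = int64(1 << 20) // 1MB, matching the uploader/dedup chunk size

// Bitmap marks which 1MB blocks of the volume are allocated/changed.
type Bitmap struct {
	bits []byte
}

func NewBitmap(volumeSizeBytes int64) *Bitmap {
	blocks := (volumeSizeBytes + blockSize - 1) / blockSize
	return &Bitmap{bits: make([]byte, (blocks+7)/8)}
}

func (b *Bitmap) set(block int64) { b.bits[block/8] |= 1 << uint(block%8) }

// IsSet reports whether the given 1MB block needs to be read and uploaded.
func (b *Bitmap) IsSet(block int64) bool { return b.bits[block/8]&(1<<uint(block%8)) != 0 }

// Mark records one CBT range; any 1MB block the range touches is marked dirty,
// so the uploader IO stays aligned to the 1MB boundary.
func (b *Bitmap) Mark(m BlockMetadata) {
	if m.SizeBytes <= 0 {
		return
	}
	first := m.ByteOffset / blockSize
	last := (m.ByteOffset + m.SizeBytes - 1) / blockSize
	for blk := first; blk <= last; blk++ {
		b.set(blk)
	}
}
```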
|
||||
|
||||
#### Block Uploader
|
||||
Block uploader consists of the reader and writer which are running asynchronously.
|
||||
During backup, reader reads data from the block device and also refers to CBT Bitmap for allocated/changed blocks; writer writes data to the Unified Repo.
|
||||
During restore, reader reads data from the Unified Repo; writer writes data to the block device.
|
||||
|
||||
The reader and writer are connected by a ring buffer; that is, the reader pushes block data to the ring buffer and the writer gets data from the ring buffer and writes it to the target.
|
||||
|
||||
To improve performance, block device is opened with direct IO, so that no data is going through the system cache unnecessarily.
|
||||
|
||||
During restore, to optimize the write throughput and storage usage, zero blocks should be either skipped (for restoring to a new volume) or unmapped (for restoring to an existing volume). To cover the both cases in a unified way, the SCSI command `WRITE_SAME` is used. Logics are as below:
|
||||
- Detect if a block read from the backup is with all zero data
|
||||
- If true, the uploader sends `WRITE_SAME` SCSI command by calling `BLKZEROOUT` ioctl
|
||||
- If the call fails, the uploader falls back to the conservative way of writing all zero bytes to the disk
|
||||
|
||||
The uploader implementation is OS dependent, but since Windows containers don't support block volumes, the current implementation is for Linux only.
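An illustrative, Linux-only sketch of this zero-block path, assuming `golang.org/x/sys/unix`; buffering and efficiency concerns are ignored:

```go
package uploader

import (
	"bytes"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

// writeBlock restores one block to the target device: all-zero blocks are punched
// with the BLKZEROOUT ioctl (letting the kernel/storage issue WRITE SAME or unmap),
// falling back to a plain write if the ioctl is not supported.
// Offsets and lengths are assumed to be logical-block aligned.
func writeBlock(dev *os.File, offset int64, data []byte) error {
	zero := make([]byte, len(data))
	if bytes.Equal(data, zero) {
		// BLKZEROOUT takes a [offset, length] pair in bytes.
		rng := [2]uint64{uint64(offset), uint64(len(data))}
		_, _, errno := unix.Syscall(
			unix.SYS_IOCTL,
			dev.Fd(),
			unix.BLKZEROOUT,
			uintptr(unsafe.Pointer(&rng[0])),
		)
		if errno == 0 {
			return nil
		}
		// Ioctl not supported: fall through to the conservative zero write below.
	}
	_, err := dev.WriteAt(data, offset)
	return err
}
```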
|
||||
|
||||
#### ChangeId
|
||||
ChangeId identifies the base that the CBT is generated from; it must strictly map to the parent snapshot in the repository, otherwise the incremental backup will be corrupted.
|
||||
Therefore, ChangeId is saved together with the repository snapshot.
|
||||
The data mover always queries parent snapshot from Unified Repo together with the ChangeId. In this way, no mismatch would happen.
|
||||
Inside the uploader, the upper layer (DataUpload controller) could also provide the ChangeId as a mechanism of double confirmation. The received ChangeId would be re-evaluated against the one in the provided snapshot.
|
||||
|
||||
For Kubernetes API, changeId is represented by `BaseSnapshotId`.
|
||||
ChangeId retrieval is storage specific; generally, it is retrieved from the `SnapshotHandle` of the VolumeSnapshotContent object, but storages may also refer to other places to retrieve the changeId.
That is, `SnapshotHandle` and changeId may be two different values; in this case both values need to be preserved.
|
||||
|
||||
#### Volume Snapshot Retention
|
||||
Storages/CSI drivers may support the changeId differently based on the storage's capabilities:
|
||||
1. In order to calculate the changes, some storages require that the parent snapshot mapping to the changeId still exists at the time `GetMetadataDelta` is called; in that case the parent snapshot can NOT be deleted as long as there are incremental backups based on it.
|
||||
2. Some storages don't require the parent snapshot itself at the time of calculating changes, then parent snapshot could be deleted immediately after the parent backup completes.
|
||||
|
||||
The existing exposer works perfectly with Case 2, that is, the snapshot is always deleted when the backup completes.
However, for Case 1, since the snapshot must be retained, the exposer needs changes as below:
|
||||
- At the end of each backup, keep the current VolumeSnapshot's `deletionPolicy` as `Retain`, then when the VolumeSnapshot is deleted at the end of the backup, the current snapshot is retained in the storage
|
||||
- `GetMetadataDelta` is called with `BaseSnapshotId` set as the preserved changeId
|
||||
- When deleting a backup, a VolumeSnapshot-VolumeSnapshotContent pair is rebuilt with `deletionPolicy` as `delete` and `snapshotHandle` as the preserved one
|
||||
- Then the rebuilt VolumeSnapshot is deleted so that the volume snapshot is deleted from the storage
|
||||
|
||||
There is no way to automatically detect which way a specific volume supports, so an interface is exposed to users to set the volume snapshot retention method.
The interface could be added to the `Action.Parameters` of the Volume Policy. By default, the Velero block data mover assumes Case 2, so the volume snapshot is never retained; if users specify the `RetainSnapshot` parameter, the retention workflow for Case 1 will be taken.
|
||||
```go
|
||||
type Action struct {
|
||||
Type VolumeActionType `yaml:"type"`
|
||||
Parameters map[string]any `yaml:"parameters,omitempty"`
|
||||
}
|
||||
```
|
||||
In this way, users could specify that, for storage class "xxx" or CSI driver "yyy", volumes are backed up through CSI snapshots with the Velero block data mover and the snapshot is retained.
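For example, a volume policy entry enabling both the block data mover and snapshot retention might look like the sketch below; `dataMover` and `RetainSnapshot` are the parameter keys proposed above and the storage class name is a placeholder:

```yaml
version: v1
volumePolicies:
  - conditions:
      storageClasses:
        - ceph-rbd-sc             # placeholder: storage that needs the parent snapshot kept
    action:
      type: snapshot
      parameters:
        dataMover: velero-block
        RetainSnapshot: "true"
```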
|
||||
|
||||
#### Incremental Size
|
||||
By the end of the backup, the incremental size is also returned by the uploader, the same as for the Velero file system uploader. The size indicates how much data is unique and therefore processed by the uploader, based on the provided CBT.
|
||||
|
||||
### Fallback to Full Backup
|
||||
There are some occasions that the incremental backup won't continue, so the data mover fallbacks to full backup:
|
||||
- `GetMetadataAllocated` or `GetMetadataDelta` returns error
|
||||
- ChangeId is missing
|
||||
- Parent snapshot is missing
|
||||
|
||||
When the fallback happens, the volume will be fully backed up from the block level; however, because of the data deduplication in the backup repository, the unallocated/unchanged data will most likely be deduplicated.
During restore, the volume will also be fully restored. The zero-block handling mentioned above still applies, so write IO for unallocated data is largely eliminated.
|
||||
|
||||
Fallback handles exceptional cases; for most backups/restores, a fallback is never expected.
|
||||
|
||||
### Irregular Volume Size
|
||||
As mentioned above, during incremental backup the block uploader IO should be aligned to the deduplication chunk size (1MB); on the other hand, there is no hard requirement that users' volume sizes be aligned.
|
||||
To support volumes with irregular size, below measures are taken:
|
||||
- Volume objects in the repository are always aligned to 1MB
|
||||
- If the volume size is irregular, zero bytes will be padded to the tail of the volume object
|
||||
- A real size is recorded in the repository snapshot
|
||||
- During restore, the real size of data is restored
|
||||
|
||||
The padding must be always with zero bytes.
|
||||
|
||||
### Volume Size Change
|
||||
Incremental backup can continue when a volume is resized.
The block uploader supports writing disks of arbitrary size.
Volume resize cases therefore don't need to be handled case by case.
|
||||
|
||||
Instead, when a volume resize happens, the block uploader needs to handle it appropriately in the ways below (a sketch follows these lists):
|
||||
- Loop with CBT
|
||||
- Read data between RoundDownTo1M(newSize) and newSize to get the tail data
|
||||
- If there is no tail data, which means the volume size is aligned to 1MB, then call `WriteAt(newSize, nil)`
|
||||
- Otherwise, call `WriteAt(RoundDownTo1M(newSize), taildata)`, where `taildata` is also padded to 1MB
|
||||
|
||||
That is to say:
|
||||
- If CBT covers the tail of the volume, looping with CBT is enough for both the shrink and expand cases
- Otherwise, if the volume is expanded, `WriteAt` guarantees that the appropriate object entries are cloned from the parent object and zero data is appended for the expanded areas. In particular, if the parent volume is not of a regular size, the zero padding bytes are also reused. Therefore, the parent object's padding bytes must be zero
- In the case the volume is shrunk, writing the tail data makes sure zero bytes are padded to the new volume object instead of inheriting non-zero data from the parent object
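A sketch of the tail handling described above; `io.WriterAt` stands in for the repo object writer, `readDevice` is a hypothetical helper, and the design's `WriteAt(offset, data)` appears in Go's `(data, offset)` argument order:

```go
package uploader

import "io"

const chunkSize = int64(1 << 20) // 1MB alignment used by the repo objects

// roundDownTo1M truncates a size to the previous 1MB boundary.
func roundDownTo1M(size int64) int64 { return size - size%chunkSize }

// writeTail handles the last, possibly partial, 1MB block after a resize: it
// re-reads the tail from the device, pads it with zero bytes to a full chunk,
// and writes it so the new object never inherits stale data from the parent.
func writeTail(w io.WriterAt, newSize int64, readDevice func(off, length int64) ([]byte, error)) error {
	tailOff := roundDownTo1M(newSize)
	if tailOff == newSize {
		// Volume size is 1MB-aligned: just mark the new logical end of the object.
		_, err := w.WriteAt(nil, newSize)
		return err
	}
	tail, err := readDevice(tailOff, newSize-tailOff)
	if err != nil {
		return err
	}
	padded := make([]byte, chunkSize)
	copy(padded, tail) // zero padding beyond the real tail data
	_, err = w.WriteAt(padded, tailOff)
	return err
}
```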
|
||||
|
||||
### Cancellation
|
||||
The existing Cancellation mechanism is reused, so there is no change outside of the block uploader.
|
||||
Inside the uploader, cancellation checkpoints are embedded to the uploader reader and writer, so that the execution could quit in a reasonable time once cancellation happens.
|
||||
|
||||
### Parallelism
|
||||
Parallelism among data movers will reuse the existing mechanism: load concurrency.
Inside the data mover, the uploader reader and writer always run in parallel. The number of readers and writers is always 1 each.
Sequential read/write of the volume is already well optimized, and there is no proof that multiple readers/writers would be beneficial.
|
||||
|
||||
### Progress Report
|
||||
Progress report outside of the data mover will reuse the existing mechanism.
|
||||
Inside the data mover, progress update is embedded to the uploader writer.
|
||||
The progress struct is kept as is, Velero block data mover still supports `TotalBytes` and `BytesDone`:
|
||||
```go
|
||||
type Progress struct {
|
||||
TotalBytes int64 `json:"totalBytes,omitempty"`
|
||||
BytesDone int64 `json:"doneBytes,omitempty"`
|
||||
}
|
||||
```
|
||||
By the end of the backup, the progress for block data mover provides the same `GetIncrementalSize` which reports the incremental size of the backup, so that the incremental size is reported to users in the same way as the file system data mover.
|
||||
|
||||
### Selectable Backup Type
|
||||
For many reasons, a periodical full backup is required:
|
||||
- From a user experience point of view, a periodic full backup is required to ensure data integrity across the incremental backups, e.g., every week or every month
|
||||
|
||||
Therefore, backup type (full/incremental) should be supported in Velero's manual backup and backup schedule.
|
||||
Backup type will also be added to `volumeInfo.json` to support observability purposes.
|
||||
|
||||
Backup TTL is still used for users to specify a backup's retention time. By default, both full and incremental backups have a 30-day retention, even though this is not so reasonable for full backups. This could be enhanced when Velero supports a sophisticated retention policy.
As a workaround, users could create two schedules for the same scope of backup: one for full backups, with lower frequency and a longer backup TTL; the other for incremental backups, with normal frequency and a shorter backup TTL.
|
||||
|
||||
#### File System Data Mover
|
||||
At present, the Velero file system data mover doesn't support a selectable backup type; instead, incremental backups are always conducted when possible.
From a user experience perspective this is not reasonable.
|
||||
|
||||
Therefore, to solve this problem and to make it align with Velero block data mover, Velero file system data mover will support backup type as well.
|
||||
|
||||
At present, the data path for the Velero file system data mover already supports it; we only need to expose this functionality to users.
|
||||
|
||||
### Backup Describe
|
||||
Backup type should be added to backup description, there are two appearances:
|
||||
- The `backupType` in the Backup CR. This is the backup type selected by the user
|
||||
- The backup type recorded in `volumeInfo.json`, which is the actual type taken by the backup
|
||||
With these two values, users are able to know the actual backup type and also whether a fallback happens.
|
||||
|
||||
The `DataMover` item in the existing backup description should be updated to reflect the actual data mover completing the backup, this information could be retrieved from `volumeInfo.json`.
|
||||
|
||||
### Backup Sync
|
||||
No more data is required for sync, so Backup Sync is kept as is.
|
||||
|
||||
### Backup Deletion
|
||||
As mentioned above, no data is moved when deleting a repo snapshot for the Velero block data mover, so Backup Deletion is kept as is with regard to repo snapshots; for the volume snapshot retention case, backup deletion logic will be modified accordingly to delete the retained snapshots.
|
||||
|
||||
### Restarts
|
||||
Restarts mechanism is reused without any change.
|
||||
|
||||
### Logging
|
||||
Logging mechanism is not changed.
|
||||
|
||||
### Backup CRD
|
||||
A `backupType` field is added to Backup CRD, two values are supported `full` or `incremental`.
|
||||
`full` indicates that the data mover should take a full backup.
`incremental`, which is the default value, indicates that the data mover should take an incremental backup.
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
description: BackupSpec defines the specification for a Velero backup.
|
||||
properties:
|
||||
backupType:
|
||||
description: BackupType indicates the type of the backup
|
||||
enum:
|
||||
- full
|
||||
- incremental
|
||||
type: string
|
||||
```
|
||||
|
||||
### DataUpload CRD
|
||||
A `parentSnapshot` field is added to the DataUpload CRD, below values are supported:
|
||||
- `""`: it falls back to `auto`
- `auto`: the data mover finds the most recent snapshot of the same volume in the Unified Repository and uses it as the parent
- `none`: the data mover is not assigned a parent snapshot, so it runs a full backup
- a specific snapshotID: the data mover uses the specific snapshotID to find the parent snapshot. If it cannot be found, the data mover falls back to a full backup
|
||||
|
||||
The last option is intended for backup plans; it will not be used for now and may become useful when Velero supports a sophisticated retention policy. Until then, Velero always uses the most recent backup as the parent.
|
||||
|
||||
When `backupType` of the Backup is `full`, the data mover controller sets `none` to `parentSnapshot` of DataUpload.
|
||||
When `backupType` of the Backup is `incremental`, the data mover controller sets `auto` to `parentSnapshot` of DataUpload. And `""` is just kept for backwards compatibility consideration.
|
||||
|
||||
```yaml
spec:
  description: DataUploadSpec is the specification for a DataUpload.
  properties:
    parentSnapshot:
      description: |-
        ParentSnapshot specifies the parent snapshot that the current backup is based on.
        If its value is "" or "auto", the data mover finds the most recent backup of the same volume as the parent.
        If its value is "none", the data mover will do a full backup.
        If its value is a specific snapshotID, the data mover uses that specific snapshot as the parent.
      type: string
```

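The controller-side mapping described above is simple; the sketch below illustrates it, assuming a small helper in the data upload creation path (the function name is illustrative, not the actual Velero code).

```go
package main

import "fmt"

// parentSnapshotForBackup sketches the mapping described above; the actual
// controller code and names in Velero may differ.
func parentSnapshotForBackup(backupType string) string {
    switch backupType {
    case "full":
        // A full backup was requested, so no parent snapshot is assigned.
        return "none"
    default:
        // "incremental" and "" (kept for backwards compatibility) both let the
        // data mover pick the most recent snapshot of the same volume.
        return "auto"
    }
}

func main() {
    fmt.Println(parentSnapshotForBackup("full"))        // none
    fmt.Println(parentSnapshotForBackup("incremental")) // auto
    fmt.Println(parentSnapshotForBackup(""))            // auto
}
```
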
### DataDownload CRD

No change is required to the DataDownload CRD.

## Plugin Data Movers

The current design doesn't break anything for plugin data movers.

The VolumePolicy enhancement can also be used for plugin data movers. That is, users can select a plugin data mover through VolumePolicy in the same way as the Velero built-in data movers.

## Installation

No change to Installation.

## Upgrade

No impact on Upgrade. The new CRD fields are all optional and have backwards-compatible values.

## CLI

A backup type parameter is added to the Velero CLI as below:

```
velero backup create --full
velero schedule create --full
```

When the parameter is not specified, Velero defaults to incremental backups.

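For example, the two-schedule workaround described earlier could be created with the existing `--schedule` and `--ttl` flags plus the proposed `--full` flag (schedule names and cron values are illustrative):

```
velero schedule create myapp-weekly-full --schedule="0 1 * * 0" --ttl=2160h --full
velero schedule create myapp-daily-incremental --schedule="0 1 * * 1-6" --ttl=336h
```
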
[1]: ../Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
[2]: ../Implemented/volume-snapshot-data-movement/volume-snapshot-data-movement.md
[3]: ../Implemented/vgdp-micro-service/vgdp-micro-service.md
[4]: https://kubernetes.io/blog/2025/09/25/csi-changed-block-tracking/
[5]: https://kopia.io/docs/advanced/architecture/

BIN
design/block-data-mover/caos-extension.png
Normal file
|
After Width: | Height: | Size: 518 KiB |
BIN
design/block-data-mover/cbt.png
Normal file
|
After Width: | Height: | Size: 377 KiB |
BIN
design/block-data-mover/data-path-overview.png
Normal file
|
After Width: | Height: | Size: 389 KiB |
BIN
design/block-data-mover/restore-architecture.png
Normal file
|
After Width: | Height: | Size: 476 KiB |
BIN
design/block-data-mover/vgdp-backup.png
Normal file
|
After Width: | Height: | Size: 547 KiB |
BIN
design/block-data-mover/vgdp-restore.png
Normal file
|
After Width: | Height: | Size: 504 KiB |
93
design/custom-volume-policy-action.md
Normal file
@@ -0,0 +1,93 @@
# Add custom volume policy action

## Abstract

Currently, velero supports 3 different volume policy actions: snapshot, fs-backup, and skip, which tell Velero how to handle backing up PVC contents. Any other policy action is not allowed. This prevents third-party BackupItemAction (BIA) plugins from performing different actions on PVCs via defined volume policies.

## Background

An external BIA plugin that wants to back up volumes via some custom means (i.e. not CSI snapshots or fs-backup with kopia) is not able to make use of the existing volume policy API. While the plugin could use something like PVC annotations instead, this won't integrate with existing volume policies; such integration is desirable when the user wants some PVCs to use the custom plugin while leaving others on CSI snapshots or fs-backup.

## Goals

- Add a fourth valid volume policy action "custom"
- Make use of the existing action parameters field to distinguish between multiple custom actions.

## Non Goals

- Implementing custom action logic in the velero repo

## High-Level Design

A new VolumeActionType with the value "custom" will be added to `internal/resourcepolicies`. When the action is "custom", velero will not perform a snapshot or use fs-backup on the PVC. If there is no registered plugin which implements the desired custom action, then it will be equivalent to the "skip" action. Since there could be different plugins that implement custom actions, when making the API call (defined below) the plugin should also pass in a partial action parameters map containing the portion of the map that identifies the custom plugin as belonging to a particular external implementation. For example, there might be a custom BIA that's looking for a `custom` volume policy action with the parameter `myCustomAction=true`. The volume policy action would be defined like this:

```yaml
action:
  type: custom
  parameters:
    myCustomAction: true
```

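For context, here is a sketch of how this action could sit in a complete resource policies file; the `storageClass` condition is one of the existing volume policy conditions, and the values are made up:

```yaml
version: v1
volumePolicies:
- conditions:
    storageClass:
    - my-custom-sc
  action:
    type: custom
    parameters:
      myCustomAction: true
```
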
In `internal/volumehelper/volume_policy_helper.go` a new interface method will be added, similar to `ShouldPerformSnapshot`, but it takes a partial parameter map as an additional input: for custom actions to match, the action type must be `custom`, but there may also be a parameter that needs to match (to distinguish between different custom actions). We also want a way for the plugin to get the parameter map for the action. This should probably just return the map rather than the Action struct, since the Action struct is under `internal`.

```go
type VolumeHelper interface {
    ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource) (bool, error)
    ShouldPerformFSBackup(volume corev1api.Volume, pod corev1api.Pod) (bool, error)
    ShouldPerformCustomAction(obj runtime.Unstructured, groupResource schema.GroupResource, parameters map[string]any) (bool, error)
    GetActionParameters(obj runtime.Unstructured, groupResource schema.GroupResource) (map[string]any, error)
}
```

In addition, since the VolumeHelper interface is expected to be called by external plugins, the interface (but not the implementation) should be moved from `internal/volumehelper` to `pkg/util/volumehelper`.

In `pkg/plugin/utils/volumehelper/volume_policy_helper.go`, a new helper func will be added which delegates to the internal `volumehelper.NewVolumeHelperImplWithNamespaces`:

```go
func NewVolumeHelper(
    volumePolicy *resourcepolicies.Policies,
    snapshotVolumes *bool,
    logger logrus.FieldLogger,
    client crclient.Client,
    defaultVolumesToFSBackup bool,
    backupExcludePVC bool,
    namespaces []string,
) (VolumeHelper, error) {
```

## Alternative Considered

An alternate approach was to create a new server arg to allow user-defined parameters. That was rejected in favor of this approach, as the explicitly-supported "custom" option integrates more easily into a supportable plugin-callable API.

46
go.mod
@@ -26,7 +26,7 @@ require (
|
||||
github.com/hashicorp/go-plugin v1.6.0
|
||||
github.com/joho/godotenv v1.3.0
|
||||
github.com/kopia/kopia v0.16.0
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0
|
||||
github.com/onsi/ginkgo/v2 v2.22.0
|
||||
github.com/onsi/gomega v1.36.1
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9
|
||||
@@ -42,10 +42,11 @@ require (
|
||||
github.com/vmware-tanzu/crash-diagnostics v0.3.7
|
||||
go.uber.org/zap v1.27.1
|
||||
golang.org/x/mod v0.30.0
|
||||
golang.org/x/oauth2 v0.33.0
|
||||
golang.org/x/text v0.31.0
|
||||
golang.org/x/oauth2 v0.34.0
|
||||
golang.org/x/sys v0.40.0
|
||||
golang.org/x/text v0.32.0
|
||||
google.golang.org/api v0.256.0
|
||||
google.golang.org/grpc v1.77.0
|
||||
google.golang.org/grpc v1.79.3
|
||||
google.golang.org/protobuf v1.36.10
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.33.3
|
||||
@@ -63,7 +64,7 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cel.dev/expr v0.25.1 // indirect
|
||||
cloud.google.com/go v0.121.6 // indirect
|
||||
cloud.google.com/go/auth v0.17.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
@@ -93,13 +94,13 @@ require (
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/edsrzf/mmap-go v1.2.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
@@ -143,7 +144,7 @@ require (
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.97 // indirect
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/spdystream v0.5.1 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
@@ -168,29 +169,28 @@ require (
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/zeebo/blake3 v0.2.4 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
|
||||
go.opentelemetry.io/otel v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.40.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/crypto v0.46.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/net v0.48.0 // indirect
|
||||
golang.org/x/sync v0.19.0 // indirect
|
||||
golang.org/x/term v0.38.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.org/x/tools v0.39.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
|
||||
96
go.sum
@@ -1,7 +1,7 @@
|
||||
al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho=
|
||||
al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=
|
||||
cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
@@ -189,8 +189,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=
|
||||
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=
|
||||
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w=
|
||||
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
@@ -227,15 +227,15 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM=
|
||||
github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=
|
||||
github.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA=
|
||||
github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=
|
||||
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
@@ -507,8 +507,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 h1:Q3jQ1NkFqv5o+F8dMmHd8SfEmlcwNeo1immFApntEwE=
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y=
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 h1:bMqrb3UHgHbP+PW9VwiejfDJU1R0PpXVZNMdeH8WYKI=
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
|
||||
@@ -550,8 +550,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/moby/spdystream v0.5.1 h1:9sNYeYZUcci9R6/w7KDaFWEWeV4LStVG78Mpyq/Zm/Y=
|
||||
github.com/moby/spdystream v0.5.1/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -742,24 +742,24 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
|
||||
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
||||
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
||||
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
|
||||
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
|
||||
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
||||
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
|
||||
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
||||
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
|
||||
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
|
||||
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
|
||||
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
|
||||
go.starlark.net v0.0.0-20201006213952-227f4aabceb5/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU=
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
|
||||
@@ -790,8 +790,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -876,8 +876,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -891,8 +891,8 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
|
||||
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -904,8 +904,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -969,14 +969,14 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
|
||||
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -986,8 +986,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1047,8 +1047,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1134,10 +1134,10 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
@@ -1159,8 +1159,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
|
||||
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
|
||||
google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE=
|
||||
google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM --platform=$TARGETPLATFORM golang:1.25-bookworm
|
||||
FROM --platform=$TARGETPLATFORM golang:1.25-trixie
|
||||
|
||||
ARG GOPROXY
|
||||
|
||||
@@ -21,9 +21,11 @@ ENV GO111MODULE=on
|
||||
ENV GOPROXY=${GOPROXY}
|
||||
|
||||
# kubebuilder test bundle is separated from kubebuilder. Need to setup it for CI test.
|
||||
RUN curl -sSLo envtest-bins.tar.gz https://go.kubebuilder.io/test-tools/1.22.1/linux/$(go env GOARCH) && \
|
||||
mkdir /usr/local/kubebuilder && \
|
||||
tar -C /usr/local/kubebuilder --strip-components=1 -zvxf envtest-bins.tar.gz
|
||||
# Using setup-envtest to download envtest binaries
|
||||
RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@v0.0.0-20260305094418-8122a6266696 && \
|
||||
mkdir -p /usr/local/kubebuilder/bin && \
|
||||
ENVTEST_ASSETS_DIR=$(setup-envtest use 1.33.0 --bin-dir /usr/local/kubebuilder/bin -p path) && \
|
||||
cp -r ${ENVTEST_ASSETS_DIR}/* /usr/local/kubebuilder/bin/
|
||||
|
||||
RUN wget --quiet https://github.com/kubernetes-sigs/kubebuilder/releases/download/v3.2.0/kubebuilder_linux_$(go env GOARCH) && \
|
||||
mv kubebuilder_linux_$(go env GOARCH) /usr/local/kubebuilder/bin/kubebuilder && \
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# Use /output/usr/bin/ as the default output directory as this
|
||||
# is the path expected by the Velero Dockerfile.
|
||||
output_dir=${OUTPUT_DIR:-/output/usr/bin}
|
||||
restic_bin=${output_dir}/restic
|
||||
build_path=$(dirname "$PWD")
|
||||
|
||||
if [[ -z "${BIN}" ]]; then
|
||||
echo "BIN must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${BIN}" != "velero" ]]; then
|
||||
echo "${BIN} does not need the restic binary"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ -z "${GOOS}" ]]; then
|
||||
echo "GOOS must be set"
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "${GOARCH}" ]]; then
|
||||
echo "GOARCH must be set"
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "${RESTIC_VERSION}" ]]; then
|
||||
echo "RESTIC_VERSION must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir ${build_path}/restic
|
||||
git clone -b v${RESTIC_VERSION} https://github.com/restic/restic.git ${build_path}/restic
|
||||
pushd ${build_path}/restic
|
||||
git apply /go/src/github.com/vmware-tanzu/velero/hack/fix_restic_cve.txt
|
||||
go run build.go --goos "${GOOS}" --goarch "${GOARCH}" --goarm "${GOARM}" -o ${restic_bin}
|
||||
chmod +x ${restic_bin}
|
||||
popd
|
||||
@@ -1,274 +0,0 @@
|
||||
diff --git a/go.mod b/go.mod
|
||||
index 5f939c481..f6205aa3c 100644
|
||||
--- a/go.mod
|
||||
+++ b/go.mod
|
||||
@@ -24,32 +24,31 @@ require (
|
||||
github.com/restic/chunker v0.4.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
- golang.org/x/crypto v0.5.0
|
||||
- golang.org/x/net v0.5.0
|
||||
- golang.org/x/oauth2 v0.4.0
|
||||
- golang.org/x/sync v0.1.0
|
||||
- golang.org/x/sys v0.4.0
|
||||
- golang.org/x/term v0.4.0
|
||||
- golang.org/x/text v0.6.0
|
||||
- google.golang.org/api v0.106.0
|
||||
+ golang.org/x/crypto v0.45.0
|
||||
+ golang.org/x/net v0.47.0
|
||||
+ golang.org/x/oauth2 v0.28.0
|
||||
+ golang.org/x/sync v0.18.0
|
||||
+ golang.org/x/sys v0.38.0
|
||||
+ golang.org/x/term v0.37.0
|
||||
+ golang.org/x/text v0.31.0
|
||||
+ google.golang.org/api v0.114.0
|
||||
)
|
||||
|
||||
require (
|
||||
- cloud.google.com/go v0.108.0 // indirect
|
||||
- cloud.google.com/go/compute v1.15.1 // indirect
|
||||
- cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
- cloud.google.com/go/iam v0.10.0 // indirect
|
||||
+ cloud.google.com/go v0.110.0 // indirect
|
||||
+ cloud.google.com/go/compute/metadata v0.3.0 // indirect
|
||||
+ cloud.google.com/go/iam v0.13.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/dnaeon/go-vcr v1.2.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/felixge/fgprof v0.9.3 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
- github.com/golang/protobuf v1.5.2 // indirect
|
||||
+ github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
- github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
|
||||
- github.com/googleapis/gax-go/v2 v2.7.0 // indirect
|
||||
+ github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
|
||||
+ github.com/googleapis/gax-go/v2 v2.7.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
|
||||
@@ -63,11 +62,13 @@ require (
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
- google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
|
||||
- google.golang.org/grpc v1.52.0 // indirect
|
||||
- google.golang.org/protobuf v1.28.1 // indirect
|
||||
+ google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
|
||||
+ google.golang.org/grpc v1.56.3 // indirect
|
||||
+ google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
-go 1.18
|
||||
+go 1.24.0
|
||||
+
|
||||
+toolchain go1.24.11
|
||||
diff --git a/go.sum b/go.sum
|
||||
index 026e1d2fa..4a37e7ac7 100644
|
||||
--- a/go.sum
|
||||
+++ b/go.sum
|
||||
@@ -1,23 +1,24 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
-cloud.google.com/go v0.108.0 h1:xntQwnfn8oHGX0crLVinvHM+AhXvi3QHQIEcX/2hiWk=
|
||||
-cloud.google.com/go v0.108.0/go.mod h1:lNUfQqusBJp0bgAg6qrHgYFYbTB+dOiob1itwnlD33Q=
|
||||
-cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE=
|
||||
-cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
|
||||
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
-cloud.google.com/go/iam v0.10.0 h1:fpP/gByFs6US1ma53v7VxhvbJpO2Aapng6wabJ99MuI=
|
||||
-cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXPw0UWM=
|
||||
-cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
|
||||
+cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
|
||||
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
|
||||
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
|
||||
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
+cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
|
||||
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
|
||||
+cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
|
||||
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
|
||||
cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI=
|
||||
cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0 h1:VuHAcMq8pU1IWNT/m5yRaGqbK0BiQKHT8X4DTp9CHdI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0/go.mod h1:tZoQYdDZNOiIjdSn0dVWVfl0NEPGOJqVLzSrcFk4Is0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
|
||||
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE=
|
||||
+github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
|
||||
github.com/anacrolix/fuse v0.2.0 h1:pc+To78kI2d/WUjIyrsdqeJQAesuwpGxlI3h1nAv3Do=
|
||||
@@ -54,6 +55,7 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
|
||||
+github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
@@ -70,8 +72,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
@@ -82,17 +84,18 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
-github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
|
||||
+github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
|
||||
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc=
|
||||
github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
-github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg=
|
||||
-github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
|
||||
-github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
|
||||
-github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
|
||||
+github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
|
||||
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
|
||||
+github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A=
|
||||
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
@@ -114,6 +117,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kurin/blazer v0.5.4-0.20211030221322-ba894c124ac6 h1:nz7i1au+nDzgExfqW5Zl6q85XNTvYoGnM5DHiQC0yYs=
|
||||
github.com/kurin/blazer v0.5.4-0.20211030221322-ba894c124ac6/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.46 h1:Vo3tNmNXuj7ME5qrvN4iadO7b4mzu/RSFdUkUhaPldk=
|
||||
@@ -129,6 +133,7 @@ github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3P
|
||||
github.com/ncw/swift/v2 v2.0.1 h1:q1IN8hNViXEv8Zvg3Xdis4a3c4IlIGezkYz09zQL5J0=
|
||||
github.com/ncw/swift/v2 v2.0.1/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
|
||||
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
|
||||
+github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
@@ -172,8 +177,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
@@ -189,17 +194,17 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
+golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
|
||||
+golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -214,17 +219,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
|
||||
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@@ -237,8 +242,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
-google.golang.org/api v0.106.0 h1:ffmW0faWCwKkpbbtvlY/K/8fUl+JKvNS5CVzRoyfCv8=
|
||||
-google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
|
||||
+google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE=
|
||||
+google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
@@ -246,15 +251,15 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
-google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
|
||||
-google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
|
||||
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
-google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk=
|
||||
-google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
|
||||
+google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
|
||||
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -266,14 +271,15 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
@@ -27,14 +27,11 @@ import (
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
crclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
plugincommon "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
)
|
||||
|
||||
@@ -44,6 +41,10 @@ type volumeSnapshotContentDeleteItemAction struct {
|
||||
crClient crclient.Client
|
||||
}
|
||||
|
||||
const tempVSCCreateDeleteGap = 2 * time.Second
|
||||
|
||||
var sleepBetweenTempVSCCreateAndDelete = time.Sleep
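
The two declarations above make the create-then-delete gap both a named constant and an injectable sleep: tests can swap the package-level variable for a stub instead of waiting two real seconds, which is what the ordering test later in this diff does. A minimal package-internal sketch of the pattern (assuming the standard testing and time imports):

```go
func TestSleepGapIsStubbable(t *testing.T) {
	original := sleepBetweenTempVSCCreateAndDelete
	t.Cleanup(func() { sleepBetweenTempVSCCreateAndDelete = original })

	var recorded time.Duration
	// Replace the real time.Sleep with a recorder so the test runs instantly.
	sleepBetweenTempVSCCreateAndDelete = func(d time.Duration) { recorded = d }

	sleepBetweenTempVSCCreateAndDelete(tempVSCCreateDeleteGap)
	if recorded != tempVSCCreateDeleteGap {
		t.Fatalf("expected %v, got %v", tempVSCCreateDeleteGap, recorded)
	}
}
```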
|
||||
|
||||
// AppliesTo returns information indicating
|
||||
// VolumeSnapshotContentRestoreItemAction action should be invoked
|
||||
// while restoring VolumeSnapshotContent.snapshot.storage.k8s.io resources
|
||||
@@ -71,7 +72,7 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
|
||||
	// So skip deleting any VolumeSnapshotContent that does not have the backup name
|
||||
// in its labels.
|
||||
if !kubeutil.HasBackupLabel(&snapCont.ObjectMeta, input.Backup.Name) {
|
||||
p.log.Info(
|
||||
p.log.Infof(
|
||||
"VolumeSnapshotContent %s was not taken by backup %s, skipping deletion",
|
||||
snapCont.Name,
|
||||
input.Backup.Name,
|
||||
@@ -81,6 +82,17 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
|
||||
|
||||
p.log.Infof("Deleting VolumeSnapshotContent %s", snapCont.Name)
|
||||
|
||||
// Try to delete the original VSC from the cluster first.
|
||||
// This handles legacy (pre-1.15) backups where the original VSC
|
||||
// with DeletionPolicy=Retain still exists in the cluster.
|
||||
originalVSCName := snapCont.Name
|
||||
if cleaned := p.tryDeleteOriginalVSC(context.TODO(), originalVSCName); cleaned {
|
||||
p.log.Infof("Successfully deleted original VolumeSnapshotContent %s from cluster, skipping temp VSC creation", originalVSCName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// create a temp VSC to trigger cloud snapshot deletion
|
||||
// (for backups where the original VSC no longer exists in cluster)
|
||||
uuid, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
p.log.WithError(err).Errorf("Fail to generate the UUID to create VSC %s", snapCont.Name)
|
||||
@@ -114,60 +126,65 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
|
||||
if err := p.crClient.Create(context.TODO(), &snapCont); err != nil {
|
||||
return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", snapCont.Name)
|
||||
}
|
||||
p.log.Infof("Created temp VolumeSnapshotContent %s with DeletionPolicy=Delete to trigger cloud snapshot cleanup", snapCont.Name)
|
||||
|
||||
// Read resource timeout from backup annotation, if not set, use default value.
|
||||
timeout, err := time.ParseDuration(
|
||||
input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation])
|
||||
if err != nil {
|
||||
p.log.Warnf("fail to parse resource timeout annotation %s: %s",
|
||||
input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation], err.Error())
|
||||
timeout = 10 * time.Minute
|
||||
}
|
||||
p.log.Debugf("resource timeout is set to %s", timeout.String())
|
||||
|
||||
interval := 5 * time.Second
|
||||
|
||||
// Wait until VSC created and ReadyToUse is true.
|
||||
if err := wait.PollUntilContextTimeout(
|
||||
context.Background(),
|
||||
interval,
|
||||
timeout,
|
||||
true,
|
||||
func(ctx context.Context) (bool, error) {
|
||||
return checkVSCReadiness(ctx, &snapCont, p.crClient)
|
||||
},
|
||||
); err != nil {
|
||||
return errors.Wrapf(err, "fail to wait VolumeSnapshotContent %s becomes ready.", snapCont.Name)
|
||||
}
|
||||
// Add a small delay before delete to avoid create/delete race conditions in CSI controllers.
|
||||
sleepBetweenTempVSCCreateAndDelete(tempVSCCreateDeleteGap)
|
||||
|
||||
// Delete the temp VSC immediately to trigger cloud snapshot removal.
|
||||
// The CSI driver will handle the actual cloud snapshot deletion.
|
||||
if err := p.crClient.Delete(
|
||||
context.TODO(),
|
||||
&snapCont,
|
||||
); err != nil && !apierrors.IsNotFound(err) {
|
||||
p.log.Infof("VolumeSnapshotContent %s not found", snapCont.Name)
|
||||
p.log.WithError(err).Errorf("Failed to delete temp VolumeSnapshotContent %s", snapCont.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
p.log.Infof("Successfully triggered deletion of VolumeSnapshotContent %s and its cloud snapshot", snapCont.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
var checkVSCReadiness = func(
|
||||
// tryDeleteOriginalVSC attempts to find and delete the original VSC from
|
||||
// the cluster (legacy pre-1.15 backups). It patches the DeletionPolicy to
|
||||
// Delete so the CSI driver also removes the cloud snapshot, then deletes
|
||||
// the VSC object itself.
|
||||
// Returns true if the original VSC was found and deletion was initiated.
|
||||
func (p *volumeSnapshotContentDeleteItemAction) tryDeleteOriginalVSC(
|
||||
ctx context.Context,
|
||||
vsc *snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
) (bool, error) {
|
||||
tmpVSC := new(snapshotv1api.VolumeSnapshotContent)
|
||||
if err := client.Get(ctx, crclient.ObjectKeyFromObject(vsc), tmpVSC); err != nil {
|
||||
return false, errors.Wrapf(
|
||||
err, "failed to get VolumeSnapshotContent %s", vsc.Name,
|
||||
)
|
||||
vscName string,
|
||||
) bool {
|
||||
existing := new(snapshotv1api.VolumeSnapshotContent)
|
||||
if err := p.crClient.Get(ctx, crclient.ObjectKey{Name: vscName}, existing); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
p.log.Debugf("Original VolumeSnapshotContent %s not found in cluster, will use temp VSC flow", vscName)
|
||||
} else {
|
||||
p.log.WithError(err).Warnf("Error looking up original VolumeSnapshotContent %s, will use temp VSC flow", vscName)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if tmpVSC.Status != nil && boolptr.IsSetToTrue(tmpVSC.Status.ReadyToUse) {
|
||||
return true, nil
|
||||
p.log.Debugf("Found original VolumeSnapshotContent %s in cluster (legacy backup), cleaning up directly", vscName)
|
||||
|
||||
// Patch DeletionPolicy to Delete so the CSI driver removes the cloud snapshot
|
||||
if existing.Spec.DeletionPolicy != snapshotv1api.VolumeSnapshotContentDelete {
|
||||
original := existing.DeepCopy()
|
||||
existing.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentDelete
|
||||
if err := p.crClient.Patch(ctx, existing, crclient.MergeFrom(original)); err != nil {
|
||||
p.log.WithError(err).Warnf("Failed to patch DeletionPolicy on original VSC %s, will use temp VSC flow", vscName)
|
||||
return false
|
||||
}
|
||||
p.log.Debugf("Patched DeletionPolicy to Delete on original VolumeSnapshotContent %s", vscName)
|
||||
}
|
||||
|
||||
return false, nil
|
||||
// Delete the original VSC — the CSI driver will clean up the cloud snapshot
|
||||
if err := p.crClient.Delete(ctx, existing); err != nil && !apierrors.IsNotFound(err) {
|
||||
p.log.WithError(err).Warnf("Failed to delete original VolumeSnapshotContent %s, will use temp VSC flow", vscName)
|
||||
return false
|
||||
}
|
||||
|
||||
p.log.Infof("Deleted original VolumeSnapshotContent %s with DeletionPolicy=Delete, CSI driver will remove cloud snapshot", vscName)
|
||||
return true
|
||||
}
|
||||
|
||||
func NewVolumeSnapshotContentDeleteItemAction(
|
||||
|
||||
@@ -20,11 +20,13 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -37,19 +39,59 @@ import (
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
)
|
||||
|
||||
// fakeClientWithErrors wraps a real client and injects errors for specific operations.
|
||||
type fakeClientWithErrors struct {
|
||||
crclient.Client
|
||||
getError error
|
||||
patchError error
|
||||
deleteError error
|
||||
}
|
||||
|
||||
type fakeClientWithCallTracking struct {
|
||||
crclient.Client
|
||||
events *[]string
|
||||
}
|
||||
|
||||
func (c *fakeClientWithCallTracking) Create(ctx context.Context, obj crclient.Object, opts ...crclient.CreateOption) error {
|
||||
*c.events = append(*c.events, "create")
|
||||
return c.Client.Create(ctx, obj, opts...)
|
||||
}
|
||||
|
||||
func (c *fakeClientWithCallTracking) Delete(ctx context.Context, obj crclient.Object, opts ...crclient.DeleteOption) error {
|
||||
*c.events = append(*c.events, "delete")
|
||||
return c.Client.Delete(ctx, obj, opts...)
|
||||
}
|
||||
|
||||
func (c *fakeClientWithErrors) Get(ctx context.Context, key crclient.ObjectKey, obj crclient.Object, opts ...crclient.GetOption) error {
|
||||
if c.getError != nil {
|
||||
return c.getError
|
||||
}
|
||||
return c.Client.Get(ctx, key, obj, opts...)
|
||||
}
|
||||
|
||||
func (c *fakeClientWithErrors) Patch(ctx context.Context, obj crclient.Object, patch crclient.Patch, opts ...crclient.PatchOption) error {
|
||||
if c.patchError != nil {
|
||||
return c.patchError
|
||||
}
|
||||
return c.Client.Patch(ctx, obj, patch, opts...)
|
||||
}
|
||||
|
||||
func (c *fakeClientWithErrors) Delete(ctx context.Context, obj crclient.Object, opts ...crclient.DeleteOption) error {
|
||||
if c.deleteError != nil {
|
||||
return c.deleteError
|
||||
}
|
||||
return c.Client.Delete(ctx, obj, opts...)
|
||||
}
|
||||
|
||||
func TestVSCExecute(t *testing.T) {
|
||||
snapshotHandleStr := "test"
|
||||
tests := []struct {
|
||||
name string
|
||||
item runtime.Unstructured
|
||||
vsc *snapshotv1api.VolumeSnapshotContent
|
||||
backup *velerov1api.Backup
|
||||
function func(
|
||||
ctx context.Context,
|
||||
vsc *snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
) (bool, error)
|
||||
expectErr bool
|
||||
name string
|
||||
item runtime.Unstructured
|
||||
vsc *snapshotv1api.VolumeSnapshotContent
|
||||
backup *velerov1api.Backup
|
||||
preExistingVSC *snapshotv1api.VolumeSnapshotContent
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "VolumeSnapshotContent doesn't have backup label",
|
||||
@@ -71,27 +113,22 @@ func TestVSCExecute(t *testing.T) {
|
||||
{
|
||||
name: "Normal case, VolumeSnapshot should be deleted",
|
||||
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).VolumeSnapshotClassName("volumesnapshotclass").Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
|
||||
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
expectErr: false,
|
||||
function: func(
|
||||
ctx context.Context,
|
||||
vsc *snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
) (bool, error) {
|
||||
return true, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Error case, deletion fails",
|
||||
name: "Original VSC exists in cluster, cleaned up directly",
|
||||
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
|
||||
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
|
||||
expectErr: true,
|
||||
function: func(
|
||||
ctx context.Context,
|
||||
vsc *snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
) (bool, error) {
|
||||
return false, errors.Errorf("test error case")
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
expectErr: false,
|
||||
preExistingVSC: &snapshotv1api.VolumeSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "bar"},
|
||||
Spec: snapshotv1api.VolumeSnapshotContentSpec{
|
||||
DeletionPolicy: snapshotv1api.VolumeSnapshotContentRetain,
|
||||
Driver: "disk.csi.azure.com",
|
||||
Source: snapshotv1api.VolumeSnapshotContentSource{SnapshotHandle: stringPtr("snap-123")},
|
||||
VolumeSnapshotRef: corev1api.ObjectReference{Name: "vs-1", Namespace: "default"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -100,7 +137,10 @@ func TestVSCExecute(t *testing.T) {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
crClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
logger := logrus.StandardLogger()
|
||||
checkVSCReadiness = test.function
|
||||
|
||||
if test.preExistingVSC != nil {
|
||||
require.NoError(t, crClient.Create(t.Context(), test.preExistingVSC))
|
||||
}
|
||||
|
||||
p := volumeSnapshotContentDeleteItemAction{log: logger, crClient: crClient}
|
||||
|
||||
@@ -158,52 +198,186 @@ func TestNewVolumeSnapshotContentDeleteItemAction(t *testing.T) {
|
||||
require.NoError(t, err1)
|
||||
}
|
||||
|
||||
func TestCheckVSCReadiness(t *testing.T) {
|
||||
func TestTryDeleteOriginalVSC(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
vsc *snapshotv1api.VolumeSnapshotContent
|
||||
createVSC bool
|
||||
expectErr bool
|
||||
ready bool
|
||||
vscName string
|
||||
existing *snapshotv1api.VolumeSnapshotContent
|
||||
createIt bool
|
||||
expectRet bool
|
||||
}{
|
||||
{
|
||||
name: "VSC not exist",
|
||||
vsc: &snapshotv1api.VolumeSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vsc-1",
|
||||
Namespace: "velero",
|
||||
},
|
||||
},
|
||||
createVSC: false,
|
||||
expectErr: true,
|
||||
ready: false,
|
||||
name: "VSC not found in cluster, returns false",
|
||||
vscName: "not-found",
|
||||
expectRet: false,
|
||||
},
|
||||
{
|
||||
name: "VSC not ready",
|
||||
vsc: &snapshotv1api.VolumeSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vsc-1",
|
||||
Namespace: "velero",
|
||||
name: "VSC found with Retain policy, patches and deletes",
|
||||
vscName: "legacy-vsc",
|
||||
existing: &snapshotv1api.VolumeSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "legacy-vsc"},
|
||||
Spec: snapshotv1api.VolumeSnapshotContentSpec{
|
||||
DeletionPolicy: snapshotv1api.VolumeSnapshotContentRetain,
|
||||
Driver: "disk.csi.azure.com",
|
||||
Source: snapshotv1api.VolumeSnapshotContentSource{
|
||||
SnapshotHandle: stringPtr("snap-123"),
|
||||
},
|
||||
VolumeSnapshotRef: corev1api.ObjectReference{
|
||||
Name: "vs-1",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
createVSC: true,
|
||||
expectErr: false,
|
||||
ready: false,
|
||||
createIt: true,
|
||||
expectRet: true,
|
||||
},
|
||||
{
|
||||
name: "VSC found with Delete policy already, just deletes",
|
||||
vscName: "already-delete-vsc",
|
||||
existing: &snapshotv1api.VolumeSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "already-delete-vsc"},
|
||||
Spec: snapshotv1api.VolumeSnapshotContentSpec{
|
||||
DeletionPolicy: snapshotv1api.VolumeSnapshotContentDelete,
|
||||
Driver: "disk.csi.azure.com",
|
||||
Source: snapshotv1api.VolumeSnapshotContentSource{
|
||||
SnapshotHandle: stringPtr("snap-456"),
|
||||
},
|
||||
VolumeSnapshotRef: corev1api.ObjectReference{
|
||||
Name: "vs-2",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
createIt: true,
|
||||
expectRet: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
crClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
if test.createVSC {
|
||||
require.NoError(t, crClient.Create(t.Context(), test.vsc))
|
||||
logger := logrus.StandardLogger()
|
||||
p := &volumeSnapshotContentDeleteItemAction{
|
||||
log: logger,
|
||||
crClient: crClient,
|
||||
}
|
||||
|
||||
ready, err := checkVSCReadiness(t.Context(), test.vsc, crClient)
|
||||
require.Equal(t, test.ready, ready)
|
||||
if test.expectErr {
|
||||
require.Error(t, err)
|
||||
if test.createIt && test.existing != nil {
|
||||
require.NoError(t, crClient.Create(t.Context(), test.existing))
|
||||
}
|
||||
|
||||
result := p.tryDeleteOriginalVSC(t.Context(), test.vscName)
|
||||
require.Equal(t, test.expectRet, result)
|
||||
|
||||
// If cleanup succeeded, verify the VSC is gone
|
||||
if test.expectRet {
|
||||
err := crClient.Get(t.Context(), crclient.ObjectKey{Name: test.vscName},
|
||||
&snapshotv1api.VolumeSnapshotContent{})
|
||||
require.True(t, apierrors.IsNotFound(err),
|
||||
"VSC should have been deleted from cluster")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Error injection tests for tryDeleteOriginalVSC
|
||||
t.Run("Get returns non-NotFound error, returns false", func(t *testing.T) {
|
||||
errClient := &fakeClientWithErrors{
|
||||
Client: velerotest.NewFakeControllerRuntimeClient(t),
|
||||
getError: fmt.Errorf("connection refused"),
|
||||
}
|
||||
p := &volumeSnapshotContentDeleteItemAction{
|
||||
log: logrus.StandardLogger(),
|
||||
crClient: errClient,
|
||||
}
|
||||
require.False(t, p.tryDeleteOriginalVSC(t.Context(), "some-vsc"))
|
||||
})
|
||||
|
||||
t.Run("Patch fails, returns false", func(t *testing.T) {
|
||||
realClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
vsc := &snapshotv1api.VolumeSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "patch-fail-vsc"},
|
||||
Spec: snapshotv1api.VolumeSnapshotContentSpec{
|
||||
DeletionPolicy: snapshotv1api.VolumeSnapshotContentRetain,
|
||||
Driver: "disk.csi.azure.com",
|
||||
Source: snapshotv1api.VolumeSnapshotContentSource{SnapshotHandle: stringPtr("snap-789")},
|
||||
VolumeSnapshotRef: corev1api.ObjectReference{Name: "vs-3", Namespace: "default"},
|
||||
},
|
||||
}
|
||||
require.NoError(t, realClient.Create(t.Context(), vsc))
|
||||
|
||||
errClient := &fakeClientWithErrors{
|
||||
Client: realClient,
|
||||
patchError: fmt.Errorf("patch forbidden"),
|
||||
}
|
||||
p := &volumeSnapshotContentDeleteItemAction{
|
||||
log: logrus.StandardLogger(),
|
||||
crClient: errClient,
|
||||
}
|
||||
require.False(t, p.tryDeleteOriginalVSC(t.Context(), "patch-fail-vsc"))
|
||||
})
|
||||
|
||||
t.Run("Delete fails, returns false", func(t *testing.T) {
|
||||
realClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
vsc := &snapshotv1api.VolumeSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "delete-fail-vsc"},
|
||||
Spec: snapshotv1api.VolumeSnapshotContentSpec{
|
||||
DeletionPolicy: snapshotv1api.VolumeSnapshotContentDelete,
|
||||
Driver: "disk.csi.azure.com",
|
||||
Source: snapshotv1api.VolumeSnapshotContentSource{SnapshotHandle: stringPtr("snap-999")},
|
||||
VolumeSnapshotRef: corev1api.ObjectReference{Name: "vs-4", Namespace: "default"},
|
||||
},
|
||||
}
|
||||
require.NoError(t, realClient.Create(t.Context(), vsc))
|
||||
|
||||
errClient := &fakeClientWithErrors{
|
||||
Client: realClient,
|
||||
deleteError: fmt.Errorf("delete forbidden"),
|
||||
}
|
||||
p := &volumeSnapshotContentDeleteItemAction{
|
||||
log: logrus.StandardLogger(),
|
||||
crClient: errClient,
|
||||
}
|
||||
require.False(t, p.tryDeleteOriginalVSC(t.Context(), "delete-fail-vsc"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestVSCExecute_CreateSleepDeleteOrder(t *testing.T) {
|
||||
snapshotHandleStr := "test"
|
||||
vsc := builder.ForVolumeSnapshotContent("bar").
|
||||
ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).
|
||||
Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).
|
||||
Result()
|
||||
|
||||
vscMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(vsc)
|
||||
require.NoError(t, err)
|
||||
|
||||
events := make([]string, 0, 3)
|
||||
realClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
trackingClient := &fakeClientWithCallTracking{Client: realClient, events: &events}
|
||||
|
||||
originalSleep := sleepBetweenTempVSCCreateAndDelete
|
||||
t.Cleanup(func() {
|
||||
sleepBetweenTempVSCCreateAndDelete = originalSleep
|
||||
})
|
||||
|
||||
sleepBetweenTempVSCCreateAndDelete = func(d time.Duration) {
|
||||
require.Equal(t, tempVSCCreateDeleteGap, d)
|
||||
events = append(events, "sleep")
|
||||
}
|
||||
|
||||
p := volumeSnapshotContentDeleteItemAction{log: logrus.StandardLogger(), crClient: trackingClient}
|
||||
err = p.Execute(&velero.DeleteItemActionExecuteInput{
|
||||
Item: &unstructured.Unstructured{Object: vscMap},
|
||||
Backup: builder.ForBackup("velero", "backup").Result(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []string{"create", "sleep", "delete"}, events)
|
||||
}
|
||||
|
||||
func boolPtr(b bool) *bool {
|
||||
return &b
|
||||
}
|
||||
|
||||
func stringPtr(s string) *string {
|
||||
return &s
|
||||
}
|
||||
|
||||
@@ -42,6 +42,8 @@ const (
|
||||
FSBackup VolumeActionType = "fs-backup"
|
||||
	// snapshot action can have three different meanings based on velero configuration and backup spec - cloud provider based snapshots, local csi snapshots and datamover snapshots
|
||||
Snapshot VolumeActionType = "snapshot"
|
||||
// custom action is used to identify a volume that will be handled by an external plugin. Velero will not snapshot or use fs-backup if action=="custom"
|
||||
Custom VolumeActionType = "custom"
|
||||
)
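
With the Custom type added to the const block above and accepted by validate() (see the change further down), a policy action of that type now passes type validation. A minimal package-internal sketch, assuming the Action struct exposes the Type and Parameters fields used elsewhere in this diff; the parameter key and handler value are made up for illustration:

```go
func TestCustomActionPassesValidation(t *testing.T) {
	action := Action{
		Type:       Custom,
		Parameters: map[string]any{"handler": "example.io/volume-plugin"}, // hypothetical values
	}
	if err := action.validate(); err != nil {
		t.Fatalf("expected action type %q to validate: %v", Custom, err)
	}
}
```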
|
||||
|
||||
// Action defined as one action for a specific way of backup
|
||||
|
||||
@@ -90,7 +90,7 @@ func decodeStruct(r io.Reader, s any) error {
|
||||
func (a *Action) validate() error {
|
||||
// validate Type
|
||||
valid := false
|
||||
if a.Type == Skip || a.Type == Snapshot || a.Type == FSBackup {
|
||||
if a.Type == Skip || a.Type == Snapshot || a.Type == FSBackup || a.Type == Custom {
|
||||
valid = true
|
||||
}
|
||||
if !valid {
|
||||
|
||||
@@ -146,6 +146,10 @@ type CSISnapshotInfo struct {
|
||||
|
||||
// The VolumeSnapshot's Status.ReadyToUse value
|
||||
ReadyToUse *bool
|
||||
|
||||
// The VolumeGroupSnapshotHandle from VSC status, used to create stub VGSC during restore
|
||||
// for CSI drivers that populate this field (e.g., Ceph RBD).
|
||||
VolumeGroupSnapshotHandle string `json:"volumeGroupSnapshotHandle,omitempty"`
|
||||
}
|
||||
|
||||
// SnapshotDataMovementInfo is used for displaying the snapshot data mover status.
|
||||
@@ -456,6 +460,10 @@ func (v *BackupVolumesInformation) generateVolumeInfoForCSIVolumeSnapshot() {
|
||||
if volumeSnapshotContent.Status.SnapshotHandle != nil {
|
||||
snapshotHandle = *volumeSnapshotContent.Status.SnapshotHandle
|
||||
}
|
||||
volumeGroupSnapshotHandle := ""
|
||||
if volumeSnapshotContent.Status != nil && volumeSnapshotContent.Status.VolumeGroupSnapshotHandle != nil {
|
||||
volumeGroupSnapshotHandle = *volumeSnapshotContent.Status.VolumeGroupSnapshotHandle
|
||||
}
|
||||
if pvcPVInfo := v.pvMap.retrieve("", *volumeSnapshot.Spec.Source.PersistentVolumeClaimName, volumeSnapshot.Namespace); pvcPVInfo != nil {
|
||||
volumeInfo := &BackupVolumeInfo{
|
||||
BackupMethod: CSISnapshot,
|
||||
@@ -466,12 +474,13 @@ func (v *BackupVolumesInformation) generateVolumeInfoForCSIVolumeSnapshot() {
|
||||
SnapshotDataMoved: false,
|
||||
PreserveLocalSnapshot: true,
|
||||
CSISnapshotInfo: &CSISnapshotInfo{
|
||||
VSCName: *volumeSnapshot.Status.BoundVolumeSnapshotContentName,
|
||||
Size: size,
|
||||
Driver: volumeSnapshotContent.Spec.Driver,
|
||||
SnapshotHandle: snapshotHandle,
|
||||
OperationID: operation.Spec.OperationID,
|
||||
ReadyToUse: volumeSnapshot.Status.ReadyToUse,
|
||||
VSCName: *volumeSnapshot.Status.BoundVolumeSnapshotContentName,
|
||||
Size: size,
|
||||
Driver: volumeSnapshotContent.Spec.Driver,
|
||||
SnapshotHandle: snapshotHandle,
|
||||
OperationID: operation.Spec.OperationID,
|
||||
ReadyToUse: volumeSnapshot.Status.ReadyToUse,
|
||||
VolumeGroupSnapshotHandle: volumeGroupSnapshotHandle,
|
||||
},
|
||||
PVInfo: &PVInfo{
|
||||
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
|
||||
|
||||
@@ -18,13 +18,9 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
podvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
|
||||
vhutil "github.com/vmware-tanzu/velero/pkg/util/volumehelper"
|
||||
)
|
||||
|
||||
type VolumeHelper interface {
|
||||
ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource) (bool, error)
|
||||
ShouldPerformFSBackup(volume corev1api.Volume, pod corev1api.Pod) (bool, error)
|
||||
}
|
||||
|
||||
type volumeHelperImpl struct {
|
||||
volumePolicy *resourcepolicies.Policies
|
||||
snapshotVolumes *bool
|
||||
@@ -53,7 +49,7 @@ func NewVolumeHelperImpl(
|
||||
client crclient.Client,
|
||||
defaultVolumesToFSBackup bool,
|
||||
backupExcludePVC bool,
|
||||
) VolumeHelper {
|
||||
) vhutil.VolumeHelper {
|
||||
// Pass nil namespaces - no cache will be built, so this never fails.
|
||||
// This is used by plugins that don't need the cache optimization.
|
||||
vh, _ := NewVolumeHelperImplWithNamespaces(
|
||||
@@ -81,7 +77,7 @@ func NewVolumeHelperImplWithNamespaces(
|
||||
defaultVolumesToFSBackup bool,
|
||||
backupExcludePVC bool,
|
||||
namespaces []string,
|
||||
) (VolumeHelper, error) {
|
||||
) (vhutil.VolumeHelper, error) {
|
||||
var pvcPodCache *podvolumeutil.PVCPodCache
|
||||
if len(namespaces) > 0 {
|
||||
pvcPodCache = podvolumeutil.NewPVCPodCache()
|
||||
@@ -110,7 +106,7 @@ func NewVolumeHelperImplWithCache(
|
||||
client crclient.Client,
|
||||
logger logrus.FieldLogger,
|
||||
pvcPodCache *podvolumeutil.PVCPodCache,
|
||||
) (VolumeHelper, error) {
|
||||
) (vhutil.VolumeHelper, error) {
|
||||
resourcePolicies, err := resourcepolicies.GetResourcePoliciesFromBackup(backup, client, logger)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get volume policies from backup")
|
||||
@@ -134,6 +130,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
pv := new(corev1api.PersistentVolume)
|
||||
var err error
|
||||
|
||||
var pvNotFoundErr error
|
||||
if groupResource == kuberesource.PersistentVolumeClaims {
|
||||
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pvc); err != nil {
|
||||
v.logger.WithError(err).Error("fail to convert unstructured into PVC")
|
||||
@@ -142,8 +139,10 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
|
||||
pv, err = kubeutil.GetPVForPVC(pvc, v.client)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
|
||||
return false, err
|
||||
// Any error means PV not available - save to return later if no policy matches
|
||||
v.logger.Debugf("PV not found for PVC %s: %v", pvc.Namespace+"/"+pvc.Name, err)
|
||||
pvNotFoundErr = err
|
||||
pv = nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -158,7 +157,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
|
||||
action, err := v.volumePolicy.GetMatchAction(vfd)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for PV %s", pv.Name)
|
||||
v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for %+v", vfd)
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -167,15 +166,21 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
// If there is no match action, go on to the next check.
|
||||
if action != nil {
|
||||
if action.Type == resourcepolicies.Snapshot {
|
||||
v.logger.Infof(fmt.Sprintf("performing snapshot action for pv %s", pv.Name))
|
||||
v.logger.Infof("performing snapshot action for %+v", vfd)
|
||||
return true, nil
|
||||
} else {
|
||||
v.logger.Infof("Skip snapshot action for pv %s as the action type is %s", pv.Name, action.Type)
|
||||
v.logger.Infof("Skip snapshot action for %+v as the action type is %s", vfd, action.Type)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If resource is PVC, and PV is nil (e.g., Pending/Lost PVC with no matching policy), return the original error
|
||||
if groupResource == kuberesource.PersistentVolumeClaims && pv == nil && pvNotFoundErr != nil {
|
||||
v.logger.WithError(pvNotFoundErr).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
|
||||
return false, pvNotFoundErr
|
||||
}
|
||||
|
||||
// If this PV is claimed, see if we've already taken a (pod volume backup)
|
||||
// snapshot of the contents of this PV. If so, don't take a snapshot.
|
||||
if pv.Spec.ClaimRef != nil {
|
||||
@@ -209,7 +214,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
return true, nil
|
||||
}
|
||||
|
||||
v.logger.Infof(fmt.Sprintf("skipping snapshot action for pv %s possibly due to no volume policy setting or snapshotVolumes is false", pv.Name))
|
||||
v.logger.Infof("skipping snapshot action for pv %s possibly due to no volume policy setting or snapshotVolumes is false", pv.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -219,6 +224,7 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var pvNotFoundErr error
|
||||
if v.volumePolicy != nil {
|
||||
var resource any
|
||||
var err error
|
||||
@@ -230,10 +236,13 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
|
||||
v.logger.WithError(err).Errorf("fail to get PVC for pod %s", pod.Namespace+"/"+pod.Name)
|
||||
return false, err
|
||||
}
|
||||
resource, err = kubeutil.GetPVForPVC(pvc, v.client)
|
||||
pvResource, err := kubeutil.GetPVForPVC(pvc, v.client)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
|
||||
return false, err
|
||||
// Any error means PV not available - save to return later if no policy matches
|
||||
v.logger.Debugf("PV not found for PVC %s: %v", pvc.Namespace+"/"+pvc.Name, err)
|
||||
pvNotFoundErr = err
|
||||
} else {
|
||||
resource = pvResource
|
||||
}
|
||||
}
|
||||
|
||||
@@ -260,6 +269,12 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If no policy matched and PV was not found, return the original error
|
||||
if pvNotFoundErr != nil {
|
||||
v.logger.WithError(pvNotFoundErr).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
|
||||
return false, pvNotFoundErr
|
||||
}
|
||||
}
|
||||
|
||||
if v.shouldPerformFSBackupLegacy(volume, pod) {
|
||||
@@ -300,6 +315,121 @@ func (v volumeHelperImpl) shouldPerformFSBackupLegacy(
|
||||
}
|
||||
}
|
||||
|
||||
func (v *volumeHelperImpl) ShouldPerformCustomAction(obj runtime.Unstructured, groupResource schema.GroupResource, matchParams map[string]any) (bool, error) {
|
||||
// check if volume policy exists and also check if the object(pv/pvc) fits a volume policy criteria and see if the associated action is custom with the provided param values
|
||||
pvc := new(corev1api.PersistentVolumeClaim)
|
||||
pv := new(corev1api.PersistentVolume)
|
||||
var err error
|
||||
|
||||
var pvNotFoundErr error
|
||||
if groupResource == kuberesource.PersistentVolumeClaims {
|
||||
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pvc); err != nil {
|
||||
v.logger.WithError(err).Error("fail to convert unstructured into PVC")
|
||||
return false, err
|
||||
}
|
||||
|
||||
pv, err = kubeutil.GetPVForPVC(pvc, v.client)
|
||||
if err != nil {
|
||||
// Any error means PV not available - save to return later if no policy matches
|
||||
v.logger.Debugf("PV not found for PVC %s: %v", pvc.Namespace+"/"+pvc.Name, err)
|
||||
pvNotFoundErr = err
|
||||
pv = nil
|
||||
}
|
||||
}
|
||||
|
||||
if groupResource == kuberesource.PersistentVolumes {
|
||||
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pv); err != nil {
|
||||
v.logger.WithError(err).Error("fail to convert unstructured into PV")
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
if v.volumePolicy != nil {
|
||||
vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
|
||||
action, err := v.volumePolicy.GetMatchAction(vfd)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for %+v", vfd)
|
||||
return false, err
|
||||
}
|
||||
|
||||
// If there is a match action, and the action type is custom, return true
|
||||
// if the provided parameters match as well, else return false.
|
||||
// If there is no match action, also return false
|
||||
if action != nil {
|
||||
if action.Type == resourcepolicies.Custom {
|
||||
for k, requiredValue := range matchParams {
|
||||
if actionValue, ok := action.Parameters[k]; !ok || actionValue != requiredValue {
|
||||
v.logger.Infof("Skipping custom action for %+v as value for parameter %s is %s rather than the required %s", vfd, k, actionValue, requiredValue)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
v.logger.Infof("performing custom action for %+v", vfd)
|
||||
return true, nil
|
||||
} else {
|
||||
v.logger.Infof("Skipping custom action for %+v as the action type is %s", vfd, action.Type)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
	// If resource is PVC, and PV is nil (e.g., Pending/Lost PVC with no matching policy), log a warning
|
||||
// Don't error out on no PV, just return false
|
||||
if groupResource == kuberesource.PersistentVolumeClaims && pv == nil && pvNotFoundErr != nil {
|
||||
v.logger.WithError(pvNotFoundErr).Warnf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
v.logger.Infof("skipping custom action for pv %s due to no matching volume policy", pv.Name)
|
||||
return false, nil
|
||||
}
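
For orientation, a rough caller-side sketch of ShouldPerformCustomAction: a plugin handling "custom" volumes could pass the parameters it recognizes and only act when the matched policy carries them. Apart from the method call itself, everything below is hypothetical (the helper name, parameter key, and handler value are not from this PR):

```go
// Hypothetical, package-internal sketch of a caller.
func shouldHandleWithCustomPlugin(v *volumeHelperImpl, item runtime.Unstructured) (bool, error) {
	return v.ShouldPerformCustomAction(
		item,
		kuberesource.PersistentVolumeClaims,
		map[string]any{"handler": "example.io/volume-plugin"}, // made-up parameter
	)
}
```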
|
||||
|
||||
// Returns false if no matching action is found. Returns true with the action type and Parameters map if there is a matching policy.
|
||||
func (v *volumeHelperImpl) GetActionParameters(obj runtime.Unstructured, groupResource schema.GroupResource) (bool, string, map[string]any, error) {
|
||||
// if volume policy exists, return action parameters.
|
||||
pvc := new(corev1api.PersistentVolumeClaim)
|
||||
pv := new(corev1api.PersistentVolume)
|
||||
var err error
|
||||
|
||||
if groupResource == kuberesource.PersistentVolumeClaims {
|
||||
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pvc); err != nil {
|
||||
v.logger.WithError(err).Error("fail to convert unstructured into PVC")
|
||||
return false, "", nil, err
|
||||
}
|
||||
|
||||
pv, err = kubeutil.GetPVForPVC(pvc, v.client)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Warnf("failed to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
|
||||
return false, "", nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
if groupResource == kuberesource.PersistentVolumes {
|
||||
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pv); err != nil {
|
||||
v.logger.WithError(err).Error("fail to convert unstructured into PV")
|
||||
return false, "", nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if v.volumePolicy != nil {
|
||||
vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
|
||||
action, err := v.volumePolicy.GetMatchAction(vfd)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for PV %s", pv.Name)
|
||||
return false, "", nil, err
|
||||
}
|
||||
|
||||
// If there is a match action, and the action type is custom, return true
|
||||
// if the provided parameters match as well, else return false.
|
||||
// If there is no match action, also return false
|
||||
if action != nil {
|
||||
v.logger.Infof("found matching action for pv %s, returning parameters", pv.Name)
|
||||
return true, string(action.Type), action.Parameters, nil
|
||||
}
|
||||
}
|
||||
|
||||
v.logger.Infof("no matching volume policy found for pv %s, no parameters to return", pv.Name)
|
||||
return false, "", nil, nil
|
||||
}
|
||||
|
||||
func (v *volumeHelperImpl) shouldIncludeVolumeInBackup(vol corev1api.Volume) bool {
|
||||
includeVolumeInBackup := true
|
||||
// cannot backup hostpath volumes as they are not mounted into /var/lib/kubelet/pods
|
||||
|
||||
@@ -286,7 +286,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "PVC not having PV, return false and error case PV not found",
|
||||
name: "PVC not having PV, return false and error when no matching policy",
|
||||
inputObj: builder.ForPersistentVolumeClaim("default", "example-pvc").StorageClass("gp2-csi").Result(),
|
||||
groupResource: kuberesource.PersistentVolumeClaims,
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
@@ -1234,3 +1234,312 @@ func TestNewVolumeHelperImplWithCache_UsesCache(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.False(t, shouldSnapshot, "Expected snapshot to be skipped due to fs-backup selection via cache")
|
||||
}
|
||||
|
||||
// TestVolumeHelperImpl_ShouldPerformSnapshot_UnboundPVC tests that Pending and Lost PVCs with
|
||||
// phase-based skip policies don't cause errors when GetPVForPVC would fail.
|
||||
func TestVolumeHelperImpl_ShouldPerformSnapshot_UnboundPVC(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
inputPVC *corev1api.PersistentVolumeClaim
|
||||
resourcePolicies *resourcepolicies.ResourcePolicies
|
||||
shouldSnapshot bool
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
name: "Pending PVC with phase-based skip policy should not error and return false",
|
||||
inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-pending").
|
||||
StorageClass("non-existent-class").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Pending"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldSnapshot: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Pending PVC without matching skip policy should error (no PV)",
|
||||
inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-pending-no-policy").
|
||||
StorageClass("non-existent-class").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldSnapshot: false,
|
||||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
name: "Lost PVC with phase-based skip policy should not error and return false",
|
||||
inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
|
||||
StorageClass("some-class").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Lost"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldSnapshot: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Lost PVC with policy for Pending and Lost should not error and return false",
|
||||
inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
|
||||
StorageClass("some-class").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Pending", "Lost"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldSnapshot: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
|
||||
var p *resourcepolicies.Policies
|
||||
if tc.resourcePolicies != nil {
|
||||
p = &resourcepolicies.Policies{}
|
||||
err := p.BuildPolicy(tc.resourcePolicies)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
vh := NewVolumeHelperImpl(
|
||||
p,
|
||||
ptr.To(true),
|
||||
logrus.StandardLogger(),
|
||||
fakeClient,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
|
||||
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.inputPVC)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualShouldSnapshot, actualError := vh.ShouldPerformSnapshot(&unstructured.Unstructured{Object: obj}, kuberesource.PersistentVolumeClaims)
|
||||
if tc.expectedErr {
|
||||
require.Error(t, actualError, "Want error; Got nil error")
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, actualError)
|
||||
require.Equalf(t, tc.shouldSnapshot, actualShouldSnapshot, "Want shouldSnapshot as %t; Got shouldSnapshot as %t", tc.shouldSnapshot, actualShouldSnapshot)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestVolumeHelperImpl_ShouldPerformFSBackup_UnboundPVC tests that Pending and Lost PVCs with
|
||||
// phase-based skip policies don't cause errors when GetPVForPVC would fail.
|
||||
func TestVolumeHelperImpl_ShouldPerformFSBackup_UnboundPVC(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pod *corev1api.Pod
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
resourcePolicies *resourcepolicies.ResourcePolicies
|
||||
shouldFSBackup bool
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
name: "Pending PVC with phase-based skip policy should not error and return false",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-pending",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-pending",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pvc-pending").
|
||||
StorageClass("non-existent-class").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Pending"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldFSBackup: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Pending PVC without matching skip policy should error (no PV)",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-pending",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-pending-no-policy",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pvc-pending-no-policy").
|
||||
StorageClass("non-existent-class").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldFSBackup: false,
|
||||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
name: "Lost PVC with phase-based skip policy should not error and return false",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-lost",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-lost",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
|
||||
StorageClass("some-class").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Lost"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldFSBackup: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Lost PVC with policy for Pending and Lost should not error and return false",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-lost",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-lost",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
|
||||
StorageClass("some-class").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Pending", "Lost"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldFSBackup: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, tc.pvc)
|
||||
require.NoError(t, fakeClient.Create(t.Context(), tc.pod))
|
||||
|
||||
var p *resourcepolicies.Policies
|
||||
if tc.resourcePolicies != nil {
|
||||
p = &resourcepolicies.Policies{}
|
||||
err := p.BuildPolicy(tc.resourcePolicies)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
vh := NewVolumeHelperImpl(
|
||||
p,
|
||||
ptr.To(true),
|
||||
logrus.StandardLogger(),
|
||||
fakeClient,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
|
||||
actualShouldFSBackup, actualError := vh.ShouldPerformFSBackup(tc.pod.Spec.Volumes[0], *tc.pod)
|
||||
if tc.expectedErr {
|
||||
require.Error(t, actualError, "Want error; Got nil error")
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, actualError)
|
||||
require.Equalf(t, tc.shouldFSBackup, actualShouldFSBackup, "Want shouldFSBackup as %t; Got shouldFSBackup as %t", tc.shouldFSBackup, actualShouldFSBackup)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,8 +35,7 @@ type BackupRepositorySpec struct {
|
||||
// +optional
|
||||
RepositoryType string `json:"repositoryType"`
|
||||
|
||||
// ResticIdentifier is the full restic-compatible string for identifying
|
||||
// this repository. This field is only used when RepositoryType is "restic".
|
||||
// Deprecated
|
||||
// +optional
|
||||
ResticIdentifier string `json:"resticIdentifier,omitempty"`
|
||||
|
||||
@@ -58,8 +57,7 @@ const (
|
||||
BackupRepositoryPhaseReady BackupRepositoryPhase = "Ready"
|
||||
BackupRepositoryPhaseNotReady BackupRepositoryPhase = "NotReady"
|
||||
|
||||
BackupRepositoryTypeRestic string = "restic"
|
||||
BackupRepositoryTypeKopia string = "kopia"
|
||||
BackupRepositoryTypeKopia string = "kopia"
|
||||
)
|
||||
|
||||
// BackupRepositoryStatus is the current status of a BackupRepository.
|
||||
|
||||
@@ -102,6 +102,15 @@ const (
|
||||
// even if the resource contains a matching selector label.
|
||||
ExcludeFromBackupLabel = "velero.io/exclude-from-backup"
|
||||
|
||||
// SkipFromBackupAnnotation is the annotation used by internal BackupItemActions
|
||||
// to indicate that a resource should be skipped from backup,
|
||||
// even if it doesn't have the ExcludeFromBackupLabel.
|
||||
// This is used in cases where we want to skip backup of a resource based on some logic in a plugin.
|
||||
//
|
||||
// Notice: SkipFromBackupAnnotation's priority is higher than MustIncludeAdditionalItemAnnotation.
|
||||
// If SkipFromBackupAnnotation is set, the resource will be skipped even if MustIncludeAdditionalItemAnnotation is set.
|
||||
SkipFromBackupAnnotation = "velero.io/skip-from-backup"
|
||||
|
||||
// defaultVGSLabelKey is the default label key used to group PVCs under a VolumeGroupSnapshot
|
||||
DefaultVGSLabelKey = "velero.io/volume-group"
|
||||
|
||||
@@ -132,6 +141,7 @@ const (
|
||||
VolumeSnapshotRestoreSize = "velero.io/csi-volumesnapshot-restore-size"
|
||||
DriverNameAnnotation = "velero.io/csi-driver-name"
|
||||
VSCDeletionPolicyAnnotation = "velero.io/csi-vsc-deletion-policy"
|
||||
VolumeGroupSnapshotHandleAnnotation = "velero.io/csi-volumegroupsnapshot-handle"
|
||||
VolumeSnapshotClassSelectorLabel = "velero.io/csi-volumesnapshot-class"
|
||||
VolumeSnapshotClassDriverBackupAnnotationPrefix = "velero.io/csi-volumesnapshot-class"
|
||||
VolumeSnapshotClassDriverPVCAnnotation = "velero.io/csi-volumesnapshot-class"
|
||||
|
||||
@@ -19,8 +19,10 @@ package archive
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
@@ -66,6 +68,16 @@ func (e *Extractor) writeFile(target string, tarRdr *tar.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// sanitizeArchivePath sanitizes an archive file path to guard against the "G305: Zip Slip" vulnerability
|
||||
func sanitizeArchivePath(destDir, sourcePath string) (targetPath string, err error) {
|
||||
targetPath = filepath.Join(destDir, sourcePath)
|
||||
if strings.HasPrefix(targetPath, filepath.Clean(destDir)) {
|
||||
return targetPath, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("invalid archive path %q: escapes target directory", sourcePath)
|
||||
}
|
||||
|
||||
func (e *Extractor) readBackup(tarRdr *tar.Reader) (string, error) {
|
||||
dir, err := e.fs.TempDir("", "")
|
||||
if err != nil {
|
||||
@@ -84,7 +96,11 @@ func (e *Extractor) readBackup(tarRdr *tar.Reader) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
target := filepath.Join(dir, header.Name) //nolint:gosec // Internal usage. No need to check.
|
||||
target, err := sanitizeArchivePath(dir, header.Name)
|
||||
if err != nil {
|
||||
e.log.Infof("error sanitizing archive path: %s", err.Error())
|
||||
return "", err
|
||||
}
|
||||
|
||||
switch header.Typeflag {
|
||||
case tar.TypeDir:
|
||||
|
||||
@@ -18,6 +18,7 @@ package archive

import (
"archive/tar"
"bytes"
"compress/gzip"
"io"
"os"
@@ -87,6 +88,31 @@ func TestUnzipAndExtractBackup(t *testing.T) {
}
}

func TestUnzipAndExtractBackupRejectsPathTraversal(t *testing.T) {
ext := NewExtractor(test.NewLogger(), test.NewFakeFileSystem())

var buf bytes.Buffer
gzw := gzip.NewWriter(&buf)
tw := tar.NewWriter(gzw)

err := tw.WriteHeader(&tar.Header{
Name: "../escape.txt",
Mode: 0600,
Typeflag: tar.TypeReg,
Size: int64(len("data")),
})
require.NoError(t, err)

_, err = tw.Write([]byte("data"))
require.NoError(t, err)
require.NoError(t, tw.Close())
require.NoError(t, gzw.Close())

_, err = ext.UnzipAndExtractBackup(&buf)
require.Error(t, err)
require.Contains(t, err.Error(), "invalid archive path")
}

func createArchive(files []string, fs filesystem.Interface) (string, error) {
outName := "output.tar.gz"
out, err := fs.Create(outName)
@@ -24,7 +24,7 @@ import (

"k8s.io/client-go/util/retry"

volumegroupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
volumegroupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -44,7 +44,6 @@ import (

"k8s.io/apimachinery/pkg/api/resource"

internalvolumehelper "github.com/vmware-tanzu/velero/internal/volumehelper"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
veleroclient "github.com/vmware-tanzu/velero/pkg/client"
@@ -59,6 +58,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/csi"
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
podvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
vhutil "github.com/vmware-tanzu/velero/pkg/util/volumehelper"
)

// TODO: Replace hardcoded VolumeSnapshot finalizer strings with constants from
@@ -128,9 +128,9 @@ func (p *pvcBackupItemAction) ensurePVCPodCacheForNamespace(ctx context.Context,

// getVolumeHelperWithCache creates a VolumeHelper using the pre-built PVC-to-Pod cache.
// The cache should be ensured for the relevant namespace(s) before calling this.
func (p *pvcBackupItemAction) getVolumeHelperWithCache(backup *velerov1api.Backup) (internalvolumehelper.VolumeHelper, error) {
func (p *pvcBackupItemAction) getVolumeHelperWithCache(backup *velerov1api.Backup) (vhutil.VolumeHelper, error) {
// Create VolumeHelper with our lazy-built cache
vh, err := internalvolumehelper.NewVolumeHelperImplWithCache(
vh, err := volumehelper.NewVolumeHelperWithCache(
*backup,
p.crClient,
p.log,
@@ -149,7 +149,7 @@ func (p *pvcBackupItemAction) getVolumeHelperWithCache(backup *velerov1api.Backu
// Since plugin instances are unique per backup (created via newPluginManager and
// cleaned up via CleanupClients at backup completion), we can safely cache this.
// See issue #9179 and PR #9226 for details.
func (p *pvcBackupItemAction) getOrCreateVolumeHelper(backup *velerov1api.Backup) (internalvolumehelper.VolumeHelper, error) {
func (p *pvcBackupItemAction) getOrCreateVolumeHelper(backup *velerov1api.Backup) (vhutil.VolumeHelper, error) {
// Initialize the PVC-to-Pod cache if needed
if p.pvcPodCache == nil {
p.pvcPodCache = podvolumeutil.NewPVCPodCache()
@@ -322,13 +322,9 @@ func (p *pvcBackupItemAction) Execute(
return nil, nil, "", nil, err
}

shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper(
shouldSnapshot, err := vh.ShouldPerformSnapshot(
item,
kuberesource.PersistentVolumeClaims,
*backup,
p.crClient,
p.log,
vh,
)
if err != nil {
return nil, nil, "", nil, err
@@ -471,7 +467,7 @@ func (p *pvcBackupItemAction) Progress(
return progress, biav2.InvalidOperationIDError(operationID)
}

dataUpload, err := getDataUpload(context.Background(), p.crClient, operationID)
dataUpload, err := getDataUpload(context.Background(), p.crClient, backup.Namespace, operationID)
if err != nil {
p.log.Errorf(
"fail to get DataUpload for backup %s/%s by operation ID %s: %s",
@@ -516,7 +512,7 @@ func (p *pvcBackupItemAction) Cancel(operationID string, backup *velerov1api.Bac
return biav2.InvalidOperationIDError(operationID)
}

dataUpload, err := getDataUpload(context.Background(), p.crClient, operationID)
dataUpload, err := getDataUpload(context.Background(), p.crClient, backup.Namespace, operationID)
if err != nil {
p.log.Errorf(
"fail to get DataUpload for backup %s/%s: %s",
@@ -609,10 +605,12 @@ func createDataUpload(
func getDataUpload(
ctx context.Context,
crClient crclient.Client,
namespace string,
operationID string,
) (*velerov2alpha1.DataUpload, error) {
dataUploadList := new(velerov2alpha1.DataUploadList)
err := crClient.List(ctx, dataUploadList, &crclient.ListOptions{
Namespace: namespace,
LabelSelector: labels.SelectorFromSet(
map[string]string{velerov1api.AsyncOperationIDLabel: operationID},
),
|
||||
}
|
||||
|
||||
// Filter PVCs by volume policy
|
||||
filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup, vh)
|
||||
filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, vh)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to filter PVCs by volume policy for VolumeGroupSnapshot group %q", group)
|
||||
}
|
||||
@@ -769,7 +767,7 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
|
||||
}
|
||||
|
||||
// Re-fetch latest VGS to ensure status is populated after VGSC binding
|
||||
latestVGS := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{}
|
||||
latestVGS := &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{}
|
||||
if err := p.crClient.Get(ctx, crclient.ObjectKeyFromObject(newVGS), latestVGS); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to re-fetch VolumeGroupSnapshot %s after VGSC binding wait", newVGS.Name)
|
||||
}
|
||||
@@ -844,8 +842,7 @@ func (p *pvcBackupItemAction) listGroupedPVCs(ctx context.Context, namespace, la
|
||||
|
||||
func (p *pvcBackupItemAction) filterPVCsByVolumePolicy(
|
||||
pvcs []corev1api.PersistentVolumeClaim,
|
||||
backup *velerov1api.Backup,
|
||||
vh internalvolumehelper.VolumeHelper,
|
||||
vh vhutil.VolumeHelper,
|
||||
) ([]corev1api.PersistentVolumeClaim, error) {
|
||||
var filteredPVCs []corev1api.PersistentVolumeClaim
|
||||
|
||||
@@ -859,13 +856,9 @@ func (p *pvcBackupItemAction) filterPVCsByVolumePolicy(
|
||||
|
||||
// Check if this PVC should be snapshotted according to volume policies
|
||||
// Uses the cached VolumeHelper for better performance with many PVCs/pods
|
||||
shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper(
|
||||
shouldSnapshot, err := vh.ShouldPerformSnapshot(
|
||||
unstructuredPVC,
|
||||
kuberesource.PersistentVolumeClaims,
|
||||
*backup,
|
||||
p.crClient,
|
||||
p.log,
|
||||
vh,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to check volume policy for PVC %s/%s", pvc.Namespace, pvc.Name)
|
||||
@@ -922,7 +915,7 @@ func (p *pvcBackupItemAction) determineVGSClass(
|
||||
}
|
||||
|
||||
// 3. Fallback to label-based default
|
||||
vgsClassList := &volumegroupsnapshotv1beta1.VolumeGroupSnapshotClassList{}
|
||||
vgsClassList := &volumegroupsnapshotv1beta2.VolumeGroupSnapshotClassList{}
|
||||
if err := p.crClient.List(ctx, vgsClassList); err != nil {
|
||||
return "", errors.Wrap(err, "failed to list VolumeGroupSnapshotClasses")
|
||||
}
|
||||
@@ -951,22 +944,22 @@ func (p *pvcBackupItemAction) createVolumeGroupSnapshot(
|
||||
backup *velerov1api.Backup,
|
||||
pvc corev1api.PersistentVolumeClaim,
|
||||
vgsLabelKey, vgsLabelValue, vgsClassName string,
|
||||
) (*volumegroupsnapshotv1beta1.VolumeGroupSnapshot, error) {
|
||||
) (*volumegroupsnapshotv1beta2.VolumeGroupSnapshot, error) {
|
||||
vgsLabels := map[string]string{
|
||||
velerov1api.BackupNameLabel: label.GetValidName(backup.Name),
|
||||
velerov1api.BackupUIDLabel: string(backup.UID),
|
||||
vgsLabelKey: vgsLabelValue,
|
||||
}
|
||||
|
||||
vgs := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{
|
||||
vgs := &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: fmt.Sprintf("velero-%s-", vgsLabelValue),
|
||||
Namespace: pvc.Namespace,
|
||||
Labels: vgsLabels,
|
||||
},
|
||||
Spec: volumegroupsnapshotv1beta1.VolumeGroupSnapshotSpec{
|
||||
Spec: volumegroupsnapshotv1beta2.VolumeGroupSnapshotSpec{
|
||||
VolumeGroupSnapshotClassName: &vgsClassName,
|
||||
Source: volumegroupsnapshotv1beta1.VolumeGroupSnapshotSource{
|
||||
Source: volumegroupsnapshotv1beta2.VolumeGroupSnapshotSource{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
vgsLabelKey: vgsLabelValue,
|
||||
@@ -994,7 +987,7 @@ func (p *pvcBackupItemAction) createVolumeGroupSnapshot(
|
||||
func (p *pvcBackupItemAction) waitForVGSAssociatedVS(
|
||||
ctx context.Context,
|
||||
groupedPVCs []corev1api.PersistentVolumeClaim,
|
||||
vgs *volumegroupsnapshotv1beta1.VolumeGroupSnapshot,
|
||||
vgs *volumegroupsnapshotv1beta2.VolumeGroupSnapshot,
|
||||
timeout time.Duration,
|
||||
) (map[string]*snapshotv1api.VolumeSnapshot, error) {
|
||||
expected := len(groupedPVCs)
|
||||
@@ -1037,10 +1030,10 @@ func (p *pvcBackupItemAction) waitForVGSAssociatedVS(
|
||||
return vsMap, nil
|
||||
}
|
||||
|
||||
func hasOwnerReference(obj metav1.Object, vgs *volumegroupsnapshotv1beta1.VolumeGroupSnapshot) bool {
|
||||
func hasOwnerReference(obj metav1.Object, vgs *volumegroupsnapshotv1beta2.VolumeGroupSnapshot) bool {
|
||||
for _, ref := range obj.GetOwnerReferences() {
|
||||
if ref.Kind == kuberesource.VGSKind &&
|
||||
ref.APIVersion == volumegroupsnapshotv1beta1.GroupName+"/"+volumegroupsnapshotv1beta1.SchemeGroupVersion.Version &&
|
||||
ref.APIVersion == volumegroupsnapshotv1beta2.GroupName+"/"+volumegroupsnapshotv1beta2.SchemeGroupVersion.Version &&
|
||||
ref.UID == vgs.UID {
|
||||
return true
|
||||
}
|
||||
@@ -1051,7 +1044,7 @@ func hasOwnerReference(obj metav1.Object, vgs *volumegroupsnapshotv1beta1.Volume
|
||||
func (p *pvcBackupItemAction) updateVGSCreatedVS(
|
||||
ctx context.Context,
|
||||
vsMap map[string]*snapshotv1api.VolumeSnapshot,
|
||||
vgs *volumegroupsnapshotv1beta1.VolumeGroupSnapshot,
|
||||
vgs *volumegroupsnapshotv1beta2.VolumeGroupSnapshot,
|
||||
backup *velerov1api.Backup,
|
||||
) error {
|
||||
for pvcName, vs := range vsMap {
|
||||
@@ -1094,7 +1087,7 @@ func (p *pvcBackupItemAction) updateVGSCreatedVS(
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pvcBackupItemAction) patchVGSCDeletionPolicy(ctx context.Context, vgs *volumegroupsnapshotv1beta1.VolumeGroupSnapshot) error {
|
||||
func (p *pvcBackupItemAction) patchVGSCDeletionPolicy(ctx context.Context, vgs *volumegroupsnapshotv1beta2.VolumeGroupSnapshot) error {
|
||||
if vgs == nil || vgs.Status == nil || vgs.Status.BoundVolumeGroupSnapshotContentName == nil {
|
||||
return errors.New("VolumeGroupSnapshotContent name not found in VGS status")
|
||||
}
|
||||
@@ -1102,7 +1095,7 @@ func (p *pvcBackupItemAction) patchVGSCDeletionPolicy(ctx context.Context, vgs *
|
||||
vgscName := vgs.Status.BoundVolumeGroupSnapshotContentName
|
||||
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
vgsc := &volumegroupsnapshotv1beta1.VolumeGroupSnapshotContent{}
|
||||
vgsc := &volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{}
|
||||
if err := p.crClient.Get(ctx, crclient.ObjectKey{Name: *vgscName}, vgsc); err != nil {
|
||||
return errors.Wrapf(err, "failed to get VolumeGroupSnapshotContent %s for VolumeGroupSnapshot %s/%s", *vgscName, vgs.Namespace, vgs.Name)
|
||||
}
|
||||
@@ -1121,9 +1114,9 @@ func (p *pvcBackupItemAction) patchVGSCDeletionPolicy(ctx context.Context, vgs *
|
||||
})
|
||||
}
|
||||
|
||||
func (p *pvcBackupItemAction) deleteVGSAndVGSC(ctx context.Context, vgs *volumegroupsnapshotv1beta1.VolumeGroupSnapshot) error {
|
||||
func (p *pvcBackupItemAction) deleteVGSAndVGSC(ctx context.Context, vgs *volumegroupsnapshotv1beta2.VolumeGroupSnapshot) error {
|
||||
if vgs.Status != nil && vgs.Status.BoundVolumeGroupSnapshotContentName != nil {
|
||||
vgsc := &volumegroupsnapshotv1beta1.VolumeGroupSnapshotContent{
|
||||
vgsc := &volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: *vgs.Status.BoundVolumeGroupSnapshotContentName,
|
||||
},
|
||||
@@ -1148,11 +1141,11 @@ func (p *pvcBackupItemAction) deleteVGSAndVGSC(ctx context.Context, vgs *volumeg
|
||||
|
||||
func (p *pvcBackupItemAction) waitForVGSCBinding(
|
||||
ctx context.Context,
|
||||
vgs *volumegroupsnapshotv1beta1.VolumeGroupSnapshot,
|
||||
vgs *volumegroupsnapshotv1beta2.VolumeGroupSnapshot,
|
||||
timeout time.Duration,
|
||||
) error {
|
||||
return wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) {
|
||||
vgsRef := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{}
|
||||
vgsRef := &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{}
|
||||
if err := p.crClient.Get(ctx, crclient.ObjectKeyFromObject(vgs), vgsRef); err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -1165,8 +1158,8 @@ func (p *pvcBackupItemAction) waitForVGSCBinding(
|
||||
})
|
||||
}
|
||||
|
||||
func (p *pvcBackupItemAction) getVGSByLabels(ctx context.Context, namespace string, labels map[string]string) (*volumegroupsnapshotv1beta1.VolumeGroupSnapshot, error) {
|
||||
vgsList := &volumegroupsnapshotv1beta1.VolumeGroupSnapshotList{}
|
||||
func (p *pvcBackupItemAction) getVGSByLabels(ctx context.Context, namespace string, labels map[string]string) (*volumegroupsnapshotv1beta2.VolumeGroupSnapshot, error) {
|
||||
vgsList := &volumegroupsnapshotv1beta2.VolumeGroupSnapshotList{}
|
||||
if err := p.crClient.List(ctx, vgsList,
|
||||
crclient.InNamespace(namespace),
|
||||
crclient.MatchingLabels(labels),
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
|
||||
volumegroupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
|
||||
volumegroupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -307,6 +307,28 @@ func TestProgress(t *testing.T) {
|
||||
operationID: "testing",
|
||||
expectedErr: "not found DataUpload for operationID testing",
|
||||
},
|
||||
{
|
||||
name: "DataUpload in different namespace is not found",
|
||||
backup: builder.ForBackup("velero", "test").Result(),
|
||||
dataUpload: &velerov2alpha1.DataUpload{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "DataUpload",
|
||||
APIVersion: "v2alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "other-namespace",
|
||||
Name: "testing",
|
||||
Labels: map[string]string{
|
||||
velerov1api.AsyncOperationIDLabel: "testing",
|
||||
},
|
||||
},
|
||||
Status: velerov2alpha1.DataUploadStatus{
|
||||
Phase: velerov2alpha1.DataUploadPhaseFailed,
|
||||
},
|
||||
},
|
||||
operationID: "testing",
|
||||
expectedErr: "not found DataUpload for operationID testing",
|
||||
},
|
||||
{
|
||||
name: "DataUpload is found",
|
||||
backup: builder.ForBackup("velero", "test").Result(),
|
||||
@@ -375,15 +397,15 @@ func TestCancel(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
backup *velerov1api.Backup
|
||||
dataUpload velerov2alpha1.DataUpload
|
||||
dataUpload *velerov2alpha1.DataUpload
|
||||
operationID string
|
||||
expectedErr error
|
||||
expectedErr string
|
||||
expectedDataUpload velerov2alpha1.DataUpload
|
||||
}{
|
||||
{
|
||||
name: "Cancel DataUpload",
|
||||
backup: builder.ForBackup("velero", "test").Result(),
|
||||
dataUpload: velerov2alpha1.DataUpload{
|
||||
dataUpload: &velerov2alpha1.DataUpload{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "DataUpload",
|
||||
APIVersion: velerov2alpha1.SchemeGroupVersion.String(),
|
||||
@@ -414,6 +436,31 @@ func TestCancel(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "DataUpload cannot be found",
|
||||
backup: builder.ForBackup("velero", "test").Result(),
|
||||
operationID: "testing",
|
||||
expectedErr: "not found DataUpload for operationID testing",
|
||||
},
|
||||
{
|
||||
name: "DataUpload in different namespace is not found",
|
||||
backup: builder.ForBackup("velero", "test").Result(),
|
||||
dataUpload: &velerov2alpha1.DataUpload{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "DataUpload",
|
||||
APIVersion: velerov2alpha1.SchemeGroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "other-namespace",
|
||||
Name: "testing",
|
||||
Labels: map[string]string{
|
||||
velerov1api.AsyncOperationIDLabel: "testing",
|
||||
},
|
||||
},
|
||||
},
|
||||
operationID: "testing",
|
||||
expectedErr: "not found DataUpload for operationID testing",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
@@ -426,17 +473,23 @@ func TestCancel(t *testing.T) {
|
||||
crClient: crClient,
|
||||
}
|
||||
|
||||
err := crClient.Create(t.Context(), &tc.dataUpload)
|
||||
require.NoError(t, err)
|
||||
if tc.dataUpload != nil {
|
||||
err := crClient.Create(t.Context(), tc.dataUpload)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = pvcBIA.Cancel(tc.operationID, tc.backup)
|
||||
require.NoError(t, err)
|
||||
err := pvcBIA.Cancel(tc.operationID, tc.backup)
|
||||
if tc.expectedErr != "" {
|
||||
require.EqualError(t, err, tc.expectedErr)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
|
||||
du := new(velerov2alpha1.DataUpload)
|
||||
err = crClient.Get(t.Context(), crclient.ObjectKey{Namespace: tc.dataUpload.Namespace, Name: tc.dataUpload.Name}, du)
|
||||
require.NoError(t, err)
|
||||
du := new(velerov2alpha1.DataUpload)
|
||||
err = crClient.Get(t.Context(), crclient.ObjectKey{Namespace: tc.dataUpload.Namespace, Name: tc.dataUpload.Name}, du)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, cmp.Equal(tc.expectedDataUpload, *du, cmpopts.IgnoreFields(velerov2alpha1.DataUpload{}, "ResourceVersion")))
|
||||
require.True(t, cmp.Equal(tc.expectedDataUpload, *du, cmpopts.IgnoreFields(velerov2alpha1.DataUpload{}, "ResourceVersion")))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -842,9 +895,13 @@ volumePolicies:
|
||||
crClient: client,
|
||||
}
|
||||
|
||||
// Pass nil for VolumeHelper in tests - it will fall back to creating a new one per call
|
||||
// This is the expected behavior for testing and third-party plugins
|
||||
result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup, nil)
|
||||
// Create a VolumeHelper using the same method the plugin would use
|
||||
vh, err := action.getOrCreateVolumeHelper(backup)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vh)
|
||||
|
||||
// Test with the pre-created VolumeHelper
|
||||
result, err := action.filterPVCsByVolumePolicy(tt.pvcs, vh)
|
||||
if tt.expectError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
@@ -959,7 +1016,7 @@ volumePolicies:
|
||||
require.NotNil(t, vh)
|
||||
|
||||
// Test with the pre-created VolumeHelper (non-nil path)
|
||||
result, err := action.filterPVCsByVolumePolicy(pvcs, backup, vh)
|
||||
result, err := action.filterPVCsByVolumePolicy(pvcs, vh)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should filter out the NFS PVC, leaving only the CSI PVC
|
||||
@@ -1117,7 +1174,7 @@ func TestDetermineVGSClass(t *testing.T) {
|
||||
name string
|
||||
backup *velerov1api.Backup
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
existingVGSClass []volumegroupsnapshotv1beta1.VolumeGroupSnapshotClass
|
||||
existingVGSClass []volumegroupsnapshotv1beta2.VolumeGroupSnapshotClass
|
||||
expectError bool
|
||||
expectResult string
|
||||
}{
|
||||
@@ -1149,7 +1206,7 @@ func TestDetermineVGSClass(t *testing.T) {
|
||||
name: "Default label-based match",
|
||||
pvc: &corev1api.PersistentVolumeClaim{},
|
||||
backup: &velerov1api.Backup{},
|
||||
existingVGSClass: []volumegroupsnapshotv1beta1.VolumeGroupSnapshotClass{
|
||||
existingVGSClass: []volumegroupsnapshotv1beta2.VolumeGroupSnapshotClass{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default-class",
|
||||
@@ -1170,7 +1227,7 @@ func TestDetermineVGSClass(t *testing.T) {
|
||||
name: "Multiple matching VGS classes",
|
||||
pvc: &corev1api.PersistentVolumeClaim{},
|
||||
backup: &velerov1api.Backup{},
|
||||
existingVGSClass: []volumegroupsnapshotv1beta1.VolumeGroupSnapshotClass{
|
||||
existingVGSClass: []volumegroupsnapshotv1beta2.VolumeGroupSnapshotClass{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "class1",
|
||||
@@ -1200,7 +1257,7 @@ func TestDetermineVGSClass(t *testing.T) {
|
||||
|
||||
client := velerotest.NewFakeControllerRuntimeClient(t, initObjs...)
|
||||
logger := logrus.New()
|
||||
require.NoError(t, volumegroupsnapshotv1beta1.AddToScheme(client.Scheme()))
|
||||
require.NoError(t, volumegroupsnapshotv1beta2.AddToScheme(client.Scheme()))
|
||||
|
||||
action := &pvcBackupItemAction{crClient: client, log: logger}
|
||||
|
||||
@@ -1259,13 +1316,13 @@ func TestCreateVolumeGroupSnapshot(t *testing.T) {
|
||||
assert.Equal(t, string(testBackup.UID), vgs.Labels[velerov1api.BackupUIDLabel])
|
||||
|
||||
// Check that it exists in fake client
|
||||
retrieved := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{}
|
||||
retrieved := &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{}
|
||||
err = crClient.Get(t.Context(), crclient.ObjectKey{Name: vgs.Name, Namespace: vgs.Namespace}, retrieved)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestWaitForVGSAssociatedVS(t *testing.T) {
|
||||
vgs := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{
|
||||
vgs := &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-vgs",
|
||||
Namespace: "test-ns",
|
||||
@@ -1278,7 +1335,7 @@ func TestWaitForVGSAssociatedVS(t *testing.T) {
|
||||
if owned {
|
||||
refs = []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "groupsnapshot.storage.k8s.io/v1beta1",
|
||||
APIVersion: "groupsnapshot.storage.k8s.io/v1beta2",
|
||||
Kind: "VolumeGroupSnapshot",
|
||||
Name: vgs.Name,
|
||||
UID: vgs.UID,
|
||||
@@ -1425,7 +1482,7 @@ func TestUpdateVGSCreatedVS(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
vgs := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{
|
||||
vgs := &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-vgs",
|
||||
Namespace: "ns",
|
||||
@@ -1438,7 +1495,7 @@ func TestUpdateVGSCreatedVS(t *testing.T) {
|
||||
if withVGSOwner {
|
||||
refs = []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "groupsnapshot.storage.k8s.io/v1beta1",
|
||||
APIVersion: "groupsnapshot.storage.k8s.io/v1beta2",
|
||||
Kind: "VolumeGroupSnapshot",
|
||||
Name: vgs.Name,
|
||||
UID: vgs.UID,
|
||||
@@ -1557,18 +1614,18 @@ func TestPatchVGSCDeletionPolicy(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
vgsc := &volumegroupsnapshotv1beta1.VolumeGroupSnapshotContent{
|
||||
vgsc := &volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test-vgsc"},
|
||||
Spec: volumegroupsnapshotv1beta1.VolumeGroupSnapshotContentSpec{
|
||||
Spec: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSpec{
|
||||
DeletionPolicy: tt.initialPolicy,
|
||||
},
|
||||
}
|
||||
vgs := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{
|
||||
vgs := &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-vgs",
|
||||
Namespace: "ns",
|
||||
},
|
||||
Status: &volumegroupsnapshotv1beta1.VolumeGroupSnapshotStatus{
|
||||
Status: &volumegroupsnapshotv1beta2.VolumeGroupSnapshotStatus{
|
||||
BoundVolumeGroupSnapshotContentName: pointer.String("test-vgsc"),
|
||||
},
|
||||
}
|
||||
@@ -1586,7 +1643,7 @@ func TestPatchVGSCDeletionPolicy(t *testing.T) {
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
updated := &volumegroupsnapshotv1beta1.VolumeGroupSnapshotContent{}
|
||||
updated := &volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{}
|
||||
err = client.Get(t.Context(), crclient.ObjectKey{Name: "test-vgsc"}, updated)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedPolicy, updated.Spec.DeletionPolicy)
|
||||
@@ -1595,20 +1652,20 @@ func TestPatchVGSCDeletionPolicy(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDeleteVGSAndVGSC(t *testing.T) {
|
||||
makeVGS := func(name, namespace string, boundVGSCName *string) *volumegroupsnapshotv1beta1.VolumeGroupSnapshot {
|
||||
return &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{
|
||||
makeVGS := func(name, namespace string, boundVGSCName *string) *volumegroupsnapshotv1beta2.VolumeGroupSnapshot {
|
||||
return &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Status: &volumegroupsnapshotv1beta1.VolumeGroupSnapshotStatus{
|
||||
Status: &volumegroupsnapshotv1beta2.VolumeGroupSnapshotStatus{
|
||||
BoundVolumeGroupSnapshotContentName: boundVGSCName,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
makeVGSC := func(name string) *volumegroupsnapshotv1beta1.VolumeGroupSnapshotContent {
|
||||
return &volumegroupsnapshotv1beta1.VolumeGroupSnapshotContent{
|
||||
makeVGSC := func(name string) *volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent {
|
||||
return &volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
@@ -1617,8 +1674,8 @@ func TestDeleteVGSAndVGSC(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
vgs *volumegroupsnapshotv1beta1.VolumeGroupSnapshot
|
||||
existingVGSC *volumegroupsnapshotv1beta1.VolumeGroupSnapshotContent
|
||||
vgs *volumegroupsnapshotv1beta2.VolumeGroupSnapshot
|
||||
existingVGSC *volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent
|
||||
expectVGSCDelete bool
|
||||
expectVGSDelete bool
|
||||
}{
|
||||
@@ -1664,13 +1721,13 @@ func TestDeleteVGSAndVGSC(t *testing.T) {
|
||||
|
||||
// Check VGSC is deleted
|
||||
if tt.expectVGSCDelete {
|
||||
got := &volumegroupsnapshotv1beta1.VolumeGroupSnapshotContent{}
|
||||
got := &volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{}
|
||||
err = client.Get(t.Context(), crclient.ObjectKey{Name: "test-vgsc"}, got)
|
||||
assert.True(t, apierrors.IsNotFound(err), "expected VGSC to be deleted")
|
||||
}
|
||||
|
||||
// Check VGS is deleted
|
||||
gotVGS := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{}
|
||||
gotVGS := &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{}
|
||||
err = client.Get(t.Context(), crclient.ObjectKey{Name: "test-vgs", Namespace: "ns"}, gotVGS)
|
||||
assert.True(t, apierrors.IsNotFound(err), "expected VGS to be deleted")
|
||||
})
|
||||
@@ -1765,8 +1822,8 @@ func TestFindExistingVSForBackup(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestWaitForVGSCBinding(t *testing.T) {
|
||||
makeVGS := func(name string, withStatus bool) *volumegroupsnapshotv1beta1.VolumeGroupSnapshot {
|
||||
vgs := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{
|
||||
makeVGS := func(name string, withStatus bool) *volumegroupsnapshotv1beta2.VolumeGroupSnapshot {
|
||||
vgs := &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: "ns",
|
||||
@@ -1774,7 +1831,7 @@ func TestWaitForVGSCBinding(t *testing.T) {
|
||||
}
|
||||
if withStatus {
|
||||
contentName := "vgsc-123"
|
||||
vgs.Status = &volumegroupsnapshotv1beta1.VolumeGroupSnapshotStatus{
|
||||
vgs.Status = &volumegroupsnapshotv1beta2.VolumeGroupSnapshotStatus{
|
||||
BoundVolumeGroupSnapshotContentName: &contentName,
|
||||
}
|
||||
}
|
||||
@@ -1783,7 +1840,7 @@ func TestWaitForVGSCBinding(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
vgs *volumegroupsnapshotv1beta1.VolumeGroupSnapshot
|
||||
vgs *volumegroupsnapshotv1beta2.VolumeGroupSnapshot
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
@@ -1826,8 +1883,8 @@ func TestGetVGSByLabels(t *testing.T) {
|
||||
labelVal := "backup-123"
|
||||
testLabels := map[string]string{labelKey: labelVal}
|
||||
|
||||
makeVGS := func(name string, labels map[string]string) *volumegroupsnapshotv1beta1.VolumeGroupSnapshot {
|
||||
return &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{
|
||||
makeVGS := func(name string, labels map[string]string) *volumegroupsnapshotv1beta2.VolumeGroupSnapshot {
|
||||
return &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: "test-ns",
|
||||
@@ -1912,7 +1969,7 @@ func (f *failingClient) List(ctx context.Context, list crclient.ObjectList, opts
|
||||
}
|
||||
|
||||
func TestHasOwnerReference(t *testing.T) {
|
||||
vgs := &volumegroupsnapshotv1beta1.VolumeGroupSnapshot{
|
||||
vgs := &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-vgs",
|
||||
Namespace: "test-ns",
|
||||
@@ -1929,7 +1986,7 @@ func TestHasOwnerReference(t *testing.T) {
|
||||
name: "match kind, apiversion, uid",
|
||||
ownerRef: metav1.OwnerReference{
|
||||
Kind: kuberesource.VGSKind,
|
||||
APIVersion: volumegroupsnapshotv1beta1.GroupName + "/" + volumegroupsnapshotv1beta1.SchemeGroupVersion.Version,
|
||||
APIVersion: volumegroupsnapshotv1beta2.GroupName + "/" + volumegroupsnapshotv1beta2.SchemeGroupVersion.Version,
|
||||
UID: vgs.UID,
|
||||
},
|
||||
expect: true,
|
||||
@@ -1938,7 +1995,7 @@ func TestHasOwnerReference(t *testing.T) {
|
||||
name: "mismatch kind",
|
||||
ownerRef: metav1.OwnerReference{
|
||||
Kind: "other-kind",
|
||||
APIVersion: volumegroupsnapshotv1beta1.GroupName + "/" + volumegroupsnapshotv1beta1.SchemeGroupVersion.Version,
|
||||
APIVersion: volumegroupsnapshotv1beta2.GroupName + "/" + volumegroupsnapshotv1beta2.SchemeGroupVersion.Version,
|
||||
UID: vgs.UID,
|
||||
},
|
||||
expect: false,
|
||||
@@ -1956,7 +2013,7 @@ func TestHasOwnerReference(t *testing.T) {
|
||||
name: "mismatch uid",
|
||||
ownerRef: metav1.OwnerReference{
|
||||
Kind: kuberesource.VGSKind,
|
||||
APIVersion: volumegroupsnapshotv1beta1.GroupName + "/" + volumegroupsnapshotv1beta1.SchemeGroupVersion.Version,
|
||||
APIVersion: volumegroupsnapshotv1beta2.GroupName + "/" + volumegroupsnapshotv1beta2.SchemeGroupVersion.Version,
|
||||
UID: "wrong-uid",
|
||||
},
|
||||
expect: false,
|
||||
|
||||
@@ -151,6 +151,12 @@ func (p *volumeSnapshotBackupItemAction) Execute(
|
||||
annotations[velerov1api.VolumeSnapshotRestoreSize] = resource.NewQuantity(
|
||||
*vsc.Status.RestoreSize, resource.BinarySI).String()
|
||||
}
|
||||
|
||||
// Capture VolumeGroupSnapshotHandle to create stub VGSC during restore
|
||||
// for CSI drivers that populate this field (e.g., Ceph RBD).
|
||||
if vsc.Status.VolumeGroupSnapshotHandle != nil {
|
||||
annotations[velerov1api.VolumeGroupSnapshotHandleAnnotation] = *vsc.Status.VolumeGroupSnapshotHandle
|
||||
}
|
||||
}
|
||||
|
||||
p.log.Infof("Patching VolumeSnapshotContent %s with velero BackupNameLabel",
|
||||
|
||||
@@ -98,6 +98,14 @@ func (m *backedUpItemsMap) AddItem(key itemKey) {
|
||||
m.totalItems[key] = struct{}{}
|
||||
}
|
||||
|
||||
func (m *backedUpItemsMap) DeleteItem(key itemKey) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
delete(m.backedUpItems, key)
|
||||
delete(m.totalItems, key)
|
||||
}
|
||||
|
||||
func (m *backedUpItemsMap) AddItemToTotal(key itemKey) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
@@ -40,7 +40,6 @@ import (
|
||||
"github.com/vmware-tanzu/velero/internal/hook"
|
||||
"github.com/vmware-tanzu/velero/internal/resourcepolicies"
|
||||
"github.com/vmware-tanzu/velero/internal/volume"
|
||||
"github.com/vmware-tanzu/velero/internal/volumehelper"
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/archive"
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
@@ -54,6 +53,7 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/podvolume"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
csiutil "github.com/vmware-tanzu/velero/pkg/util/csi"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/volumehelper"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -244,6 +244,14 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
|
||||
return false, itemFiles, kubeerrs.NewAggregate(backupErrs)
|
||||
}
|
||||
|
||||
// If err is nil and updatedObj is nil, it means the item is skipped by plugin action,
|
||||
// we should return here to avoid backing up the item, and avoid potential NPE in the following code.
|
||||
if updatedObj == nil {
|
||||
log.Infof("Remove item from the backup's backupItems list and totalItems list because it's skipped by plugin action.")
|
||||
ib.backupRequest.BackedUpItems.DeleteItem(key)
|
||||
return false, itemFiles, nil
|
||||
}
|
||||
|
||||
itemFiles = append(itemFiles, additionalItemFiles...)
|
||||
obj = updatedObj
|
||||
if metadata, err = meta.Accessor(obj); err != nil {
|
||||
@@ -398,6 +406,13 @@ func (ib *itemBackupper) executeActions(
|
||||
}
|
||||
|
||||
u := &unstructured.Unstructured{Object: updatedItem.UnstructuredContent()}
|
||||
|
||||
if _, ok := u.GetAnnotations()[velerov1api.SkipFromBackupAnnotation]; ok {
|
||||
log.Infof("Resource (groupResource=%s, namespace=%s, name=%s) is skipped from backup by action %s.",
|
||||
groupResource.String(), namespace, name, actionName)
|
||||
return nil, itemFiles, nil
|
||||
}
|
||||
|
||||
if actionName == csiBIAPluginName {
|
||||
if additionalItemIdentifiers == nil && u.GetAnnotations()[velerov1api.SkippedNoCSIPVAnnotation] == "true" {
|
||||
// snapshot was skipped by CSI plugin
|
||||
@@ -687,15 +702,14 @@ func (ib *itemBackupper) getMatchAction(obj runtime.Unstructured, groupResource
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
pvName := pvc.Spec.VolumeName
|
||||
if pvName == "" {
|
||||
return nil, errors.Errorf("PVC has no volume backing this claim")
|
||||
}
|
||||
|
||||
pv := &corev1api.PersistentVolume{}
|
||||
if err := ib.kbClient.Get(context.Background(), kbClient.ObjectKey{Name: pvName}, pv); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
var pv *corev1api.PersistentVolume
|
||||
if pvName := pvc.Spec.VolumeName; pvName != "" {
|
||||
pv = &corev1api.PersistentVolume{}
|
||||
if err := ib.kbClient.Get(context.Background(), kbClient.ObjectKey{Name: pvName}, pv); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
// If pv is nil for unbound PVCs - policy matching will use PVC-only conditions
|
||||
vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
|
||||
return ib.backupRequest.ResPolicies.GetMatchAction(vfd)
|
||||
}
|
||||
@@ -709,7 +723,10 @@ func (ib *itemBackupper) trackSkippedPV(obj runtime.Unstructured, groupResource
|
||||
if name, err := getPVName(obj, groupResource); len(name) > 0 && err == nil {
|
||||
ib.backupRequest.SkippedPVTracker.Track(name, approach, reason)
|
||||
} else if err != nil {
|
||||
log.WithError(err).Warnf("unable to get PV name, skip tracking.")
|
||||
// Log at info level for tracking purposes. This is not an error because
|
||||
// it's expected for some resources (e.g., PVCs in Pending or Lost phase)
|
||||
// to not have a PV name. This occurs when volume policy skips unbound PVCs.
|
||||
log.WithError(err).Infof("unable to get PV name, skip tracking.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -719,6 +736,17 @@ func (ib *itemBackupper) unTrackSkippedPV(obj runtime.Unstructured, groupResourc
|
||||
if name, err := getPVName(obj, groupResource); len(name) > 0 && err == nil {
|
||||
ib.backupRequest.SkippedPVTracker.Untrack(name)
|
||||
} else if err != nil {
|
||||
// For PVCs in Pending or Lost phase, it's expected that there's no PV name.
|
||||
// Log at debug level instead of warning to reduce noise.
|
||||
if groupResource == kuberesource.PersistentVolumeClaims {
|
||||
pvc := new(corev1api.PersistentVolumeClaim)
|
||||
if convErr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); convErr == nil {
|
||||
if pvc.Status.Phase == corev1api.ClaimPending || pvc.Status.Phase == corev1api.ClaimLost {
|
||||
log.WithError(err).Debugf("unable to get PV name for %s PVC, skip untracking.", pvc.Status.Phase)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
log.WithError(err).Warnf("unable to get PV name, skip untracking.")
|
||||
}
|
||||
}
|
||||
|
||||
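For reference, a compact sketch of the unbound-PVC handling introduced in getMatchAction above: the PersistentVolume is only fetched when the claim is actually bound, and a nil PV is returned for Pending/Lost claims so volume-policy matching can fall back to PVC-only conditions. The function name below is illustrative; it is not the Velero API, but it uses the standard controller-runtime client calls shown in the diff.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// pvForClaim fetches the PV backing pvc, or returns nil (with no error) when
// the claim is unbound, instead of treating a missing volume name as a failure.
func pvForClaim(ctx context.Context, c client.Client, pvc *corev1.PersistentVolumeClaim) (*corev1.PersistentVolume, error) {
	name := pvc.Spec.VolumeName
	if name == "" {
		// Pending/Lost claims have no bound volume; policy matching uses PVC-only conditions.
		return nil, nil
	}
	pv := &corev1.PersistentVolume{}
	if err := c.Get(ctx, client.ObjectKey{Name: name}, pv); err != nil {
		return nil, err
	}
	return pv, nil
}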
@@ -17,12 +17,15 @@ limitations under the License.
package backup

import (
"bytes"
"testing"

"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime/schema"
ctrlfake "sigs.k8s.io/controller-runtime/pkg/client/fake"

"github.com/vmware-tanzu/velero/internal/resourcepolicies"
"github.com/vmware-tanzu/velero/pkg/kuberesource"

"github.com/stretchr/testify/assert"
@@ -269,3 +272,225 @@ func TestAddVolumeInfo(t *testing.T) {
})
}
}

func TestGetMatchAction_PendingLostPVC(t *testing.T) {
scheme := runtime.NewScheme()
require.NoError(t, corev1api.AddToScheme(scheme))

// Create resource policies that skip Pending/Lost PVCs
resPolicies := &resourcepolicies.ResourcePolicies{
Version: "v1",
VolumePolicies: []resourcepolicies.VolumePolicy{
{
Conditions: map[string]any{
"pvcPhase": []string{"Pending", "Lost"},
},
Action: resourcepolicies.Action{
Type: resourcepolicies.Skip,
},
},
},
}
policies := &resourcepolicies.Policies{}
err := policies.BuildPolicy(resPolicies)
require.NoError(t, err)

testCases := []struct {
name string
pvc *corev1api.PersistentVolumeClaim
pv *corev1api.PersistentVolume
expectedAction *resourcepolicies.Action
expectError bool
}{
{
name: "Pending PVC with no VolumeName should match pvcPhase policy",
pvc: builder.ForPersistentVolumeClaim("ns", "pending-pvc").
StorageClass("test-sc").
Phase(corev1api.ClaimPending).
Result(),
pv: nil,
expectedAction: &resourcepolicies.Action{Type: resourcepolicies.Skip},
expectError: false,
},
{
name: "Lost PVC with no VolumeName should match pvcPhase policy",
pvc: builder.ForPersistentVolumeClaim("ns", "lost-pvc").
StorageClass("test-sc").
Phase(corev1api.ClaimLost).
Result(),
pv: nil,
expectedAction: &resourcepolicies.Action{Type: resourcepolicies.Skip},
expectError: false,
},
{
name: "Bound PVC with VolumeName and matching PV should not match pvcPhase policy",
pvc: builder.ForPersistentVolumeClaim("ns", "bound-pvc").
StorageClass("test-sc").
VolumeName("test-pv").
Phase(corev1api.ClaimBound).
Result(),
pv: builder.ForPersistentVolume("test-pv").StorageClass("test-sc").Result(),
expectedAction: nil,
expectError: false,
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Build fake client with PV if present
clientBuilder := ctrlfake.NewClientBuilder().WithScheme(scheme)
if tc.pv != nil {
clientBuilder = clientBuilder.WithObjects(tc.pv)
}
fakeClient := clientBuilder.Build()

ib := &itemBackupper{
kbClient: fakeClient,
backupRequest: &Request{
ResPolicies: policies,
},
}

// Convert PVC to unstructured
pvcData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
require.NoError(t, err)
obj := &unstructured.Unstructured{Object: pvcData}

action, err := ib.getMatchAction(obj, kuberesource.PersistentVolumeClaims, csiBIAPluginName)
if tc.expectError {
require.Error(t, err)
} else {
require.NoError(t, err)
}

if tc.expectedAction == nil {
assert.Nil(t, action)
} else {
require.NotNil(t, action)
assert.Equal(t, tc.expectedAction.Type, action.Type)
}
})
}
}

func TestTrackSkippedPV_PendingLostPVC(t *testing.T) {
testCases := []struct {
name string
pvc *corev1api.PersistentVolumeClaim
}{
{
name: "Pending PVC should log at info level",
pvc: builder.ForPersistentVolumeClaim("ns", "pending-pvc").
Phase(corev1api.ClaimPending).
Result(),
},
{
name: "Lost PVC should log at info level",
pvc: builder.ForPersistentVolumeClaim("ns", "lost-pvc").
Phase(corev1api.ClaimLost).
Result(),
},
{
name: "Bound PVC without VolumeName should log at info level",
pvc: builder.ForPersistentVolumeClaim("ns", "bound-pvc").
Phase(corev1api.ClaimBound).
Result(),
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ib := &itemBackupper{
backupRequest: &Request{
SkippedPVTracker: NewSkipPVTracker(),
},
}

// Set up log capture
logOutput := &bytes.Buffer{}
logger := logrus.New()
logger.SetOutput(logOutput)
logger.SetLevel(logrus.DebugLevel)

// Convert PVC to unstructured
pvcData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
require.NoError(t, err)
obj := &unstructured.Unstructured{Object: pvcData}

ib.trackSkippedPV(obj, kuberesource.PersistentVolumeClaims, "", "test reason", logger)

logStr := logOutput.String()
assert.Contains(t, logStr, "level=info")
assert.Contains(t, logStr, "unable to get PV name, skip tracking.")
})
}
}

func TestUnTrackSkippedPV_PendingLostPVC(t *testing.T) {
testCases := []struct {
name string
pvc *corev1api.PersistentVolumeClaim
expectWarningLog bool
expectDebugMessage string
}{
{
name: "Pending PVC should log at debug level, not warning",
pvc: builder.ForPersistentVolumeClaim("ns", "pending-pvc").
Phase(corev1api.ClaimPending).
Result(),
expectWarningLog: false,
expectDebugMessage: "unable to get PV name for Pending PVC, skip untracking.",
},
{
name: "Lost PVC should log at debug level, not warning",
pvc: builder.ForPersistentVolumeClaim("ns", "lost-pvc").
Phase(corev1api.ClaimLost).
Result(),
expectWarningLog: false,
expectDebugMessage: "unable to get PV name for Lost PVC, skip untracking.",
},
{
name: "Bound PVC without VolumeName should log warning",
pvc: builder.ForPersistentVolumeClaim("ns", "bound-pvc").
Phase(corev1api.ClaimBound).
Result(),
expectWarningLog: true,
expectDebugMessage: "",
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ib := &itemBackupper{
backupRequest: &Request{
SkippedPVTracker: NewSkipPVTracker(),
},
}

// Set up log capture
logOutput := &bytes.Buffer{}
logger := logrus.New()
logger.SetOutput(logOutput)
logger.SetLevel(logrus.DebugLevel)

// Convert PVC to unstructured
pvcData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
require.NoError(t, err)
obj := &unstructured.Unstructured{Object: pvcData}

ib.unTrackSkippedPV(obj, kuberesource.PersistentVolumeClaims, logger)

logStr := logOutput.String()
if tc.expectWarningLog {
assert.Contains(t, logStr, "level=warning")
assert.Contains(t, logStr, "unable to get PV name, skip untracking.")
} else {
assert.NotContains(t, logStr, "level=warning")
if tc.expectDebugMessage != "" {
assert.Contains(t, logStr, "level=debug")
assert.Contains(t, logStr, tc.expectDebugMessage)
}
}
})
}
}
@@ -19,7 +19,7 @@ package client
import (
"os"

volumegroupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
volumegroupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"

apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
@@ -168,7 +168,7 @@ func (f *factory) KubebuilderClient() (kbclient.Client, error) {
if err := snapshotv1api.AddToScheme(scheme); err != nil {
return nil, err
}
if err := volumegroupsnapshotv1beta1.AddToScheme(scheme); err != nil {
if err := volumegroupsnapshotv1beta2.AddToScheme(scheme); err != nil {
return nil, err
}
kubebuilderClient, err := kbclient.New(clientConfig, kbclient.Options{
@@ -207,7 +207,7 @@ func (f *factory) KubebuilderWatchClient() (kbclient.WithWatch, error) {
if err := snapshotv1api.AddToScheme(scheme); err != nil {
return nil, err
}
if err := volumegroupsnapshotv1beta1.AddToScheme(scheme); err != nil {
if err := volumegroupsnapshotv1beta2.AddToScheme(scheme); err != nil {
return nil, err
}
kubebuilderWatchClient, err := kbclient.NewWithWatch(clientConfig, kbclient.Options{
@@ -275,11 +275,21 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) {
return nil, err
}
}
veleroPodResources, err := kubeutil.ParseResourceRequirements(o.VeleroPodCPURequest, o.VeleroPodMemRequest, o.VeleroPodCPULimit, o.VeleroPodMemLimit)
veleroPodResources, err := kubeutil.ParseCPUAndMemoryResources(
o.VeleroPodCPURequest,
o.VeleroPodMemRequest,
o.VeleroPodCPULimit,
o.VeleroPodMemLimit,
)
if err != nil {
return nil, err
}
nodeAgentPodResources, err := kubeutil.ParseResourceRequirements(o.NodeAgentPodCPURequest, o.NodeAgentPodMemRequest, o.NodeAgentPodCPULimit, o.NodeAgentPodMemLimit)
nodeAgentPodResources, err := kubeutil.ParseCPUAndMemoryResources(
o.NodeAgentPodCPURequest,
o.NodeAgentPodMemRequest,
o.NodeAgentPodCPULimit,
o.NodeAgentPodMemLimit,
)
if err != nil {
return nil, err
}
@@ -371,8 +381,8 @@ This is useful as a starting point for more customized installations.

# velero install --provider azure --plugins velero/velero-plugin-for-microsoft-azure:v1.0.0 --bucket $BLOB_CONTAINER --secret-file ./credentials-velero --backup-location-config resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID[,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] --snapshot-location-config apiTimeout=<YOUR_TIMEOUT>[,resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID]`,
Run: func(c *cobra.Command, args []string) {
cmd.CheckError(o.Validate(c, args, f))
cmd.CheckError(o.Complete(args, f))
cmd.CheckError(o.Validate(c, args, f))
cmd.CheckError(o.Run(c, f))
},
}
@@ -17,11 +17,18 @@ limitations under the License.
package install

import (
"context"
"testing"

"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

factorymocks "github.com/vmware-tanzu/velero/pkg/client/mocks"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
)

func TestPriorityClassNameFlag(t *testing.T) {
@@ -91,3 +98,168 @@ func TestPriorityClassNameFlag(t *testing.T) {
})
}
}

// makeValidateCmd returns a minimal *cobra.Command that satisfies output.ValidateFlags.
func makeValidateCmd() *cobra.Command {
c := &cobra.Command{}
// output.ValidateFlags only inspects the "output" flag; add it so validation passes.
c.Flags().StringP("output", "o", "", "output format")
return c
}

// configMapInNamespace builds a ConfigMap with a single JSON data entry in the given namespace.
func configMapInNamespace(namespace, name, jsonValue string) *corev1api.ConfigMap {
return &corev1api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Data: map[string]string{
"config": jsonValue,
},
}
}

// TestValidateConfigMapsUseFactoryNamespace verifies that Validate resolves the target
// namespace correctly for all three ConfigMap flags.
//
// The fix (Option B) calls Complete before Validate in NewCommand so that o.Namespace is
// populated from f.Namespace() before VerifyJSONConfigs runs. Tests mirror that order by
// calling Complete before Validate.
func TestValidateConfigMapsUseFactoryNamespace(t *testing.T) {
const targetNS = "tenant-b"
const defaultNS = "default"

// Shared options that satisfy every other validation gate:
// - NoDefaultBackupLocation=true + UseVolumeSnapshots=false skips provider/bucket/plugins checks
// - NoSecret=true satisfies the secret-file check
baseOptions := func() *Options {
o := NewInstallOptions()
o.NoDefaultBackupLocation = true
o.UseVolumeSnapshots = false
o.NoSecret = true
return o
}

tests := []struct {
name string
setupOpts func(o *Options, cmName string)
cmJSON string
wantErrMsg string // substring expected in error; empty means success
}{
{
name: "NodeAgentConfigMap found in factory namespace",
setupOpts: func(o *Options, cmName string) {
o.NodeAgentConfigMap = cmName
},
cmJSON: `{}`,
},
{
name: "NodeAgentConfigMap not found when only in default namespace",
setupOpts: func(o *Options, cmName string) {
o.NodeAgentConfigMap = cmName
},
cmJSON: `{}`,
wantErrMsg: "--node-agent-configmap specified ConfigMap",
},
{
name: "RepoMaintenanceJobConfigMap found in factory namespace",
setupOpts: func(o *Options, cmName string) {
o.RepoMaintenanceJobConfigMap = cmName
},
cmJSON: `{}`,
},
{
name: "RepoMaintenanceJobConfigMap not found when only in default namespace",
setupOpts: func(o *Options, cmName string) {
o.RepoMaintenanceJobConfigMap = cmName
},
cmJSON: `{}`,
wantErrMsg: "--repo-maintenance-job-configmap specified ConfigMap",
},
{
name: "BackupRepoConfigMap found in factory namespace",
setupOpts: func(o *Options, cmName string) {
o.BackupRepoConfigMap = cmName
},
cmJSON: `{}`,
},
{
name: "BackupRepoConfigMap not found when only in default namespace",
setupOpts: func(o *Options, cmName string) {
o.BackupRepoConfigMap = cmName
},
cmJSON: `{}`,
wantErrMsg: "--backup-repository-configmap specified ConfigMap",
},
}

for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
const cmName = "my-config"

// Decide where to place the ConfigMap:
// "not found" cases put it in "default", so the factory namespace lookup misses it.
cmNamespace := targetNS
if tc.wantErrMsg != "" {
cmNamespace = defaultNS
}

cm := configMapInNamespace(cmNamespace, cmName, tc.cmJSON)
kbClient := velerotest.NewFakeControllerRuntimeClient(t, cm)

f := &factorymocks.Factory{}
f.On("Namespace").Return(targetNS)
f.On("KubebuilderClient").Return(kbClient, nil)

o := baseOptions()
tc.setupOpts(o, cmName)

// Mirror the NewCommand call order: Complete populates o.Namespace before Validate runs.
require.NoError(t, o.Complete([]string{}, f))

c := makeValidateCmd()
c.SetContext(context.Background())

err := o.Validate(c, []string{}, f)

if tc.wantErrMsg == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
assert.Contains(t, err.Error(), tc.wantErrMsg)
}
})
}
}

// TestNewCommandRunClosureOrder covers the Run closure in NewCommand (the lines that were
// reordered by the fix: Complete → Validate → Run).
//
// The closure uses CheckError which calls os.Exit on any error, so the only safe path is one
// where all three steps return nil. DryRun=true causes o.Run to return after PrintWithFormat
// (which is a no-op when no --output flag is set) without touching any cluster clients.
func TestNewCommandRunClosureOrder(t *testing.T) {
const targetNS = "tenant-b"
const cmName = "my-config"

cm := configMapInNamespace(targetNS, cmName, `{}`)
kbClient := velerotest.NewFakeControllerRuntimeClient(t, cm)

f := &factorymocks.Factory{}
f.On("Namespace").Return(targetNS)
f.On("KubebuilderClient").Return(kbClient, nil)

c := NewCommand(f)
c.SetArgs([]string{
"--no-default-backup-location",
"--use-volume-snapshots=false",
"--no-secret",
"--dry-run",
"--node-agent-configmap", cmName,
})

// Execute drives the full Run closure: Complete populates o.Namespace, Validate
// looks up the ConfigMap in targetNS (succeeds), Run returns early via DryRun.
require.NoError(t, c.Execute())
}
@@ -323,7 +323,25 @@ func (s *nodeAgentServer) run() {

podResources := corev1api.ResourceRequirements{}
if s.dataPathConfigs != nil && s.dataPathConfigs.PodResources != nil {
if res, err := kube.ParseResourceRequirements(s.dataPathConfigs.PodResources.CPURequest, s.dataPathConfigs.PodResources.MemoryRequest, s.dataPathConfigs.PodResources.CPULimit, s.dataPathConfigs.PodResources.MemoryLimit); err != nil {
// To make the PodResources ConfigMap without ephemeral storage request/limit backward compatible,
// need to avoid set value as empty, because empty string will cause parsing error.
ephemeralStorageRequest := constant.DefaultEphemeralStorageRequest
if s.dataPathConfigs.PodResources.EphemeralStorageRequest != "" {
ephemeralStorageRequest = s.dataPathConfigs.PodResources.EphemeralStorageRequest
}
ephemeralStorageLimit := constant.DefaultEphemeralStorageLimit
if s.dataPathConfigs.PodResources.EphemeralStorageLimit != "" {
ephemeralStorageLimit = s.dataPathConfigs.PodResources.EphemeralStorageLimit
}

if res, err := kube.ParseResourceRequirements(
s.dataPathConfigs.PodResources.CPURequest,
s.dataPathConfigs.PodResources.MemoryRequest,
ephemeralStorageRequest,
s.dataPathConfigs.PodResources.CPULimit,
s.dataPathConfigs.PodResources.MemoryLimit,
ephemeralStorageLimit,
); err != nil {
s.logger.WithError(err).Warn("Pod resource requirements are invalid, ignore")
} else {
podResources = res
@@ -27,7 +27,7 @@ import (
"time"

logrusr "github.com/bombsimon/logrusr/v3"
volumegroupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
volumegroupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -247,7 +247,7 @@ func newServer(f client.Factory, config *config.Config, logger *logrus.Logger) (
cancelFunc()
return nil, err
}
if err := volumegroupsnapshotv1beta1.AddToScheme(scheme); err != nil {
if err := volumegroupsnapshotv1beta2.AddToScheme(scheme); err != nil {
cancelFunc()
return nil, err
}
@@ -204,9 +204,9 @@ func Test_newServer(t *testing.T) {
|
||||
}, logger)
|
||||
require.Error(t, err)
|
||||
|
||||
// invalid clientQPS Restic uploader
|
||||
// invalid clientQPS Kopia uploader
|
||||
_, err = newServer(factory, &config.Config{
|
||||
UploaderType: uploader.ResticType,
|
||||
UploaderType: uploader.KopiaType,
|
||||
ClientQPS: -1,
|
||||
}, logger)
|
||||
require.Error(t, err)
|
||||
|
||||
@@ -23,4 +23,7 @@ const (
 
 	PluginCSIPVCRestoreRIA            = "velero.io/csi-pvc-restorer"
 	PluginCsiVolumeSnapshotRestoreRIA = "velero.io/csi-volumesnapshot-restorer"
+
+	DefaultEphemeralStorageRequest = "0"
+	DefaultEphemeralStorageLimit   = "0"
 )

@@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch/v5"
|
||||
@@ -267,8 +268,17 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Unable to download tarball for backup %s, skipping associated DeleteItemAction plugins", backup.Name)
|
||||
// for backups which failed before tarball object could be uploaded we do offline cleanup
|
||||
log.Info("Cleaning up CSI volumesnapshots")
|
||||
r.deleteCSIVolumeSnapshotsIfAny(ctx, backup, log)
|
||||
|
||||
// If the tarball simply does not exist (HTTP 404 / not found), the download
|
||||
// failure is permanent and not retryable, so we let deletion proceed.
|
||||
// For transient errors (throttling, auth failures, network issues), record
|
||||
// the error to fail the deletion so it can be retried later.
|
||||
if !isTarballNotFoundError(err) {
|
||||
errs = append(errs, errors.Wrapf(err, "error downloading backup tarball, CSI snapshot cleanup was skipped").Error())
|
||||
}
|
||||
} else {
|
||||
defer closeAndRemoveFile(backupFile, r.logger)
|
||||
deleteCtx := &delete.Context{
|
||||
@@ -351,11 +361,13 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
}
|
||||
}
|
||||
|
||||
if backupStore != nil {
|
||||
if backupStore != nil && len(errs) == 0 {
|
||||
log.Info("Removing backup from backup storage")
|
||||
if err := backupStore.DeleteBackup(backup.Name); err != nil {
|
||||
errs = append(errs, err.Error())
|
||||
}
|
||||
} else if len(errs) > 0 {
|
||||
log.Info("Skipping removal of backup from backup storage due to previous errors")
|
||||
}
|
||||
|
||||
log.Info("Removing restores")
|
||||
@@ -691,3 +703,28 @@ func batchDeleteSnapshots(ctx context.Context, repoEnsurer *repository.Ensurer,
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
// isTarballNotFoundError reports whether err indicates that the backup tarball
// does not exist in object storage (e.g. HTTP 404 / not-found). Such errors are
// permanent and not retryable, so callers should let deletion proceed (skipping
// DeleteItemAction plugins) rather than failing the entire deletion.
//
// Transient errors (throttling, auth failures, network timeouts) return false so
// the deletion fails and can be retried once the storage is reachable again.
func isTarballNotFoundError(err error) bool {
	if err == nil {
		return false
	}
	// Lower-case once for all comparisons.
	msg := strings.ToLower(err.Error())
	// Common "not found" indicators across cloud providers:
	// - "not found" / "does not exist": generic, in-memory object store
	// - "nosuchkey": AWS S3
	// - "blobnotfound": Azure Blob Storage
	// - "objectnotexist": GCS
	return strings.Contains(msg, "not found") ||
		strings.Contains(msg, "does not exist") ||
		strings.Contains(msg, "nosuchkey") ||
		strings.Contains(msg, "blobnotfound") ||
		strings.Contains(msg, "objectnotexist")
}

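A table-driven check makes the provider-specific matching above easy to verify. The test below is an illustrative sketch only (it is not part of the repository's test suite), using messages that merely resemble real provider errors:

func TestIsTarballNotFoundErrorSketch(t *testing.T) {
	cases := []struct {
		name string
		err  error
		want bool
	}{
		{"nil error", nil, false},
		{"generic not found", errors.New("backup tarball not found"), true},
		{"AWS S3 style", errors.New("NoSuchKey: The specified key does not exist"), true},
		{"Azure Blob style", errors.New("BlobNotFound: the specified blob does not exist"), true},
		{"transient throttling", errors.New("SlowDown: please reduce your request rate"), false},
		{"network timeout", errors.New("context deadline exceeded"), false},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, isTarballNotFoundError(tc.err))
		})
	}
}
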
@@ -25,8 +25,6 @@ import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
|
||||
|
||||
"context"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -606,7 +604,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {
|
||||
// Make sure snapshot was deleted
|
||||
assert.Equal(t, 0, td.volumeSnapshotter.SnapshotsTaken.Len())
|
||||
})
|
||||
t.Run("backup is still deleted if downloading tarball fails for DeleteItemAction plugins", func(t *testing.T) {
|
||||
t.Run("backup deletion fails with error when downloading tarball fails for DeleteItemAction plugins", func(t *testing.T) {
|
||||
backup := builder.ForBackup(velerov1api.DefaultNamespace, "foo").Result()
|
||||
backup.UID = "uid"
|
||||
backup.Spec.StorageLocation = "primary"
|
||||
@@ -672,6 +670,89 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {
|
||||
|
||||
td.backupStore.On("GetBackupVolumeSnapshots", input.Spec.BackupName).Return(snapshots, nil)
|
||||
td.backupStore.On("GetBackupContents", input.Spec.BackupName).Return(nil, fmt.Errorf("error downloading tarball"))
|
||||
|
||||
_, err := td.controller.Reconcile(t.Context(), td.req)
|
||||
require.NoError(t, err)
|
||||
|
||||
td.backupStore.AssertCalled(t, "GetBackupContents", input.Spec.BackupName)
|
||||
// DeleteBackup (removing backup data from object storage) must NOT be called
|
||||
// when there are errors, so that the deletion can be retried later.
|
||||
td.backupStore.AssertNotCalled(t, "DeleteBackup", input.Spec.BackupName)
|
||||
|
||||
// the dbr should still exist and be marked Processed with errors
|
||||
res := &velerov1api.DeleteBackupRequest{}
|
||||
err = td.fakeClient.Get(ctx, td.req.NamespacedName, res)
|
||||
require.NoError(t, err, "Expected DBR to still exist after tarball download failure")
|
||||
assert.Equal(t, velerov1api.DeleteBackupRequestPhaseProcessed, res.Status.Phase)
|
||||
require.Len(t, res.Status.Errors, 1)
|
||||
assert.Contains(t, res.Status.Errors[0], "error downloading backup tarball, CSI snapshot cleanup was skipped")
|
||||
|
||||
// backup CR should NOT be deleted
|
||||
err = td.fakeClient.Get(t.Context(), types.NamespacedName{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: backup.Name,
|
||||
}, &velerov1api.Backup{})
|
||||
require.NoError(t, err, "Expected backup CR to still exist after tarball download failure")
|
||||
})
|
||||
t.Run("backup is still deleted if downloading tarball returns a not-found error", func(t *testing.T) {
|
||||
backup := builder.ForBackup(velerov1api.DefaultNamespace, "foo").Result()
|
||||
backup.UID = "uid"
|
||||
backup.Spec.StorageLocation = "primary"
|
||||
|
||||
input := defaultTestDbr()
|
||||
input.Labels = nil
|
||||
|
||||
location := &velerov1api.BackupStorageLocation{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: backup.Namespace,
|
||||
Name: backup.Spec.StorageLocation,
|
||||
},
|
||||
Spec: velerov1api.BackupStorageLocationSpec{
|
||||
Provider: "objStoreProvider",
|
||||
StorageType: velerov1api.StorageType{
|
||||
ObjectStorage: &velerov1api.ObjectStorageLocation{
|
||||
Bucket: "bucket",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: velerov1api.BackupStorageLocationStatus{
|
||||
Phase: velerov1api.BackupStorageLocationPhaseAvailable,
|
||||
},
|
||||
}
|
||||
|
||||
snapshotLocation := &velerov1api.VolumeSnapshotLocation{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: backup.Namespace,
|
||||
Name: "vsl-1",
|
||||
},
|
||||
Spec: velerov1api.VolumeSnapshotLocationSpec{
|
||||
Provider: "provider-1",
|
||||
},
|
||||
}
|
||||
|
||||
td := setupBackupDeletionControllerTest(t, defaultTestDbr(), backup, location, snapshotLocation)
|
||||
td.volumeSnapshotter.SnapshotsTaken.Insert("snap-1")
|
||||
|
||||
snapshots := []*volume.Snapshot{
|
||||
{
|
||||
Spec: volume.SnapshotSpec{
|
||||
Location: "vsl-1",
|
||||
},
|
||||
Status: volume.SnapshotStatus{
|
||||
ProviderSnapshotID: "snap-1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pluginManager := &pluginmocks.Manager{}
|
||||
pluginManager.On("GetVolumeSnapshotter", "provider-1").Return(td.volumeSnapshotter, nil)
|
||||
pluginManager.On("GetDeleteItemActions").Return([]velero.DeleteItemAction{new(mocks.DeleteItemAction)}, nil)
|
||||
pluginManager.On("CleanupClients")
|
||||
td.controller.newPluginManager = func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }
|
||||
|
||||
td.backupStore.On("GetBackupVolumeSnapshots", input.Spec.BackupName).Return(snapshots, nil)
|
||||
// Simulate a 404/not-found error (tarball has already been removed from storage)
|
||||
td.backupStore.On("GetBackupContents", input.Spec.BackupName).Return(nil, fmt.Errorf("key not found"))
|
||||
td.backupStore.On("DeleteBackup", input.Spec.BackupName).Return(nil)
|
||||
|
||||
_, err := td.controller.Reconcile(t.Context(), td.req)
|
||||
@@ -680,30 +761,17 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {
|
||||
td.backupStore.AssertCalled(t, "GetBackupContents", input.Spec.BackupName)
|
||||
td.backupStore.AssertCalled(t, "DeleteBackup", input.Spec.BackupName)
|
||||
|
||||
// the dbr should be deleted
|
||||
// the dbr should be deleted (not-found is treated as permanent, deletion proceeds)
|
||||
res := &velerov1api.DeleteBackupRequest{}
|
||||
err = td.fakeClient.Get(ctx, td.req.NamespacedName, res)
|
||||
assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err)
|
||||
if err == nil {
|
||||
t.Logf("status of the dbr: %s, errors in dbr: %v", res.Status.Phase, res.Status.Errors)
|
||||
}
|
||||
assert.True(t, apierrors.IsNotFound(err), "Expected DBR to be deleted after not-found tarball error, but actual error: %v", err)
|
||||
|
||||
// backup CR should be deleted
|
||||
// backup CR should be deleted because there are no errors in errs
|
||||
err = td.fakeClient.Get(t.Context(), types.NamespacedName{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: backup.Name,
|
||||
}, &velerov1api.Backup{})
|
||||
assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err)
|
||||
|
||||
// leaked CSI snapshot should be deleted
|
||||
err = td.fakeClient.Get(t.Context(), types.NamespacedName{
|
||||
Namespace: "user-ns",
|
||||
Name: "vs-1",
|
||||
}, &snapshotv1api.VolumeSnapshot{})
|
||||
assert.True(t, apierrors.IsNotFound(err), "Expected not found error for the leaked CSI snapshot, but actual value of error: %v", err)
|
||||
|
||||
// Make sure snapshot was deleted
|
||||
assert.Equal(t, 0, td.volumeSnapshotter.SnapshotsTaken.Len())
|
||||
assert.True(t, apierrors.IsNotFound(err), "Expected backup CR to be deleted after not-found tarball error, but actual error: %v", err)
|
||||
})
|
||||
t.Run("Expired request will be deleted if the status is processed", func(t *testing.T) {
|
||||
expired := time.Date(2018, 4, 3, 12, 0, 0, 0, time.UTC)
|
||||
@@ -821,12 +889,12 @@ func TestGetSnapshotsInBackup(t *testing.T) {
|
||||
{
|
||||
VolumeNamespace: "ns-1",
|
||||
SnapshotID: "snap-3",
|
||||
RepositoryType: "restic",
|
||||
RepositoryType: "kopia",
|
||||
},
|
||||
{
|
||||
VolumeNamespace: "ns-1",
|
||||
SnapshotID: "snap-4",
|
||||
RepositoryType: "restic",
|
||||
RepositoryType: "kopia",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -876,7 +944,7 @@ func TestGetSnapshotsInBackup(t *testing.T) {
|
||||
{
|
||||
VolumeNamespace: "ns-1",
|
||||
SnapshotID: "snap-3",
|
||||
RepositoryType: "restic",
|
||||
RepositoryType: "kopia",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -43,7 +43,6 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/constant"
|
||||
"github.com/vmware-tanzu/velero/pkg/label"
|
||||
"github.com/vmware-tanzu/velero/pkg/metrics"
|
||||
repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config"
|
||||
"github.com/vmware-tanzu/velero/pkg/repository/maintenance"
|
||||
repomanager "github.com/vmware-tanzu/velero/pkg/repository/manager"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
@@ -53,9 +52,11 @@ import (
|
||||
const (
|
||||
repoSyncPeriod = 5 * time.Minute
|
||||
defaultMaintainFrequency = 7 * 24 * time.Hour
|
||||
defaultMaintenanceStatusQueueLength = 3
|
||||
defaultMaintenanceStatusQueueLength = 25
|
||||
)
|
||||
|
||||
var maintenanceStatusQueueLength = defaultMaintenanceStatusQueueLength
|
||||
|
||||
type BackupRepoReconciler struct {
|
||||
client.Client
|
||||
namespace string
|
||||
@@ -238,6 +239,10 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
if backupRepo.Spec.RepositoryType != velerov1api.BackupRepositoryTypeKopia {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
bsl, bslErr := r.getBSL(ctx, backupRepo)
|
||||
if bslErr != nil {
|
||||
log.WithError(bslErr).Error("Fail to get BSL for BackupRepository. Skip reconciling.")
|
||||
@@ -245,7 +250,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
||||
}
|
||||
|
||||
if backupRepo.Status.Phase == "" || backupRepo.Status.Phase == velerov1api.BackupRepositoryPhaseNew {
|
||||
if err := r.initializeRepo(ctx, backupRepo, bsl, log); err != nil {
|
||||
if err := r.initializeRepo(ctx, backupRepo, log); err != nil {
|
||||
log.WithError(err).Error("error initialize repository")
|
||||
return ctrl.Result{}, errors.WithStack(err)
|
||||
}
|
||||
@@ -263,7 +268,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
||||
|
||||
switch backupRepo.Status.Phase {
|
||||
case velerov1api.BackupRepositoryPhaseNotReady:
|
||||
ready, err := r.checkNotReadyRepo(ctx, backupRepo, bsl, log)
|
||||
ready, err := r.checkNotReadyRepo(ctx, backupRepo, log)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
} else if !ready {
|
||||
@@ -311,35 +316,9 @@ func (r *BackupRepoReconciler) getBSL(ctx context.Context, req *velerov1api.Back
|
||||
return loc, nil
|
||||
}
|
||||
|
||||
func (r *BackupRepoReconciler) getIdentifierByBSL(bsl *velerov1api.BackupStorageLocation, req *velerov1api.BackupRepository) (string, error) {
|
||||
repoIdentifier, err := repoconfig.GetRepoIdentifier(bsl, req.Spec.VolumeNamespace)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error to get identifier for repo %s", req.Name)
|
||||
}
|
||||
|
||||
return repoIdentifier, nil
|
||||
}
|
||||
|
||||
func (r *BackupRepoReconciler) initializeRepo(ctx context.Context, req *velerov1api.BackupRepository, bsl *velerov1api.BackupStorageLocation, log logrus.FieldLogger) error {
|
||||
func (r *BackupRepoReconciler) initializeRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
|
||||
log.WithField("repoConfig", r.backupRepoConfig).Info("Initializing backup repository")
|
||||
|
||||
var repoIdentifier string
|
||||
// Only get restic identifier for restic repositories
|
||||
if req.Spec.RepositoryType == "" || req.Spec.RepositoryType == velerov1api.BackupRepositoryTypeRestic {
|
||||
var err error
|
||||
repoIdentifier, err = r.getIdentifierByBSL(bsl, req)
|
||||
if err != nil {
|
||||
return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
|
||||
rr.Status.Message = err.Error()
|
||||
rr.Status.Phase = velerov1api.BackupRepositoryPhaseNotReady
|
||||
|
||||
if rr.Spec.MaintenanceFrequency.Duration <= 0 {
|
||||
rr.Spec.MaintenanceFrequency = metav1.Duration{Duration: r.getRepositoryMaintenanceFrequency(req)}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
config, err := getBackupRepositoryConfig(ctx, r, r.backupRepoConfig, r.namespace, req.Name, req.Spec.RepositoryType, log)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Failed to get repo config, repo config is ignored")
|
||||
@@ -349,11 +328,6 @@ func (r *BackupRepoReconciler) initializeRepo(ctx context.Context, req *velerov1
|
||||
|
||||
// defaulting - if the patch fails, return an error so the item is returned to the queue
|
||||
if err := r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
|
||||
// Only set ResticIdentifier for restic repositories
|
||||
if rr.Spec.RepositoryType == "" || rr.Spec.RepositoryType == velerov1api.BackupRepositoryTypeRestic {
|
||||
rr.Spec.ResticIdentifier = repoIdentifier
|
||||
}
|
||||
|
||||
if rr.Spec.MaintenanceFrequency.Duration <= 0 {
|
||||
rr.Spec.MaintenanceFrequency = metav1.Duration{Duration: r.getRepositoryMaintenanceFrequency(req)}
|
||||
}
|
||||
@@ -397,7 +371,7 @@ func ensureRepo(repo *velerov1api.BackupRepository, repoManager repomanager.Mana
|
||||
}
|
||||
|
||||
func (r *BackupRepoReconciler) recallMaintenance(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
|
||||
history, err := maintenance.WaitAllJobsComplete(ctx, r.Client, req, defaultMaintenanceStatusQueueLength, log)
|
||||
history, err := maintenance.WaitAllJobsComplete(ctx, r.Client, req, maintenanceStatusQueueLength, log)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error waiting incomplete repo maintenance job for repo %s", req.Name)
|
||||
}
|
||||
@@ -455,7 +429,7 @@ func consolidateHistory(coming, cur []velerov1api.BackupRepositoryMaintenanceSta
|
||||
|
||||
truncated := []velerov1api.BackupRepositoryMaintenanceStatus{}
|
||||
for consolidator.Len() > 0 {
|
||||
if len(truncated) == defaultMaintenanceStatusQueueLength {
|
||||
if len(truncated) == maintenanceStatusQueueLength {
|
||||
break
|
||||
}
|
||||
|
||||
@@ -565,8 +539,8 @@ func updateRepoMaintenanceHistory(repo *velerov1api.BackupRepository, result vel
|
||||
}
|
||||
|
||||
startingPos := 0
|
||||
if len(repo.Status.RecentMaintenance) >= defaultMaintenanceStatusQueueLength {
|
||||
startingPos = len(repo.Status.RecentMaintenance) - defaultMaintenanceStatusQueueLength + 1
|
||||
if len(repo.Status.RecentMaintenance) >= maintenanceStatusQueueLength {
|
||||
startingPos = len(repo.Status.RecentMaintenance) - maintenanceStatusQueueLength + 1
|
||||
}
|
||||
|
||||
repo.Status.RecentMaintenance = append(repo.Status.RecentMaintenance[startingPos:], latest)
|
||||
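The truncation above keeps RecentMaintenance bounded at maintenanceStatusQueueLength entries by dropping the oldest results before appending the latest one. A minimal standalone sketch of the same sliding-window append, with illustrative names and a small limit:

package main

import "fmt"

// appendBounded keeps at most limit entries, dropping the oldest,
// mirroring how updateRepoMaintenanceHistory truncates the history.
func appendBounded(history []string, latest string, limit int) []string {
	start := 0
	if len(history) >= limit {
		start = len(history) - limit + 1
	}
	return append(history[start:], latest)
}

func main() {
	h := []string{"m1", "m2", "m3"}
	fmt.Println(appendBounded(h, "m4", 3)) // [m2 m3 m4]
}
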
@@ -576,25 +550,9 @@ func dueForMaintenance(req *velerov1api.BackupRepository, now time.Time) bool {
|
||||
return req.Status.LastMaintenanceTime == nil || req.Status.LastMaintenanceTime.Add(req.Spec.MaintenanceFrequency.Duration).Before(now)
|
||||
}
|
||||
|
||||
func (r *BackupRepoReconciler) checkNotReadyRepo(ctx context.Context, req *velerov1api.BackupRepository, bsl *velerov1api.BackupStorageLocation, log logrus.FieldLogger) (bool, error) {
|
||||
func (r *BackupRepoReconciler) checkNotReadyRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) (bool, error) {
|
||||
log.Info("Checking backup repository for readiness")
|
||||
|
||||
// Only check and update restic identifier for restic repositories
|
||||
if req.Spec.RepositoryType == "" || req.Spec.RepositoryType == velerov1api.BackupRepositoryTypeRestic {
|
||||
repoIdentifier, err := r.getIdentifierByBSL(bsl, req)
|
||||
if err != nil {
|
||||
return false, r.patchBackupRepository(ctx, req, repoNotReady(err.Error()))
|
||||
}
|
||||
|
||||
if repoIdentifier != req.Spec.ResticIdentifier {
|
||||
if err := r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
|
||||
rr.Spec.ResticIdentifier = repoIdentifier
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// we need to ensure it (first check, if check fails, attempt to init)
|
||||
// because we don't know if it's been successfully initialized yet.
|
||||
if err := ensureRepo(req, r.repositoryManager); err != nil {
|
||||
|
||||
@@ -98,32 +98,6 @@ func TestPatchBackupRepository(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckNotReadyRepo(t *testing.T) {
|
||||
// Test for restic repository
|
||||
t.Run("restic repository", func(t *testing.T) {
|
||||
rr := mockBackupRepositoryCR()
|
||||
rr.Spec.BackupStorageLocation = "default"
|
||||
rr.Spec.ResticIdentifier = "fake-identifier"
|
||||
rr.Spec.VolumeNamespace = "volume-ns-1"
|
||||
rr.Spec.RepositoryType = velerov1api.BackupRepositoryTypeRestic
|
||||
reconciler := mockBackupRepoReconciler(t, "PrepareRepo", rr, nil)
|
||||
err := reconciler.Client.Create(t.Context(), rr)
|
||||
require.NoError(t, err)
|
||||
location := velerov1api.BackupStorageLocation{
|
||||
Spec: velerov1api.BackupStorageLocationSpec{
|
||||
Config: map[string]string{"resticRepoPrefix": "s3:test.amazonaws.com/bucket/restic"},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: rr.Spec.BackupStorageLocation,
|
||||
},
|
||||
}
|
||||
|
||||
_, err = reconciler.checkNotReadyRepo(t.Context(), rr, &location, reconciler.logger)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase)
|
||||
assert.Equal(t, "s3:test.amazonaws.com/bucket/restic/volume-ns-1", rr.Spec.ResticIdentifier)
|
||||
})
|
||||
|
||||
// Test for kopia repository
|
||||
t.Run("kopia repository", func(t *testing.T) {
|
||||
rr := mockBackupRepositoryCR()
|
||||
@@ -133,48 +107,13 @@ func TestCheckNotReadyRepo(t *testing.T) {
|
||||
reconciler := mockBackupRepoReconciler(t, "PrepareRepo", rr, nil)
|
||||
err := reconciler.Client.Create(t.Context(), rr)
|
||||
require.NoError(t, err)
|
||||
location := velerov1api.BackupStorageLocation{
|
||||
Spec: velerov1api.BackupStorageLocationSpec{
|
||||
Config: map[string]string{"resticRepoPrefix": "s3:test.amazonaws.com/bucket/restic"},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: rr.Spec.BackupStorageLocation,
|
||||
},
|
||||
}
|
||||
|
||||
_, err = reconciler.checkNotReadyRepo(t.Context(), rr, &location, reconciler.logger)
|
||||
_, err = reconciler.checkNotReadyRepo(t.Context(), rr, reconciler.logger)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase)
|
||||
// ResticIdentifier should remain empty for kopia
|
||||
assert.Empty(t, rr.Spec.ResticIdentifier)
|
||||
})
|
||||
|
||||
// Test for empty repository type (defaults to restic)
|
||||
t.Run("empty repository type", func(t *testing.T) {
|
||||
rr := mockBackupRepositoryCR()
|
||||
rr.Spec.BackupStorageLocation = "default"
|
||||
rr.Spec.ResticIdentifier = "fake-identifier"
|
||||
rr.Spec.VolumeNamespace = "volume-ns-1"
|
||||
// Deliberately leave RepositoryType empty
|
||||
reconciler := mockBackupRepoReconciler(t, "PrepareRepo", rr, nil)
|
||||
err := reconciler.Client.Create(t.Context(), rr)
|
||||
require.NoError(t, err)
|
||||
location := velerov1api.BackupStorageLocation{
|
||||
Spec: velerov1api.BackupStorageLocationSpec{
|
||||
Config: map[string]string{"resticRepoPrefix": "s3:test.amazonaws.com/bucket/restic"},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: rr.Spec.BackupStorageLocation,
|
||||
},
|
||||
}
|
||||
|
||||
_, err = reconciler.checkNotReadyRepo(t.Context(), rr, &location, reconciler.logger)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase)
|
||||
assert.Equal(t, "s3:test.amazonaws.com/bucket/restic/volume-ns-1", rr.Spec.ResticIdentifier)
|
||||
})
|
||||
}
|
||||
|
||||
func startMaintenanceJobFail(client.Client, context.Context, *velerov1api.BackupRepository, string, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error) {
|
||||
@@ -463,17 +402,8 @@ func TestInitializeRepo(t *testing.T) {
|
||||
reconciler := mockBackupRepoReconciler(t, "PrepareRepo", rr, nil)
|
||||
err := reconciler.Client.Create(t.Context(), rr)
|
||||
require.NoError(t, err)
|
||||
location := velerov1api.BackupStorageLocation{
|
||||
Spec: velerov1api.BackupStorageLocationSpec{
|
||||
Config: map[string]string{"resticRepoPrefix": "s3:test.amazonaws.com/bucket/restic"},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: rr.Spec.BackupStorageLocation,
|
||||
},
|
||||
}
|
||||
|
||||
err = reconciler.initializeRepo(t.Context(), rr, &location, reconciler.logger)
|
||||
err = reconciler.initializeRepo(t.Context(), rr, reconciler.logger)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase)
|
||||
}
|
||||
@@ -999,6 +929,8 @@ func TestUpdateRepoMaintenanceHistory(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
maintenanceStatusQueueLength = 3
|
||||
|
||||
updateRepoMaintenanceHistory(test.backupRepo, test.result, &metav1.Time{Time: standardTime}, &metav1.Time{Time: standardTime.Add(time.Hour)}, "fake-message-0")
|
||||
|
||||
for at := range test.backupRepo.Status.RecentMaintenance {
|
||||
@@ -1494,7 +1426,7 @@ func TestDeleteOldMaintenanceJobWithConfigMap(t *testing.T) {
|
||||
MaintenanceFrequency: metav1.Duration{Duration: testMaintenanceFrequency},
|
||||
BackupStorageLocation: "default",
|
||||
VolumeNamespace: "test-ns",
|
||||
RepositoryType: "restic",
|
||||
RepositoryType: "kopia",
|
||||
},
|
||||
Status: velerov1api.BackupRepositoryStatus{
|
||||
Phase: velerov1api.BackupRepositoryPhaseReady,
|
||||
@@ -1531,7 +1463,7 @@ func TestDeleteOldMaintenanceJobWithConfigMap(t *testing.T) {
|
||||
MaintenanceFrequency: metav1.Duration{Duration: testMaintenanceFrequency},
|
||||
BackupStorageLocation: "default",
|
||||
VolumeNamespace: "test-ns",
|
||||
RepositoryType: "restic",
|
||||
RepositoryType: "kopia",
|
||||
},
|
||||
Status: velerov1api.BackupRepositoryStatus{
|
||||
Phase: velerov1api.BackupRepositoryPhaseReady,
|
||||
@@ -1550,8 +1482,8 @@ func TestDeleteOldMaintenanceJobWithConfigMap(t *testing.T) {
|
||||
Name: "repo-maintenance-job-config",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"global": `{"keepLatestMaintenanceJobs": 5}`,
|
||||
"test-ns-default-restic": `{"keepLatestMaintenanceJobs": 2}`,
|
||||
"global": `{"keepLatestMaintenanceJobs": 5}`,
|
||||
"test-ns-default-kopia": `{"keepLatestMaintenanceJobs": 2}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1605,58 +1537,6 @@ func TestInitializeRepoWithRepositoryTypes(t *testing.T) {
|
||||
corev1api.AddToScheme(scheme)
|
||||
velerov1api.AddToScheme(scheme)
|
||||
|
||||
// Test for restic repository
|
||||
t.Run("restic repository", func(t *testing.T) {
|
||||
rr := mockBackupRepositoryCR()
|
||||
rr.Spec.BackupStorageLocation = "default"
|
||||
rr.Spec.VolumeNamespace = "volume-ns-1"
|
||||
rr.Spec.RepositoryType = velerov1api.BackupRepositoryTypeRestic
|
||||
|
||||
location := &velerov1api.BackupStorageLocation{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: "default",
|
||||
},
|
||||
Spec: velerov1api.BackupStorageLocationSpec{
|
||||
Provider: "aws",
|
||||
StorageType: velerov1api.StorageType{
|
||||
ObjectStorage: &velerov1api.ObjectStorageLocation{
|
||||
Bucket: "test-bucket",
|
||||
Prefix: "test-prefix",
|
||||
},
|
||||
},
|
||||
Config: map[string]string{
|
||||
"region": "us-east-1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
fakeClient := clientFake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(rr, location).Build()
|
||||
mgr := &repomokes.Manager{}
|
||||
mgr.On("PrepareRepo", rr).Return(nil)
|
||||
|
||||
reconciler := NewBackupRepoReconciler(
|
||||
velerov1api.DefaultNamespace,
|
||||
velerotest.NewLogger(),
|
||||
fakeClient,
|
||||
mgr,
|
||||
testMaintenanceFrequency,
|
||||
"",
|
||||
"",
|
||||
logrus.InfoLevel,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
err := reconciler.initializeRepo(t.Context(), rr, location, reconciler.logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify ResticIdentifier is set for restic
|
||||
assert.NotEmpty(t, rr.Spec.ResticIdentifier)
|
||||
assert.Contains(t, rr.Spec.ResticIdentifier, "volume-ns-1")
|
||||
assert.Equal(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase)
|
||||
})
|
||||
|
||||
// Test for kopia repository
|
||||
t.Run("kopia repository", func(t *testing.T) {
|
||||
rr := mockBackupRepositoryCR()
|
||||
@@ -1700,65 +1580,13 @@ func TestInitializeRepoWithRepositoryTypes(t *testing.T) {
|
||||
nil,
|
||||
)
|
||||
|
||||
err := reconciler.initializeRepo(t.Context(), rr, location, reconciler.logger)
|
||||
err := reconciler.initializeRepo(t.Context(), rr, reconciler.logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify ResticIdentifier is NOT set for kopia
|
||||
assert.Empty(t, rr.Spec.ResticIdentifier)
|
||||
assert.Equal(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase)
|
||||
})
|
||||
|
||||
// Test for empty repository type (defaults to restic)
|
||||
t.Run("empty repository type", func(t *testing.T) {
|
||||
rr := mockBackupRepositoryCR()
|
||||
rr.Spec.BackupStorageLocation = "default"
|
||||
rr.Spec.VolumeNamespace = "volume-ns-1"
|
||||
// Leave RepositoryType empty
|
||||
|
||||
location := &velerov1api.BackupStorageLocation{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: "default",
|
||||
},
|
||||
Spec: velerov1api.BackupStorageLocationSpec{
|
||||
Provider: "aws",
|
||||
StorageType: velerov1api.StorageType{
|
||||
ObjectStorage: &velerov1api.ObjectStorageLocation{
|
||||
Bucket: "test-bucket",
|
||||
Prefix: "test-prefix",
|
||||
},
|
||||
},
|
||||
Config: map[string]string{
|
||||
"region": "us-east-1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
fakeClient := clientFake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(rr, location).Build()
|
||||
mgr := &repomokes.Manager{}
|
||||
mgr.On("PrepareRepo", rr).Return(nil)
|
||||
|
||||
reconciler := NewBackupRepoReconciler(
|
||||
velerov1api.DefaultNamespace,
|
||||
velerotest.NewLogger(),
|
||||
fakeClient,
|
||||
mgr,
|
||||
testMaintenanceFrequency,
|
||||
"",
|
||||
"",
|
||||
logrus.InfoLevel,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
err := reconciler.initializeRepo(t.Context(), rr, location, reconciler.logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify ResticIdentifier is set when type is empty (defaults to restic)
|
||||
assert.NotEmpty(t, rr.Spec.ResticIdentifier)
|
||||
assert.Contains(t, rr.Spec.ResticIdentifier, "volume-ns-1")
|
||||
assert.Equal(t, velerov1api.BackupRepositoryPhaseReady, rr.Status.Phase)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRepoMaintenanceMetricsRecording(t *testing.T) {
|
||||
|
||||
@@ -360,5 +360,5 @@ func (c *PodVolumeRestoreReconcilerLegacy) closeDataPath(ctx context.Context, pv
|
||||
}
|
||||
|
||||
func IsLegacyPVR(pvr *velerov1api.PodVolumeRestore) bool {
|
||||
return pvr.Spec.UploaderType == uploader.ResticType
|
||||
return pvr.Spec.UploaderType == "restic"
|
||||
}
|
||||
|
||||
@@ -529,6 +529,7 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu
|
||||
LabelSelector: labels.Set(map[string]string{
|
||||
api.BackupNameLabel: label.GetValidName(restore.Spec.BackupName),
|
||||
}).AsSelector(),
|
||||
Namespace: restore.Namespace,
|
||||
}
|
||||
|
||||
podVolumeBackupList := &api.PodVolumeBackupList{}
|
||||
|
||||
@@ -238,6 +238,8 @@ func TestRestoreReconcile(t *testing.T) {
|
||||
expectedFinalPhase string
|
||||
addValidFinalizer bool
|
||||
emptyVolumeInfo bool
|
||||
podVolumeBackups []*velerov1api.PodVolumeBackup
|
||||
expectedPVBCount int
|
||||
}{
|
||||
{
|
||||
name: "restore with both namespace in both includedNamespaces and excludedNamespaces fails validation",
|
||||
@@ -357,6 +359,22 @@ func TestRestoreReconcile(t *testing.T) {
|
||||
expectedCompletedTime: ×tamp,
|
||||
expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", velerov1api.RestorePhaseInProgress).Result(),
|
||||
},
|
||||
{
|
||||
name: "valid restore gets executed and only includes pod volume backups from restore namespace",
|
||||
location: defaultStorageLocation,
|
||||
restore: NewRestore("foo", "bar2", "backup-1", "ns-1", "", velerov1api.RestorePhaseNew).Result(),
|
||||
backup: defaultBackup().StorageLocation("default").Result(),
|
||||
podVolumeBackups: []*velerov1api.PodVolumeBackup{
|
||||
builder.ForPodVolumeBackup("foo", "pvb-1").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(),
|
||||
builder.ForPodVolumeBackup("other-ns", "pvb-2").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(),
|
||||
},
|
||||
expectedPVBCount: 1,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(velerov1api.RestorePhaseInProgress),
|
||||
expectedStartTime: ×tamp,
|
||||
expectedCompletedTime: ×tamp,
|
||||
expectedRestorerCall: NewRestore("foo", "bar2", "backup-1", "ns-1", "", velerov1api.RestorePhaseInProgress).Result(),
|
||||
},
|
||||
{
|
||||
name: "restoration of nodes is not supported",
|
||||
location: defaultStorageLocation,
|
||||
@@ -501,6 +519,13 @@ func TestRestoreReconcile(t *testing.T) {
|
||||
defaultStorageLocation.ObjectMeta.ResourceVersion = ""
|
||||
}()
|
||||
|
||||
if test.podVolumeBackups != nil {
|
||||
for _, pvb := range test.podVolumeBackups {
|
||||
err := fakeClient.Create(t.Context(), pvb)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
r := NewRestoreReconciler(
|
||||
t.Context(),
|
||||
velerov1api.DefaultNamespace,
|
||||
@@ -670,6 +695,10 @@ func TestRestoreReconcile(t *testing.T) {
|
||||
// the mock stores the pointer, which gets modified after
|
||||
assert.Equal(t, test.expectedRestorerCall.Spec, restorer.calledWithArg.Spec)
|
||||
assert.Equal(t, test.expectedRestorerCall.Status.Phase, restorer.calledWithArg.Status.Phase)
|
||||
|
||||
if test.podVolumeBackups != nil {
|
||||
assert.Len(t, restorer.calledWithPVBs, test.expectedPVBCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1021,8 +1050,9 @@ func NewRestore(ns, name, backup, includeNS, includeResource string, phase veler
|
||||
|
||||
type fakeRestorer struct {
|
||||
mock.Mock
|
||||
calledWithArg velerov1api.Restore
|
||||
kbClient client.Client
|
||||
calledWithArg velerov1api.Restore
|
||||
calledWithPVBs []*velerov1api.PodVolumeBackup
|
||||
kbClient client.Client
|
||||
}
|
||||
|
||||
func (r *fakeRestorer) Restore(
|
||||
@@ -1045,6 +1075,7 @@ func (r *fakeRestorer) RestoreWithResolvers(req *pkgrestore.Request,
|
||||
r.kbClient, volumeSnapshotterGetter)
|
||||
|
||||
r.calledWithArg = *req.Restore
|
||||
r.calledWithPVBs = req.PodVolumeBackups
|
||||
|
||||
return res.Get(0).(results.Result), res.Get(1).(results.Result)
|
||||
}
|
||||
|
||||
@@ -22,6 +22,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
volumegroupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
@@ -43,6 +45,7 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/persistence"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/results"
|
||||
)
|
||||
@@ -291,13 +294,16 @@ type finalizerContext struct {
|
||||
resourceTimeout time.Duration
|
||||
}
|
||||
|
||||
func (ctx *finalizerContext) execute() (results.Result, results.Result) { //nolint:unparam //temporarily ignore the lint report: result 0 is always nil (unparam)
|
||||
func (ctx *finalizerContext) execute() (results.Result, results.Result) {
|
||||
warnings, errs := results.Result{}, results.Result{}
|
||||
|
||||
// implement finalization tasks
|
||||
pdpErrs := ctx.patchDynamicPVWithVolumeInfo()
|
||||
errs.Merge(&pdpErrs)
|
||||
|
||||
vgscWarnings := ctx.cleanupStubVGSC()
|
||||
warnings.Merge(&vgscWarnings)
|
||||
|
||||
rehErrs := ctx.WaitRestoreExecHook()
|
||||
errs.Merge(&rehErrs)
|
||||
|
||||
@@ -443,6 +449,93 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result
|
||||
return errs
|
||||
}
|
||||
|
||||
// cleanupStubVGSC deletes stub VolumeGroupSnapshotContent objects that were
|
||||
// created during restore to satisfy CSI controller validation. These stubs are
|
||||
// labeled with velero.io/restore-name for identification.
|
||||
// Before deleting each VGSC, it waits for all related VolumeSnapshotContents
|
||||
// to become ReadyToUse, since the CSI controller needs the VGSC during VSC reconciliation.
|
||||
func (ctx *finalizerContext) cleanupStubVGSC() (warnings results.Result) {
|
||||
ctx.logger.Info("cleaning up stub VolumeGroupSnapshotContents")
|
||||
|
||||
vgscList := &volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentList{}
|
||||
err := ctx.crClient.List(
|
||||
context.Background(),
|
||||
vgscList,
|
||||
client.MatchingLabels{velerov1api.RestoreNameLabel: ctx.restore.Name},
|
||||
)
|
||||
if err != nil {
|
||||
// If the CRD is not installed, listing will fail. This is expected
|
||||
// on clusters without VolumeGroupSnapshot support, so treat as warning.
|
||||
ctx.logger.WithError(err).Warn("failed to list stub VolumeGroupSnapshotContents, skipping cleanup")
|
||||
warnings.Add("cluster", errors.Wrap(err, "failed to list stub VolumeGroupSnapshotContents"))
|
||||
return warnings
|
||||
}
|
||||
|
||||
if len(vgscList.Items) == 0 {
|
||||
ctx.logger.Info("no stub VolumeGroupSnapshotContents to clean up")
|
||||
return warnings
|
||||
}
|
||||
|
||||
for i := range vgscList.Items {
|
||||
vgsc := &vgscList.Items[i]
|
||||
log := ctx.logger.WithField("vgsc", vgsc.Name)
|
||||
|
||||
// Collect the snapshot handles associated with this VGSC
|
||||
snapshotHandles := map[string]bool{}
|
||||
if vgsc.Spec.Source.GroupSnapshotHandles != nil {
|
||||
for _, h := range vgsc.Spec.Source.GroupSnapshotHandles.VolumeSnapshotHandles {
|
||||
snapshotHandles[h] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(snapshotHandles) > 0 {
|
||||
// Wait for related VSCs to become ReadyToUse before deleting the VGSC
|
||||
log.Infof("waiting for %d related VolumeSnapshotContents to become ReadyToUse", len(snapshotHandles))
|
||||
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, ctx.resourceTimeout, true, func(context.Context) (bool, error) {
|
||||
vscList := &snapshotv1api.VolumeSnapshotContentList{}
|
||||
if err := ctx.crClient.List(context.Background(), vscList, client.MatchingLabels{velerov1api.RestoreNameLabel: ctx.restore.Name}); err != nil {
|
||||
log.WithError(err).Warn("failed to list VolumeSnapshotContents")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for j := range vscList.Items {
|
||||
vsc := &vscList.Items[j]
|
||||
if vsc.Spec.Source.SnapshotHandle == nil {
|
||||
continue
|
||||
}
|
||||
if !snapshotHandles[*vsc.Spec.Source.SnapshotHandle] {
|
||||
continue
|
||||
}
|
||||
// This VSC is related to our VGSC
|
||||
if vsc.Status == nil || !boolptr.IsSetToTrue(vsc.Status.ReadyToUse) {
|
||||
log.Debugf("VolumeSnapshotContent %s not yet ReadyToUse", vsc.Name)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("timed out waiting for related VolumeSnapshotContents to become ReadyToUse, proceeding with VGSC deletion")
|
||||
warnings.Add("cluster", errors.Wrapf(err, "timed out waiting for VSCs related to VGSC %s", vgsc.Name))
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("deleting stub VolumeGroupSnapshotContent")
|
||||
if err := ctx.crClient.Delete(context.Background(), vgsc); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("stub VolumeGroupSnapshotContent already deleted")
|
||||
continue
|
||||
}
|
||||
log.WithError(err).Warn("failed to delete stub VolumeGroupSnapshotContent")
|
||||
warnings.Add("cluster", errors.Wrapf(err, "failed to delete stub VolumeGroupSnapshotContent %s", vgsc.Name))
|
||||
} else {
|
||||
log.Info("deleted stub VolumeGroupSnapshotContent")
|
||||
}
|
||||
}
|
||||
|
||||
return warnings
|
||||
}
|
||||
|
||||
func needPatch(newPV *corev1api.PersistentVolume, pvInfo *volume.PVInfo) bool {
|
||||
if newPV.Spec.PersistentVolumeReclaimPolicy != corev1api.PersistentVolumeReclaimPolicy(pvInfo.ReclaimPolicy) {
|
||||
return true
|
||||
|
||||
@@ -22,6 +22,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
volumegroupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
@@ -45,6 +47,7 @@ import (
|
||||
pluginmocks "github.com/vmware-tanzu/velero/pkg/plugin/mocks"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
pkgUtilKubeMocks "github.com/vmware-tanzu/velero/pkg/util/kube/mocks"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/results"
|
||||
)
|
||||
@@ -739,3 +742,253 @@ func TestRestoreOperationList(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanupStubVGSC(t *testing.T) {
|
||||
snapshotHandle1 := "snap-handle-1"
|
||||
snapshotHandle2 := "snap-handle-2"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
restore *velerov1api.Restore
|
||||
existingVGSCs []*volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent
|
||||
existingVSCs []*snapshotv1api.VolumeSnapshotContent
|
||||
expectedRemaining int
|
||||
expectedWarnings bool
|
||||
}{
|
||||
{
|
||||
name: "no stub VGSCs to clean up",
|
||||
restore: builder.ForRestore(velerov1api.DefaultNamespace, "restore-1").Result(),
|
||||
existingVGSCs: nil,
|
||||
expectedRemaining: 0,
|
||||
expectedWarnings: false,
|
||||
},
|
||||
{
|
||||
name: "single stub VGSC deleted after VSCs are ready",
|
||||
restore: builder.ForRestore(velerov1api.DefaultNamespace, "restore-1").Result(),
|
||||
existingVGSCs: []*volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vgsc-stub-1",
|
||||
Labels: map[string]string{
|
||||
velerov1api.RestoreNameLabel: "restore-1",
|
||||
},
|
||||
},
|
||||
Spec: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSpec{
|
||||
Driver: "rbd.csi.ceph.com",
|
||||
Source: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSource{
|
||||
GroupSnapshotHandles: &volumegroupsnapshotv1beta2.GroupSnapshotHandles{
|
||||
VolumeGroupSnapshotHandle: "vgs-handle-1",
|
||||
VolumeSnapshotHandles: []string{snapshotHandle1},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingVSCs: []*snapshotv1api.VolumeSnapshotContent{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vsc-1",
|
||||
Labels: map[string]string{
|
||||
velerov1api.RestoreNameLabel: "restore-1",
|
||||
},
|
||||
},
|
||||
Spec: snapshotv1api.VolumeSnapshotContentSpec{
|
||||
Driver: "rbd.csi.ceph.com",
|
||||
DeletionPolicy: snapshotv1api.VolumeSnapshotContentRetain,
|
||||
Source: snapshotv1api.VolumeSnapshotContentSource{
|
||||
SnapshotHandle: &snapshotHandle1,
|
||||
},
|
||||
VolumeSnapshotRef: corev1api.ObjectReference{
|
||||
Name: "vs-1",
|
||||
Namespace: "ns-1",
|
||||
},
|
||||
},
|
||||
Status: &snapshotv1api.VolumeSnapshotContentStatus{
|
||||
ReadyToUse: boolptr.True(),
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRemaining: 0,
|
||||
expectedWarnings: false,
|
||||
},
|
||||
{
|
||||
name: "multiple stub VGSCs deleted",
|
||||
restore: builder.ForRestore(velerov1api.DefaultNamespace, "restore-1").Result(),
|
||||
existingVGSCs: []*volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vgsc-stub-1",
|
||||
Labels: map[string]string{
|
||||
velerov1api.RestoreNameLabel: "restore-1",
|
||||
},
|
||||
},
|
||||
Spec: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSpec{
|
||||
Driver: "rbd.csi.ceph.com",
|
||||
Source: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSource{
|
||||
GroupSnapshotHandles: &volumegroupsnapshotv1beta2.GroupSnapshotHandles{
|
||||
VolumeGroupSnapshotHandle: "vgs-handle-1",
|
||||
VolumeSnapshotHandles: []string{snapshotHandle1},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vgsc-stub-2",
|
||||
Labels: map[string]string{
|
||||
velerov1api.RestoreNameLabel: "restore-1",
|
||||
},
|
||||
},
|
||||
Spec: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSpec{
|
||||
Driver: "rbd.csi.ceph.com",
|
||||
Source: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSource{
|
||||
GroupSnapshotHandles: &volumegroupsnapshotv1beta2.GroupSnapshotHandles{
|
||||
VolumeGroupSnapshotHandle: "vgs-handle-2",
|
||||
VolumeSnapshotHandles: []string{snapshotHandle2},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingVSCs: []*snapshotv1api.VolumeSnapshotContent{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vsc-1",
|
||||
Labels: map[string]string{
|
||||
velerov1api.RestoreNameLabel: "restore-1",
|
||||
},
|
||||
},
|
||||
Spec: snapshotv1api.VolumeSnapshotContentSpec{
|
||||
Driver: "rbd.csi.ceph.com",
|
||||
DeletionPolicy: snapshotv1api.VolumeSnapshotContentRetain,
|
||||
Source: snapshotv1api.VolumeSnapshotContentSource{
|
||||
SnapshotHandle: &snapshotHandle1,
|
||||
},
|
||||
VolumeSnapshotRef: corev1api.ObjectReference{
|
||||
Name: "vs-1",
|
||||
Namespace: "ns-1",
|
||||
},
|
||||
},
|
||||
Status: &snapshotv1api.VolumeSnapshotContentStatus{
|
||||
ReadyToUse: boolptr.True(),
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vsc-2",
|
||||
Labels: map[string]string{
|
||||
velerov1api.RestoreNameLabel: "restore-1",
|
||||
},
|
||||
},
|
||||
Spec: snapshotv1api.VolumeSnapshotContentSpec{
|
||||
Driver: "rbd.csi.ceph.com",
|
||||
DeletionPolicy: snapshotv1api.VolumeSnapshotContentRetain,
|
||||
Source: snapshotv1api.VolumeSnapshotContentSource{
|
||||
SnapshotHandle: &snapshotHandle2,
|
||||
},
|
||||
VolumeSnapshotRef: corev1api.ObjectReference{
|
||||
Name: "vs-2",
|
||||
Namespace: "ns-1",
|
||||
},
|
||||
},
|
||||
Status: &snapshotv1api.VolumeSnapshotContentStatus{
|
||||
ReadyToUse: boolptr.True(),
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRemaining: 0,
|
||||
expectedWarnings: false,
|
||||
},
|
||||
{
|
||||
name: "VGSCs from different restore are not deleted",
|
||||
restore: builder.ForRestore(velerov1api.DefaultNamespace, "restore-1").Result(),
|
||||
existingVGSCs: []*volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vgsc-stub-mine",
|
||||
Labels: map[string]string{
|
||||
velerov1api.RestoreNameLabel: "restore-1",
|
||||
},
|
||||
},
|
||||
Spec: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSpec{
|
||||
Driver: "rbd.csi.ceph.com",
|
||||
Source: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vgsc-stub-other",
|
||||
Labels: map[string]string{
|
||||
velerov1api.RestoreNameLabel: "restore-2",
|
||||
},
|
||||
},
|
||||
Spec: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSpec{
|
||||
Driver: "rbd.csi.ceph.com",
|
||||
Source: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRemaining: 1,
|
||||
expectedWarnings: false,
|
||||
},
|
||||
{
|
||||
name: "VGSC deleted even when no snapshot handles in spec",
|
||||
restore: builder.ForRestore(velerov1api.DefaultNamespace, "restore-1").Result(),
|
||||
existingVGSCs: []*volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vgsc-stub-empty",
|
||||
Labels: map[string]string{
|
||||
velerov1api.RestoreNameLabel: "restore-1",
|
||||
},
|
||||
},
|
||||
Spec: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSpec{
|
||||
Driver: "rbd.csi.ceph.com",
|
||||
Source: volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRemaining: 0,
|
||||
expectedWarnings: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClientBuilder(t).Build()
|
||||
logger := velerotest.NewLogger()
|
||||
|
||||
ctx := &finalizerContext{
|
||||
logger: logger,
|
||||
crClient: fakeClient,
|
||||
restore: tc.restore,
|
||||
resourceTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
for _, vgsc := range tc.existingVGSCs {
|
||||
require.NoError(t, fakeClient.Create(t.Context(), vgsc))
|
||||
}
|
||||
for _, vsc := range tc.existingVSCs {
|
||||
require.NoError(t, fakeClient.Create(t.Context(), vsc))
|
||||
}
|
||||
|
||||
warnings := ctx.cleanupStubVGSC()
|
||||
|
||||
if tc.expectedWarnings {
|
||||
assert.False(t, warnings.IsEmpty())
|
||||
} else {
|
||||
assert.True(t, warnings.IsEmpty(), "expected no warnings")
|
||||
}
|
||||
|
||||
remainingList := &volumegroupsnapshotv1beta2.VolumeGroupSnapshotContentList{}
|
||||
require.NoError(t, fakeClient.List(t.Context(), remainingList))
|
||||
assert.Len(t, remainingList.Items, tc.expectedRemaining)
|
||||
|
||||
// Verify remaining VGSCs don't belong to this restore
|
||||
for _, remaining := range remainingList.Items {
|
||||
assert.NotEqual(t, tc.restore.Name, remaining.Labels[velerov1api.RestoreNameLabel],
|
||||
"VGSC %s should have been deleted", remaining.Name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -129,6 +129,13 @@ func (c *scheduleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 	} else {
 		schedule.Status.Phase = velerov1.SchedulePhaseEnabled
 		schedule.Status.ValidationErrors = nil
+
+		// Compute expected interval between consecutive scheduled backup runs.
+		// Only meaningful when the cron expression is valid.
+		now := c.clock.Now()
+		nextRun := cronSchedule.Next(now)
+		nextNextRun := cronSchedule.Next(nextRun)
+		c.metrics.SetScheduleExpectedIntervalSeconds(schedule.Name, nextNextRun.Sub(nextRun).Seconds())
 	}
 
 	scheduleNeedsPatch := false

|
||||
fsBackup := r.dataPathMgr.GetAsyncBR(du.Name)
|
||||
if fsBackup == nil {
|
||||
r.OnDataUploadCancelled(r.ctx, du.GetNamespace(), du.GetName())
|
||||
r.eventRecorder.EndingEvent(du, false, datapath.EventReasonStopped, "Data path for %s exited without start", du.Name)
|
||||
} else {
|
||||
fsBackup.Cancel()
|
||||
}
|
||||
|
||||
@@ -259,8 +259,8 @@ func TestCancelDataUpload(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "no fs backup",
|
||||
expectedEventReason: datapath.EventReasonCancelled,
|
||||
expectedEventMsg: "Data path for data upload fake-data-upload canceled",
|
||||
expectedEventReason: datapath.EventReasonStopped,
|
||||
expectedEventMsg: "Data path for fake-data-upload exited without start",
|
||||
expectedErr: datapath.ErrCancelled,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -288,6 +288,7 @@ func (r *RestoreMicroService) cancelDataDownload(dd *velerov2alpha1api.DataDownl
|
||||
fsBackup := r.dataPathMgr.GetAsyncBR(dd.Name)
|
||||
if fsBackup == nil {
|
||||
r.OnDataDownloadCancelled(r.ctx, dd.GetNamespace(), dd.GetName())
|
||||
r.eventRecorder.EndingEvent(dd, false, datapath.EventReasonStopped, "Data path for %s exited without start", dd.Name)
|
||||
} else {
|
||||
fsBackup.Cancel()
|
||||
}
|
||||
|
||||
@@ -203,8 +203,8 @@ func TestCancelDataDownload(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "no fs restore",
|
||||
expectedEventReason: datapath.EventReasonCancelled,
|
||||
expectedEventMsg: "Data path for data download fake-data-download canceled",
|
||||
expectedEventReason: datapath.EventReasonStopped,
|
||||
expectedEventMsg: "Data path for fake-data-download exited without start",
|
||||
expectedErr: datapath.ErrCancelled,
|
||||
},
|
||||
}
|
||||
|
||||